Merge branch 'touchdown' of https://bitbucket.org/yackob03/quay into touchdown

Dockerfile (deleted)
@@ -1,72 +0,0 @@
-FROM phusion/baseimage:0.9.9
-
-ENV DEBIAN_FRONTEND noninteractive
-ENV HOME /root
-
-# Needed for this fix: http://stackoverflow.com/a/21715730
-RUN apt-get update
-RUN apt-get install -y software-properties-common python-software-properties
-RUN add-apt-repository ppa:chris-lea/node.js
-
-# Install the dependencies.
-RUN apt-get update
-
-# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
-RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62-dev libevent-dev gdebi-core g++ libmagic1 libssl1.0.0
-
-# PhantomJS
-RUN apt-get install -y libfreetype6 libfreetype6-dev fontconfig
-ADD https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.7-linux-x86_64.tar.bz2 phantomjs.tar.bz2
-RUN tar xjf phantomjs.tar.bz2 && ln -s `pwd`/phantomjs*/bin/phantomjs /usr/bin/phantomjs
-
-# Grunt
-RUN apt-get install -y nodejs
-RUN npm install -g grunt-cli
-
-ADD binary_dependencies binary_dependencies
-RUN gdebi --n binary_dependencies/*.deb
-
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-ADD requirements.txt requirements.txt
-RUN virtualenv --distribute venv
-RUN venv/bin/pip install -r requirements.txt
-
-ADD auth auth
-ADD buildstatus buildstatus
-ADD conf conf
-ADD data data
-ADD endpoints endpoints
-ADD features features
-ADD grunt grunt
-ADD screenshots screenshots
-ADD static static
-ADD storage storage
-ADD templates templates
-ADD util util
-ADD workers workers
-
-ADD app.py app.py
-ADD application.py application.py
-ADD config.py config.py
-ADD initdb.py initdb.py
-
-ADD conf/init/mklogsdir.sh /etc/my_init.d/
-ADD conf/init/gunicorn.sh /etc/service/gunicorn/run
-ADD conf/init/nginx.sh /etc/service/nginx/run
-ADD conf/init/diffsworker.sh /etc/service/diffsworker/run
-ADD conf/init/webhookworker.sh /etc/service/webhookworker/run
-
-RUN cd grunt && npm install
-RUN cd grunt && grunt
-
-# Add the tests last because they're prone to accidental changes, then run them
-ADD test test
-RUN TEST=true venv/bin/python -m unittest discover
-
-RUN rm -rf /conf/stack
-VOLUME ["/conf/stack", "/mnt/logs"]
-
-EXPOSE 443 80
-
-CMD ["/sbin/my_init"]

Dockerfile (new symbolic link)
@@ -0,0 +1 @@
+Dockerfile.web

Dockerfile.buildworker (new file)
@@ -0,0 +1,48 @@
+FROM phusion/baseimage:0.9.10
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV HOME /root
+
+RUN apt-get update
+RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62-dev libevent-dev gdebi-core g++ libmagic1
+
+### End common section ###
+
+RUN apt-get install -y lxc aufs-tools
+
+RUN usermod -v 100000-200000 -w 100000-200000 root
+
+ADD binary_dependencies/builder binary_dependencies/builder
+
+RUN gdebi --n binary_dependencies/builder/*.deb
+
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+ADD requirements.txt requirements.txt
+RUN virtualenv --distribute venv
+RUN venv/bin/pip install -r requirements.txt
+
+ADD buildstatus buildstatus
+ADD data data
+ADD features features
+ADD storage storage
+ADD util util
+ADD workers workers
+
+ADD app.py app.py
+ADD config.py config.py
+
+# Remove this if we ever stop depending on test data for the default config
+ADD test test
+
+ADD conf conf
+RUN rm -rf /conf/stack
+
+ADD conf/init/svlogd_config /svlogd_config
+ADD conf/init/preplogsdir.sh /etc/my_init.d/
+ADD conf/init/tutumdocker /etc/service/tutumdocker
+ADD conf/init/dockerfilebuild /etc/service/dockerfilebuild
+
+VOLUME ["/var/lib/docker", "/var/lib/lxc", "/conf/stack", "/var/log"]
+
+CMD ["/sbin/my_init"]

Dockerfile.web (new file)
@@ -0,0 +1,67 @@
+FROM phusion/baseimage:0.9.10
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV HOME /root
+
+# Install the dependencies.
+RUN apt-get update
+
+# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
+RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62-dev libevent-dev gdebi-core g++ libmagic1
+
+# PhantomJS
+RUN apt-get install -y phantomjs
+
+# Grunt
+RUN apt-get install -y nodejs npm
+RUN ln -s /usr/bin/nodejs /usr/bin/node
+RUN npm install -g grunt-cli
+
+ADD binary_dependencies binary_dependencies
+RUN gdebi --n binary_dependencies/*.deb
+
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+ADD requirements.txt requirements.txt
+RUN virtualenv --distribute venv
+RUN venv/bin/pip install -r requirements.txt
+
+ADD auth auth
+ADD buildstatus buildstatus
+ADD conf conf
+ADD data data
+ADD endpoints endpoints
+ADD features features
+ADD grunt grunt
+ADD screenshots screenshots
+ADD static static
+ADD storage storage
+ADD templates templates
+ADD util util
+ADD workers workers
+
+ADD app.py app.py
+ADD application.py application.py
+ADD config.py config.py
+ADD initdb.py initdb.py
+
+ADD conf/init/svlogd_config /svlogd_config
+ADD conf/init/preplogsdir.sh /etc/my_init.d/
+ADD conf/init/gunicorn /etc/service/gunicorn
+ADD conf/init/nginx /etc/service/nginx
+ADD conf/init/diffsworker /etc/service/diffsworker
+ADD conf/init/webhookworker /etc/service/webhookworker
+
+RUN cd grunt && npm install
+RUN cd grunt && grunt
+
+# Add the tests last because they're prone to accidental changes, then run them
+ADD test test
+RUN TEST=true venv/bin/python -m unittest discover
+
+RUN rm -rf /conf/stack
+VOLUME ["/conf/stack", "/var/log"]
+
+EXPOSE 443 80
+
+CMD ["/sbin/my_init"]

@@ -3,8 +3,10 @@ to build and upload quay to quay:
 ```
 curl -s https://get.docker.io/ubuntu/ | sudo sh
 sudo apt-get update && sudo apt-get install -y git
-git clone git clone https://bitbucket.org/yackob03/quay.git
+git clone https://bitbucket.org/yackob03/quay.git
 cd quay
+rm Dockerfile
+ln -s Dockerfile.web Dockerfile
 sudo docker build -t quay.io/quay/quay .
 sudo docker push quay.io/quay/quay
 ```
@@ -19,7 +21,7 @@ cd gantryd
 cat requirements.system | xargs sudo apt-get install -y
 virtualenv --distribute venv
 venv/bin/pip install -r requirements.txt
-sudo docker login -p 9Y1PX7D3IE4KPSGCIALH17EM5V3ZTMP8CNNHJNXAQ2NJGAS48BDH8J1PUOZ869ML -u 'quay+deploy' -e notused quay.io
+sudo docker login -p 9Y1PX7D3IE4KPSGCIALH17EM5V3ZTMP8CNNHJNXAQ2NJGAS48BDH8J1PUOZ869ML -u 'quay+deploy' -e notused staging.quay.io
 ```

 start the quay processes:
@@ -27,8 +29,7 @@ start the quay processes:
 ```
 cd ~
 git clone https://bitbucket.org/yackob03/quayconfig.git
-sudo docker pull quay.io/quay/quay
-sudo mkdir -p /mnt/logs/
+sudo docker pull staging.quay.io/quay/quay
 cd ~/gantryd
 sudo venv/bin/python gantry.py ../quayconfig/production/gantry.json update quay
 ```

app.py
@@ -13,6 +13,7 @@ from data.userfiles import Userfiles
 from util.analytics import Analytics
 from util.exceptionlog import Sentry
 from data.billing import Billing
+from data.buildlogs import BuildLogs


 OVERRIDE_CONFIG_FILENAME = 'conf/stack/config.py'
@@ -46,3 +47,4 @@ userfiles = Userfiles(app)
 analytics = Analytics(app)
 billing = Billing(app)
 sentry = Sentry(app)
+build_logs = BuildLogs(app)
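
For context, a minimal sketch of how the new module-level extension is consumed elsewhere in this change set (it mirrors the imports added to endpoints/web.py and workers/dockerfilebuild.py below; `check_health` is the method added in data/buildlogs.py):

```python
# Anywhere that runs after app.py has been imported:
from app import build_logs

# Proxies through BuildLogs.__getattr__ to the configured backend,
# which is RedisBuildLogs unless overridden in config.
print build_logs.check_health()
```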

@@ -67,5 +67,5 @@ application.teardown_request(close_db)
 application.request_class = RequestWithId

 if __name__ == '__main__':
-  logging.config.fileConfig('conf/logging_local.conf', disable_existing_loggers=False)
+  logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
   application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')

binary_dependencies/nginx_1.4.2-nobuffer-3_amd64.deb (new binary file)

@@ -2,6 +2,6 @@ bind = 'unix:/tmp/gunicorn.sock'
 workers = 8
 worker_class = 'gevent'
 timeout = 2000
-pidfile = '/tmp/gunicorn.pid'
 logconfig = 'conf/logging.conf'
 pythonpath = '.'
+preload_app = True

@@ -3,5 +3,5 @@ workers = 2
 worker_class = 'gevent'
 timeout = 2000
 daemon = False
-logconfig = 'conf/logging_local.conf'
+logconfig = 'conf/logging.conf'
 pythonpath = '.'

@@ -1,20 +1,8 @@
-log_format logstash_json '{ "@timestamp": "$time_iso8601", '
-                         '"@fields": { '
-                         '"remote_addr": "$remote_addr", '
-                         '"remote_user": "$remote_user", '
-                         '"body_bytes_sent": "$body_bytes_sent", '
-                         '"request_time": "$request_time", '
-                         '"status": "$status", '
-                         '"request": "$request", '
-                         '"request_method": "$request_method", '
-                         '"http_referrer": "$http_referer", '
-                         '"http_user_agent": "$http_user_agent" } }';
-
 types_hash_max_size 2048;
 include /usr/local/nginx/conf/mime.types.default;

 default_type application/octet-stream;
-access_log /mnt/logs/nginx.access.log logstash_json;
+access_log /var/log/nginx/nginx.access.log;
 sendfile on;

 gzip on;
conf/init/diffsworker/log/run
Executable file
|
@ -0,0 +1,2 @@
|
||||||
|
#!/bin/sh
|
||||||
|
exec svlogd /var/log/diffsworker/
|
|
@ -3,6 +3,6 @@
|
||||||
echo 'Starting diffs worker'
|
echo 'Starting diffs worker'
|
||||||
|
|
||||||
cd /
|
cd /
|
||||||
venv/bin/python -m workers.diffsworker --log=/mnt/logs/diffsworker.log
|
venv/bin/python -m workers.diffsworker
|
||||||
|
|
||||||
echo 'Diffs worker exited'
|
echo 'Diffs worker exited'
|

conf/init/dockerfilebuild/log/run (new executable file)
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec svlogd /var/log/dockerfilebuild/

conf/init/dockerfilebuild/run (new executable file)
@@ -0,0 +1,6 @@
+#! /bin/bash
+
+sv start tutumdocker || exit 1
+
+cd /
+venv/bin/python -m workers.dockerfilebuild

conf/init/gunicorn/log/run (new executable file)
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec svlogd /var/log/gunicorn/

@@ -1,4 +0,0 @@
-#! /bin/sh
-
-echo 'Creating logs directory'
-mkdir -p /mnt/logs

conf/init/nginx/log/run (new executable file)
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec svlogd /var/log/nginx/

@@ -5,10 +5,10 @@ echo 'Starting nginx'
 if [ -f /conf/stack/ssl.key ]
 then
     echo "Using HTTPS"
-    /usr/local/nginx/sbin/nginx -c /conf/nginx-enterprise.conf
+    /usr/local/nginx/sbin/nginx -c /conf/nginx.conf
 else
     echo "No SSL key provided, using HTTP"
-    /usr/local/nginx/sbin/nginx -c /conf/nginx-enterprise-nossl.conf
+    /usr/local/nginx/sbin/nginx -c /conf/nginx-nossl.conf
 fi

 echo 'Nginx exited'

conf/init/preplogsdir.sh (new executable file)
@@ -0,0 +1,8 @@
+#! /bin/sh
+
+echo 'Linking config files to logs directory'
+for svc in `ls /etc/service/`
+do
+  mkdir -p /var/log/$svc
+  ln -s /svlogd_config /var/log/$svc/config
+done

conf/init/svlogd_config (new file)
@@ -0,0 +1,2 @@
+s100000000
+t86400
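
For reviewers unfamiliar with runit: preplogsdir.sh above links this file in as every service's svlogd config. Per the svlogd documentation, `sNUM` caps the size of the `current` log file in bytes and `tNUM` sets its maximum age in seconds, so these two directives keep each service log under roughly 100 MB and rotate it at least daily, replacing the logrotate rules deleted further down.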

conf/init/tutumdocker/log/run (new executable file)
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec svlogd /var/log/tutumdocker/

conf/init/tutumdocker/run (new executable file)
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/sys/fs/cgroup
+
+[ -d $CGROUP ] ||
+    mkdir $CGROUP
+
+mountpoint -q $CGROUP ||
+    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
+        echo "Could not make a tmpfs mount. Did you use -privileged?"
+        exit 1
+    }
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+    mount -t securityfs none /sys/kernel/security || {
+        echo "Could not mount /sys/kernel/security."
+        echo "AppArmor detection and -privileged mode might break."
+    }
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
+do
+    [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
+    mountpoint -q $CGROUP/$SUBSYS ||
+        mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+
+    # The two following sections address a bug which manifests itself
+    # by a cryptic "lxc-start: no ns_cgroup option specified" when
+    # trying to start containers within a container.
+    # The bug seems to appear when the cgroup hierarchies are not
+    # mounted on the exact same directories in the host, and in the
+    # container.
+
+    # Named, control-less cgroups are mounted with "-o name=foo"
+    # (and appear as such under /proc/<pid>/cgroup) but are usually
+    # mounted on a directory named "foo" (without the "name=" prefix).
+    # Systemd and OpenRC (and possibly others) both create such a
+    # cgroup. To avoid the aforementioned bug, we symlink "foo" to
+    # "name=foo". This shouldn't have any adverse effect.
+    echo $SUBSYS | grep -q ^name= && {
+        NAME=$(echo $SUBSYS | sed s/^name=//)
+        ln -s $SUBSYS $CGROUP/$NAME
+    }
+
+    # Likewise, on at least one system, it has been reported that
+    # systemd would mount the CPU and CPU accounting controllers
+    # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+    # but on a directory called "cpu,cpuacct" (note the inversion
+    # in the order of the groups). This tries to work around it.
+    [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+grep -q :devices: /proc/1/cgroup ||
+    echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
+grep -qw devices /proc/1/cgroup ||
+    echo "WARNING: it looks like the 'devices' cgroup is not mounted."
+
+# Now, close extraneous file descriptors.
+pushd /proc/self/fd >/dev/null
+for FD in *
+do
+    case "$FD" in
+    # Keep stdin/stdout/stderr
+    [012])
+        ;;
+    # Nuke everything else
+    *)
+        eval exec "$FD>&-"
+        ;;
+    esac
+done
+popd >/dev/null
+
+
+# If a pidfile is still around (for example after a container restart),
+# delete it so that docker can start.
+rm -rf /var/run/docker.pid
+
+chmod 777 /var/lib/lxc
+chmod 777 /var/lib/docker
+
+
+# If we were given a PORT environment variable, start as a simple daemon;
+# otherwise, spawn a shell as well
+if [ "$PORT" ]
+then
+    exec docker -d -H 0.0.0.0:$PORT
+else
+    docker -d -D -e lxc 2>&1
+fi

conf/init/webhookworker/log/run (new executable file)
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec svlogd -t /var/log/webhookworker/

@@ -3,6 +3,6 @@
 echo 'Starting webhook worker'

 cd /
-venv/bin/python -m workers.webhookworker --log=/mnt/logs/webhookworker.log
+venv/bin/python -m workers.webhookworker

 echo 'Webhook worker exited'

@@ -1,38 +1,38 @@
 [loggers]
-keys=root, gunicorn.error, gunicorn.access
+keys=root, gunicorn.error, gunicorn.access, application.profiler

 [handlers]
-keys=error_file
+keys=console

 [formatters]
 keys=generic

 [logger_application.profiler]
 level=DEBUG
-handlers=error_file
+handlers=console
 propagate=0
 qualname=application.profiler

 [logger_root]
 level=DEBUG
-handlers=error_file
+handlers=console

 [logger_gunicorn.error]
 level=INFO
-handlers=error_file
+handlers=console
 propagate=1
 qualname=gunicorn.error

 [logger_gunicorn.access]
 level=INFO
-handlers=error_file
+handlers=console
 propagate=0
 qualname=gunicorn.access

-[handler_error_file]
-class=logging.FileHandler
+[handler_console]
+class=StreamHandler
 formatter=generic
-args=('/mnt/logs/application.log',)
+args=(sys.stdout, )

 [formatter_generic]
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s

@@ -1,39 +0,0 @@
-[loggers]
-keys=root, gunicorn.error, gunicorn.access, application.profiler
-
-[handlers]
-keys=console
-
-[formatters]
-keys=generic
-
-[logger_application.profiler]
-level=DEBUG
-handlers=console
-propagate=0
-qualname=application.profiler
-
-[logger_root]
-level=DEBUG
-handlers=console
-
-[logger_gunicorn.error]
-level=INFO
-handlers=console
-propagate=1
-qualname=gunicorn.error
-
-[logger_gunicorn.access]
-level=INFO
-handlers=console
-propagate=0
-qualname=gunicorn.access
-
-[handler_console]
-class=StreamHandler
-formatter=generic
-args=(sys.stdout, )
-
-[formatter_generic]
-format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
-class=logging.Formatter

@@ -1,41 +0,0 @@
-/mnt/logs/nginx.access.log {
-    daily
-    rotate 7
-    compress
-    delaycompress
-    missingok
-    notifempty
-    create 644 root root
-
-    postrotate
-        kill -USR1 `cat /mnt/logs/nginx.pid`
-    endscript
-}
-
-/mnt/logs/nginx.error.log {
-    daily
-    rotate 7
-    compress
-    delaycompress
-    missingok
-    notifempty
-    create 644 root root
-
-    postrotate
-        kill -USR1 `cat /mnt/logs/nginx.pid`
-    endscript
-}
-
-/mnt/logs/application.log {
-    daily
-    rotate 7
-    compress
-    delaycompress
-    missingok
-    notifempty
-    create 644 ubuntu ubuntu
-
-    postrotate
-        kill -USR1 `cat /mnt/logs/gunicorn.pid`
-    endscript
-}

@@ -1,5 +1,5 @@
 pid /tmp/nginx.pid;
-error_log /mnt/logs/nginx.error.log;
+error_log /var/log/nginx/nginx.error.log;

 events {
     worker_connections 1024;

@@ -1,7 +1,10 @@
 client_max_body_size 8G;
-client_body_temp_path /mnt/logs/client_body 1 2;
+client_body_temp_path /var/log/nginx/client_body 1 2;
 server_name _;

+set_real_ip_from 172.17.0.0/16;
+real_ip_header X-Forwarded-For;
+
 keepalive_timeout 5;

 if ($args ~ "_escaped_fragment_") {
@@ -20,5 +23,5 @@ location / {

     proxy_pass http://app_server;
     proxy_read_timeout 2000;
-    proxy_temp_path /mnt/logs/proxy_temp 1 2;
+    proxy_temp_path /var/log/nginx/proxy_temp 1 2;
 }
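
A note on the two new directives: 172.17.0.0/16 is Docker's default bridge subnet, so `set_real_ip_from` plus `real_ip_header X-Forwarded-For` makes nginx treat the address carried in `X-Forwarded-For` as the client address only when the request arrives from the container network, while headers from anywhere else are ignored.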

@@ -73,7 +73,7 @@ class DefaultConfig(object):
   STORAGE_PATH = 'test/data/registry'

   # Build logs
-  BUILDLOGS = BuildLogs('logs.quay.io') # Change me
+  BUILDLOGS_OPTIONS = ['logs.quay.io']

   # Real-time user events
   USER_EVENTS = UserEventBuilder('logs.quay.io')

@@ -1,10 +1,12 @@
 import redis
 import json

+from util.dynamic import import_class
+
 class BuildStatusRetrievalError(Exception):
   pass

-class BuildLogs(object):
+class RedisBuildLogs(object):
   ERROR = 'error'
   COMMAND = 'command'
   PHASE = 'phase'
@@ -70,3 +72,37 @@ class BuildLogs(object):
       raise BuildStatusRetrievalError('Cannot retrieve build status')

     return json.loads(fetched) if fetched else None
+
+  def check_health(self):
+    try:
+      return self._redis.ping() == True
+    except redis.ConnectionError:
+      return False
+
+
+class BuildLogs(object):
+  def __init__(self, app=None):
+    self.app = app
+    if app is not None:
+      self.state = self.init_app(app)
+    else:
+      self.state = None
+
+  def init_app(self, app):
+    buildlogs_options = app.config.get('BUILDLOGS_OPTIONS', [])
+    buildlogs_import = app.config.get('BUILDLOGS_MODULE_AND_CLASS', None)
+
+    if buildlogs_import is None:
+      klass = RedisBuildLogs
+    else:
+      klass = import_class(buildlogs_import[0], buildlogs_import[1])
+
+    buildlogs = klass(*buildlogs_options)
+
+    # register extension with app
+    app.extensions = getattr(app, 'extensions', {})
+    app.extensions['buildlogs'] = buildlogs
+    return buildlogs
+
+  def __getattr__(self, name):
+    return getattr(self.state, name, None)
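
The wrapper above resolves its backend from Flask config, which is what lets test/testconfig.py (further down) swap in `TestBuildLogs`. A minimal sketch of how an override in conf/stack/config.py would look; the module, class, and option values in the commented lines are illustrative, not part of this change:

```python
# Default behavior: with no BUILDLOGS_MODULE_AND_CLASS set, init_app picks
# RedisBuildLogs and instantiates it as RedisBuildLogs('logs.quay.io').
BUILDLOGS_OPTIONS = ['logs.quay.io']

# Hypothetical override: any class reachable via util.dynamic.import_class.
# BUILDLOGS_MODULE_AND_CLASS = ('mycompany.logsink', 'logsink.NullBuildLogs')
# BUILDLOGS_OPTIONS = []
```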

@@ -1647,3 +1647,11 @@ def delete_user(user):
   user.delete_instance(recursive=True, delete_nullable=True)

   # TODO: also delete any repository data associated
+
+
+def check_health():
+  # We will connect to the db, check that it contains some log entry kinds
+  try:
+    found_count = LogEntryKind.select().count()
+    return found_count > 0
+  except:
+    return False

@@ -3,7 +3,7 @@ import json

 from flask import request

-from app import app, userfiles as user_files
+from app import app, userfiles as user_files, build_logs
 from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
                            require_repo_read, require_repo_write, validate_json_request,
                            ApiResource, internal_only, format_date, api, Unauthorized, NotFound)
@@ -17,7 +17,6 @@ from util.names import parse_robot_username


 logger = logging.getLogger(__name__)
-build_logs = app.config['BUILDLOGS']


 def get_trigger_config(trigger):

@@ -239,7 +239,7 @@ def put_image_checksum(namespace, repository, image_id):
     abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum', image_id=image_id)

   if not session.get('checksum'):
-    abort(400, 'Checksum not found in Cookie for image %(imaage_id)s',
+    abort(400, 'Checksum not found in Cookie for image %(image_id)s',
           issue='missing-checksum-cookie', image_id=image_id)

   profile.debug('Looking up repo image')

@@ -315,8 +315,8 @@ class GithubBuildTrigger(BuildTrigger):

   def handle_trigger_request(self, request, auth_token, config):
     payload = request.get_json()
-    if not payload:
+    if not payload or not 'head_commit' in payload:
       raise SkipRequestException()

     if 'zen' in payload:
       raise ValidationRequestException()

@@ -2,13 +2,13 @@ import logging
 import os

 from flask import (abort, redirect, request, url_for, make_response, Response,
-                   Blueprint, send_from_directory)
+                   Blueprint, send_from_directory, jsonify)
 from flask.ext.login import current_user
 from urlparse import urlparse

 from data import model
 from data.model.oauth import DatabaseAuthorizationProvider
-from app import app, billing as stripe
+from app import app, billing as stripe, build_logs
 from auth.auth import require_session_login
 from auth.permissions import AdministerOrganizationPermission
 from util.invoice import renderInvoiceToPdf
@@ -146,7 +146,16 @@ def v1():
 @web.route('/status', methods=['GET'])
 @no_cache
 def status():
-  return make_response('Healthy')
+  db_healthy = model.check_health()
+  buildlogs_healthy = build_logs.check_health()
+
+  response = jsonify({
+    'db_healthy': db_healthy,
+    'buildlogs_healthy': buildlogs_healthy,
+  })
+  response.status_code = 200 if db_healthy and buildlogs_healthy else 503

+  return response


 @web.route('/tos', methods=['GET'])
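
Because `/status` now returns a 503 whenever either dependency fails, an external monitor can key on the status code alone. A quick probe sketch (Python 2 to match the codebase; the host name is illustrative):

```python
import json
import urllib2

try:
  resp = urllib2.urlopen('https://quay.example.com/status')
  # e.g. {u'db_healthy': True, u'buildlogs_healthy': True}
  print json.loads(resp.read())
except urllib2.HTTPError as err:
  # A 503 from an unhealthy db or redis surfaces here.
  print 'unhealthy: HTTP %d' % err.code
```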

initdb.py
@@ -344,11 +344,6 @@ def populate_database():
     'docker_tags': ['latest'],
     'build_subdir': '',
   }
-  build = model.create_repository_build(building, token, job_config,
-                                        '701dcc3724fb4f2ea6c31400528343cd',
-                                        'build-name', trigger)
-  build.uuid = 'deadbeef-dead-beef-dead-beefdeadbeef'
-  build.save()

   build2 = model.create_repository_build(building, token, job_config,
                                          '68daeebd-a5b9-457f-80a0-4363b882f8ea',
@@ -362,6 +357,12 @@ def populate_database():
   build3.uuid = 'deadduck-dead-duck-dead-duckdeadduck'
   build3.save()

+  build = model.create_repository_build(building, token, job_config,
+                                        '701dcc3724fb4f2ea6c31400528343cd',
+                                        'build-name', trigger)
+  build.uuid = 'deadbeef-dead-beef-dead-beefdeadbeef'
+  build.save()
+
   org = model.create_organization('buynlarge', 'quay@devtable.com',
                                   new_user_1)
   org.stripe_id = TEST_STRIPE_ID

@@ -1,4 +1,4 @@
-var width = 1024;
+var width = 1060;
 var height = 768;

 var casper = require('casper').create({
@@ -76,11 +76,14 @@ casper.then(function() {
   this.capture(outputDir + 'repo-view.png');
 });

-casper.then(function() {
-  this.log('Generating repository changes screenshot.');
+casper.thenClick('a[data-image="c3d710edbd3b"]', function() {
+  this.waitForText('And 3048 more...', function() {
+    this.capture(outputDir + 'image-view.png');
+  });
 });

-casper.thenClick('#current-image dd a', function() {
+casper.thenClick('.image-link', function() {
   this.waitForSelector('.result-count', function() {
     this.capture(outputDir + 'repo-changes.png', {
       top: 0,
@@ -89,7 +92,8 @@ casper.thenClick('#current-image dd a', function() {
       height: height
     });
   });
-})
+});


 casper.then(function() {
   this.log('Generating repository admin screenshot.');

(10 screenshot PNGs regenerated; binary image changes not shown)

@@ -1,5 +1,5 @@
 var TEAM_PATTERN = '^[a-zA-Z][a-zA-Z0-9]+$';
-var ROBOT_PATTERN = '^[a-zA-Z][a-zA-Z0-9]+$';
+var ROBOT_PATTERN = '^[a-zA-Z][a-zA-Z0-9]{3,29}$';

 function getRestUrl(args) {
   var url = '';
@@ -61,7 +61,7 @@ function getFirstTextLine(commentString) {
 function createRobotAccount(ApiService, is_org, orgname, name, callback) {
   ApiService.createRobot(is_org ? orgname : null, null, {'robot_shortname': name}).then(callback, function(resp) {
     bootbox.dialog({
-      "message": resp.data ? resp.data : 'The robot account could not be created',
+      "message": resp.data ? resp.data['message'] : 'The robot account could not be created',
       "title": "Cannot create robot account",
       "buttons": {
         "close": {
@@ -84,7 +84,7 @@ function createOrganizationTeam(ApiService, orgname, teamname, callback) {
     'teamname': teamname
   };

-  ApiService.updateOrganizationTeam(data, params).then(callback, function() {
+  ApiService.updateOrganizationTeam(data, params).then(callback, function(resp) {
     bootbox.dialog({
       "message": resp.data ? resp.data : 'The team could not be created',
       "title": "Cannot create team",
@@ -3829,6 +3829,8 @@ quayApp.directive('setupTriggerDialog', function () {
       var modalSetup = false;

       $scope.show = function() {
+        if (!$scope.trigger || !$scope.repository) { return; }
+
         $scope.activating = false;
         $scope.pullEntity = null;
         $scope.publicPull = true;
@@ -3838,7 +3840,7 @@ quayApp.directive('setupTriggerDialog', function () {

         if (!modalSetup) {
           $('#setupTriggerModal').on('hidden.bs.modal', function () {
-            if ($scope.trigger['is_active']) { return; }
+            if (!$scope.trigger || $scope.trigger['is_active']) { return; }

             $scope.$apply(function() {
               $scope.cancelSetupTrigger();

@@ -4,11 +4,16 @@ $.fn.clipboardCopy = function() {
   clip.on('complete', function() {
     // Resets the animation.
     var elem = $('#clipboardCopied')[0];
+    if (!elem) {
+      return;
+    }
+
     elem.style.display = 'none';
     elem.classList.remove('animated');

     // Show the notification.
     setTimeout(function() {
+      if (!elem) { return; }
       elem.style.display = 'inline-block';
       elem.classList.add('animated');
     }, 10);

@@ -1071,7 +1076,6 @@ function RepoBuildCtrl($scope, Restangular, ApiService, $routeParams, $rootScope
     $scope.currentParentEntry = null;

     $scope.currentBuild = build;
-    $scope.currentBuildIndex = index;

     if (opt_updateURL) {
       if (build) {
@@ -1149,8 +1153,18 @@ function RepoBuildCtrl($scope, Restangular, ApiService, $routeParams, $rootScope
     ApiService.getRepoBuildStatus(null, params, true).then(function(resp) {
       // Note: We use extend here rather than replacing as Angular is depending on the
       // root build object to remain the same object.
-      $.extend(true, $scope.builds[$scope.currentBuildIndex], resp);
-      var currentBuild = $scope.builds[$scope.currentBuildIndex];
+      var matchingBuilds = $.grep($scope.builds, function(elem) {
+        return elem['id'] == resp['id']
+      });
+
+      var currentBuild = matchingBuilds.length > 0 ? matchingBuilds[0] : null;
+      if (currentBuild) {
+        currentBuild = $.extend(true, currentBuild, resp);
+      } else {
+        currentBuild = resp;
+        $scope.builds.push(currentBuild);
+      }

       checkPollTimer();

       // Load the updated logs for the build.

@@ -1239,12 +1253,12 @@ function RepoAdminCtrl($scope, Restangular, ApiService, KeyService, $routeParams
   $scope.getBadgeFormat = function(format, repo) {
     if (!repo) { return; }

-    var imageUrl = Config.getUrl('/' + namespace + '/' + name + '/status');
+    var imageUrl = Config.getUrl('/repository/' + namespace + '/' + name + '/status');
     if (!$scope.repo.is_public) {
       imageUrl += '?token=' + $scope.repo.status_token;
     }

-    var linkUrl = Config.getUrl('/' + namespace + '/' + name);
+    var linkUrl = Config.getUrl('/repository/' + namespace + '/' + name);

     switch (format) {
       case 'svg':

@@ -1642,12 +1656,14 @@ function UserAdminCtrl($scope, $timeout, $location, ApiService, PlanService, Use

     $scope.cuser = jQuery.extend({}, user);

-    for (var i = 0; i < $scope.cuser.logins.length; i++) {
-      if ($scope.cuser.logins[i].service == 'github') {
-        var githubId = $scope.cuser.logins[i].service_identifier;
-        $http.get('https://api.github.com/user/' + githubId).success(function(resp) {
-          $scope.githubLogin = resp.login;
-        });
+    if ($scope.cuser.logins) {
+      for (var i = 0; i < $scope.cuser.logins.length; i++) {
+        if ($scope.cuser.logins[i].service == 'github') {
+          var githubId = $scope.cuser.logins[i].service_identifier;
+          $http.get('https://api.github.com/user/' + githubId).success(function(resp) {
+            $scope.githubLogin = resp.login;
+          });
+        }
       }
     }
   });

@@ -1940,7 +1956,7 @@ function NewRepoCtrl($scope, $location, $http, $timeout, UserService, ApiService
   $scope.githubClientId = KeyService.githubClientId;

   $scope.repo = {
-    'is_public': 1,
+    'is_public': 0,
     'description': '',
     'initialize': ''
   };
@@ -1959,9 +1975,6 @@ function NewRepoCtrl($scope, $location, $http, $timeout, UserService, ApiService

     // Determine whether private repositories are allowed for the namespace.
     checkPrivateAllowed();
-
-    // Default to private repos for organizations.
-    $scope.repo.is_public = isUserNamespace ? '1' : '0';
   });

   $scope.changeNamespace = function(namespace) {

@@ -166,7 +166,8 @@
   <div class="col-md-8">
     <button class="btn btn-large btn-success" type="submit"
             ng-disabled="uploading || building || newRepoForm.$invalid || (repo.is_public == '0' && (planRequired || checkingPlan)) || ((repo.initialize == 'dockerfile' || repo.initialize == 'zipfile') && !hasDockerfile)">
-      Create Repository
+      <i class="fa fa-large" ng-class="repo.is_public == '1' ? 'fa-unlock' : 'fa-lock'" style="margin-right: 4px"></i>
+      Create {{ repo.is_public == '1' ? 'Public' : 'Private' }} Repository
     </button>
   </div>
 </div>

@@ -177,7 +177,12 @@
           <span class="size-bar" style="{{ 'width:' + (image.size / getTotalSize(currentTag)) * 100 + '%' }}"
                 bs-tooltip="" data-title="{{ image.size | bytes }}"></span>
         </span>
-        <span class="size-title"><a href="javascript:void(0)" ng-click="setImage(image.id, true)">{{ image.id.substr(0, 12) }}</a></span>
+        <span class="size-title">
+          <a class="image-size-link" href="javascript:void(0)" ng-click="setImage(image.id, true)"
+             data-image="{{ image.id.substr(0, 12) }}">
+            {{ image.id.substr(0, 12) }}
+          </a>
+        </span>
       </div>
     </div>
@@ -199,7 +204,7 @@
     <div class="image-section">
       <i class="fa fa-code section-icon" bs-tooltip="tooltip.title" data-title="Full Image ID"></i>
       <span class="section-info">
-        <a href="{{'/repository/' + repo.namespace + '/' + repo.name + '/image/' + currentImage.id}}">{{ currentImage.id }}</a>
+        <a class="image-link" href="{{'/repository/' + repo.namespace + '/' + repo.name + '/image/' + currentImage.id}}">{{ currentImage.id }}</a>
       </span>
     </div>

@@ -68,6 +68,8 @@ CSRF_TOKEN_KEY = '_csrf_token'
 CSRF_TOKEN = '123csrfforme'

 class ApiTestCase(unittest.TestCase):
+  maxDiff = None
+
   @staticmethod
   def _add_csrf(without_csrf):
     parts = urlparse(without_csrf)
@@ -968,7 +970,7 @@ class TestRepoBuilds(ApiTestCase):
                            params=dict(repository=ADMIN_ACCESS_USER + '/building'))

     assert len(json['builds']) > 0
-    build = json['builds'][0]
+    build = json['builds'][-1]

     assert 'id' in build
     assert 'status' in build

@@ -24,7 +24,8 @@ class TestConfig(DefaultConfig):

   STORAGE_TYPE = 'FakeStorage'

-  BUILDLOGS = TestBuildLogs('logs.quay.io', 'devtable', 'building',
-                            'deadbeef-dead-beef-dead-beefdeadbeef')
+  BUILDLOGS_MODULE_AND_CLASS = ('test.testlogs', 'testlogs.TestBuildLogs')
+  BUILDLOGS_OPTIONS = ['logs.quay.io', 'devtable', 'building',
+                       'deadbeef-dead-beef-dead-beefdeadbeef']

   USERFILES_TYPE = 'FakeUserfiles'

@@ -5,7 +5,7 @@ from loremipsum import get_sentence
 from functools import wraps
 from copy import deepcopy

-from data.buildlogs import BuildLogs
+from data.buildlogs import RedisBuildLogs


 logger = logging.getLogger(__name__)
@@ -32,7 +32,7 @@ def maybe_advance_script(is_get_status=False):
   return inner_advance


-class TestBuildLogs(BuildLogs):
+class TestBuildLogs(RedisBuildLogs):
   COMMAND_TYPES = ['FROM', 'MAINTAINER', 'RUN', 'CMD', 'EXPOSE', 'ENV', 'ADD',
                    'ENTRYPOINT', 'VOLUME', 'USER', 'WORKDIR']
   STATUS_TEMPLATE = {

util/dynamic.py (new file)
@@ -0,0 +1,7 @@
+def import_class(module_name, class_name):
+  """ Import a class given the specified module name and class name. """
+  klass = __import__(module_name)
+  class_segments = class_name.split('.')
+  for segment in class_segments:
+    klass = getattr(klass, segment)
+  return klass
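
`import_class` walks attribute segments off the top-level package returned by `__import__`, which is why `BUILDLOGS_MODULE_AND_CLASS` in test/testconfig.py repeats the submodule name in the class path. For example:

```python
from util.dynamic import import_class

# __import__('test.testlogs') returns the top-level 'test' package; the
# 'testlogs.TestBuildLogs' path is then resolved with getattr, segment by
# segment. Equivalent to: from test.testlogs import TestBuildLogs
klass = import_class('test.testlogs', 'testlogs.TestBuildLogs')
assert klass.__name__ == 'TestBuildLogs'
```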

@@ -1,7 +1,15 @@
 from raven.contrib.flask import Sentry as FlaskSentry

+class FakeSentryClient(object):
+  def captureException(self, *args, **kwargs):
+    pass
+
+  def user_context(self, *args, **kwargs):
+    pass
+
 class FakeSentry(object):
-  pass
+  def __init__(self):
+    self.client = FakeSentryClient()

 class Sentry(object):
   def __init__(self, app=None):

@@ -1,37 +1,55 @@
 var system = require('system');
 var url = system.args[1] || '';
+var count = 0;

 if(url.length > 0) {
   var page = require('webpage').create();
   page.open(url, function (status) {
-    if (status == 'success') {
-      var delay, checker = (function() {
-        var html = page.evaluate(function () {
-          var found = document.getElementsByTagName('html')[0].outerHTML || '';
-          if (window.__isLoading && !window.__isLoading()) {
-            return found;
-          }
-          if (found.indexOf('404 Not Found') > 0) {
-            return found;
-          }
-          return null;
-        });
-
-        if (html) {
-          if (html.indexOf('404 Not Found') > 0) {
-            console.log('Not Found');
-            phantom.exit();
-            return;
-          }
-
-          clearTimeout(delay);
-          console.log(html);
-          phantom.exit();
-        }
-      });
-      delay = setInterval(checker, 100);
-    } else {
+    try {
+      if (status == 'success') {
+        var delay;
+        var checker = (function() {
+          count++;
+
+          if (count > 100) {
+            console.log('Not Found');
+            phantom.exit();
+            return null;
+          }
+
+          var html = page.evaluate(function () {
+            var found = document.getElementsByTagName('html')[0].outerHTML || '';
+            if (window.__isLoading && !window.__isLoading()) {
+              return found;
+            }
+            if (found.indexOf('404 Not Found') > 0) {
+              return found;
+            }
+            return null;
+          });
+
+          if (html) {
+            if (html.indexOf('404 Not Found') > 0) {
+              console.log('Not Found');
+              phantom.exit();
+              return;
+            }
+
+            clearTimeout(delay);
+            console.log(html);
+            phantom.exit();
+          }
+        });
+        delay = setInterval(checker, 100);
+      } else {
+        console.log('Not Found');
+        phantom.exit();
+      }
+    } catch (e) {
       console.log('Not Found');
       phantom.exit();
     }
   });
+} else {
+  phantom.exit();
 }

@@ -3,22 +3,27 @@ import logging

 from bs4 import BeautifulSoup


 logger = logging.getLogger(__name__)


 def render_snapshot(url):
   logger.info('Snapshotting url: %s' % url)

   out_html = subprocess.check_output(['phantomjs', '--ignore-ssl-errors=yes',
+                                      '--disk-cache=yes',
                                       'util/phantomjs-runner.js', url])

   if not out_html or out_html.strip() == 'Not Found':
     return None

   # Remove script tags
+  logger.info('Removing script tags: %s' % url)
+
   soup = BeautifulSoup(out_html.decode('utf8'))
   to_extract = soup.findAll('script')
   for item in to_extract:
     item.extract()

+  logger.info('Snapshotted url: %s' % url)
+
   return str(soup)

@@ -1,31 +1,35 @@
-to prepare a new build node host starting from a 14.04 base server:
+to build and upload the builder to quay

 ```
-sudo apt-get update
-sudo apt-get install -y git python-virtualenv python-dev phantomjs libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev libevent-dev gdebi-core lxc
-```
-
-check out the code, install the kernel, custom docker, nsexec, and reboot:
-
-```
-git clone https://bitbucket.org/yackob03/quay.git
+curl -s https://get.docker.io/ubuntu/ | sudo sh
+sudo apt-get update && sudo apt-get install -y git
+git clone https://bitbucket.org/yackob03/quay.git
 cd quay
-sudo gdebi --n binary_dependencies/builder/nsexec_1.22ubuntu1trusty1_amd64.deb
-sudo gdebi --n binary_dependencies/builder/lxc-docker-0.9.0_0.9.0-20140501212101-72572f0-dirty_amd64.deb
-sudo usermod -v 100000-200000 -w 100000-200000 root
-sudo chmod +x /var/lib/lxc
-sudo chmod +x /var/lib/docker
-cd ~
-git clone https://bitbucket.org/yackob03/quayconfig.git
-ln -s ../../quayconfig/production/ quay/conf/stack
+rm Dockerfile
+ln -s Dockerfile.buildworker Dockerfile
+sudo docker build -t quay.io/quay/builder .
+sudo docker push quay.io/quay/builder
+```
+
+to run the code from a fresh 14.04 server:
+
+```
+sudo apt-get update && sudo apt-get install -y git lxc linux-image-extra-`uname -r`
+curl -s https://get.docker.io/ubuntu/ | sudo sh
+git clone https://github.com/DevTable/gantryd.git
+cd gantryd
+cat requirements.system | xargs sudo apt-get install -y
+virtualenv --distribute venv
+venv/bin/pip install -r requirements.txt
+sudo docker login -p 9Y1PX7D3IE4KPSGCIALH17EM5V3ZTMP8CNNHJNXAQ2NJGAS48BDH8J1PUOZ869ML -u 'quay+deploy' -e notused quay.io
 ```

 start the worker

 ```
-cd quay
-virtualenv --distribute venv
-source venv/bin/activate
-pip install -r requirements.txt
-sudo venv/bin/python -m workers.dockerfilebuild -D
+cd ~
+git clone https://bitbucket.org/yackob03/quayconfig.git
+sudo docker pull quay.io/quay/builder
+cd ~/gantryd
+sudo venv/bin/python gantry.py ../quayconfig/production/gantry.json update builder
 ```

@@ -33,16 +33,7 @@ class DiffsWorker(Worker):

     return True

-parser = argparse.ArgumentParser(description='Worker daemon to compute diffs')
-parser.add_argument('--log', help='Specify the log file for the worker as a daemon.')
-args = parser.parse_args()
-
-if args.log is not None:
-  handler = logging.FileHandler(args.log)
-else:
-  handler = logging.StreamHandler()
-handler.setFormatter(formatter)
-root_logger.addHandler(handler)
+logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)

 worker = DiffsWorker(image_diff_queue)
 worker.start()
@@ -21,7 +21,7 @@ from collections import defaultdict
 from data.queue import dockerfile_build_queue
 from data import model
 from workers.worker import Worker, WorkerUnhealthyException, JobException
-from app import app, userfiles as user_files
+from app import userfiles as user_files, build_logs, sentry
 from util.safetar import safe_extractall
 from util.dockerfileparse import parse_dockerfile, ParsedDockerfile, serialize_dockerfile
 
@@ -34,8 +34,6 @@ formatter = logging.Formatter(FORMAT)
 
 logger = logging.getLogger(__name__)
 
-build_logs = app.config['BUILDLOGS']
-
 TIMEOUT_PERIOD_MINUTES = 20
 CACHE_EXPIRATION_PERIOD_HOURS = 24
 NO_TAGS = ['<none>:<none>']
@@ -143,6 +141,7 @@ class DockerfileBuildContext(object):
       self.__cleanup_images()
       self.__prune_cache()
     except APIError:
+      sentry.client.captureException()
       message = 'Docker installation is no longer healthy.'
       logger.exception(message)
       raise WorkerUnhealthyException(message)
@@ -452,6 +451,9 @@ class DockerfileBuildWorker(Worker):
   def process_queue_item(self, job_details):
     self._timeout.clear()
 
+    # Make sure we have more information for debugging problems
+    sentry.client.user_context(job_details)
+
     repository_build = model.get_repository_build(job_details['namespace'],
                                                   job_details['repository'],
                                                   job_details['build_uuid'])
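The new `sentry.client.captureException()` and `sentry.client.user_context(...)` calls imply that `app.py` now exports a Sentry object whose `.client` is a raven client. A sketch of that pattern, assuming raven's Flask integration (the real wiring lives in app.py and is not shown in this diff):

```
# Sketch of the raven/Sentry wiring these hunks appear to rely on.
# Assumes raven's Flask integration; DSN and function names are placeholders.
from flask import Flask
from raven.contrib.flask import Sentry

app = Flask(__name__)
app.config['SENTRY_DSN'] = 'https://key:secret@sentry.example.com/1'  # placeholder
sentry = Sentry(app)

def process_job(job_details):
  # Attach the job payload so any Sentry event carries debugging context
  sentry.client.user_context(job_details)
  try:
    run_build(job_details)
  except Exception:
    sentry.client.captureException()  # report with the context set above
    raise

def run_build(job_details):
  # Stand-in for the real build step
  raise RuntimeError('hypothetical build failure for %s' % job_details)
```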
@@ -542,6 +544,7 @@ class DockerfileBuildWorker(Worker):
       raise exc
 
     except Exception as exc:
+      sentry.client.captureException()
       log_appender('error', build_logs.PHASE)
       logger.exception('Exception when processing request.')
       repository_build.phase = 'error'
@@ -552,27 +555,12 @@ class DockerfileBuildWorker(Worker):
 
 desc = 'Worker daemon to monitor dockerfile build'
 parser = argparse.ArgumentParser(description=desc)
-parser.add_argument('-D', action='store_true', default=False,
-                    help='Run the worker in daemon mode.')
-parser.add_argument('--log', default='dockerfilebuild.log',
-                    help='Specify the log file for the worker as a daemon.')
 parser.add_argument('--cachegb', default=20, type=float,
                     help='Maximum cache size in gigabytes.')
 args = parser.parse_args()
 
+logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
+
 worker = DockerfileBuildWorker(args.cachegb, dockerfile_build_queue,
                                reservation_seconds=RESERVATION_TIME)
-
-if args.D:
-  handler = logging.FileHandler(args.log)
-  handler.setFormatter(formatter)
-  root_logger.addHandler(handler)
-  with daemon.DaemonContext(files_preserve=[handler.stream]):
-    worker.start()
-
-else:
-  handler = logging.StreamHandler()
-  handler.setFormatter(formatter)
-  root_logger.addHandler(handler)
-  worker.start()
+worker.start(start_status_server_port=8000)
@@ -34,17 +34,7 @@ class WebhookWorker(Worker):
 
     return True
 
-parser = argparse.ArgumentParser(description='Worker daemon to send webhooks')
-parser.add_argument('--log', help='Specify the log file for the worker as a daemon.')
-args = parser.parse_args()
-
-if args.log is not None:
-  handler = logging.FileHandler(args.log)
-else:
-  handler = logging.StreamHandler()
-handler.setFormatter(formatter)
-root_logger.addHandler(handler)
+logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
 
 worker = WebhookWorker(webhook_queue, poll_period_seconds=15,
                        reservation_seconds=3600)
@@ -1,11 +1,16 @@
 import logging
 import json
 import signal
+import sys
 
 from threading import Event
 from apscheduler.scheduler import Scheduler
 from datetime import datetime, timedelta
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from threading import Thread
+from time import sleep
+
+from data.model import db
 
 logger = logging.getLogger(__name__)
 
@@ -23,6 +28,36 @@ class WorkerUnhealthyException(Exception):
   pass
 
 
+class WorkerStatusServer(HTTPServer):
+  def __init__(self, worker, *args, **kwargs):
+    HTTPServer.__init__(self, *args, **kwargs)
+    self.worker = worker
+
+
+class WorkerStatusHandler(BaseHTTPRequestHandler):
+  def do_GET(self):
+    if self.path == '/status':
+      # Return the worker status
+      code = 200 if self.server.worker.is_healthy() else 503
+      self.send_response(code)
+    elif self.path == '/terminate':
+      # Return whether it is safe to terminate the worker process
+      code = 200 if self.server.worker.is_terminated() else 503
+      self.send_response(code)
+    else:
+      self.send_error(404)
+
+  def do_POST(self):
+    if self.path == '/terminate':
+      try:
+        self.server.worker.join()
+        self.send_response(200)
+      except:
+        self.send_response(500)
+    else:
+      self.send_error(404)
+
+
 class Worker(object):
   def __init__(self, queue, poll_period_seconds=30, reservation_seconds=300,
                watchdog_period_seconds=60):
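`WorkerStatusHandler` gives external tooling (presumably gantryd, per the README changes above) an HTTP view of the worker lifecycle: GET `/status` returns 200 while the worker is healthy, POST `/terminate` asks it to finish the current item and stop, and GET `/terminate` returns 200 once it has actually stopped. A hypothetical client-side drain, assuming the server listens on port 8000:

```
# Hypothetical client for the status server above (host/port assumed).
import time
import urllib2

def _status_code(url, data=None):
  try:
    return urllib2.urlopen(url, data, timeout=5).getcode()
  except urllib2.HTTPError as err:
    return err.code  # a 503 from send_response surfaces as HTTPError
  except urllib2.URLError:
    return None  # nothing listening

def drain_worker(host='localhost', port=8000, attempts=120):
  base = 'http://%s:%d' % (host, port)
  # POST /terminate invokes worker.join(): stop after the current queue item
  if _status_code(base + '/terminate', data='') != 200:
    return False
  # GET /terminate flips to 200 once the main loop sets _terminated
  for _ in range(attempts):
    if _status_code(base + '/terminate') == 200:
      return True
    time.sleep(5)
  return False
```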
@@ -31,6 +66,7 @@ class Worker(object):
     self._reservation_seconds = reservation_seconds
     self._watchdog_period_seconds = watchdog_period_seconds
     self._stop = Event()
+    self._terminated = Event()
     self._queue = queue
     self.current_queue_item = None
 
@@ -42,6 +78,17 @@ class Worker(object):
     """ Function that gets run once every watchdog_period_seconds. """
     pass
 
+  def _close_db_handle(self):
+    if not db.is_closed():
+      logger.debug('Disconnecting from database.')
+      db.close()
+
+  def is_healthy(self):
+    return not self._stop.is_set()
+
+  def is_terminated(self):
+    return self._terminated.is_set()
+
   def extend_processing(self, seconds_from_now):
     if self.current_queue_item is not None:
       self._queue.extend_processing(self.current_queue_item, seconds_from_now)
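`_close_db_handle()` drops the connection to `data.model.db` between jobs, so a long-lived worker does not carry a stale handle across database idle timeouts or failovers. The same pattern, sketched against a throwaway peewee database rather than quay's real one:

```
# Sketch of the connection-hygiene pattern, using an in-memory SQLite db
# as a stand-in for quay's data.model.db.
import logging
from peewee import SqliteDatabase

logger = logging.getLogger(__name__)
db = SqliteDatabase(':memory:')

def close_db_handle():
  # Safe to call repeatedly; the next query re-opens the connection
  if not db.is_closed():
    logger.debug('Disconnecting from database.')
    db.close()
```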
@@ -51,7 +98,7 @@ class Worker(object):
 
     self.current_queue_item = self._queue.get()
     while self.current_queue_item:
-      logger.debug('Queue gave us some work: %s' % self.current_queue_item.body)
+      logger.debug('Queue gave us some work: %s', self.current_queue_item.body)
 
       job_details = json.loads(self.current_queue_item.body)
 
@@ -68,13 +115,24 @@ class Worker(object):
       finally:
         self.current_queue_item = None
 
+        # Close the db handle periodically
+        self._close_db_handle()
+
         if not self._stop.is_set():
           self.current_queue_item = self._queue.get(processing_time=self._reservation_seconds)
 
     if not self._stop.is_set():
       logger.debug('No more work.')
 
-  def start(self):
+  def start(self, start_status_server_port=None):
+    if start_status_server_port is not None:
+      # Start a status server on a thread
+      server_address = ('', start_status_server_port)
+      httpd = WorkerStatusServer(self, server_address, WorkerStatusHandler)
+      server_thread = Thread(target=httpd.serve_forever)
+      server_thread.daemon = True
+      server_thread.start()
+
     logger.debug("Scheduling worker.")
 
     soon = datetime.now() + timedelta(seconds=.001)
@@ -84,8 +142,8 @@ class Worker(object):
                                  start_date=soon)
     self._sched.add_interval_job(self.watchdog, seconds=self._watchdog_period_seconds)
 
-    signal.signal(signal.SIGTERM, self.join)
-    signal.signal(signal.SIGINT, self.join)
+    signal.signal(signal.SIGTERM, self.terminate)
+    signal.signal(signal.SIGINT, self.terminate)
 
     while not self._stop.wait(1):
       pass
@@ -94,11 +152,25 @@ class Worker(object):
     self._sched.shutdown()
     logger.debug('Finished.')
 
-  def join(self, signal_num=None, stack_frame=None):
-    logger.debug('Shutting down worker gracefully.')
-    self._stop.set()
+    self._terminated.set()
+
+    # Wait forever if we're running a server
+    while start_status_server_port is not None:
+      sleep(60)
 
-    # Give back the retry that we took for this queue item so that if it were down to zero
-    # retries it will still be picked up by another worker
-    if self.current_queue_item is not None:
-      self._queue.incomplete(self.current_queue_item, restore_retry=True)
+  def terminate(self, signal_num=None, stack_frame=None, graceful=False):
+    if self._terminated.is_set():
+      sys.exit(1)
+
+    else:
+      logger.debug('Shutting down worker.')
+      self._stop.set()
+
+      if not graceful:
+        # Give back the retry that we took for this queue item so that if it were down to zero
+        # retries it will still be picked up by another worker
+        if self.current_queue_item is not None:
+          self._queue.incomplete(self.current_queue_item, restore_retry=True)
+
+  def join(self):
+    self.terminate(graceful=True)
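The signal handlers now point at `terminate` instead of `join`: the first SIGTERM/SIGINT requests a (non-graceful) stop, returning the in-flight item's retry to the queue, and a second signal, arriving after `_terminated` is set, forces `sys.exit(1)`. A worker started with a status server then idles in the `sleep(60)` loop, so the process only exits on that second signal. A hypothetical supervisor-side stop under those assumptions:

```
# Hypothetical supervisor logic for stopping a worker process (pid assumed known).
import os
import signal
import time

def stop_worker(pid, grace_seconds=300):
  os.kill(pid, signal.SIGTERM)      # first signal: Worker.terminate() stops the loop
  deadline = time.time() + grace_seconds
  while time.time() < deadline:
    try:
      os.kill(pid, 0)               # probe: still alive?
    except OSError:
      return True                   # exited on its own (no status server case)
    time.sleep(1)
  os.kill(pid, signal.SIGTERM)      # second signal: _terminated is set -> sys.exit(1)
  return False
```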