quay/workers/dockerfilebuild.py


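"""Worker daemon that monitors dockerfile builds.

Pulls build jobs from the dockerfile build queue, provisions a DigitalOcean
droplet running the quay.io/quay/buildserver container for each job, polls
the build to completion, and then destroys the droplet.

Run directly to log to stderr, or pass -D to run as a daemon (see --log).
"""
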
import logging
import json
import daemon
import time
import argparse
import digitalocean
import requests
import paramiko

from apscheduler.scheduler import Scheduler
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError

from data.queue import dockerfile_build_queue
from data.userfiles import UserRequestFiles
from data import model
from app import app


root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)

FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
formatter = logging.Formatter(FORMAT)

logger = logging.getLogger(__name__)


def try_connection(url, retries=5, period=5):
  try:
    return requests.get(url)
  except ConnectionError as ex:
    if retries:
      logger.debug('Retrying connection to url: %s after %ss' % (url, period))
      time.sleep(period)
      return try_connection(url, retries-1, period)
    raise ex


def get_status(url):
  return requests.get(url).json()['status']


def babysit_builder(request):
  """Provision a DO droplet for a single dockerfile build, monitor the build
  to completion, and clean the droplet up afterwards."""
  manager = digitalocean.Manager(client_id=app.config['DO_CLIENT_ID'],
                                 api_key=app.config['DO_CLIENT_SECRET'])
  repository_build = model.get_repository_build(request['build_id'])

  # check if there is already a DO node for this build job, if so clean it up
  old_id = repository_build.build_node_id
  if old_id:
    # destroy() needs API credentials, not just the droplet id
    old_droplet = digitalocean.Droplet(id=old_id,
                                       client_id=app.config['DO_CLIENT_ID'],
                                       api_key=app.config['DO_CLIENT_SECRET'])
    old_droplet.destroy()

  # start the DO node
  name = 'dockerfile-build-%s' % repository_build.id
  droplet = digitalocean.Droplet(client_id=app.config['DO_CLIENT_ID'],
                                 api_key=app.config['DO_CLIENT_SECRET'],
                                 name=name,
                                 region_id=1,  # New York
                                 image_id=1004145,  # Docker on 13.04
                                 size_id=66,  # 512MB
                                 backup_active=False)
  droplet.create(ssh_key_ids=[app.config['DO_SSH_KEY_ID']])

  repository_build.build_node_id = droplet.id
  repository_build.phase = 'starting'
  repository_build.save()

  # poll the create event until the droplet is fully provisioned
  startup = droplet.get_events()[0]
  while int(startup.percentage) != 100:
    logger.debug('Droplet startup percentage: %s' % startup.percentage)
    time.sleep(5)
    startup.load()

  droplet.load()
  logger.debug('Droplet started at ip address: %s' % droplet.ip_address)

  # connect to it with ssh
  repository_build.phase = 'initializing'
  repository_build.save()

  ssh_client = paramiko.SSHClient()
  ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  # port 22 assumed; the original referenced self._container_ip and
  # self._config.sshd_port, neither of which exists in this scope
  ssh_client.connect(droplet.ip_address, 22, 'root',
                     look_for_keys=False,
                     key_filename=app.config['DO_SSH_PRIVATE_KEY_FILENAME'])

  # Pull and run the buildserver
  pull_cmd = 'docker pull quay.io/quay/buildserver'
  _, stdout, _ = ssh_client.exec_command(pull_cmd)

  start_cmd = 'sudo docker run -d -privileged quay.io/quay/buildserver'
  _, stdout, _ = ssh_client.exec_command(start_cmd)

  # wait for the server to be ready
  logger.debug('Waiting for buildserver to be ready')
  build_endpoint = 'http://%s:5002/build/' % droplet.ip_address
  try:
    try_connection(build_endpoint)
  except ConnectionError:
    # TODO cleanup
    pass

  # send it the job
  logger.debug('Sending build server request')
  user_files = UserRequestFiles(app.config['AWS_ACCESS_KEY'],
                                app.config['AWS_SECRET_KEY'],
                                app.config['REGISTRY_S3_BUCKET'])

  repo = repository_build.repository
  payload = {
    'tag': repository_build.tag,
    'resource_url': user_files.get_file_url(repository_build.resource_key),
    'token': repository_build.access_token.code,
  }
  start_build = requests.post(build_endpoint, data=payload)

  # wait for the job to be complete
  status_url = start_build.headers['Location']
  repository_build.phase = 'building'
  repository_build.status_url = status_url
  repository_build.save()

  logger.debug('Waiting for job to be complete')
  status = get_status(status_url)
  while status != 'error' and status != 'complete':
    logger.debug('Job status is: %s' % status)
    time.sleep(5)
    status = get_status(status_url)

  logger.debug('Job complete with status: %s' % status)
  if status == 'error':
    repository_build.phase = 'error'
  else:
    repository_build.phase = 'complete'

  # clean up the DO node
  logger.debug('Cleaning up DO node.')
  droplet.destroy()

  repository_build.status_url = None
  repository_build.build_node_id = None
  repository_build.save()

  return True


def process_work_items(pool):
  logger.debug('Getting work item from queue.')

  item = dockerfile_build_queue.get(processing_time=60*60)  # allow 1 hr

  while item:
    logger.debug('Queue gave us some work: %s' % item.body)

    request = json.loads(item.body)

    def build_callback(item):
      # bind the queue item so the completion callback marks the right one
      local_item = item
      def complete_callback(completed):
        if completed:
          dockerfile_build_queue.complete(local_item)
      return complete_callback

    pool.apply_async(babysit_builder, [request],
                     callback=build_callback(item))

    item = dockerfile_build_queue.get()

  logger.debug('No more work.')


def start_worker():
  pool = ThreadPool(3)
  logger.debug('Scheduling worker.')

  sched = Scheduler()
  sched.start()
  sched.add_interval_job(process_work_items, args=[pool], seconds=30)

  while True:
    time.sleep(60 * 60 * 24)  # sleep one day, basically forever


desc = 'Worker daemon to monitor dockerfile build'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', action='store_true', default=False,
                    help='Run the worker in daemon mode.')
parser.add_argument('--log', default='dockerfilebuild.log',
                    help='Specify the log file for the worker as a daemon.')
args = parser.parse_args()

if args.D:
  handler = logging.FileHandler(args.log)
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  with daemon.DaemonContext(files_preserve=[handler.stream]):
    start_worker()
else:
  handler = logging.StreamHandler()
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  start_worker()
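
# Example invocations (flags as defined by the argparse parser above; the
# module path comes from this file's location in the repository):
#   python workers/dockerfilebuild.py                      # foreground, logs to stderr
#   python workers/dockerfilebuild.py -D --log=build.log   # daemonized, logs to file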