Add the first part of the build worker that runs on quay.
parent 9dc9e0c940 · commit 847e91b696
10 changed files with 196 additions and 11 deletions
workers/dockerfilebuild.py (new file, 136 lines)
@@ -0,0 +1,136 @@
import logging
import json
import daemon
import time
import argparse
import digitalocean

from apscheduler.scheduler import Scheduler
from multiprocessing.pool import ThreadPool
from fabric.api import env

from data.queue import dockerfile_build_queue
from data import model
from app import app


root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)

FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
formatter = logging.Formatter(FORMAT)

logger = logging.getLogger(__name__)

def babysit_builder(request):
  manager = digitalocean.Manager(client_id=app.config['DO_CLIENT_ID'],
                                 api_key=app.config['DO_CLIENT_SECRET'])
  repository_build = model.get_repository_build(request['build_id'])

  # check if there is already a DO node for this build job; if so, clean it up
  old_id = repository_build.digitalocean_build_node_id
  if old_id:
    old_droplet = digitalocean.Droplet(old_id)
    old_droplet.destroy()

  # start the DO node
  name = 'dockerfile-build-%s' % repository_build.id
  droplet = digitalocean.Droplet(client_id=app.config['DO_CLIENT_ID'],
                                 api_key=app.config['DO_CLIENT_SECRET'],
                                 name=name,
                                 region_id=1,       # New York
                                 image_id=1004145,  # Docker on 13.04
                                 size_id=66,        # 512MB
                                 backup_active=False)
  droplet.create(ssh_key_ids=[app.config['DO_SSH_KEY_ID']])
  repository_build.digitalocean_build_node_id = droplet.id
  repository_build.phase = 'starting'
  repository_build.save()

  startup = droplet.get_events()[0]
  while int(startup.percentage) != 100:
    logger.debug('Droplet startup percentage: %s' % startup.percentage)
    time.sleep(5)
    startup.load()

  droplet.load()
  logger.debug('Droplet started at ip address: %s' % droplet.ip_address)

  # connect to it with ssh
  repository_build.phase = 'initializing'
  repository_build.save()

  env.host_string = 'root@%s' % droplet.ip_address

  # tell it to pull and run the buildserver
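  # A minimal sketch of what this step might look like using fabric's run()
  # against env.host_string; the buildserver image name and port are
  # illustrative assumptions, not part of this commit:
  #
  #   from fabric.api import run
  #   run('docker pull quay.io/quay/buildserver')
  #   run('docker run -d -p 5002:5002 quay.io/quay/buildserver')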

  # wait for the server to be ready
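  # One hypothetical way to block until the buildserver answers, assuming it
  # exposes an HTTP status endpoint (requests is not imported by this file):
  #
  #   def server_ready():
  #     try:
  #       url = 'http://%s:5002/status' % droplet.ip_address
  #       return requests.get(url, timeout=2).status_code == 200
  #     except requests.ConnectionError:
  #       return False
  #
  #   while not server_ready():
  #     time.sleep(5)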

  # send it the job
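  # Sketch of handing over the work item, assuming a job-submission endpoint
  # on the same hypothetical API:
  #
  #   requests.post('http://%s:5002/build' % droplet.ip_address,
  #                 data=json.dumps(request))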

  # wait for the job to be complete
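  # Polling sketch against the same assumed status endpoint; the 'phase'
  # field and its values are illustrative:
  #
  #   status_url = 'http://%s:5002/status' % droplet.ip_address
  #   while requests.get(status_url).json()['phase'] != 'complete':
  #     time.sleep(30)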

  # clean up the DO node
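  # Teardown presumably mirrors the cleanup at the top of this function;
  # a sketch (the final phase name is an assumption):
  #
  #   droplet.destroy()
  #   repository_build.digitalocean_build_node_id = None
  #   repository_build.phase = 'complete'
  #   repository_build.save()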

  return True


def process_work_items(pool):
  logger.debug('Getting work item from queue.')

  item = dockerfile_build_queue.get()

  while item:
    logger.debug('Queue gave us some work: %s' % item.body)

    request = json.loads(item.body)

    def build_callback(item):
      # bind the current queue item so the async callback marks the right
      # item complete, even after the loop has moved on to the next one
      local_item = item
      def complete_callback(completed):
        if completed:
          dockerfile_build_queue.complete(local_item)
      return complete_callback

    pool.apply_async(babysit_builder, [request], callback=build_callback(item))

    item = dockerfile_build_queue.get()

  logger.debug('No more work.')


def start_worker():
  pool = ThreadPool(3)
  logger.debug("Scheduling worker.")

  sched = Scheduler()
  sched.start()

  # check the queue for new work every thirty seconds
  sched.add_interval_job(process_work_items, args=[pool], seconds=30)

  while True:
    time.sleep(60 * 60 * 24)  # sleep one day, basically forever


desc = 'Worker daemon to monitor dockerfile build'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', action='store_true', default=False,
                    help='Run the worker in daemon mode.')
parser.add_argument('--log', default='dockerfilebuild.log',
                    help='Specify the log file for the worker as a daemon.')
args = parser.parse_args()

if args.D:
  handler = logging.FileHandler(args.log)
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  with daemon.DaemonContext(files_preserve=[handler.stream]):
    start_worker()
else:
  handler = logging.StreamHandler()
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  start_worker()
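
Per the argparse block above, the worker can be run in the foreground,
logging to stderr:

  python workers/dockerfilebuild.py

or detached as a daemon, logging to a file (the path below is only an
example; it defaults to dockerfilebuild.log):

  python workers/dockerfilebuild.py -D --log /var/log/dockerfilebuild.log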