Allow build node logs to flow to docker. Clean up the pull key before executing user code. Actually clean up the DO node when we are done. Run the status server threaded just in case.
parent bb2446c45c
commit 1c7398e920

3 changed files with 14 additions and 7 deletions
@@ -218,4 +218,4 @@ def get_all_status():
 
 if __name__ == '__main__':
   logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
-  app.run(host='0.0.0.0', port=5002, debug=True)
+  app.run(host='0.0.0.0', port=5002, debug=True, threaded=True)
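Passing threaded=True makes the Flask development server handle each request in its own thread, so one slow status poll no longer blocks the others. A minimal sketch under the same assumptions (Flask dev server on port 5002); the /status route and payload are illustrative, not taken from the repo:

    import logging

    from flask import Flask, jsonify

    LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'  # assumed format string
    app = Flask(__name__)

    @app.route('/status')
    def get_all_status():
        # Stand-in payload; the real endpoint aggregates per-build status.
        return jsonify({'alive': True})

    if __name__ == '__main__':
        logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
        # threaded=True serves concurrent pollers instead of queueing them
        # behind a single worker thread.
        app.run(host='0.0.0.0', port=5002, debug=True, threaded=True)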
@@ -45,4 +45,4 @@ done
 popd
 
 docker -d &
-exec venv/bin/python buildserver.py 2> buildserver.log
+exec venv/bin/python buildserver.py
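Dropping the 2> buildserver.log redirect leaves the build server's stderr attached to the container's output, where the docker daemon captures it, so the output can be pulled off the node with docker logs instead of sitting in a file inside the image. A sketch, assuming the worker already holds a connected paramiko SSHClient and knows the container id (both names here are illustrative):

    def fetch_build_logs(ssh_client, container_id):
        # With stderr no longer redirected to buildserver.log, a plain
        # `docker logs` call over SSH returns everything the node printed.
        _, stdout, stderr = ssh_client.exec_command('docker logs %s' % container_id)
        return stdout.read(), stderr.read()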
@@ -58,8 +58,10 @@ def get_status(url):
 
 
 def babysit_builder(request):
   """ Spin up a build node and ask it to build our job. Retryable errors
   should return False, while fatal errors should return True.
   """
   try:
     logger.debug('Starting work item: %s' % request)
     repository_build = model.get_repository_build(request['build_id'])
     logger.debug('Request details: %s' % repository_build)
 
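The docstring fixes the contract: babysit_builder returns False when the failure is worth retrying and True when the item is finished or fatally broken. One plausible way a caller could act on that value; the queue helpers here are hypothetical and not taken from this repo:

    def handle_work_item(request, queue):
        # True: the build finished or failed fatally, so drop the item.
        # False: something transient went wrong, put it back for another try.
        if babysit_builder(request):
            queue.complete(request)   # hypothetical queue API
        else:
            queue.requeue(request)    # hypothetical queue API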
@@ -143,6 +145,11 @@ def babysit_builder(request):
     else:
       logger.debug('Pull status was: %s' % pull_status)
 
+    # Remove the credentials we used to pull so crafty users cant steal them
+    remove_auth_cmd = 'rm .dockercfg'
+    ssh_client.exec_command(remove_auth_cmd)
+
+    # Start the build server
     start_cmd = 'docker run -d -privileged -lxc-conf="lxc.aa_profile=unconfined" quay.io/quay/buildserver'
     ssh_client.exec_command(start_cmd)
 
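The added commands delete the .dockercfg credential file after the authenticated pull but before any user-controlled build code runs, so a crafty Dockerfile on the node cannot read the registry credentials. A condensed sketch of that ordering, assuming a connected paramiko SSHClient; the pull command is illustrative:

    def prepare_build_node(ssh_client, builder_tag):
        # 1. Pull the builder image while .dockercfg (the registry credentials
        #    written for this pull) is still on the node.
        ssh_client.exec_command('docker pull %s' % builder_tag)

        # 2. Remove the credentials before any user-supplied code can run.
        ssh_client.exec_command('rm .dockercfg')

        # 3. Only then start the privileged build server container.
        ssh_client.exec_command(
            'docker run -d -privileged -lxc-conf="lxc.aa_profile=unconfined" '
            'quay.io/quay/buildserver')

A real version would wait on each command's exit status (for example by reading the returned stdout channel) before issuing the next one, rather than firing them off back to back.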
@@ -191,7 +198,7 @@ def babysit_builder(request):
 
     # clean up the DO node
     logger.debug('Cleaning up DO node.')
-    # droplet.destroy()
+    droplet.destroy()
 
     repository_build.status_url = None
     repository_build.build_node_id = None;
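Un-commenting droplet.destroy() is what actually releases the DigitalOcean node once the build is over; without it every job leaks a running droplet. A hedged sketch of making that teardown unconditional with try/finally (the provisioning and build helpers are hypothetical):

    import logging

    logger = logging.getLogger(__name__)

    def run_build_on_droplet(request):
        droplet = provision_build_droplet(request)   # hypothetical helper
        try:
            return run_build(droplet, request)       # hypothetical helper
        finally:
            # Destroy the DO node however the build ended, so crashed or
            # failed builds do not keep billing.
            logger.debug('Cleaning up DO node.')
            droplet.destroy()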
@@ -200,7 +207,9 @@ def babysit_builder(request):
       return True
 
   except Exception as outer_ex:
     # We don't really know what these are, but they are probably retryable
     logger.exception('Exception processing job: %s' % outer_ex.message)
     return False
 
 
 def process_work_items(pool):
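The catch-all handler logs with logger.exception, which records the message at ERROR level and appends the active traceback, so even the "we don't really know what these are" failures leave enough context to debug; note that outer_ex.message is Python 2 style and no longer exists in Python 3. A small standalone illustration of what logger.exception adds over logger.error:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)

    try:
        raise RuntimeError('ssh connection dropped')
    except Exception as outer_ex:
        # logger.error would record only the message; logger.exception also
        # attaches the full traceback of the exception being handled.
        logger.exception('Exception processing job: %s' % outer_ex)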
@@ -236,9 +245,7 @@ def start_worker():
   sched = Scheduler()
   sched.start()
 
-  # sched.add_interval_job(process_work_items, args=[pool], seconds=30)
-
-  process_work_items(pool)
+  sched.add_interval_job(process_work_items, args=[pool], seconds=30)
 
   while True:
     time.sleep(60 * 60 * 24) # sleep one day, basically forever
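Replacing the one-shot process_work_items(pool) call with sched.add_interval_job(...) makes the queue poll recur every 30 seconds, while the sleep loop just keeps the main thread alive for the scheduler's background threads. add_interval_job is the APScheduler 2.x API (3.x renamed it to add_job with an interval trigger). A self-contained sketch under those assumptions, with a stub in place of the real worker:

    import time

    from apscheduler.scheduler import Scheduler  # APScheduler 2.x import path

    def process_work_items(pool):
        print('polling the work queue')  # stand-in for the real worker logic

    sched = Scheduler()
    sched.start()

    # Run the poll every 30 seconds instead of exactly once at startup.
    sched.add_interval_job(process_work_items, args=[None], seconds=30)

    while True:
        time.sleep(60 * 60 * 24)  # sleep one day, basically forever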