import json
import logging
import signal
import sys

from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from datetime import datetime, timedelta
from threading import Event, Lock, Thread
from time import sleep

from apscheduler.schedulers.background import BackgroundScheduler

from data.model import db


logger = logging.getLogger(__name__)


class JobException(Exception):
  """ A job exception is an exception caused by something being malformed in the job itself. When
      a worker raises this exception, the job will be terminated and the item will not be returned
      to the queue for retry. """
  pass


class WorkerUnhealthyException(Exception):
  """ When this exception is raised, the worker is no longer healthy and will not accept any more
      work. When this is raised while processing a queue item, the item should be returned to the
      queue along with another retry. """
  pass


class WorkerStatusServer(HTTPServer):
  def __init__(self, worker, *args, **kwargs):
    HTTPServer.__init__(self, *args, **kwargs)
    self.worker = worker
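

# Serves the worker's local status endpoints (summarized here; see the handler methods below
# for the exact behavior):
#   GET  /status    -> 200 if the worker is healthy, 503 otherwise
#   GET  /terminate -> 200 if the worker has fully terminated, 503 otherwise
#   POST /terminate -> asks the worker to drain and terminate; 200 on success, 500 on error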
class WorkerStatusHandler(BaseHTTPRequestHandler):
  def do_GET(self):
    if self.path == '/status':
      # Return the worker status
      code = 200 if self.server.worker.is_healthy() else 503
      self.send_response(code)
      self.send_header('Content-Type', 'text/plain')
      self.end_headers()
      self.wfile.write('OK')
    elif self.path == '/terminate':
      # Return whether it is safe to terminate the worker process
      code = 200 if self.server.worker.is_terminated() else 503
      self.send_response(code)
    else:
      self.send_error(404)

  def do_POST(self):
    if self.path == '/terminate':
      try:
        self.server.worker.join()
        self.send_response(200)
      except Exception:
        self.send_response(500)
    else:
      self.send_error(404)


class Worker(object):
  def __init__(self, queue, poll_period_seconds=30, reservation_seconds=300,
               watchdog_period_seconds=60, retry_after_seconds=300):
    self._sched = BackgroundScheduler()
    self._poll_period_seconds = poll_period_seconds
    self._reservation_seconds = reservation_seconds
    self._watchdog_period_seconds = watchdog_period_seconds
    self._retry_after_seconds = retry_after_seconds
    self._stop = Event()
    self._terminated = Event()
    self._queue = queue
    self._current_item_lock = Lock()
    self.current_queue_item = None

  def process_queue_item(self, job_details):
    """ Process the work item described by job_details. Returning normally marks the item
        complete. Raise JobException to report a malformed job (the item is marked incomplete
        without restoring its retry), or WorkerUnhealthyException to return the item to the
        queue with its retry restored. """
    raise NotImplementedError('Workers must implement process_queue_item.')
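
  # A minimal subclass sketch (illustrative only; EchoWorker and the job payload fields are
  # hypothetical, not part of this module):
  #
  #   class EchoWorker(Worker):
  #     def process_queue_item(self, job_details):
  #       logger.debug('Handling job: %s', job_details)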

  def watchdog(self):
    """ Function that gets run once every watchdog_period_seconds. """
    pass

  def _close_db_handle(self):
    if not db.is_closed():
      logger.debug('Disconnecting from database.')
      db.close()

  def is_healthy(self):
    return not self._stop.is_set()

  def is_terminated(self):
    return self._terminated.is_set()

  def extend_processing(self, seconds_from_now):
    with self._current_item_lock:
      if self.current_queue_item is not None:
        self._queue.extend_processing(self.current_queue_item, seconds_from_now)

  def run_watchdog(self):
    logger.debug('Running watchdog.')
    try:
      self.watchdog()
    except WorkerUnhealthyException as exc:
      logger.error('The worker has encountered an error via watchdog and will not take new jobs')
      logger.error(exc.message)
      self.mark_current_incomplete(restore_retry=True)
      self._stop.set()
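
  # poll_queue drives the main work loop: it reserves an item from the queue, processes it,
  # marks it complete or incomplete based on the outcome, and keeps pulling items until the
  # queue is empty or the worker has been asked to stop.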
  def poll_queue(self):
    logger.debug('Getting work item from queue.')

    with self._current_item_lock:
      self.current_queue_item = self._queue.get(processing_time=self._reservation_seconds)

    while True:
      # Retrieve the current item in the queue over which to operate. We do so under
      # a lock to make sure we are always retrieving an item when in a healthy state.
      current_queue_item = None
      with self._current_item_lock:
        current_queue_item = self.current_queue_item
        if current_queue_item is None:
          # Close the db handle.
          self._close_db_handle()
          break

      logger.debug('Queue gave us some work: %s', current_queue_item.body)
      job_details = json.loads(current_queue_item.body)

      try:
        self.process_queue_item(job_details)
        self.mark_current_complete()

      except JobException as jex:
        logger.warning('An error occurred processing request: %s', current_queue_item.body)
        logger.warning('Job exception: %s', jex)
        self.mark_current_incomplete(restore_retry=False)

      except WorkerUnhealthyException as exc:
        logger.error('The worker has encountered an error via the job and will not take new jobs')
        logger.error(exc.message)
        self.mark_current_incomplete(restore_retry=True)
        self._stop.set()

      finally:
        # Close the db handle.
        self._close_db_handle()

      if not self._stop.is_set():
        with self._current_item_lock:
          self.current_queue_item = self._queue.get(processing_time=self._reservation_seconds)

    if not self._stop.is_set():
      logger.debug('No more work.')

  def update_queue_metrics(self):
    self._queue.update_metrics()
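
  # start() optionally exposes the status server on a daemon thread, schedules the queue
  # poller, metrics updater and watchdog on the background scheduler, installs the
  # SIGTERM/SIGINT handlers, and then blocks until the worker is asked to stop.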
  def start(self, start_status_server_port=None):
    if start_status_server_port is not None:
      # Start a status server on a thread
      server_address = ('', start_status_server_port)
      httpd = WorkerStatusServer(self, server_address, WorkerStatusHandler)
      server_thread = Thread(target=httpd.serve_forever)
      server_thread.daemon = True
      server_thread.start()

    logger.debug('Scheduling worker.')

    soon = datetime.now() + timedelta(seconds=.001)

    self._sched.start()
    self._sched.add_job(self.poll_queue, 'interval', seconds=self._poll_period_seconds,
                        start_date=soon, max_instances=1)
    self._sched.add_job(self.update_queue_metrics, 'interval', seconds=60, start_date=soon,
                        max_instances=1)
    self._sched.add_job(self.run_watchdog, 'interval', seconds=self._watchdog_period_seconds,
                        max_instances=1)

    signal.signal(signal.SIGTERM, self.terminate)
    signal.signal(signal.SIGINT, self.terminate)

    while not self._stop.wait(1):
      pass

    logger.debug('Waiting for running tasks to complete.')
    self._sched.shutdown()
    logger.debug('Finished.')

    self._terminated.set()

    # Wait forever if we're running a server
    while start_status_server_port is not None:
      sleep(60)

  def mark_current_incomplete(self, restore_retry=False):
    with self._current_item_lock:
      if self.current_queue_item is not None:
        self._queue.incomplete(self.current_queue_item, restore_retry=restore_retry,
                               retry_after=self._retry_after_seconds)
        self.current_queue_item = None

  def mark_current_complete(self):
    with self._current_item_lock:
      if self.current_queue_item is not None:
        self._queue.complete(self.current_queue_item)
        self.current_queue_item = None
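
  # terminate() doubles as the signal handler installed in start(): it asks the worker to stop
  # and, unless called with graceful=True, returns the in-flight item to the queue; once the
  # worker has already terminated, a further call exits the process.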
  def terminate(self, signal_num=None, stack_frame=None, graceful=False):
    if self._terminated.is_set():
      sys.exit(1)

    else:
      logger.debug('Shutting down worker.')
      self._stop.set()

      if not graceful:
        # Give back the retry that we took for this queue item so that if it were down to zero
        # retries it will still be picked up by another worker
        self.mark_current_incomplete(restore_retry=True)

  def join(self):
    self.terminate(graceful=True)
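

# Example usage (an illustrative sketch only; 'work_queue' stands in for whatever queue
# instance the caller constructs, and DummyWorker is a hypothetical subclass):
#
#   class DummyWorker(Worker):
#     def process_queue_item(self, job_details):
#       logger.debug('Processing: %s', job_details)
#
#   worker = DummyWorker(work_queue, poll_period_seconds=10)
#   worker.start(start_status_server_port=8080)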