from datetime import datetime, timedelta

from data.database import QueueItem, db, db_for_update, db_random_func
from util.morecollections import AttrDict


MINIMUM_EXTENSION = timedelta(seconds=20)


class NoopWith:
  def __enter__(self):
    pass

  def __exit__(self, type, value, traceback):
    pass


class BuildMetricQueueReporter(object):
  """ Metric queue reporter for the build system. """
  def __init__(self, metric_queue):
    self._metric_queue = metric_queue

  def __call__(self, currently_processing, running_count, total_count):
    need_capacity_count = total_count - running_count
    self._metric_queue.put_deprecated('BuildCapacityShortage', need_capacity_count, unit='Count')
    self._metric_queue.build_capacity_shortage.Set(need_capacity_count)

    building_percent = 100 if currently_processing else 0
    self._metric_queue.put_deprecated('PercentBuilding', building_percent, unit='Percent')
    self._metric_queue.percent_building.Set(building_percent)
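
# Illustrative wiring sketch (not part of the original module): a reporter such as
# BuildMetricQueueReporter is handed to WorkQueue via the `reporter` keyword argument, and
# update_metrics() invokes it as reporter(currently_processing, running_count, total_count).
# The `tf` and `metric_queue` names below stand in for an application-provided transaction
# factory and metric queue:
#
#   build_queue = WorkQueue('dockerfilebuild', tf,
#                           reporter=BuildMetricQueueReporter(metric_queue))
#   build_queue.update_metrics()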


class WorkQueue(object):
  """ Work queue defines methods for interacting with a queue backed by the database. """
  def __init__(self, queue_name, transaction_factory,
               canonical_name_match_list=None, reporter=None, metric_queue=None,
               has_namespace=False):
    self._queue_name = queue_name
    self._reporter = reporter
    self._metric_queue = metric_queue
    self._transaction_factory = transaction_factory
    self._currently_processing = False
    self._has_namespaced_items = has_namespace

    if canonical_name_match_list is None:
      self._canonical_name_match_list = []
    else:
      self._canonical_name_match_list = canonical_name_match_list

  @staticmethod
  def _canonical_name(name_list):
    return '/'.join(name_list) + '/'
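
  # For example, _canonical_name(['dockerfilebuild', 'my-namespace', 'my-repo']) yields
  # 'dockerfilebuild/my-namespace/my-repo/' (names here are illustrative); the trailing slash
  # keeps prefix matches from colliding between queue names that share a common prefix.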

  @classmethod
  def _running_jobs(cls, now, name_match_query):
    return (QueueItem
            .select(QueueItem.queue_name)
            .where(QueueItem.available == False,
                   QueueItem.processing_expires > now,
                   QueueItem.queue_name ** name_match_query))

  @classmethod
  def _available_jobs(cls, now, name_match_query):
    return (cls
            ._available_jobs_where(QueueItem.select(), now)
            .where(QueueItem.queue_name ** name_match_query))

  @staticmethod
  def _available_jobs_where(query, now):
    return query.where(QueueItem.available_after <= now,
                       ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
                       QueueItem.retries_remaining > 0)
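
  # The predicate above defines availability: the item's available_after time has passed, the
  # item is either still marked available or its previous processing lease has expired, and it
  # has at least one retry remaining.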

  @classmethod
  def _available_jobs_not_running(cls, now, name_match_query, running_query):
    return (cls
            ._available_jobs(now, name_match_query)
            .where(~(QueueItem.queue_name << running_query)))

  def _name_match_query(self):
    return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)
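
  # The resulting pattern (e.g. 'dockerfilebuild/%', name illustrative) is applied with peewee's
  # ** operator, a case-insensitive LIKE, so queries cover every item whose canonical name falls
  # under this queue's prefix.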

  @staticmethod
  def _item_by_id_for_update(queue_id):
    return db_for_update(QueueItem.select().where(QueueItem.id == queue_id)).get()

  def get_metrics(self):
    now = datetime.utcnow()
    name_match_query = self._name_match_query()

    running_query = self._running_jobs(now, name_match_query)
    running_count = running_query.distinct().count()

    available_query = self._available_jobs(now, name_match_query)
    available_count = available_query.select(QueueItem.queue_name).distinct().count()

    available_not_running_query = self._available_jobs_not_running(now, name_match_query,
                                                                   running_query)
    available_not_running_count = (available_not_running_query
                                   .select(QueueItem.queue_name)
                                   .distinct()
                                   .count())

    return (running_count, available_not_running_count, available_count)
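
  # The tuple above is (running_count, available_not_running_count, available_count), each counted
  # over distinct queue names; update_metrics() reports running_count + available_not_running_count
  # as the total outstanding work.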

  def update_metrics(self):
    if self._reporter is None and self._metric_queue is None:
      return

    (running_count, available_not_running_count, available_count) = self.get_metrics()

    if self._metric_queue:
      dim = {'queue': self._queue_name}
      self._metric_queue.put_deprecated('Running', running_count, dimensions=dim)
      self._metric_queue.put_deprecated('AvailableNotRunning', available_not_running_count,
                                        dimensions=dim)
      self._metric_queue.put_deprecated('Available', available_count, dimensions=dim)

      self._metric_queue.work_queue_running.Set(running_count, labelvalues=[self._queue_name])
      self._metric_queue.work_queue_available.Set(available_count, labelvalues=[self._queue_name])

    if self._reporter:
      self._reporter(self._currently_processing, running_count,
                     running_count + available_not_running_count)

  def has_retries_remaining(self, item_id):
    """ Returns whether the queue item with the given id has any retries remaining. If the
        queue item does not exist, returns False. """
    with self._transaction_factory(db):
      try:
        return QueueItem.get(id=item_id).retries_remaining > 0
      except QueueItem.DoesNotExist:
        return False

  def delete_namespaced_items(self, namespace, subpath=None):
    """ Deletes all items in this queue that exist under the given namespace. """
    if not self._has_namespaced_items:
      return False

    subpath_query = '%s/' % subpath if subpath else ''
    queue_prefix = '%s/%s/%s%%' % (self._queue_name, namespace, subpath_query)
    QueueItem.delete().where(QueueItem.queue_name ** queue_prefix).execute()
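
  # For a queue named 'dockerfilebuild' and namespace 'my-namespace' (illustrative names), the
  # prefix above becomes 'dockerfilebuild/my-namespace/%', or
  # 'dockerfilebuild/my-namespace/my-repo/%' when subpath='my-repo' is given.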

  def put(self, canonical_name_list, message, available_after=0, retries_remaining=5):
    """
    Put an item; if it shouldn't be processed for some number of seconds,
    specify that amount as available_after. Returns the ID of the queue item added.
    """
    item = QueueItem.create(
      queue_name=self._canonical_name([self._queue_name] + canonical_name_list),
      body=message,
      retries_remaining=retries_remaining,
      available_after=datetime.utcnow() + timedelta(seconds=available_after or 0),
    )

    if self._metric_queue:
      self._metric_queue.put_deprecated('Added', 1, dimensions={'queue': self._queue_name})

    return str(item.id)
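
  # Illustrative producer sketch (queue instance, names and payload are assumptions):
  #
  #   import json
  #   item_id = queue.put(['my-namespace', 'my-repo'], json.dumps({'build_uuid': 'abc123'}),
  #                       available_after=30)
  #
  # The body is stored as given, so callers typically serialize it (e.g. with json.dumps) to keep
  # it portable across workers, matching the JSON-serializable contract noted in get().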

  def get(self, processing_time=300, ordering_required=False):
    """
    Get an available item and mark it as unavailable for the default of five
    minutes. The result of this method must always be composed of simple
    python objects which are JSON serializable for network portability reasons.
    """
    now = datetime.utcnow()
    name_match_query = self._name_match_query()
    item = None

    try:
      if ordering_required:
        # The previous solution to this used a select for update in a
        # transaction to prevent multiple instances from processing the
        # same queue item. This suffered performance problems. This solution
        # instead has instances attempt to update the potential queue item to be
        # unavailable. However, since their update clause is restricted to items
        # that are still available, only one instance's update will succeed, and
        # it will have a changed row count of 1. Instances that have 0 changed
        # rows know that another instance is already handling that item.
        running = self._running_jobs(now, name_match_query)
        avail = self._available_jobs_not_running(now, name_match_query, running)
        db_item = avail.order_by(QueueItem.id).get()
      else:
        # If we don't require ordering, we grab a random item from any of the first 50 available.
        subquery = self._available_jobs(now, name_match_query).limit(50).alias('j1')
        db_item = (QueueItem
                   .select()
                   .join(subquery, on=QueueItem.id == subquery.c.id)
                   .order_by(db_random_func())
                   .get())

      set_unavailable_query = (QueueItem
                               .update(available=False,
                                       processing_expires=now + timedelta(seconds=processing_time),
                                       retries_remaining=QueueItem.retries_remaining-1)
                               .where(QueueItem.id == db_item.id))

      changed_query = (self._available_jobs_where(set_unavailable_query, now)
                       .where(QueueItem.processing_expires == db_item.processing_expires))

      changed = changed_query.execute()
      if changed == 1:
        item = AttrDict({
          'id': db_item.id,
          'body': db_item.body,
          'retries_remaining': db_item.retries_remaining - 1,
        })

        self._currently_processing = True
    except QueueItem.DoesNotExist:
      self._currently_processing = False

    # Return a view of the queue item rather than an active db object
    return item
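
  # Illustrative consumer sketch (the process_item helper is an assumption):
  #
  #   item = queue.get(processing_time=60)
  #   if item is not None:
  #     try:
  #       process_item(item.body)
  #       queue.complete(item)
  #     except Exception:
  #       queue.incomplete(item, retry_after=120)
  #
  # get() returns None when no work is available or when another worker claimed the candidate
  # item first, so callers are expected to poll.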

  def cancel(self, item_id):
    """ Attempts to cancel the queue item with the given ID from the queue. Returns true on success
        and false if the queue item could not be canceled. A queue item can only be canceled if
        it is available and has retries remaining.
    """
    with self._transaction_factory(db):
      # Load the queue item for update.
      try:
        queue_item = db_for_update(QueueItem.select()
                                   .where(QueueItem.id == item_id)).get()
      except QueueItem.DoesNotExist:
        return False

      # Check the queue item.
      if not queue_item.available or queue_item.retries_remaining == 0:
        return False

      # Delete the queue item.
      queue_item.delete_instance(recursive=True)
      return True

  def complete(self, completed_item):
    with self._transaction_factory(db):
      try:
        completed_item_obj = self._item_by_id_for_update(completed_item.id)
      except QueueItem.DoesNotExist:
        self._currently_processing = False
        return

      completed_item_obj.delete_instance(recursive=True)
      self._currently_processing = False
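
  # incomplete() is the counterpart to complete(): instead of deleting the item, it makes the item
  # available again after retry_after seconds. Note that get() already consumed one retry when the
  # item was claimed; passing restore_retry=True gives that retry back (useful when the failure was
  # in the worker rather than the work itself).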

  def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
    with self._transaction_factory(db):
      retry_date = datetime.utcnow() + timedelta(seconds=retry_after)

      try:
        incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
        incomplete_item_obj.available_after = retry_date
        incomplete_item_obj.available = True

        if restore_retry:
          incomplete_item_obj.retries_remaining += 1

        incomplete_item_obj.save()
        self._currently_processing = False
        return incomplete_item_obj.retries_remaining > 0
      except QueueItem.DoesNotExist:
        return False

  def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION,
                        updated_data=None):
    with self._transaction_factory(db):
      try:
        queue_item = self._item_by_id_for_update(item.id)
        new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
        has_change = False

        # Only actually write the new expiration to the db if it moves the expiration some minimum
        if new_expiration - queue_item.processing_expires > minimum_extension:
          queue_item.processing_expires = new_expiration
          has_change = True

        if updated_data is not None:
          queue_item.body = updated_data
          has_change = True

        if has_change:
          queue_item.save()
      except QueueItem.DoesNotExist:
        return
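
  # Illustrative lease-extension sketch: while a worker still holds an item it can periodically
  # call
  #
  #   queue.extend_processing(item, seconds_from_now=120)
  #
  # to push processing_expires further out before the lease lapses; extensions smaller than
  # MINIMUM_EXTENSION (20 seconds) are skipped above to avoid needless database writes.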


def delete_expired(expiration_threshold, deletion_threshold, batch_size):
  """
  Deletes all queue items that are older than the provided expiration threshold in batches of the
  provided size. If there are fewer items than the deletion threshold, this method does nothing.

  Returns the number of items deleted.
  """
  to_delete = list(QueueItem
                   .select()
                   .where(QueueItem.processing_expires <= expiration_threshold)
                   .limit(batch_size))

  if len(to_delete) < deletion_threshold:
    return 0

  QueueItem.delete().where(QueueItem.id << to_delete).execute()
  return len(to_delete)
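
# Illustrative cleanup sketch (threshold values are assumptions): a periodic janitor could run
#
#   threshold = datetime.utcnow() - timedelta(days=7)
#   deleted = delete_expired(threshold, deletion_threshold=50, batch_size=500)
#
# repeatedly until it returns 0, trimming dead items in bounded batches rather than with one
# large DELETE.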