Merge pull request #2049 from jzelinskie/stagger

stagger worker startup
This commit is contained in:
Jimmy Zelinskie 2016-10-31 13:53:58 -04:00 committed by GitHub
commit b6ec09a65b
5 changed files with 34 additions and 19 deletions

View file

@@ -382,3 +382,6 @@ class DefaultConfig(object):
# Overridable list of reverse DNS prefixes that are reserved for internal use on labels.
LABEL_KEY_RESERVED_PREFIXES = []
# Delays workers from starting until a random point in time between 0 and their regular interval.
STAGGER_WORKERS = True

View file

@@ -1,8 +1,9 @@
import logging
from app import app
from data.database import Repository, LogEntry, RepositoryActionCount, db_random_func
from datetime import date, timedelta
from app import app # This is required to initialize the database.
from data.database import Repository, LogEntry, RepositoryActionCount, db_random_func
from workers.worker import Worker
POLL_PERIOD_SECONDS = 10
@@ -18,20 +19,23 @@ def count_repository_actions():
# Get a random repository to count.
today = date.today()
yesterday = today - timedelta(days=1)
has_yesterday_actions = (RepositoryActionCount.select(RepositoryActionCount.repository)
.where(RepositoryActionCount.date == yesterday))
has_yesterday_actions = (RepositoryActionCount
.select(RepositoryActionCount.repository)
.where(RepositoryActionCount.date == yesterday))
to_count = (Repository.select()
.where(~(Repository.id << (has_yesterday_actions)))
.order_by(db_random_func()).get())
to_count = (Repository
.select()
.where(~(Repository.id << (has_yesterday_actions)))
.order_by(db_random_func()).get())
logger.debug('Counting: %s', to_count.id)
actions = (LogEntry.select()
.where(LogEntry.repository == to_count,
LogEntry.datetime >= yesterday,
LogEntry.datetime < today)
.count())
actions = (LogEntry
.select()
.where(LogEntry.repository == to_count,
LogEntry.datetime >= yesterday,
LogEntry.datetime < today)
.count())
# Create the row.
try:

View file

@@ -8,12 +8,15 @@ from app import secscan_notification_queue, secscan_api
from workers.queueworker import QueueWorker, JobException
from util.secscan.notifier import process_notification_data
logger = logging.getLogger(__name__)
_READING_SECONDS = 120 # 2 minutes
_PROCESSING_SECONDS = 60 * 60 # 1 hour
_LAYER_LIMIT = 100 # The number of layers to request on each page.
class SecurityNotificationWorker(QueueWorker):
    """Queue worker that consumes security-scanner notification jobs."""

    def process_queue_item(self, data):
        """Process one queued notification payload.

        Delegates the actual work to ``perform_notification_work``, which
        is defined further down on this class (not visible in this fragment).
        """
        handle_notification = self.perform_notification_work
        handle_notification(data)

View file

@@ -1,8 +1,8 @@
import logging
import logging.config
import time
import features
import time
from peewee import fn

View file

@@ -3,14 +3,17 @@ import signal
import sys
import socket
from threading import Event
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
from functools import wraps
from random import randint
from threading import Event
from apscheduler.schedulers.background import BackgroundScheduler
from raven import Client
from app import app
from data.database import UseThenDisconnect
from functools import wraps
logger = logging.getLogger(__name__)
@@ -69,12 +72,14 @@ class Worker(object):
logger.debug('Scheduling worker.')
soon = datetime.now() + timedelta(seconds=.001)
self._sched.start()
for operation_func, operation_sec in self._operations:
start_date = datetime.now() + timedelta(seconds=0.001)
if app.config.get('STAGGER_WORKERS'):
start_date += timedelta(seconds=randint(1, operation_sec))
logger.debug('First run scheduled for %s', start_date)
self._sched.add_job(operation_func, 'interval', seconds=operation_sec,
start_date=soon, max_instances=1)
start_date=start_date, max_instances=1)
self._setup_and_wait_for_shutdown()