Merge pull request #2049 from jzelinskie/stagger

stagger worker startup

Commit b6ec09a65b: 5 changed files with 34 additions and 19 deletions
@@ -382,3 +382,6 @@ class DefaultConfig(object):
 
   # Overridable list of reverse DNS prefixes that are reserved for internal use on labels.
   LABEL_KEY_RESERVED_PREFIXES = []
+
+  # Delays workers from starting until a random point in time between 0 and their regular interval.
+  STAGGER_WORKERS = True
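For reference, a minimal sketch of the behavior this flag enables (the helper function below is illustrative, not part of the commit): with staggering on, a worker's first run lands at a uniformly random point within its own polling interval instead of almost immediately after startup.

from datetime import datetime, timedelta
from random import randint

def first_run_time(interval_sec, stagger=True):
  # Mirrors the scheduling logic added to the Worker class below:
  # start almost immediately, or at a random offset within one interval.
  start = datetime.now() + timedelta(seconds=0.001)
  if stagger:
    start += timedelta(seconds=randint(1, interval_sec))
  return start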
@@ -1,8 +1,9 @@
 import logging
 
-from app import app
-from data.database import Repository, LogEntry, RepositoryActionCount, db_random_func
 from datetime import date, timedelta
+
+from app import app  # This is required to initialize the database.
+from data.database import Repository, LogEntry, RepositoryActionCount, db_random_func
 from workers.worker import Worker
 
 POLL_PERIOD_SECONDS = 10
@@ -18,20 +19,23 @@ def count_repository_actions():
   # Get a random repository to count.
   today = date.today()
   yesterday = today - timedelta(days=1)
-  has_yesterday_actions = (RepositoryActionCount.select(RepositoryActionCount.repository)
-                           .where(RepositoryActionCount.date == yesterday))
+  has_yesterday_actions = (RepositoryActionCount
+                           .select(RepositoryActionCount.repository)
+                           .where(RepositoryActionCount.date == yesterday))
 
-  to_count = (Repository.select()
-              .where(~(Repository.id << (has_yesterday_actions)))
-              .order_by(db_random_func()).get())
+  to_count = (Repository
+              .select()
+              .where(~(Repository.id << (has_yesterday_actions)))
+              .order_by(db_random_func()).get())
 
   logger.debug('Counting: %s', to_count.id)
 
-  actions = (LogEntry.select()
-             .where(LogEntry.repository == to_count,
-                    LogEntry.datetime >= yesterday,
-                    LogEntry.datetime < today)
-             .count())
+  actions = (LogEntry
+             .select()
+             .where(LogEntry.repository == to_count,
+                    LogEntry.datetime >= yesterday,
+                    LogEntry.datetime < today)
+             .count())
 
   # Create the row.
   try:
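The query changes in this hunk are purely cosmetic: each .select() moves onto its own line so the peewee chains read one clause per line, and the generated SQL is unchanged. As a reminder of the operators involved, << is peewee's IN and ~ negates an expression, so to_count picks a random repository that has no RepositoryActionCount row for yesterday. A standalone sketch of that NOT IN pattern (the models here are illustrative, not Quay's):

from peewee import SqliteDatabase, Model, CharField, DateField

db = SqliteDatabase(':memory:')

class Repo(Model):
  name = CharField()
  class Meta:
    database = db

class DailyCount(Model):
  repo_name = CharField()
  day = DateField()
  class Meta:
    database = db

# `expr << subquery` compiles to IN; `~` wraps it in NOT.
counted = DailyCount.select(DailyCount.repo_name).where(DailyCount.day == '2016-11-20')
uncounted = Repo.select().where(~(Repo.name << counted))
print(uncounted.sql())  # ... WHERE NOT (name IN (SELECT ...))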
@@ -8,12 +8,15 @@ from app import secscan_notification_queue, secscan_api
 from workers.queueworker import QueueWorker, JobException
 from util.secscan.notifier import process_notification_data
 
+
 logger = logging.getLogger(__name__)
 
+
 _READING_SECONDS = 120  # 2 minutes
 _PROCESSING_SECONDS = 60 * 60  # 1 hour
 _LAYER_LIMIT = 100  # The number of layers to request on each page.
 
+
 class SecurityNotificationWorker(QueueWorker):
   def process_queue_item(self, data):
     self.perform_notification_work(data)
@@ -1,8 +1,8 @@
 import logging
 import logging.config
+import time
 
 import features
-import time
 
 from peewee import fn
 
@@ -3,14 +3,17 @@ import signal
 import sys
 import socket
 
-from threading import Event
-from apscheduler.schedulers.background import BackgroundScheduler
 from datetime import datetime, timedelta
+from functools import wraps
+from random import randint
+from threading import Event
+
+from apscheduler.schedulers.background import BackgroundScheduler
 from raven import Client
 
 from app import app
 from data.database import UseThenDisconnect
-from functools import wraps
 
+
 logger = logging.getLogger(__name__)
 
@@ -69,12 +72,14 @@ class Worker(object):
 
     logger.debug('Scheduling worker.')
 
-    soon = datetime.now() + timedelta(seconds=.001)
-
     self._sched.start()
     for operation_func, operation_sec in self._operations:
+      start_date = datetime.now() + timedelta(seconds=0.001)
+      if app.config.get('STAGGER_WORKERS'):
+        start_date += timedelta(seconds=randint(1, operation_sec))
+      logger.debug('First run scheduled for %s', start_date)
       self._sched.add_job(operation_func, 'interval', seconds=operation_sec,
-                          start_date=soon, max_instances=1)
+                          start_date=start_date, max_instances=1)
 
     self._setup_and_wait_for_shutdown()
 
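Putting the pieces together, here is a self-contained sketch of the new startup behavior (only apscheduler is assumed; the job names and intervals are made up): without the flag, every operation's first run lands about a millisecond after start(), so workers booted together all fire at once; with it, each first run is delayed by a random 1..interval seconds, after which the normal interval trigger takes over.

import time
from datetime import datetime, timedelta
from random import randint

from apscheduler.schedulers.background import BackgroundScheduler

STAGGER_WORKERS = True  # stand-in for app.config.get('STAGGER_WORKERS')

def make_job(name):
  def job():
    print('%s ran at %s' % (name, datetime.now()))
  return job

sched = BackgroundScheduler()
sched.start()
for name, interval_sec in (('gc', 30), ('action_count', 10)):
  start_date = datetime.now() + timedelta(seconds=0.001)
  if STAGGER_WORKERS:
    # First run lands at a random point within one interval; the
    # 'interval' trigger repeats every interval_sec thereafter.
    start_date += timedelta(seconds=randint(1, interval_sec))
  sched.add_job(make_job(name), 'interval', seconds=interval_sec,
                start_date=start_date, max_instances=1)

time.sleep(45)  # let the staggered first runs fire before exiting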