Unify the database connection lifecycle across all workers

This commit is contained in:
Jake Moshenko 2015-12-04 15:51:53 -05:00
parent 38cb63d195
commit 2f626f2691
7 changed files with 111 additions and 130 deletions

View file

@@ -1,13 +1,12 @@
import logging
from peewee import fn
from tempfile import SpooledTemporaryFile
from gzip import GzipFile
from data import model
from data.archivedlogs import JSON_MIMETYPE
from data.database import RepositoryBuild, db_random_func
from app import build_logs, log_archive
from data.database import CloseForLongOperation
from app import build_logs, log_archive, app
from util.streamingjsonencoder import StreamingJSONEncoder
from workers.worker import Worker
@@ -39,6 +38,7 @@ class ArchiveBuildLogsWorker(Worker):
'logs': entries,
}
with CloseForLongOperation(app.config):
with SpooledTemporaryFile(MEMORY_TEMPFILE_SIZE) as tempfile:
with GzipFile('testarchive', fileobj=tempfile) as zipstream:
for chunk in StreamingJSONEncoder().iterencode(to_encode):
@@ -48,10 +48,11 @@ class ArchiveBuildLogsWorker(Worker):
log_archive.store_file(tempfile, JSON_MIMETYPE, content_encoding='gzip',
file_id=to_archive.uuid)
to_archive.logs_archived = True
to_archive.save()
to_update = model.build.get_repository_build(to_archive.uuid)
to_update.logs_archived = True
to_update.save()
build_logs.expire_log_entries(to_archive.uuid)
build_logs.expire_log_entries(to_update.uuid)
if __name__ == "__main__":

View file

@@ -1,7 +1,6 @@
import logging
from app import app
from data.database import UseThenDisconnect
from data.model.repository import find_repository_with_garbage, garbage_collect_repo
from workers.worker import Worker
@@ -14,7 +13,6 @@ class GarbageCollectionWorker(Worker):
def _garbage_collection_repos(self):
""" Performs garbage collection on repositories. """
with UseThenDisconnect(app.config):
repository = find_repository_with_garbage()
if repository is None:
logger.debug('No repository with garbage found')
@@ -23,7 +21,6 @@ class GarbageCollectionWorker(Worker):
logger.debug('Starting GC of repository #%s (%s)', repository.id, repository.name)
garbage_collect_repo(repository)
logger.debug('Finished GC of repository #%s (%s)', repository.id, repository.name)
return
if __name__ == "__main__":
worker = GarbageCollectionWorker()

View file

@@ -1,19 +1,11 @@
import logging
import json
import signal
import sys
from threading import Event, Lock
from datetime import datetime, timedelta
from threading import Thread
from time import sleep
from app import app
from data.model import db
from data.queue import WorkQueue
from data.database import UseThenDisconnect
from data.database import CloseForLongOperation
from workers.worker import Worker
logger = logging.getLogger(__name__)
@@ -92,20 +84,20 @@ class QueueWorker(Worker):
with self._current_item_lock:
current_queue_item = self.current_queue_item
if current_queue_item is None:
# Close the db handle.
self._close_db_handle()
break
logger.debug('Queue gave us some work: %s', current_queue_item.body)
job_details = json.loads(current_queue_item.body)
try:
with CloseForLongOperation(app.config):
self.process_queue_item(job_details)
self.mark_current_complete()
except JobException as jex:
logger.warning('An error occurred processing request: %s', current_queue_item.body)
logger.warning('Job exception: %s' % jex)
logger.warning('Job exception: %s', jex)
self.mark_current_incomplete(restore_retry=False)
except WorkerUnhealthyException as exc:
@@ -114,10 +106,6 @@ class QueueWorker(Worker):
self.mark_current_incomplete(restore_retry=True)
self._stop.set()
finally:
# Close the db handle.
self._close_db_handle()
if not self._stop.is_set():
with self._current_item_lock:
self.current_queue_item = self._queue.get(processing_time=self._reservation_seconds)
@@ -126,7 +114,6 @@ class QueueWorker(Worker):
logger.debug('No more work.')
def update_queue_metrics(self):
with UseThenDisconnect(app.config):
self._queue.update_metrics()
def mark_current_incomplete(self, restore_retry=False):

View file

@@ -1,9 +1,8 @@
import logging
from app import app
from data.database import (Repository, LogEntry, RepositoryActionCount, db_random_func, fn,
UseThenDisconnect)
from datetime import date, datetime, timedelta
from data.database import Repository, LogEntry, RepositoryActionCount, db_random_func
from datetime import date, timedelta
from workers.worker import Worker
POLL_PERIOD_SECONDS = 10
@@ -11,7 +10,6 @@ POLL_PERIOD_SECONDS = 10
logger = logging.getLogger(__name__)
def count_repository_actions():
with UseThenDisconnect(app.config):
try:
# Get a random repository to count.
today = date.today()

View file

@@ -13,7 +13,7 @@ from data import model
from data.model.tag import filter_tags_have_repository_event, get_tags_for_image
from data.model.image import get_secscan_candidates, set_secscan_status
from data.model.storage import get_storage_locations
from data.database import (UseThenDisconnect, ExternalNotificationEvent)
from data.database import ExternalNotificationEvent
from util.secscan.api import SecurityConfigValidator
logger = logging.getLogger(__name__)
@@ -150,7 +150,6 @@ class SecurityWorker(Worker):
logger.debug('Started indexing')
event = ExternalNotificationEvent.get(name='vulnerability_found')
with UseThenDisconnect(app.config):
while True:
# Lookup the images to index.
images = []

View file

@@ -3,7 +3,7 @@ import features
import time
from app import app, storage, image_replication_queue
from data.database import UseThenDisconnect, CloseForLongOperation
from data.database import CloseForLongOperation
from data import model
from storage.basestorage import StoragePaths
from workers.queueworker import QueueWorker

View file

@@ -6,12 +6,10 @@ import socket
from threading import Event
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
from threading import Thread
from time import sleep
from raven import Client
from app import app
from data.model import db
from data.database import UseThenDisconnect
from functools import wraps
logger = logging.getLogger(__name__)
@@ -44,6 +42,7 @@ class Worker(object):
@wraps(operation_func)
def _operation_func():
try:
with UseThenDisconnect(app.config):
return operation_func()
except Exception:
logger.exception('Operation raised exception')