Temporarily change to storing logs in a new LogEntry2 table

This will prevent us from running out of auto-incrementing ID values until we can upgrade to peewee 3 and change the field type to a BigInt.

Fixes https://jira.coreos.com/browse/QUAY-943
Joseph Schorr 2018-05-18 12:54:38 -04:00
parent 66b4e45929
commit a007332d4c
13 changed files with 201 additions and 113 deletions
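
The LogEntry2 model itself is defined in data/database.py, one of the 13 changed files, and does not appear in the worker diff below. As a rough sketch only, assuming peewee 2 syntax and illustrative column names rather than the actual Quay schema, the idea is a second table with the same shape as LogEntry so that writes continue against a fresh auto-increment sequence:

    from datetime import datetime
    from peewee import CharField, DateTimeField, IntegerField, Model, TextField

    class LogEntry2(Model):  # in the real code this would extend Quay's shared BaseModel
      # Same columns as LogEntry; only the table (and therefore the
      # auto-increment ID sequence) differs. Column names are illustrative.
      kind_id = IntegerField(index=True)
      account_id = IntegerField(index=True)
      performer_id = IntegerField(index=True, null=True)
      repository_id = IntegerField(index=True, null=True)
      datetime = DateTimeField(default=datetime.now, index=True)
      ip = CharField(null=True)
      metadata_json = TextField(default='{}')

    # The longer-term fix named in the commit message -- once on peewee 3 -- would
    # instead be a 64-bit primary key on a single table, e.g. id = BigAutoField().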

workers/logrotateworker.py

@@ -8,7 +8,7 @@ from tempfile import SpooledTemporaryFile
 
 import features
 from app import app, storage
-from data.database import UseThenDisconnect
+from data.database import UseThenDisconnect, LogEntry, LogEntry2
 from data.model.log import (get_stale_logs, get_stale_logs_start_id,
                             get_stale_logs_cutoff_id, delete_stale_logs)
 from data.userfiles import DelegateUserfiles
@@ -35,11 +35,17 @@ class LogRotateWorker(Worker):
     self.add_operation(self._archive_logs, WORKER_FREQUENCY)
 
   def _archive_logs(self):
+    # TODO(LogMigrate): Remove the branch once we're back on LogEntry only.
+    models = [LogEntry, LogEntry2]
+    for model in models:
+      self._archive_logs_for_model(model)
+
+  def _archive_logs_for_model(self, model):
     logger.debug('Attempting to rotate log entries')
 
     with UseThenDisconnect(app.config):
       cutoff_date = datetime.now() - STALE_AFTER
-      cutoff_id = get_stale_logs_cutoff_id(cutoff_date)
+      cutoff_id = get_stale_logs_cutoff_id(cutoff_date, model)
       if cutoff_id is None:
         logger.warning('Failed to find cutoff id')
         return
@@ -48,11 +54,11 @@ class LogRotateWorker(Worker):
     while logs_archived:
       try:
         with GlobalLock('ACTION_LOG_ROTATION'):
-          logs_archived = self._perform_archiving(cutoff_id)
+          logs_archived = self._perform_archiving(cutoff_id, model)
       except LockNotAcquiredException:
         return
 
-  def _perform_archiving(self, cutoff_id):
+  def _perform_archiving(self, cutoff_id, model):
     save_location = SAVE_LOCATION
     if not save_location:
       # Pick the *same* save location for all instances. This is a fallback if
@@ -62,7 +68,7 @@
     log_archive = DelegateUserfiles(app, storage, save_location, SAVE_PATH)
 
     with UseThenDisconnect(app.config):
-      start_id = get_stale_logs_start_id()
+      start_id = get_stale_logs_start_id(model)
 
       if start_id is None:
         logger.warning('Failed to find start id')
@@ -76,7 +82,7 @@
         return False
 
       end_id = start_id + MIN_LOGS_PER_ROTATION
-      logs = [log_dict(log) for log in get_stale_logs(start_id, end_id)]
+      logs = [log_dict(log) for log in get_stale_logs(start_id, end_id, model)]
 
     logger.debug('Archiving logs from IDs %s to %s', start_id, end_id)
     with SpooledTemporaryFile(MEMORY_TEMPFILE_SIZE) as tempfile:
@ -85,14 +91,14 @@ class LogRotateWorker(Worker):
zipstream.write(chunk)
tempfile.seek(0)
filename = '%d-%d.txt.gz' % (start_id, end_id)
filename = '%d-%d-%s.txt.gz' % (start_id, end_id, model.__name__.lower())
log_archive.store_file(tempfile, JSON_MIMETYPE, content_encoding='gzip',
file_id=filename)
logger.debug('Finished archiving logs from IDs %s to %s', start_id, end_id)
with UseThenDisconnect(app.config):
logger.debug('Deleting logs from IDs %s to %s', start_id, end_id)
delete_stale_logs(start_id, end_id)
delete_stale_logs(start_id, end_id, model)
return True
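
The data.model.log helpers that the worker now calls with a model argument are updated in data/model/log.py, which is not shown above. A minimal sketch of what that parameterization might look like, assuming peewee query syntax (function bodies are illustrative, not copied from the repository):

    from peewee import fn

    def get_stale_logs_start_id(model):
      """ Returns the lowest ID in the given log table, or None if it is empty. """
      return model.select(fn.Min(model.id)).scalar()

    def get_stale_logs_cutoff_id(cutoff_date, model):
      """ Returns the highest ID among entries older than the cutoff date, or None. """
      return (model
              .select(fn.Max(model.id))
              .where(model.datetime <= cutoff_date)
              .scalar())

    def get_stale_logs(start_id, end_id, model):
      """ Returns the log rows whose IDs fall between start_id and end_id. """
      return model.select().where(model.id >= start_id, model.id <= end_id)

    def delete_stale_logs(start_id, end_id, model):
      """ Deletes the log rows whose IDs fall between start_id and end_id. """
      model.delete().where(model.id >= start_id, model.id <= end_id).execute()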