import logging
import logging.config
import json
import time

from datetime import timedelta, datetime
from gzip import GzipFile
from tempfile import SpooledTemporaryFile

import features
from app import app, storage
from data.database import UseThenDisconnect
from data.model.log import (get_stale_logs, get_stale_logs_start_id,
                            get_stale_logs_cutoff_id, delete_stale_logs)
from data.userfiles import DelegateUserfiles
from util.locking import GlobalLock
from util.streamingjsonencoder import StreamingJSONEncoder
from workers.worker import Worker

logger = logging.getLogger(__name__)

JSON_MIMETYPE = 'application/json'
STALE_AFTER = timedelta(days=30)
MIN_LOGS_PER_ROTATION = 10000
MEMORY_TEMPFILE_SIZE = 12 * 1024 * 1024

WORKER_FREQUENCY = app.config.get('ACTION_LOG_ROTATION_FREQUENCY', 60 * 60 * 24)
SAVE_PATH = app.config.get('ACTION_LOG_ARCHIVE_PATH')
SAVE_LOCATION = app.config.get('ACTION_LOG_ARCHIVE_LOCATION')


class LogRotateWorker(Worker):
  """ Worker used to rotate old logs out of the database and into storage. """
  def __init__(self):
    super(LogRotateWorker, self).__init__()
    self.add_operation(self._archive_logs, WORKER_FREQUENCY)

  def _archive_logs(self):
    logger.debug('Attempting to rotate log entries')

    log_archive = DelegateUserfiles(app, storage, SAVE_LOCATION, SAVE_PATH)

    # Find the ID of the newest log entry that is old enough to be rotated.
    with UseThenDisconnect(app.config):
      cutoff_date = datetime.now() - STALE_AFTER
      cutoff_id = get_stale_logs_cutoff_id(cutoff_date)
      if cutoff_id is None:
        logger.warning('Failed to find cutoff id')
        return
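
    # Archive stale logs in batches of MIN_LOGS_PER_ROTATION while holding the
    # global lock, so that only one worker instance rotates logs at a time.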
    while True:
      with GlobalLock('ACTION_LOG_ROTATION') as gl:
        if not gl:
          logger.debug('Could not acquire global lock; sleeping')
          return
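
        # Load one batch of stale log rows inside a short-lived database
        # connection, which is released before the slower serialization and
        # upload steps below.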
        with UseThenDisconnect(app.config):
          start_id = get_stale_logs_start_id()

          if start_id is None:
            logger.warning('Failed to find start id')
            return

          logger.debug('Found starting ID %s and cutoff ID %s', start_id, cutoff_id)

          approx_count = cutoff_id - start_id
          if approx_count < MIN_LOGS_PER_ROTATION:
            logger.debug('Not enough stale logs to warrant rotation (approx %d)', approx_count)
            return

          end_id = start_id + MIN_LOGS_PER_ROTATION
          logs = [log_dict(log) for log in get_stale_logs(start_id, end_id)]
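
        # Stream the batch to storage as gzip-compressed JSON. The spooled
        # temporary file is kept in memory up to MEMORY_TEMPFILE_SIZE bytes and
        # rolls over to disk beyond that.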
        logger.debug('Archiving logs from IDs %s to %s', start_id, end_id)
        with SpooledTemporaryFile(MEMORY_TEMPFILE_SIZE) as tempfile:
          with GzipFile('temp_action_log_rotate', fileobj=tempfile, compresslevel=1) as zipstream:
            for chunk in StreamingJSONEncoder().iterencode(logs):
              zipstream.write(chunk)

          tempfile.seek(0)
          filename = '%d-%d.txt.gz' % (start_id, end_id)
          log_archive.store_file(tempfile, JSON_MIMETYPE, content_encoding='gzip',
                                 file_id=filename)
        logger.debug('Finished archiving logs from IDs %s to %s', start_id, end_id)
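
        # Only delete the batch from the database once the archive has been
        # stored successfully.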
        with UseThenDisconnect(app.config):
          logger.debug('Deleting logs from IDs %s to %s', start_id, end_id)
          delete_stale_logs(start_id, end_id)


def log_dict(log):
  """ Returns a dict representation of a LogEntry, suitable for JSON serialization. """
  return {'kind_id': log.kind_id,
          'account_id': log.account_id,
          'performer_id': log.performer_id,
          'repository_id': log.repository_id,
          'datetime': str(log.datetime),
          'ip': str(log.ip),
          'metadata_json': json.loads(str(log.metadata_json))}


def main():
  logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False)

  if not features.ACTION_LOG_ROTATION or None in [SAVE_PATH, SAVE_LOCATION]:
    logger.debug('Action log rotation worker not enabled; skipping')
    # Sleep forever so the process stays alive even when rotation is disabled.
    while True:
      time.sleep(100000)

  worker = LogRotateWorker()
  worker.start()


if __name__ == "__main__":
  main()