Fix logs view and API

- We needed to use an engine-agnostic way to extract the days
- Joining with the LogEntryKind table has *horrible* performance in MySQL, so do it ourselves
- Limit to 50 logs per page
This commit is contained in:
Joseph Schorr 2015-08-05 17:36:17 -04:00
parent d480a204f5
commit d34afde954
3 changed files with 39 additions and 14 deletions

View file

@ -2,15 +2,14 @@ import json
from peewee import JOIN_LEFT_OUTER, SQL, fn
from datetime import datetime, timedelta, date
from cachetools import lru_cache
from data.database import LogEntry, LogEntryKind, User
from data.database import LogEntry, LogEntryKind, User, db
def _logs_query(selections, start_time, end_time, performer=None, repository=None, namespace=None):
joined = (LogEntry
.select(*selections)
.switch(LogEntry)
.join(LogEntryKind)
.switch(LogEntry)
.where(LogEntry.datetime >= start_time, LogEntry.datetime < end_time))
if repository:
@ -25,17 +24,27 @@ def _logs_query(selections, start_time, end_time, performer=None, repository=Non
return joined
@lru_cache(maxsize=1)
def get_log_entry_kinds():
  """ Return a mapping of LogEntryKind id -> kind name.

      Loaded once from the database and memoized (maxsize=1, no arguments),
      so callers can translate kind ids without joining against the
      LogEntryKind table on every query.
  """
  return {entry_kind.id: entry_kind.name for entry_kind in LogEntryKind.select()}
def get_aggregated_logs(start_time, end_time, performer=None, repository=None, namespace=None):
  """ Return log entry counts aggregated per day and per kind id.

      The stripped diff left both the old and new versions of this body in
      place (two `selections` assignments and an unreachable second return);
      this keeps only the post-commit version. Per the commit intent:
      `db.extract_date` is used instead of MySQL-specific
      `fn.date(..., '%d')` so day extraction is engine-agnostic, and
      `LogEntry.kind` (the raw kind id) is selected instead of joining the
      LogEntryKind table, whose join performance in MySQL is poor; callers
      resolve kind names via get_log_entry_kinds().

      Returns an unevaluated peewee query grouped by (day, kind id).
  """
  day = db.extract_date('day', LogEntry.datetime)
  selections = [LogEntry.kind, LogEntry.datetime, fn.Count(LogEntry.id).alias('count')]
  query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
  return query.group_by(day, LogEntry.kind)
def list_logs(start_time, end_time, performer=None, repository=None, namespace=None, page=None,
count=None):
Performer = User.alias()
selections = [LogEntry, LogEntryKind, Performer]
selections = [LogEntry, Performer]
query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
query = (query.switch(LogEntry)