Merge pull request #294 from coreos-inc/logsload

Switch to using an aggregated logs query and infinite scrolling
Jimmy Zelinskie 2015-08-03 14:52:04 -04:00
commit 8e6a0fbbee
15 changed files with 270 additions and 99 deletions
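The key server-side change is the new page/count arguments on list_logs, which is what the UI's infinite scrolling paginates against. A rough sketch of the consuming loop follows; the page size, the 7-day window, and the fetch_older/scroll_all helpers are illustrative assumptions, not code from this PR:

# Sketch of a consumer driving the paginated list_logs() for infinite
# scrolling. PAGE_SIZE, the 7-day window, and these helper names are
# illustrative assumptions, not part of this PR.
from datetime import datetime, timedelta

PAGE_SIZE = 50

def fetch_older(repository, page):
  end_time = datetime.utcnow()
  start_time = end_time - timedelta(days=7)
  return list_logs(start_time, end_time, repository=repository,
                   page=page, count=PAGE_SIZE)

def scroll_all(repository):
  # Keep requesting pages until one comes back short, which signals
  # that the time window has been exhausted.
  page = 1
  while True:
    entries = fetch_older(repository, page)
    for entry in entries:
      yield entry
    if len(entries) < PAGE_SIZE:
      break
    page += 1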


@@ -645,6 +645,9 @@ class LogEntry(BaseModel):
     indexes = (
       # create an index on repository and date
       (('repository', 'datetime'), False),
+
+      # create an index on repository, date and kind
+      (('repository', 'datetime', 'kind'), False),
     )
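The new composite index matches the shape of the aggregated query added to the logs model later in this diff: filter on repository plus a datetime range, then group by kind. A sketch of that query shape in peewee, where repo, start, and end are placeholders:

# The query shape this index serves: filter by repository and datetime
# range, then bucket by day and kind. Mirrors get_aggregated_logs()
# below; repo, start, and end are placeholders.
from peewee import fn

aggregated = (LogEntry
              .select(LogEntryKind, fn.date(LogEntry.datetime, '%d'),
                      fn.Count(LogEntry.id).alias('count'))
              .join(LogEntryKind)
              .where(LogEntry.repository == repo,
                     LogEntry.datetime >= start,
                     LogEntry.datetime < end)
              .group_by(fn.date(LogEntry.datetime, '%d'), LogEntryKind))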


@@ -10,8 +10,8 @@ up_mysql() {
   docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql

-  # Sleep for 10s to let MySQL get started.
-  echo 'Sleeping for 10...'
-  sleep 10
+  # Sleep for 20s to let MySQL get started.
+  echo 'Sleeping for 20...'
+  sleep 20

   # Add the database to mysql.
   docker run --rm --link mysql:mysql mysql sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
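Bumping the sleep from 10s to 20s papers over slow MySQL startup rather than detecting it. A more robust alternative, sketched here and not part of this PR, is to poll until the server accepts TCP connections, with a hard timeout:

# Poll the MySQL port until it accepts connections instead of guessing
# at startup time with a fixed sleep. A sketch, not part of this PR;
# host, port, and timeout defaults are assumptions.
import socket
import time

def wait_for_mysql(host='127.0.0.1', port=3306, timeout=60):
  deadline = time.time() + timeout
  while time.time() < deadline:
    try:
      socket.create_connection((host, port), timeout=2).close()
      return True
    except socket.error:
      time.sleep(1)
  return False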


@@ -0,0 +1,26 @@
+"""Add LogEntry repo-datetime-kind index
+
+Revision ID: 5232a5610a0a
+Revises: 437ee6269a9d
+Create Date: 2015-07-31 13:25:41.877733
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5232a5610a0a'
+down_revision = '437ee6269a9d'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+  ### commands auto generated by Alembic - please adjust! ###
+  op.create_index('logentry_repository_id_datetime_kind_id', 'logentry', ['repository_id', 'datetime', 'kind_id'], unique=False)
+  ### end Alembic commands ###
+
+
+def downgrade(tables):
+  ### commands auto generated by Alembic - please adjust! ###
+  op.drop_index('logentry_repository_id_datetime_kind_id', table_name='logentry')
+  ### end Alembic commands ###
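The migration amounts to a single CREATE INDEX on (repository_id, datetime, kind_id). One way to sanity-check that it applied, sketched here assuming a SQLAlchemy engine pointed at the same database (the URL is a placeholder):

# List the indexes on logentry and look for the new composite one.
# A verification sketch; the database URL is a placeholder.
import sqlalchemy as sa

engine = sa.create_engine('mysql://root:password@127.0.0.1/genschema')
inspector = sa.inspect(engine)
index_names = [idx['name'] for idx in inspector.get_indexes('logentry')]
assert 'logentry_repository_id_datetime_kind_id' in index_names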


@@ -5,18 +5,13 @@ from datetime import datetime, timedelta, date
 from data.database import LogEntry, LogEntryKind, User


-def list_logs(start_time, end_time, performer=None, repository=None, namespace=None):
-  Performer = User.alias()
-
+def _logs_query(selections, start_time, end_time, performer=None, repository=None, namespace=None):
   joined = (LogEntry
-            .select(LogEntry, LogEntryKind, User, Performer)
-            .join(User)
-            .switch(LogEntry)
-            .join(Performer, JOIN_LEFT_OUTER,
-                  on=(LogEntry.performer == Performer.id).alias('performer'))
+            .select(*selections)
             .switch(LogEntry)
             .join(LogEntryKind)
-            .switch(LogEntry))
+            .switch(LogEntry)
+            .where(LogEntry.datetime >= start_time, LogEntry.datetime < end_time))

   if repository:
     joined = joined.where(LogEntry.repository == repository)
@@ -25,10 +20,32 @@ def list_logs(start_time, end_time, performer=None, repository=None, namespace=None):
     joined = joined.where(LogEntry.performer == performer)

   if namespace:
-    joined = joined.where(User.username == namespace)
+    joined = joined.join(User).where(User.username == namespace)

-  return list(joined.where(LogEntry.datetime >= start_time,
-                           LogEntry.datetime < end_time).order_by(LogEntry.datetime.desc()))
+  return joined
+
+
+def get_aggregated_logs(start_time, end_time, performer=None, repository=None, namespace=None):
+  selections = [LogEntryKind, fn.date(LogEntry.datetime, '%d'), fn.Count(LogEntry.id).alias('count')]
+  query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
+  return query.group_by(fn.date(LogEntry.datetime, '%d'), LogEntryKind)
+
+
+def list_logs(start_time, end_time, performer=None, repository=None, namespace=None, page=None,
+              count=None):
+  Performer = User.alias()
+  selections = [LogEntry, LogEntryKind, Performer]
+  query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
+  query = (query.switch(LogEntry)
+                .join(Performer, JOIN_LEFT_OUTER,
+                      on=(LogEntry.performer == Performer.id).alias('performer')))
+
+  if page and count:
+    query = query.paginate(page, count)
+
+  return list(query.order_by(LogEntry.datetime.desc()))


 def log_action(kind_name, user_or_organization_name, performer=None, repository=None,
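With the split in place, the UI can draw its activity chart from one grouped query while paging the detail list separately. A quick usage sketch, where repo, start, and end are placeholders and the row attribute access follows peewee's usual joined-instance convention:

# One grouped query for per-day, per-kind counts, plus a separate
# paged fetch for detail rows. repo, start, and end are placeholders.
for row in get_aggregated_logs(start, end, repository=repo):
  print(row.kind.name, row.count)

first_page = list_logs(start, end, repository=repo, page=1, count=20)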