diff --git a/data/model/log.py b/data/model/log.py
index 519647ddb..ad5713d6d 100644
--- a/data/model/log.py
+++ b/data/model/log.py
@@ -35,7 +35,7 @@ def get_log_entry_kinds():
 
 
 def get_aggregated_logs(start_time, end_time, performer=None, repository=None, namespace=None):
   date = db.extract_date('day', LogEntry.datetime)
-  selections = [LogEntry.kind, LogEntry.datetime, fn.Count(LogEntry.id).alias('count')]
+  selections = [LogEntry.kind, date.alias('day'), fn.Count(LogEntry.id).alias('count')]
   query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
   return query.group_by(date, LogEntry.kind)
diff --git a/endpoints/api/logs.py b/endpoints/api/logs.py
index 10ddc8d97..d3ad8f712 100644
--- a/endpoints/api/logs.py
+++ b/endpoints/api/logs.py
@@ -34,11 +34,23 @@ def log_view(log, kinds):
   return view
 
 
-def aggregated_log_view(log, kinds):
+def aggregated_log_view(log, kinds, start_time):
+  # Because we aggregate based on the day of the month in SQL, that is the only date
+  # information available, so reconstruct a synthetic date from the day and the start time.
+  # Logs are allowed for a maximum period of one week, so the day can roll over into
+  # (at most) the next month, which may itself roll over into the next year.
+  day = int(log.day)
+  month = start_time.month
+  year = start_time.year
+  if day < start_time.day:
+    month = month + 1
+    if month > 12:
+      month = 1
+      year = year + 1
+
   view = {
     'kind': kinds[log.kind_id],
     'count': log.count,
-    'datetime': format_date(log.datetime)
+    'date': format_date(datetime(year, month, day))
   }
 
   return view
@@ -93,7 +93,7 @@ def get_aggregate_logs(start_time, end_time, performer_name=None, repository=Non
                                          repository=repository, namespace=namespace)
 
   return {
-    'aggregated': [aggregated_log_view(log, kinds) for log in aggregated_logs]
+    'aggregated': [aggregated_log_view(log, kinds, start_time) for log in aggregated_logs]
   }
 
 