Change id column in LogEntry to a BigInt and migrate back to using that table

This commit is contained in:
Joseph Schorr 2018-07-27 17:46:46 -04:00
parent 5e4d52f1fd
commit 7325b22c90
8 changed files with 56 additions and 22 deletions

View file

@ -987,6 +987,7 @@ class LogEntryKind(BaseModel):
class LogEntry(BaseModel): class LogEntry(BaseModel):
id = BigAutoField()
kind = ForeignKeyField(LogEntryKind) kind = ForeignKeyField(LogEntryKind)
account = IntegerField(index=True, column_name='account_id') account = IntegerField(index=True, column_name='account_id')
performer = IntegerField(index=True, null=True, column_name='performer_id') performer = IntegerField(index=True, null=True, column_name='performer_id')

View file

@ -0,0 +1,33 @@
"""Change LogEntry to use a BigInteger as its primary key
Revision ID: 6c21e2cfb8b6
Revises: d17c695859ea
Create Date: 2018-07-27 16:30:02.877346
"""
# revision identifiers, used by Alembic.
revision = '6c21e2cfb8b6'
down_revision = 'd17c695859ea'
from alembic import op
import sqlalchemy as sa
def upgrade(tables, tester):
    """Widen logentry.id from a 32-bit Integer to a 64-bit BigInteger.

    The column keeps its NOT NULL and auto-increment properties; only the
    underlying integer width changes.

    Args:
        tables: reflected table metadata supplied by the migration runner
            (unused here, part of the project's migration signature).
        tester: migration test harness supplied by the runner (unused).
    """
    op.alter_column(
        table_name='logentry',
        column_name='id',
        nullable=False,
        autoincrement=True,
        type_=sa.BigInteger(),
        # On MySQL, ALTER ... MODIFY COLUMN rewrites the full column
        # definition, so Alembic must know the current type to re-state
        # nullable/autoincrement correctly alongside the type change.
        existing_type=sa.Integer(),
    )
def downgrade(tables, tester):
    """Revert logentry.id back to a 32-bit Integer.

    NOTE(review): this will fail (or truncate) if any id value already
    exceeds the 32-bit range — acceptable for a downgrade, but worth
    knowing before running it.

    Args:
        tables: reflected table metadata supplied by the migration runner
            (unused here, part of the project's migration signature).
        tester: migration test harness supplied by the runner (unused).
    """
    op.alter_column(
        table_name='logentry',
        column_name='id',
        nullable=False,
        autoincrement=True,
        type_=sa.Integer(),
        # Mirror of upgrade(): give Alembic the current (BigInteger) type
        # so MySQL's MODIFY COLUMN is emitted with the full definition.
        existing_type=sa.BigInteger(),
    )

View file

@ -7,7 +7,7 @@ from datetime import datetime, timedelta
from cachetools import lru_cache from cachetools import lru_cache
import data import data
from data.database import LogEntry, LogEntry2, LogEntryKind, User, RepositoryActionCount, db from data.database import LogEntry, LogEntryKind, User, RepositoryActionCount, db
from data.model import config, user, DataModelException from data.model import config, user, DataModelException
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -101,7 +101,7 @@ def _json_serialize(obj):
def log_action(kind_name, user_or_organization_name, performer=None, repository=None, ip=None, def log_action(kind_name, user_or_organization_name, performer=None, repository=None, ip=None,
metadata={}, timestamp=None): metadata={}, timestamp=None):
""" Logs an entry in the LogEntry2 table. """ """ Logs an entry in the LogEntry table. """
if not timestamp: if not timestamp:
timestamp = datetime.today() timestamp = datetime.today()
@ -132,7 +132,7 @@ def log_action(kind_name, user_or_organization_name, performer=None, repository=
} }
try: try:
LogEntry2.create(**log_data) LogEntry.create(**log_data)
except PeeweeException as ex: except PeeweeException as ex:
strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING') strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
if strict_logging_disabled and kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING: if strict_logging_disabled and kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING:

View file

@ -1,6 +1,6 @@
import pytest import pytest
from data.database import LogEntry, LogEntry2, User from data.database import LogEntry, User
from data.model import config as _config from data.model import config as _config
from data.model.log import log_action from data.model.log import log_action
@ -21,8 +21,8 @@ def logentry_kind():
@pytest.fixture() @pytest.fixture()
def logentry(logentry_kind): def logentry(logentry_kind):
with patch('data.database.LogEntry2.create', spec=True): with patch('data.database.LogEntry.create', spec=True):
yield LogEntry2 yield LogEntry
@pytest.fixture() @pytest.fixture()
def user(): def user():

View file

@ -60,19 +60,19 @@ class PreOCIModel(LogEntryDataInterface):
limit=20) limit=20)
return LogEntryPage([_create_log(log) for log in logs], next_page_token) return LogEntryPage([_create_log(log) for log in logs], next_page_token)
# First check the LogEntry2 table for the most recent logs, unless we've been expressly told # First check the LogEntry table for the most recent logs, unless we've been expressly told
# to look inside the first table. # to look inside the "second" table.
TOKEN_TABLE_KEY = 'ttk' TOKEN_TABLE_KEY2 = 'ttk2'
is_old_table = page_token is not None and page_token.get(TOKEN_TABLE_KEY) == 1 is_temp_table = page_token is not None and page_token.get(TOKEN_TABLE_KEY2) == 1
if is_old_table: if is_temp_table:
page_result = get_logs(database.LogEntry)
else:
page_result = get_logs(database.LogEntry2) page_result = get_logs(database.LogEntry2)
else:
page_result = get_logs(database.LogEntry)
if page_result.next_page_token is None and not is_old_table: if page_result.next_page_token is None and not is_temp_table:
page_result = page_result._replace(next_page_token={TOKEN_TABLE_KEY: 1}) page_result = page_result._replace(next_page_token={TOKEN_TABLE_KEY2: 1})
elif is_old_table and page_result.next_page_token is not None: elif is_temp_table and page_result.next_page_token is not None:
page_result.next_page_token[TOKEN_TABLE_KEY] = 1 page_result.next_page_token[TOKEN_TABLE_KEY2] = 1
return page_result return page_result

View file

@ -902,7 +902,7 @@ def populate_database(minimal=False, with_storage=False):
model.repositoryactioncount.update_repository_score(to_count) model.repositoryactioncount.update_repository_score(to_count)
WHITELISTED_EMPTY_MODELS = ['DeletedNamespace', 'LogEntry'] WHITELISTED_EMPTY_MODELS = ['DeletedNamespace', 'LogEntry2']
def find_models_missing_data(): def find_models_missing_data():
# As a sanity check we are going to make sure that all db tables have some data, unless explicitly # As a sanity check we are going to make sure that all db tables have some data, unless explicitly

View file

@ -2,14 +2,14 @@ set -e
up_mysql() { up_mysql() {
# Run a SQL database on port 3306 inside of Docker. # Run a SQL database on port 3306 inside of Docker.
docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql:5.7
# Sleep for 10s to let MySQL get started. # Sleep for 10s to let MySQL get started.
echo 'Sleeping for 10...' echo 'Sleeping for 10...'
sleep 10 sleep 10
# Add the database to mysql. # Add the database to mysql.
docker run --rm --link mysql:mysql mysql sh -c 'echo "create database genschema;" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword' docker run --rm --link mysql:mysql mysql:5.7 sh -c 'echo "create database genschema;" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
} }
down_mysql() { down_mysql() {

View file

@ -3,7 +3,7 @@ import time
import socket import socket
from contextlib import contextmanager from contextlib import contextmanager
from data.database import LogEntryKind, LogEntry2 from data.database import LogEntryKind, LogEntry
class assert_action_logged(object): class assert_action_logged(object):
""" Specialized assertion for ensuring that a log entry of a particular kind was added under the """ Specialized assertion for ensuring that a log entry of a particular kind was added under the
@ -14,7 +14,7 @@ class assert_action_logged(object):
self.existing_count = 0 self.existing_count = 0
def _get_log_count(self): def _get_log_count(self):
return LogEntry2.select().where(LogEntry2.kind == LogEntryKind.get(name=self.log_kind)).count() return LogEntry.select().where(LogEntry.kind == LogEntryKind.get(name=self.log_kind)).count()
def __enter__(self): def __enter__(self):
self.existing_count = self._get_log_count() self.existing_count = self._get_log_count()