- Fix some other group_by clauses
- Fix garbage_collect for an empty list (fixes a test)
- Add a script which runs the full test suite against MySQL and Postgres (note: the QueueTests are broken for MySQL, but they obviously work in production, so they need to be fixed)
parent a2e2dcb010
commit d73747ce1d
5 changed files with 104 additions and 17 deletions
@@ -1501,11 +1501,14 @@ def garbage_collect_repository(namespace_name, repository_name):


 def garbage_collect_storage(storage_id_whitelist):
+  if len(storage_id_whitelist) == 0:
+    return
+
   def placements_query_to_paths_set(placements_query):
     return {(placement.location.name, config.store.image_path(placement.storage.uuid))
             for placement in placements_query}

-  def orphaned_storage_query(select_base_query, candidates):
+  def orphaned_storage_query(select_base_query, candidates, group_by):
     return (select_base_query
             .switch(ImageStorage)
             .join(Image, JOIN_LEFT_OUTER)
@@ -1513,7 +1516,7 @@ def garbage_collect_storage(storage_id_whitelist):
             .join(DerivedImageStorage, JOIN_LEFT_OUTER,
                   on=(ImageStorage.id == DerivedImageStorage.derivative))
             .where(ImageStorage.id << list(candidates))
-            .group_by(ImageStorage)
+            .group_by(*group_by)
             .having((fn.Count(Image.id) == 0) & (fn.Count(DerivedImageStorage.id) == 0)))

   # Note: We remove the derived image storage in its own transaction as a way to reduce the
@@ -1524,7 +1527,8 @@ def garbage_collect_storage(storage_id_whitelist):
   with config.app_config['DB_TRANSACTION_FACTORY'](db):
     # Find out which derived storages will be removed, and add them to the whitelist
     orphaned_from_candidates = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
-                                                           storage_id_whitelist))
+                                                           storage_id_whitelist,
+                                                           (ImageStorage.id,)))

     if len(orphaned_from_candidates) > 0:
       derived_to_remove = (ImageStorage
@@ -1554,7 +1558,10 @@ def garbage_collect_storage(storage_id_whitelist):
                                                .join(ImageStorageLocation)
                                                .switch(ImageStoragePlacement)
                                                .join(ImageStorage),
-                                               storage_id_whitelist)
+                                               storage_id_whitelist,
+                                               (ImageStorage, ImageStoragePlacement,
+                                                ImageStorageLocation))

     paths_to_remove = placements_query_to_paths_set(placements_to_remove.clone())

     # Remove the placements for orphaned storages
@@ -1567,7 +1574,8 @@ def garbage_collect_storage(storage_id_whitelist):

     # Remove all of the orphaned storages
     orphaned_storages = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
-                                                    storage_id_whitelist))
+                                                    storage_id_whitelist,
+                                                    (ImageStorage.id,)))
     if len(orphaned_storages) > 0:
       (ImageStorage
        .delete()
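The group_by fix matters because SQLite tolerates queries whose SELECT list contains columns that are neither aggregated nor grouped, while Postgres and MySQL in ONLY_FULL_GROUP_BY mode reject them; the helper therefore now receives the exact grouping columns from each call site. A minimal, self-contained sketch of the difference, using hypothetical tables and only the stdlib sqlite3 module (not code from this commit):

import sqlite3

# Hypothetical miniature of the storage/image relationship used above.
conn = sqlite3.connect(':memory:')
conn.executescript("""
  CREATE TABLE storage (id INTEGER PRIMARY KEY, uuid TEXT);
  CREATE TABLE image (id INTEGER PRIMARY KEY, storage_id INTEGER);
  INSERT INTO storage VALUES (1, 'aaa'), (2, 'bbb');
  INSERT INTO image VALUES (10, 1);
""")

# SQLite returns an arbitrary storage.id per group even though storage.id is
# neither aggregated nor listed in GROUP BY; Postgres rejects this statement
# outright, and MySQL does too when ONLY_FULL_GROUP_BY is enabled. Grouping by
# every selected column, as the patched orphaned_storage_query does, keeps the
# query valid on all three databases.
print(conn.execute("""
  SELECT storage.id, COUNT(image.id)
  FROM storage LEFT OUTER JOIN image ON image.storage_id = storage.id
  GROUP BY storage.uuid
  HAVING COUNT(image.id) = 0
""").fetchall())  # -> [(2, 0)]: storage row 2 is orphaned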
initdb.py (16 changes)
@@ -3,11 +3,12 @@ import json
 import hashlib
 import random
 import calendar
+import os

 from datetime import datetime, timedelta
 from email.utils import formatdate
 from peewee import (SqliteDatabase, create_model_tables, drop_model_tables,
-                    savepoint_sqlite)
+                    savepoint_sqlite, savepoint)
 from uuid import UUID

 from data.database import *
@@ -34,6 +35,8 @@ SAMPLE_CMDS = [["/bin/bash"],
 REFERENCE_DATE = datetime(2013, 6, 23)
 TEST_STRIPE_ID = 'cus_2tmnh3PkXQS8NG'

+IS_TESTING_REAL_DATABASE = bool(os.environ.get('TEST_DATABASE_URI'))
+
 def __gen_checksum(image_id):
   h = hashlib.md5(image_id)
   return 'tarsum+sha256:' + h.hexdigest() + h.hexdigest()
@@ -144,7 +147,7 @@ def setup_database_for_testing(testcase):

   # Sanity check to make sure we're not killing our prod db
   db = model.db
-  if not isinstance(model.db.obj, SqliteDatabase):
+  if not IS_TESTING_REAL_DATABASE and not isinstance(model.db.obj, SqliteDatabase):
     raise RuntimeError('Attempted to wipe production database!')

   global db_initialized_for_testing
@@ -157,14 +160,17 @@ def setup_database_for_testing(testcase):
     populate_database()

   # Enable foreign key constraints.
-  model.db.obj.execute_sql('PRAGMA foreign_keys = ON;')
+  if not IS_TESTING_REAL_DATABASE:
+    model.db.obj.execute_sql('PRAGMA foreign_keys = ON;')

   db_initialized_for_testing = True

   # Create a savepoint for the testcase.
+  test_savepoint = savepoint(db) if IS_TESTING_REAL_DATABASE else savepoint_sqlite(db)
+
   global testcases
   testcases[testcase] = {}
-  testcases[testcase]['savepoint'] = savepoint_sqlite(db)
+  testcases[testcase]['savepoint'] = test_savepoint
   testcases[testcase]['savepoint'].__enter__()

 def initialize_database():
@@ -286,7 +292,7 @@ def wipe_database():

   # Sanity check to make sure we're not killing our prod db
   db = model.db
-  if not isinstance(model.db.obj, SqliteDatabase):
+  if not IS_TESTING_REAL_DATABASE and not isinstance(model.db.obj, SqliteDatabase):
     raise RuntimeError('Attempted to wipe production database!')

   drop_model_tables(all_models, fail_silently=True)
@@ -554,7 +560,7 @@ if __name__ == '__main__':
   log_level = getattr(logging, app.config['LOGGING_LEVEL'])
   logging.basicConfig(level=log_level)

-  if not isinstance(model.db.obj, SqliteDatabase):
+  if not IS_TESTING_REAL_DATABASE and not isinstance(model.db.obj, SqliteDatabase):
     raise RuntimeError('Attempted to initialize production database!')

   initialize_database()
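The savepoint selection above exists because savepoint_sqlite is SQLite-specific; against a real MySQL or Postgres server the generic peewee savepoint is needed. A rough sketch of the protocol the harness drives by hand (assuming peewee 2.x, where both names are context managers that release the savepoint on clean exit and roll it back when handed an exception):

from peewee import SqliteDatabase, savepoint, savepoint_sqlite

db = SqliteDatabase(':memory:')
IS_TESTING_REAL_DATABASE = False  # would be derived from TEST_DATABASE_URI

# setUp: open a savepoint so everything the testcase writes can be discarded.
sp = savepoint(db) if IS_TESTING_REAL_DATABASE else savepoint_sqlite(db)
sp.__enter__()

# ... the testcase body runs here ...

# tearDown: handing __exit__ a non-None exception type asks the context
# manager to roll the savepoint back instead of releasing (committing) it.
sp.__exit__(Exception, Exception(), None)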
test/fulldbtest.sh (new executable file, 60 lines)
@@ -0,0 +1,60 @@
+set -e
+
+up_mysql() {
+  # Run a MySQL database on port 3306 inside of Docker.
+  docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql
+
+  # Sleep for 10s to let MySQL get started.
+  echo 'Sleeping for 10...'
+  sleep 10
+
+  # Add the database to mysql.
+  docker run --rm --link mysql:mysql mysql sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
+}
+
+down_mysql() {
+  docker kill mysql
+  docker rm mysql
+}
+
+up_postgres() {
+  # Run a Postgres database on port 5432 inside of Docker.
+  docker run --name postgres -p 5432:5432 -d postgres
+
+  # Sleep for 5s to let Postgres get started.
+  echo 'Sleeping for 5...'
+  sleep 5
+
+  # Add the database to postgres.
+  docker run --rm --link postgres:postgres postgres sh -c 'echo "create database genschema" | psql -h "$POSTGRES_PORT_5432_TCP_ADDR" -p "$POSTGRES_PORT_5432_TCP_PORT" -U postgres'
+}
+
+down_postgres() {
+  docker kill postgres
+  docker rm postgres
+}
+
+run_tests() {
+  TEST_DATABASE_URI=$1 TEST=true python -m unittest discover
+}
+
+# Test (and generate, if requested) via MySQL.
+echo '> Starting MySQL'
+up_mysql
+
+echo '> Running Full Test Suite (mysql)'
+set +e
+run_tests "mysql+pymysql://root:password@192.168.59.103/genschema"
+set -e
+down_mysql
+
+# Test via Postgres.
+echo '> Starting Postgres'
+up_postgres
+
+echo '> Running Full Test Suite (postgres)'
+set +e
+run_tests "postgresql://postgres@192.168.59.103/genschema"
+set -e
+down_postgres
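The hard-coded 192.168.59.103 is the default boot2docker VM address, so the script assumes a boot2docker-style Docker host. For illustration, a rough Python equivalent of run_tests (an assumption for clarity, not code from this commit): the database URI reaches the suite purely through the environment.

import os
import subprocess

def run_tests(db_uri):
  # Mirror of the shell function above: export TEST_DATABASE_URI and TEST,
  # then let unittest discovery pick up the whole suite.
  env = dict(os.environ, TEST_DATABASE_URI=db_uri, TEST='true')
  return subprocess.call(['python', '-m', 'unittest', 'discover'], env=env)

run_tests('mysql+pymysql://root:password@192.168.59.103/genschema')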
@@ -1328,7 +1328,9 @@ class TestRepoBuilds(ApiTestCase):
     status_json = self.getJsonResponse(RepositoryBuildStatus,
                                        params=dict(repository=ADMIN_ACCESS_USER + '/building', build_uuid=build['id']))

-    self.assertEquals(status_json, build)
+    self.assertEquals(status_json['id'], build['id'])
+    self.assertEquals(status_json['resource_key'], build['resource_key'])
+    self.assertEquals(status_json['trigger'], build['trigger'])

 class TestRequestRepoBuild(ApiTestCase):
   def test_requestrepobuild(self):
@@ -2051,7 +2053,14 @@ class TestOrganizationApplications(ApiTestCase):
     json = self.getJsonResponse(OrganizationApplications, params=dict(orgname=ORGANIZATION))

     self.assertEquals(2, len(json['applications']))
-    self.assertEquals(FAKE_APPLICATION_CLIENT_ID, json['applications'][0]['client_id'])
+
+    found = False
+    for application in json['applications']:
+      if application['client_id'] == FAKE_APPLICATION_CLIENT_ID:
+        found = True
+        break
+
+    self.assertTrue(found)

     # Add a new application.
     json = self.postJsonResponse(OrganizationApplications, params=dict(orgname=ORGANIZATION),
@@ -2063,7 +2072,6 @@ class TestOrganizationApplications(ApiTestCase):
     # Retrieve the apps list again
     list_json = self.getJsonResponse(OrganizationApplications, params=dict(orgname=ORGANIZATION))
     self.assertEquals(3, len(list_json['applications']))
-    self.assertEquals(json, list_json['applications'][2])


 class TestOrganizationApplicationResource(ApiTestCase):
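Both test changes above follow from the same portability point: result ordering and whole-payload equality are database-dependent, so the assertions now check membership and individual fields instead of positions and full dicts. Inside the testcase, a compact equivalent of the membership loop would be (a sketch meant to live in the test method, not code from the commit):

# Assert membership rather than position, since row order differs between
# SQLite, MySQL and Postgres.
self.assertIn(FAKE_APPLICATION_CLIENT_ID,
              [app['client_id'] for app in json['applications']])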
@@ -1,3 +1,5 @@
+import os
+
 from datetime import datetime, timedelta

 from config import DefaultConfig
@@ -14,8 +16,11 @@ class FakeTransaction(object):
 class TestConfig(DefaultConfig):
   TESTING = True

-  DB_URI = 'sqlite:///:memory:'
-  DB_CONNECTION_ARGS = {}
+  DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///:memory:')
+  DB_CONNECTION_ARGS = {
+    'threadlocals': True,
+    'autorollback': True
+  }

   @staticmethod
   def create_transaction(db):
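With this fallback, the suite keeps using a throwaway in-memory SQLite database by default and switches to whatever server fulldbtest.sh exports; the new connection args are peewee flags (autorollback is meant to roll back queries that fail outside an explicit transaction, per peewee 2.x behavior). A standalone sketch of the resolution rule (hypothetical script, not from the commit):

import os

# Same rule as TestConfig above: an exported TEST_DATABASE_URI wins,
# otherwise tests run against in-memory SQLite.
DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///:memory:')
print(DB_URI)
# TEST_DATABASE_URI='postgresql://postgres@192.168.59.103/genschema' python check.py
#   -> postgresql://postgres@192.168.59.103/genschema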