commit a0d94f9d59
Merge remote-tracking branch 'origin/laffa' into nomenclature

Conflicts:
	test/data/test.db

26 changed files with 1088 additions and 83 deletions
data/database.py

@@ -168,7 +168,7 @@ class Visibility(BaseModel):


 class Repository(BaseModel):
-  namespace_user = ForeignKeyField(User)
+  namespace_user = ForeignKeyField(User, null=True)
   name = CharField()
   visibility = ForeignKeyField(Visibility)
   description = TextField(null=True)
@@ -221,7 +221,6 @@ class PermissionPrototype(BaseModel):
   )


-
 class AccessToken(BaseModel):
   friendly_name = CharField(null=True)
   code = CharField(default=random_string_generator(length=64), unique=True,
@@ -267,6 +266,23 @@ class ImageStorage(BaseModel):
   uploading = BooleanField(default=True, null=True)


+class ImageStorageTransformation(BaseModel):
+  name = CharField(index=True, unique=True)
+
+
+class DerivedImageStorage(BaseModel):
+  source = ForeignKeyField(ImageStorage, null=True, related_name='source')
+  derivative = ForeignKeyField(ImageStorage, related_name='derivative')
+  transformation = ForeignKeyField(ImageStorageTransformation)
+
+  class Meta:
+    database = db
+    read_slaves = (read_slave,)
+    indexes = (
+      (('source', 'transformation'), True),
+    )
+
+
 class ImageStorageLocation(BaseModel):
   name = CharField(unique=True, index=True)
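For readers unfamiliar with peewee's Meta.indexes: each entry pairs a tuple of field names with a unique flag, so the declaration above puts a unique composite index on (source, transformation), i.e. at most one derivative per source and transformation. A minimal, self-contained sketch under peewee 2.x-era syntax (matching the related_name usage above); the model and transformation names here are illustrative stand-ins, not Quay's:

from peewee import SqliteDatabase, Model, CharField, ForeignKeyField, IntegrityError

db = SqliteDatabase(':memory:')

class Storage(Model):
  class Meta:
    database = db

class Transformation(Model):
  name = CharField(index=True, unique=True)
  class Meta:
    database = db

class Derived(Model):
  source = ForeignKeyField(Storage, null=True, related_name='source')
  derivative = ForeignKeyField(Storage, related_name='derivative')
  transformation = ForeignKeyField(Transformation)
  class Meta:
    database = db
    # ((field names), unique): one derivative per (source, transformation).
    indexes = (
      (('source', 'transformation'), True),
    )

db.create_tables([Storage, Transformation, Derived])
squash = Transformation.create(name='squash')  # illustrative name
src = Storage.create()
Derived.create(source=src, derivative=Storage.create(), transformation=squash)
try:
  Derived.create(source=src, derivative=Storage.create(), transformation=squash)
except IntegrityError:
  print('second derivative for the same (source, transformation) is rejected')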
@@ -451,4 +467,5 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
               OAuthApplication, OAuthAuthorizationCode, OAuthAccessToken, NotificationKind,
               Notification, ImageStorageLocation, ImageStoragePlacement,
               ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification,
-              RepositoryAuthorizedEmail, TeamMemberInvite]
+              RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
+              TeamMemberInvite]
data/migrations/env.py

@@ -1,4 +1,7 @@
 from __future__ import with_statement
+
+import os
+
 from alembic import context
 from sqlalchemy import engine_from_config, pool
 from logging.config import fileConfig
@@ -12,8 +15,17 @@ from util.morecollections import AttrDict

 # this is the Alembic Config object, which provides
 # access to the values within the .ini file in use.
+db_uri = unquote(app.config['DB_URI'])
+if 'GENMIGRATE' in os.environ:
+  docker_host = os.environ.get('DOCKER_HOST')
+  docker_host_ip = docker_host[len('tcp://'):].split(':')[0]
+  if os.environ.get('GENMIGRATE') == 'mysql':
+    db_uri = 'mysql+pymysql://root:password@%s/genschema' % (docker_host_ip)
+  else:
+    db_uri = 'postgresql://postgres@%s/genschema' % (docker_host_ip)
+
 config = context.config
-config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI']))
+config.set_main_option('sqlalchemy.url', db_uri)

 # Interpret the config file for Python logging.
 # This line sets up loggers basically.
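In other words, the hunk above makes env.py target a throwaway 'genschema' database inside Docker whenever GENMIGRATE is set, instead of the application's configured DB_URI. A standalone restatement of that branch (the function name and example values are illustrative only):

import os

def resolve_db_uri(default_uri):
  # When GENMIGRATE is unset, Alembic runs against the app's own database.
  if 'GENMIGRATE' not in os.environ:
    return default_uri
  # DOCKER_HOST looks like 'tcp://192.168.59.103:2375'; keep only the IP.
  docker_host = os.environ.get('DOCKER_HOST')
  docker_host_ip = docker_host[len('tcp://'):].split(':')[0]
  if os.environ.get('GENMIGRATE') == 'mysql':
    return 'mysql+pymysql://root:password@%s/genschema' % docker_host_ip
  return 'postgresql://postgres@%s/genschema' % docker_host_ip

# e.g. GENMIGRATE=mysql DOCKER_HOST=tcp://192.168.59.103:2375 yields
# 'mysql+pymysql://root:password@192.168.59.103/genschema'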
@@ -57,7 +69,7 @@ def run_migrations_online():

   """

-  if isinstance(db.obj, SqliteDatabase):
+  if isinstance(db.obj, SqliteDatabase) and not 'GENMIGRATE' in os.environ:
     print ('Skipping Sqlite migration!')
     return

82  data/migrations/migration.sh  (new executable file)
@@ -0,0 +1,82 @@
+set -e
+
+up_mysql() {
+  # Run a SQL database on port 3306 inside of Docker.
+  docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql
+
+  # Sleep for 5s to let MySQL start.
+  echo 'Sleeping for 5...'
+  sleep 5
+
+  # Add the database to mysql.
+  docker run --rm --link mysql:mysql mysql sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
+}
+
+down_mysql() {
+  docker kill mysql
+  docker rm mysql
+}
+
+up_postgres() {
+  # Run a SQL database on port 5432 inside of Docker.
+  docker run --name postgres -p 5432:5432 -d postgres
+
+  # Sleep for 5s to let Postgres start.
+  echo 'Sleeping for 5...'
+  sleep 5
+
+  # Add the database to postgres.
+  docker run --rm --link postgres:postgres postgres sh -c 'echo "create database genschema" | psql -h "$POSTGRES_PORT_5432_TCP_ADDR" -p "$POSTGRES_PORT_5432_TCP_PORT" -U postgres'
+}
+
+down_postgres() {
+  docker kill postgres
+  docker rm postgres
+}
+
+gen_migrate() {
+  # Generate the migration against the current model.
+  GENMIGRATE=$1 PYTHONPATH=. alembic revision --autogenerate -m "$@"
+
+  # Apply the schema as defined by the existing alembic model.
+  GENMIGRATE=$1 PYTHONPATH=. alembic upgrade head
+}
+
+test_migrate() {
+  # Generate a database with the schema as defined by the existing alembic model.
+  GENMIGRATE=$1 PYTHONPATH=. alembic upgrade head
+
+  # Downgrade to verify it works in both directions.
+  COUNT=`ls data/migrations/versions/*.py | wc -l | tr -d ' '`
+  GENMIGRATE=$1 PYTHONPATH=. alembic downgrade "-$COUNT"
+}
+
+# Test (and generate, if requested) via MySQL.
+echo '> Starting MySQL'
+up_mysql
+
+if [ $# -ne 0 ]
+then
+  set +e
+  echo '> Generating Migration'
+  gen_migrate "mysql"
+  set -e
+fi
+
+echo '> Testing Migration (mysql)'
+set +e
+test_migrate "mysql"
+set -e
+down_mysql
+
+# Test via Postgres.
+echo '> Starting Postgres'
+up_postgres
+
+echo '> Testing Migration (postgres)'
+set +e
+test_migrate "postgres"
+set -e
+down_postgres
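Reading the script: with no arguments it only round-trips the existing migrations on MySQL and Postgres; with any argument it first autogenerates a new revision. The same upgrade-then-downgrade check can be sketched through Alembic's Python API (the alembic.ini path here is an assumption):

import glob
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')  # assumed config location

# Mirror test_migrate(): apply every revision, then walk them all back to
# verify the downgrade path works too.
command.upgrade(cfg, 'head')
count = len(glob.glob('data/migrations/versions/*.py'))
command.downgrade(cfg, '-%d' % count)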
@@ -0,0 +1,22 @@
+"""Calculate uncompressed sizes for all images
+
+Revision ID: 2430f55c41d5
+Revises: 3b4d3a4461dc
+Create Date: 2014-10-07 14:50:04.660315
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2430f55c41d5'
+down_revision = '3b4d3a4461dc'
+
+from alembic import op
+import sqlalchemy as sa
+from util.uncompressedsize import backfill_sizes_from_data
+
+
+def upgrade(tables):
+  backfill_sizes_from_data()
+
+def downgrade(tables):
+  pass
@@ -0,0 +1,57 @@
+"""Add support for squashed images
+
+Revision ID: 3b4d3a4461dc
+Revises: b1d41e2071b
+Create Date: 2014-10-07 14:49:13.105746
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3b4d3a4461dc'
+down_revision = 'b1d41e2071b'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+  ### commands auto generated by Alembic - please adjust! ###
+  op.create_table('imagestoragetransformation',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=False),
+    sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragetransformation'))
+  )
+  op.create_index('imagestoragetransformation_name', 'imagestoragetransformation', ['name'], unique=True)
+  op.create_table('derivedimagestorage',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('source_id', sa.Integer(), nullable=True),
+    sa.Column('derivative_id', sa.Integer(), nullable=False),
+    sa.Column('transformation_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['derivative_id'], ['imagestorage.id'], name=op.f('fk_derivedimagestorage_derivative_id_imagestorage')),
+    sa.ForeignKeyConstraint(['source_id'], ['imagestorage.id'], name=op.f('fk_derivedimagestorage_source_id_imagestorage')),
+    sa.ForeignKeyConstraint(['transformation_id'], ['imagestoragetransformation.id'], name=op.f('fk_dis_transformation_id_ist')),
+    sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedimagestorage'))
+  )
+  op.create_index('derivedimagestorage_derivative_id', 'derivedimagestorage', ['derivative_id'], unique=False)
+  op.create_index('derivedimagestorage_source_id', 'derivedimagestorage', ['source_id'], unique=False)
+  op.create_index('derivedimagestorage_source_id_transformation_id', 'derivedimagestorage', ['source_id', 'transformation_id'], unique=True)
+  op.create_index('derivedimagestorage_transformation_id', 'derivedimagestorage', ['transformation_id'], unique=False)
+  op.drop_index('image_repository_id_docker_image_id', table_name='image')
+  op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=True)
+  op.drop_index('imagestorage_uuid', table_name='imagestorage')
+  op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=False)
+  op.drop_column(u'repository', 'namespace')
+  op.create_index('repository_namespace_user_id', 'repository', ['namespace_user_id'], unique=False)
+  ### end Alembic commands ###
+
+
+def downgrade(tables):
+  ### commands auto generated by Alembic - please adjust! ###
+  op.drop_index('repository_namespace_user_id', table_name='repository')
+  op.add_column(u'repository', sa.Column('namespace', sa.String(length=255), nullable=True))
+  op.drop_index('imagestorage_uuid', table_name='imagestorage')
+  op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=True)
+  op.drop_index('image_repository_id_docker_image_id', table_name='image')
+  op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=False)
+  op.drop_table('derivedimagestorage')
+  op.drop_table('imagestoragetransformation')
+  ### end Alembic commands ###
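Two details worth noting in this migration. First, the drop_index/create_index pairs are how the uniqueness of an existing index is flipped (image_repository_id_docker_image_id becomes unique, imagestorage_uuid stops being unique): Alembic has no alter-index operation, so the pattern is drop and recreate, sketched generically here:

from alembic import op

def set_index_uniqueness(name, table, columns, unique):
  # Alembic cannot alter an index in place; drop it and recreate it
  # with the desired uniqueness flag.
  op.drop_index(name, table_name=table)
  op.create_index(name, table, columns, unique=unique)

Second, the abbreviated constraint name fk_dis_transformation_id_ist, unlike its spelled-out neighbors, is presumably hand-shortened to stay within identifier length limits (MySQL caps names at 64 characters, and the unabbreviated form would exceed that).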
data/model/legacy.py

@@ -13,7 +13,7 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
                            Notification, ImageStorageLocation, ImageStoragePlacement,
                            ExternalNotificationEvent, ExternalNotificationMethod,
                            RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
-                           random_string_generator, db, BUILD_PHASE)
+                           DerivedImageStorage, random_string_generator, db, BUILD_PHASE)
 from peewee import JOIN_LEFT_OUTER, fn
 from util.validation import (validate_username, validate_email, validate_password,
                              INVALID_PASSWORD_MESSAGE)
@@ -1161,6 +1161,14 @@ def __translate_ancestry(old_ancestry, translations, repository, username, prefe
   return '/%s/' % '/'.join(new_ids)


+def _create_storage(location_name):
+  storage = ImageStorage.create()
+  location = ImageStorageLocation.get(name=location_name)
+  ImageStoragePlacement.create(location=location, storage=storage)
+  storage.locations = {location_name}
+  return storage
+
+
 def find_create_or_link_image(docker_image_id, repository, username, translations,
                               preferred_location):
   with config.app_config['DB_TRANSACTION_FACTORY'](db):
@@ -1201,10 +1209,7 @@ def find_create_or_link_image(docker_image_id, repository, username, translation
       origin_image_id = to_copy.id
     except Image.DoesNotExist:
       logger.debug('Creating new storage for docker id: %s', docker_image_id)
-      storage = ImageStorage.create()
-      location = ImageStorageLocation.get(name=preferred_location)
-      ImageStoragePlacement.create(location=location, storage=storage)
-      storage.locations = {preferred_location}
+      storage = _create_storage(preferred_location)

     logger.debug('Storage locations: %s', storage.locations)

@@ -1222,6 +1227,26 @@ def find_create_or_link_image(docker_image_id, repository, username, translation
   return new_image


+def find_or_create_derived_storage(source, transformation_name, preferred_location):
+  try:
+    found = (ImageStorage
+             .select(ImageStorage, DerivedImageStorage)
+             .join(DerivedImageStorage, on=(ImageStorage.id == DerivedImageStorage.derivative))
+             .join(ImageStorageTransformation)
+             .where(DerivedImageStorage.source == source,
+                    ImageStorageTransformation.name == transformation_name)
+             .get())
+
+    found.locations = {placement.location.name for placement in found.imagestorageplacement_set}
+    return found
+  except ImageStorage.DoesNotExist:
+    logger.debug('Creating storage derived from source: %s', source.uuid)
+    trans = ImageStorageTransformation.get(name=transformation_name)
+    new_storage = _create_storage(preferred_location)
+    DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
+    return new_storage
+
+
 def get_storage_by_uuid(storage_uuid):
   placements = list(ImageStoragePlacement
                     .select(ImageStoragePlacement, ImageStorage, ImageStorageLocation)
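A hedged usage sketch for the new helper. The transformation name 'squash' is an assumption suggested by the "Add support for squashed images" migration, and 'local_us' is a hypothetical location name; the diff itself only shows that transformations are looked up by unique name:

# First call creates the derived storage (with a placement in the preferred
# location) and links it to the source; subsequent calls return the same row
# via the unique (source, transformation) index.
derived = find_or_create_derived_storage(image.storage, 'squash', 'local_us')
assert find_or_create_derived_storage(image.storage, 'squash', 'local_us').id == derived.id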
@@ -1377,15 +1402,8 @@ def garbage_collect_repository(namespace_name, repository_name):

     image_to_remove.delete_instance()

-  if uuids_to_check_for_gc:
-    storage_to_remove = (ImageStorage
-                         .select()
-                         .join(Image, JOIN_LEFT_OUTER)
-                         .group_by(ImageStorage)
-                         .where(ImageStorage.uuid << list(uuids_to_check_for_gc))
-                         .having(fn.Count(Image.id) == 0))
-
-    for storage in storage_to_remove:
+  def remove_storages(query):
+    for storage in query:
       logger.debug('Garbage collecting image storage: %s', storage.uuid)

       image_path = config.store.image_path(storage.uuid)
@@ -1394,7 +1412,24 @@ def garbage_collect_repository(namespace_name, repository_name):
         placement.delete_instance()
         config.store.remove({location_name}, image_path)

-      storage.delete_instance()
+      storage.delete_instance(recursive=True)
+
+  if uuids_to_check_for_gc:
+    storage_to_remove = (ImageStorage
+                         .select()
+                         .join(Image, JOIN_LEFT_OUTER)
+                         .group_by(ImageStorage)
+                         .where(ImageStorage.uuid << list(uuids_to_check_for_gc))
+                         .having(fn.Count(Image.id) == 0))
+
+    remove_storages(storage_to_remove)
+
+  # Now remove any derived image storages whose sources have been removed
+  derived_storages_to_remove = (ImageStorage
+                                .select()
+                                .join(DerivedImageStorage, on=(ImageStorage.id == DerivedImageStorage.derivative))
+                                .where(DerivedImageStorage.source >> None))
+  remove_storages(derived_storages_to_remove)

   return len(to_remove)
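The switch to delete_instance(recursive=True) is what makes the new derived-storage pass work, assuming peewee's documented recursive-delete semantics: dependent rows reached through non-nullable foreign keys are deleted, while nullable ones are set to NULL. Deleting a collected storage therefore removes DerivedImageStorage rows that point at it as derivative, but only nulls out source (declared null=True in the model), leaving orphans for the follow-up query to sweep:

# Sketch of the interaction (peewee semantics assumed, not shown in the diff):
storage.delete_instance(recursive=True)
# - DerivedImageStorage rows with derivative == storage: deleted outright.
# - DerivedImageStorage rows with source == storage: source set to NULL,
#   which is exactly what `.where(DerivedImageStorage.source >> None)`
#   then picks up for removal.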