Merge remote-tracking branch 'upstream/master' into python-registry-v2

Jake Moshenko 2015-09-17 16:16:27 -04:00
commit 26cea9a07c
96 changed files with 2044 additions and 626 deletions

data/database.py

@@ -123,6 +123,7 @@ db = Proxy()
read_slave = Proxy()
db_random_func = CallableProxy()
db_for_update = CallableProxy()
+db_transaction = CallableProxy()
def validate_database_url(url, db_kwargs, connect_timeout=5):
@@ -168,6 +169,10 @@ def configure(config_object):
  if read_slave_uri is not None:
    read_slave.initialize(_db_from_url(read_slave_uri, db_kwargs))

+  def _db_transaction():
+    return config_object['DB_TRANSACTION_FACTORY'](db)
+
+  db_transaction.initialize(_db_transaction)
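With this change db_transaction becomes a CallableProxy that configure() binds to the app-supplied factory, replacing the module-level helper removed from data/model/__init__.py later in this diff. A minimal sketch of a factory that satisfies the contract — an illustration only; the real factory is defined in the application config and is not part of this diff:

  # Hypothetical DB_TRANSACTION_FACTORY: returns a context manager that
  # commits on success and rolls back on exception.
  def create_transaction(db):
    return db.transaction()  # peewee's built-in transaction context manager

  config_object = {'DB_TRANSACTION_FACTORY': create_transaction}

  # After configure(config_object), any caller can do:
  #   with db_transaction():
  #     ...  # queries here commit or roll back together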
def random_string_generator(length=16):
  def random_string():
@@ -377,14 +382,15 @@ class Repository(BaseModel):
      return sorted_models.index(cmp_fk.model_class.__name__)
    filtered_ops.sort(key=sorted_model_key)

-    for query, fk in filtered_ops:
-      model = fk.model_class
-      if fk.null and not delete_nullable:
-        model.update(**{fk.name: None}).where(query).execute()
-      else:
-        model.delete().where(query).execute()
-
-    return self.delete().where(self._pk_expr()).execute()
+    with db_transaction():
+      for query, fk in filtered_ops:
+        model = fk.model_class
+        if fk.null and not delete_nullable:
+          model.update(**{fk.name: None}).where(query).execute()
+        else:
+          model.delete().where(query).execute()
+
+      return self.delete().where(self._pk_expr()).execute()
class Star(BaseModel):
  user = ForeignKeyField(User, index=True)
@@ -469,6 +475,9 @@ class RepositoryBuildTrigger(BaseModel):
  pull_robot = QuayUserField(allows_robots=True, null=True, related_name='triggerpullrobot',
                             robot_null_delete=True)

+  # TODO(jschorr): Remove this column once we verify the backfill has succeeded.
+  used_legacy_github = BooleanField(null=True, default=False)
class EmailConfirmation(BaseModel):
  code = CharField(default=random_string_generator(), unique=True, index=True)
@@ -778,6 +787,33 @@ class BlobUpload(BaseModel):
  )

+class QuayService(BaseModel):
+  name = CharField(index=True, unique=True)
+
+
+class QuayRegion(BaseModel):
+  name = CharField(index=True, unique=True)
+
+
+class QuayRelease(BaseModel):
+  service = ForeignKeyField(QuayService)
+  version = CharField()
+  region = ForeignKeyField(QuayRegion)
+  reverted = BooleanField(default=False)
+  created = DateTimeField(default=datetime.now, index=True)
+
+  class Meta:
+    database = db
+    read_slaves = (read_slave,)
+    indexes = (
+      # unique release per region
+      (('service', 'version', 'region'), True),
+
+      # get recent releases
+      (('service', 'region', 'created'), False),
+    )

all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission, Visibility,
              RepositoryTag, EmailConfirmation, FederatedLogin, LoginService, QueueItem,
              RepositoryBuild, Team, TeamMember, TeamRole, LogEntryKind, LogEntry,
@@ -787,4 +823,5 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
              ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification,
              RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
              TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind,
-              AccessTokenKind, Star, RepositoryActionCount, TagManifest, BlobUpload, UserRegion]
+              AccessTokenKind, Star, RepositoryActionCount, TagManifest, UserRegion,
+              QuayService, QuayRegion, QuayRelease, BlobUpload]
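In peewee's Meta.indexes, each entry is a tuple of (field names, unique); the True flag above makes (service, version, region) a unique key, so recording the same release twice is idempotent. A hedged sketch of that behavior (create_or_get is peewee 2.x API; the literal values are illustrative):

  service, _ = QuayService.create_or_get(name='quay')
  region, _ = QuayRegion.create_or_get(name='staging')
  release, created = QuayRelease.create_or_get(service=service, version='deadbeef', region=region)
  # Calling create_or_get again with the same triple returns the existing
  # row with created == False instead of violating the unique index.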

data/migrations/env.py

@@ -1,8 +1,11 @@
from __future__ import with_statement

+import logging
import os

from alembic import context
+from alembic.revision import ResolutionError
+from alembic.util import CommandError
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from urllib import unquote, quote
@@ -11,6 +14,7 @@ from peewee import SqliteDatabase
from data.database import all_models, db
from app import app
from data.model.sqlalchemybridge import gen_sqlalchemy_metadata
+from release import GIT_HEAD, REGION, SERVICE
from util.morecollections import AttrDict

config = context.config
@@ -21,6 +25,8 @@ config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI']))
if config.config_file_name:
  fileConfig(config.config_file_name)

+logger = logging.getLogger(__name__)
+
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
@@ -77,7 +83,23 @@ def run_migrations_online():
  try:
    with context.begin_transaction():
-      context.run_migrations(tables=tables)
+      try:
+        context.run_migrations(tables=tables)
+      except (CommandError, ResolutionError) as ex:
+        if 'No such revision' not in str(ex):
+          raise
+
+        if not REGION or not GIT_HEAD:
+          raise
+
+        from data.model.release import get_recent_releases
+
+        # ignore revision error if we're running the previous release
+        releases = list(get_recent_releases(SERVICE, REGION).offset(1).limit(1))
+        if releases and releases[0].version == GIT_HEAD:
+          logger.warn('Skipping database migration because revision not found')
+        else:
+          raise
  finally:
    connection.close()
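The guard is easier to follow on a timeline: get_recent_releases returns rows newest-first, so offset(1) is the release deployed immediately before the newest one. If the running code's GIT_HEAD matches that previous release, a rollback is in progress, and the unknown alembic revision belongs to the newer code — safe to skip. An illustration with invented values:

  # History for (SERVICE, REGION), newest first:
  #   version 'cafe222'  <- newest release, shipped migration revision X
  #   version 'cafe111'  <- previous release, GIT_HEAD of hosts rolling back
  # A 'cafe111' host sees revision X in the database but has no file for it;
  # that is exactly the 'No such revision' case tolerated above.
  releases = list(get_recent_releases(SERVICE, REGION).offset(1).limit(1))
  rollback_in_progress = bool(releases) and releases[0].version == GIT_HEAD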

data/migrations/versions/… (new file)

@ -0,0 +1,55 @@
"""Quay releases
Revision ID: 1c0f6ede8992
Revises: 545794454f49
Create Date: 2015-09-15 15:46:09.784607
"""
# revision identifiers, used by Alembic.
revision = '1c0f6ede8992'
down_revision = '545794454f49'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.create_table('quayregion',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_quayregion'))
)
op.create_index('quayregion_name', 'quayregion', ['name'], unique=True)
op.create_table('quayservice',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_quayservice'))
)
op.create_index('quayservice_name', 'quayservice', ['name'], unique=True)
op.create_table('quayrelease',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('service_id', sa.Integer(), nullable=False),
sa.Column('version', sa.String(length=255), nullable=False),
sa.Column('region_id', sa.Integer(), nullable=False),
sa.Column('reverted', sa.Boolean(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['region_id'], ['quayregion.id'], name=op.f('fk_quayrelease_region_id_quayregion')),
sa.ForeignKeyConstraint(['service_id'], ['quayservice.id'], name=op.f('fk_quayrelease_service_id_quayservice')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_quayrelease'))
)
op.create_index('quayrelease_created', 'quayrelease', ['created'], unique=False)
op.create_index('quayrelease_region_id', 'quayrelease', ['region_id'], unique=False)
op.create_index('quayrelease_service_id', 'quayrelease', ['service_id'], unique=False)
op.create_index('quayrelease_service_id_region_id_created', 'quayrelease', ['service_id', 'region_id', 'created'], unique=False)
op.create_index('quayrelease_service_id_version_region_id', 'quayrelease', ['service_id', 'version', 'region_id'], unique=True)
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_table('quayrelease')
op.drop_table('quayservice')
op.drop_table('quayregion')
### end Alembic commands ###
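Note the op.f() wrapper on the constraint names: it marks a name as already final, so alembic's naming_convention templating leaves it untouched — 'pk_quayregion' and the fk_* names are emitted exactly as written. For instance:

  # With op.f(), the literal name is used as-is; without it, alembic may
  # pass the name through the configured naming_convention first.
  sa.PrimaryKeyConstraint('id', name=op.f('pk_quayregion'))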

data/migrations/versions/… (new file)

@ -0,0 +1,26 @@
"""Add support for Dex login
Revision ID: 3a3bb77e17d5
Revises: 9512773a4a2
Create Date: 2015-09-04 15:57:38.007822
"""
# revision identifiers, used by Alembic.
revision = '3a3bb77e17d5'
down_revision = '9512773a4a2'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
op.bulk_insert(tables.loginservice, [{'id': 7, 'name': 'dex'}])
def downgrade(tables):
op.execute(
tables.loginservice.delete()
.where(tables.loginservice.c.name == op.inline_literal('dex'))
)

data/migrations/versions/… (new file)

@ -0,0 +1,28 @@
"""Migrate GitHub triggers to use deploy keys
Revision ID: 3ff4fbc94644
Revises: 4d5f6716df0
Create Date: 2015-09-16 17:50:22.034146
"""
# revision identifiers, used by Alembic.
revision = '3ff4fbc94644'
down_revision = '4d5f6716df0'
from alembic import op
import sqlalchemy as sa
from util.migrate.migrategithubdeploykeys import backfill_github_deploykeys
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
backfill_github_deploykeys()
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###

data/migrations/versions/… (new file)

@ -0,0 +1,26 @@
"""Add legacy column for GitHub backfill tracking
Revision ID: 4d5f6716df0
Revises: 1c0f6ede8992
Create Date: 2015-09-16 17:49:40.334540
"""
# revision identifiers, used by Alembic.
revision = '4d5f6716df0'
down_revision = '1c0f6ede8992'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('repositorybuildtrigger', sa.Column('used_legacy_github', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_column('repositorybuildtrigger', 'used_legacy_github')
### end Alembic commands ###

data/migrations/versions/… (new file)

@ -0,0 +1,34 @@
"""Migrate image data back to image table
Revision ID: 545794454f49
Revises: 3a3bb77e17d5
Create Date: 2015-09-15 11:48:47.554255
"""
# revision identifiers, used by Alembic.
revision = '545794454f49'
down_revision = '3a3bb77e17d5'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('image', sa.Column('aggregate_size', sa.BigInteger(), nullable=True))
op.add_column('image', sa.Column('command', sa.Text(), nullable=True))
op.add_column('image', sa.Column('comment', sa.Text(), nullable=True))
op.add_column('image', sa.Column('created', sa.DateTime(), nullable=True))
op.add_column('image', sa.Column('v1_json_metadata', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_column('image', 'v1_json_metadata')
op.drop_column('image', 'created')
op.drop_column('image', 'comment')
op.drop_column('image', 'command')
op.drop_column('image', 'aggregate_size')
### end Alembic commands ###

data/model/__init__.py

@@ -1,4 +1,4 @@
-from data.database import db
+from data.database import db, db_transaction


class DataModelException(Exception):
@@ -88,10 +88,6 @@ class Config(object):

config = Config()

-def db_transaction():
-  return config.app_config['DB_TRANSACTION_FACTORY'](db)
-
# There MUST NOT be any circular dependencies between these subsections. If there are fix it by
# moving the minimal number of things to _basequery
# TODO document the methods and modules for each one of the submodules below.

data/model/build.py

@@ -4,7 +4,7 @@ from peewee import JOIN_LEFT_OUTER
from datetime import timedelta, datetime

from data.database import (BuildTriggerService, RepositoryBuildTrigger, Repository, Namespace, User,
-                           RepositoryBuild, BUILD_PHASE, db_for_update)
+                           RepositoryBuild, BUILD_PHASE, db_for_update, db_random_func)
from data.model import (InvalidBuildTriggerException, InvalidRepositoryBuildException,
                        db_transaction, user as user_model)
@@ -163,11 +163,24 @@ def cancel_repository_build(build, work_queue):
  return True


-def archivable_buildlogs_query():
+def get_archivable_build():
  presumed_dead_date = datetime.utcnow() - PRESUMED_DEAD_BUILD_AGE
-  return (RepositoryBuild
-          .select()
+  candidates = (RepositoryBuild
+                .select(RepositoryBuild.id)
                .where((RepositoryBuild.phase == BUILD_PHASE.COMPLETE) |
                       (RepositoryBuild.phase == BUILD_PHASE.ERROR) |
                       (RepositoryBuild.started < presumed_dead_date),
-                      RepositoryBuild.logs_archived == False))
+                      RepositoryBuild.logs_archived == False)
+                .limit(50)
+                .alias('candidates'))
+
+  try:
+    found_id = (RepositoryBuild
+                .select(candidates.c.id)
+                .from_(candidates)
+                .order_by(db_random_func())
+                .get())
+    return RepositoryBuild.get(id=found_id)
+  except RepositoryBuild.DoesNotExist:
+    return None
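The rewrite replaces a query that returned every archivable build with one that picks a single random candidate: the inner query selects at most 50 archivable ids, and the outer query orders that small set with a database-side random function, so concurrent log archivers rarely contend for the same build. db_random_func is a CallableProxy bound at configure() time; a sketch of a plausible binding — an assumption, the actual initialization is not in this diff:

  from peewee import fn

  # Chosen per backend when the database is configured:
  db_random_func.initialize(fn.Rand)      # MySQL: RAND()
  # db_random_func.initialize(fn.Random)  # SQLite / PostgreSQL: RANDOM()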

data/model/image.py

@@ -277,15 +277,24 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
    # We cleanup any old checksum in case it's a retry after a fail
    fetched.storage.checksum = None
-    fetched.created = datetime.now()
+    now = datetime.now()
+    # TODO stop writing to storage when all readers are removed
+    fetched.storage.created = now
+    fetched.created = now

    if created_date_str is not None:
      try:
-        fetched.storage.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
+        # TODO stop writing to storage fields when all readers are removed
+        parsed_created_time = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
+        fetched.created = parsed_created_time
+        fetched.storage.created = parsed_created_time
      except:
        # parse raises different exceptions, so we cannot use a specific kind of handler here.
        pass

+    # TODO stop writing to storage fields when all readers are removed
    fetched.storage.comment = comment
    fetched.storage.command = command
+    fetched.comment = comment
+    fetched.command = command
    fetched.v1_json_metadata = v1_json_metadata
@@ -327,13 +336,18 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
                     .where(Image.id << ancestors)
                     .scalar()) + image_size
+      # TODO stop writing to storage when all readers are removed
      image.storage.aggregate_size = total_size
+      image.aggregate_size = total_size
    except Image.DoesNotExist:
      pass
  else:
+    # TODO stop writing to storage when all readers are removed
    image.storage.aggregate_size = image_size
+    image.aggregate_size = image_size

  image.storage.save()
+  image.save()
  return image
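This is the write half of a dual-write migration: the new Image columns and the legacy ImageStorage columns both receive the values while readers are moved over (the TODOs mark the storage writes for eventual removal). A hedged sketch of the matching read-side fallback during the transition — not part of this diff:

  def image_created(image):
    # Prefer the new Image column; fall back to the legacy storage column
    # until existing rows are backfilled.
    return image.created if image.created is not None else image.storage.created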

data/model/release.py (new file, 23 lines)

@@ -0,0 +1,23 @@
from data.database import QuayRelease, QuayRegion, QuayService


def set_region_release(service_name, region_name, version):
  service, _ = QuayService.create_or_get(name=service_name)
  region, _ = QuayRegion.create_or_get(name=region_name)

  return QuayRelease.create_or_get(service=service, version=version, region=region)


def get_recent_releases(service_name, region_name):
  return (QuayRelease
          .select(QuayRelease)
          .join(QuayService)
          .switch(QuayRelease)
          .join(QuayRegion)
          .where(
            QuayService.name == service_name,
            QuayRegion.name == region_name,
            QuayRelease.reverted == False,
          )
          .order_by(QuayRelease.created.desc()))
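Putting the pieces together: a deploy step records the version being rolled out per service and region, and the guard in data/migrations/env.py reads the history back newest-first. A hedged usage sketch (the literals are illustrative; in production SERVICE, REGION, and the version come from the release module):

  from data.model.release import set_region_release, get_recent_releases

  # At deploy time; create_or_get makes this idempotent per (service, version, region):
  release, created = set_region_release('quay', 'staging', 'cafe222')

  # Newest release first; offset(1) yields the one deployed before it:
  previous = list(get_recent_releases('quay', 'staging').offset(1).limit(1))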

data/model/tag.py

@@ -135,6 +135,7 @@ def list_repository_tag_history(repo_obj, page=1, size=100, specific_tag=None):
           .where(RepositoryTag.repository == repo_obj)
           .where(RepositoryTag.hidden == False)
           .order_by(RepositoryTag.lifetime_start_ts.desc())
+          .order_by(RepositoryTag.name)
           .paginate(page, size))

  if specific_tag:

data/queue.py

@@ -26,9 +26,10 @@ class MetricQueueReporter(object):

class WorkQueue(object):
  def __init__(self, queue_name, transaction_factory,
-               canonical_name_match_list=None, reporter=None):
+               canonical_name_match_list=None, reporter=None, metric_queue=None):
    self._queue_name = queue_name
    self._reporter = reporter
+    self._metric_queue = metric_queue
    self._transaction_factory = transaction_factory
    self._currently_processing = False
@@ -86,12 +87,20 @@ class WorkQueue(object):
    return (running_count, available_not_running_count, available_count)

  def update_metrics(self):
-    if self._reporter is None:
+    if self._reporter is None and self._metric_queue is None:
      return

    (running_count, available_not_running_count, available_count) = self.get_metrics()
-    self._reporter(self._currently_processing, running_count,
-                   running_count + available_not_running_count)
+
+    if self._metric_queue:
+      dim = {'queue': self._queue_name}
+      self._metric_queue.put('Running', running_count, dimensions=dim)
+      self._metric_queue.put('AvailableNotRunning', available_not_running_count, dimensions=dim)
+      self._metric_queue.put('Available', available_count, dimensions=dim)
+
+    if self._reporter:
+      self._reporter(self._currently_processing, running_count,
+                     running_count + available_not_running_count)

  def has_retries_remaining(self, item_id):
    """ Returns whether the queue item with the given id has any retries remaining. If the
@@ -185,7 +194,12 @@ class WorkQueue(object):
  def complete(self, completed_item):
    with self._transaction_factory(db):
-      completed_item_obj = self._item_by_id_for_update(completed_item.id)
+      try:
+        completed_item_obj = self._item_by_id_for_update(completed_item.id)
+      except QueueItem.DoesNotExist:
+        self._currently_processing = False
+        return
+
      completed_item_obj.delete_instance(recursive=True)
      self._currently_processing = False
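Two behavioral notes on this file: update_metrics now fans out to a metric queue, a legacy reporter, or both, and complete() tolerates a queue item that was already deleted (for example, by a competing worker) instead of raising. A hedged sketch of wiring a queue with the new parameter — the queue name and metric_queue object are illustrative, and the metric queue is assumed to expose the put() signature used above:

  build_queue = WorkQueue('dockerfilebuild', transaction_factory,
                          reporter=None, metric_queue=metric_queue)
  build_queue.update_metrics()  # emits Running / AvailableNotRunning / Available
                                # with a {'queue': 'dockerfilebuild'} dimension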

data/userfiles.py

@@ -1,11 +1,13 @@
import os
import logging
import magic
+import urlparse

from uuid import uuid4
from flask import url_for, request, send_file, make_response, abort
from flask.views import View
from _pyio import BufferedReader

+from util import get_app_url

logger = logging.getLogger(__name__)
@@ -77,7 +79,9 @@ class DelegateUserfiles(object):
    if url is None:
      with self._app.app_context() as ctx:
        ctx.url_adapter = self._build_url_adapter()
-        return (url_for(self._handler_name, file_id=file_id, _external=True), file_id)
+        file_relative_url = url_for(self._handler_name, file_id=file_id)
+        file_url = urlparse.urljoin(get_app_url(self._app.config), file_relative_url)
+        return (file_url, file_id)

    return (url, file_id)
@@ -97,7 +101,8 @@ class DelegateUserfiles(object):
    if url is None:
      with self._app.app_context() as ctx:
        ctx.url_adapter = self._build_url_adapter()
-        return url_for(self._handler_name, file_id=file_id, _external=True)
+        file_relative_url = url_for(self._handler_name, file_id=file_id)
+        return urlparse.urljoin(get_app_url(self._app.config), file_relative_url)

    return url
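The switch away from url_for(..., _external=True) means the host in generated userfiles URLs now comes from configuration rather than from whatever host header Flask saw. get_app_url is assumed to assemble the external base URL from config, along these lines (a sketch, not the actual util implementation):

  def get_app_url(config):
    # e.g. 'https://quay.io' from the configured scheme and hostname
    return '%s://%s' % (config['PREFERRED_URL_SCHEME'], config['SERVER_HOSTNAME'])

  # urlparse.urljoin(get_app_url(config), '/userfiles/abc123')
  #   -> 'https://quay.io/userfiles/abc123'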