Phase 4 of the namespace to user migration: actually remove the column from the db and remove the dependence on serialized namespaces in the workers and queues

This commit is contained in:
parent 2c5cc7990f
commit e8b3d1cc4a
17 changed files with 273 additions and 123 deletions
@@ -0,0 +1,61 @@
"""Translate the queue names to reference namespace by id, remove the namespace column.

Revision ID: 2fb36d4be80d
Revises: 3f4fe1194671
Create Date: 2014-09-30 17:31:33.308490

"""

# revision identifiers, used by Alembic.
revision = '2fb36d4be80d'
down_revision = '3f4fe1194671'

from alembic import op
import sqlalchemy as sa

import re
from app import app
from data.database import QueueItem, User, db


NAMESPACE_EXTRACTOR = re.compile(r'^([a-z]+/)([a-z0-9_]+)(/.*$)')


def upgrade(tables):
  # Rename the namespace component of the existing queue items to reference user ids
  with app.config['DB_TRANSACTION_FACTORY'](db):
    for item in QueueItem.select():
      namespace_match = NAMESPACE_EXTRACTOR.match(item.queue_name)
      if namespace_match is not None:
        namespace_name = namespace_match.group(2)
        namespace_user = User.get(User.username == namespace_name)
        item.queue_name = '%s%s%s' % (namespace_match.group(1), str(namespace_user.id),
                                      namespace_match.group(3))
        item.save()
      else:
        raise RuntimeError('Invalid queue name: %s' % item.queue_name)

  op.drop_index('repository_namespace_name', table_name='repository')
  op.drop_column('repository', 'namespace')


def downgrade(tables):
  # Add the namespace column back in and fill it in
  op.add_column('repository', sa.Column('namespace', sa.String(length=255)))
  conn = op.get_bind()
  conn.execute('update repository set namespace = (select username from user where user.id = repository.namespace_user_id) where namespace is NULL')
  op.create_index('repository_namespace_name', 'repository', ['namespace', 'name'], unique=True)

  # Rename the namespace component of existing queue items to reference namespace strings
  with app.config['DB_TRANSACTION_FACTORY'](db):
    for item in QueueItem.select():
      namespace_match = NAMESPACE_EXTRACTOR.match(item.queue_name)
      if namespace_match is not None:
        namespace_id = namespace_match.group(2)
        namespace_user = User.get(User.id == namespace_id)
        item.queue_name = '%s%s%s' % (namespace_match.group(1),
                                      str(namespace_user.username),
                                      namespace_match.group(3))
        item.save()
      else:
        raise RuntimeError('Invalid queue name: %s' % item.queue_name)
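For illustration, a minimal standalone sketch of the rename the upgrade step performs. The queue name, username, and id below are made up, and the dictionary stands in for the User table lookup; the real migration resolves ids with User.get(User.username == ...).

import re

# Same pattern as the migration: prefix, namespace component, remainder.
NAMESPACE_EXTRACTOR = re.compile(r'^([a-z]+/)([a-z0-9_]+)(/.*$)')

# Hypothetical username -> id mapping standing in for the User table.
USER_IDS = {'devtable': 42}

def translate_queue_name(queue_name):
  match = NAMESPACE_EXTRACTOR.match(queue_name)
  if match is None:
    raise RuntimeError('Invalid queue name: %s' % queue_name)
  return '%s%s%s' % (match.group(1), USER_IDS[match.group(2)], match.group(3))

print(translate_queue_name('dockerfilebuild/devtable/somerepo'))
# dockerfilebuild/42/somerepo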
@@ -592,6 +592,13 @@ def get_user_by_id(user_db_id):
    return None


def get_namespace_by_user_id(namespace_user_db_id):
  try:
    return User.get(User.id == namespace_user_db_id, User.robot == False).username
  except User.DoesNotExist:
    raise InvalidUsernameException('User with id does not exist: %s' % namespace_user_db_id)


def get_user_or_org_by_customer_id(customer_id):
  try:
    return User.get(User.stripe_id == customer_id)
@@ -858,6 +865,15 @@ def change_password(user, new_password):
  delete_notifications_by_kind(user, 'password_required')


def change_username(user, new_username):
  (username_valid, username_issue) = validate_username(new_username)
  if not username_valid:
    raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))

  user.username = new_username
  user.save()


def change_invoice_email(user, invoice_email):
  user.invoice_email = invoice_email
  user.save()
@@ -1676,10 +1692,21 @@ def load_token_data(code):
    raise InvalidTokenException('Invalid delegate token code: %s' % code)


def get_repository_build(namespace_name, repository_name, build_uuid):
def _get_build_base_query():
  return (RepositoryBuild
          .select(RepositoryBuild, RepositoryBuildTrigger, BuildTriggerService, Repository,
                  Namespace)
          .join(Repository)
          .join(Namespace, on=(Repository.namespace_user == Namespace.id))
          .switch(RepositoryBuild)
          .join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
          .join(BuildTriggerService, JOIN_LEFT_OUTER)
          .order_by(RepositoryBuild.started.desc()))


def get_repository_build(build_uuid):
  try:
    query = list_repository_builds(namespace_name, repository_name, 1)
    return query.where(RepositoryBuild.uuid == build_uuid).get()
    return _get_build_base_query().where(RepositoryBuild.uuid == build_uuid).get()

  except RepositoryBuild.DoesNotExist:
    msg = 'Unable to locate a build by id: %s' % build_uuid
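The Namespace join used in _get_build_base_query (and in the other queries in this diff) is the usual peewee pattern of aliasing the user table and joining repositories to it by id. A standalone sketch of that pattern; the models and data here are illustrative, not the application's real schema:

from peewee import SqliteDatabase, Model, CharField, ForeignKeyField

db = SqliteDatabase(':memory:')

class BaseModel(Model):
  class Meta:
    database = db

class User(BaseModel):
  username = CharField()

class Repository(BaseModel):
  namespace_user = ForeignKeyField(User)
  name = CharField()

# Alias the user table so it can be addressed as the repository's namespace.
Namespace = User.alias()

db.connect()
db.create_tables([User, Repository])
owner = User.create(username='devtable')
Repository.create(namespace_user=owner, name='somerepo')

query = (Repository
         .select(Repository, Namespace)
         .join(Namespace, on=(Repository.namespace_user == Namespace.id))
         .where(Namespace.username == 'devtable'))
print([repo.name for repo in query])  # ['somerepo']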
@@ -1688,15 +1715,8 @@ def get_repository_build(namespace_name, repository_name, build_uuid):

def list_repository_builds(namespace_name, repository_name, limit,
                           include_inactive=True):
  query = (RepositoryBuild
           .select(RepositoryBuild, RepositoryBuildTrigger, BuildTriggerService)
           .join(Repository)
           .join(Namespace, on=(Repository.namespace_user == Namespace.id))
           .switch(RepositoryBuild)
           .join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
           .join(BuildTriggerService, JOIN_LEFT_OUTER)
  query = (_get_build_base_query()
           .where(Repository.name == repository_name, Namespace.username == namespace_name)
           .order_by(RepositoryBuild.started.desc())
           .limit(limit))

  if not include_inactive:
@@ -1760,21 +1780,23 @@ def create_repo_notification(repo, event_name, method_name, config):
                                       config_json=json.dumps(config))


def get_repo_notification(namespace_name, repository_name, uuid):
def get_repo_notification(uuid):
  try:
    return (RepositoryNotification
            .select(RepositoryNotification, Repository, Namespace)
            .join(Repository)
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .where(Namespace.username == namespace_name, Repository.name == repository_name,
                   RepositoryNotification.uuid == uuid)
            .where(RepositoryNotification.uuid == uuid)
            .get())
  except RepositoryNotification.DoesNotExist:
    raise InvalidNotificationException('No repository notification found with id: %s' % uuid)


def delete_repo_notification(namespace_name, repository_name, uuid):
  found = get_repo_notification(namespace_name, repository_name, uuid)
  found = get_repo_notification(uuid)
  if (found.repository.namespace_user.username != namespace_name or
      found.repository.name != repository_name):
    raise InvalidNotificationException('No repository notification found with id: %s' % uuid)
  found.delete_instance()
  return found
@@ -1,6 +1,7 @@
from datetime import datetime, timedelta

from data.database import QueueItem, db
from util.morecollections import AttrDict


MINIMUM_EXTENSION = timedelta(seconds=20)
@@ -25,17 +26,17 @@ class WorkQueue(object):

  def _running_jobs(self, now, name_match_query):
    return (QueueItem
            .select(QueueItem.queue_name)
            .where(QueueItem.available == False,
                   QueueItem.processing_expires > now,
                   QueueItem.queue_name ** name_match_query))
            .select(QueueItem.queue_name)
            .where(QueueItem.available == False,
                   QueueItem.processing_expires > now,
                   QueueItem.queue_name ** name_match_query))

  def _available_jobs(self, now, name_match_query, running_query):
    return (QueueItem
            .select()
            .where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now,
                   ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
                   QueueItem.retries_remaining > 0, ~(QueueItem.queue_name << running_query)))
            .select()
            .where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now,
                   ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
                   QueueItem.retries_remaining > 0, ~(QueueItem.queue_name << running_query)))

  def _name_match_query(self):
    return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)
@@ -49,7 +50,7 @@ class WorkQueue(object):
    name_match_query = self._name_match_query()

    running_query = self._running_jobs(now, name_match_query)
    running_count =running_query.distinct().count()
    running_count = running_query.distinct().count()

    avialable_query = self._available_jobs(now, name_match_query, running_query)
    available_count = avialable_query.select(QueueItem.queue_name).distinct().count()
@@ -89,41 +90,49 @@ class WorkQueue(object):

    item = None
    try:
      item = avail.order_by(QueueItem.id).get()
      item.available = False
      item.processing_expires = now + timedelta(seconds=processing_time)
      item.retries_remaining -= 1
      item.save()
      db_item = avail.order_by(QueueItem.id).get()
      db_item.available = False
      db_item.processing_expires = now + timedelta(seconds=processing_time)
      db_item.retries_remaining -= 1
      db_item.save()

      item = AttrDict({
        'id': db_item.id,
        'body': db_item.body,
      })

      self._currently_processing = True
    except QueueItem.DoesNotExist:
      self._currently_processing = False
      pass

    # Return a view of the queue item rather than an active db object
    return item

  def complete(self, completed_item):
    with self._transaction_factory(db):
      completed_item.delete_instance()
      completed_item_obj = QueueItem.get(QueueItem.id == completed_item.id)
      completed_item_obj.delete_instance()
      self._currently_processing = False

  def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
    with self._transaction_factory(db):
      retry_date = datetime.utcnow() + timedelta(seconds=retry_after)
      incomplete_item.available_after = retry_date
      incomplete_item.available = True
      incomplete_item_obj = QueueItem.get(QueueItem.id == incomplete_item.id)
      incomplete_item_obj.available_after = retry_date
      incomplete_item_obj.available = True

      if restore_retry:
        incomplete_item.retries_remaining += 1
        incomplete_item_obj.retries_remaining += 1

      incomplete_item.save()
      incomplete_item_obj.save()
      self._currently_processing = False

  @staticmethod
  def extend_processing(queue_item, seconds_from_now):
  def extend_processing(self, queue_item, seconds_from_now):
    new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)

    # Only actually write the new expiration to the db if it moves the expiration some minimum
    if new_expiration - queue_item.processing_expires > MINIMUM_EXTENSION:
      queue_item.processing_expires = new_expiration
      queue_item.save()
    queue_item_obj = QueueItem.get(QueueItem.id == queue_item.id)
    if new_expiration - queue_item_obj.processing_expires > MINIMUM_EXTENSION:
      with self._transaction_factory(db):
        queue_item_obj.processing_expires = new_expiration
        queue_item_obj.save()
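The shape of the queue change is that get() now hands workers a detached view of the row, and complete()/incomplete()/extend_processing() re-fetch the row by id before mutating it. A minimal standalone sketch of that idea; the AttrDict class below is only a stand-in for util.morecollections.AttrDict, and the row data is made up:

# Stand-in for util.morecollections.AttrDict: a dict whose keys read as attributes.
class AttrDict(dict):
  def __getattr__(self, name):
    try:
      return self[name]
    except KeyError:
      raise AttributeError(name)

# Instead of handing the worker a live QueueItem model instance, get() builds a
# detached view carrying only the fields the worker needs...
db_row = {'id': 7, 'body': '{"build_uuid": "abc123"}'}  # made-up queue row
item = AttrDict({'id': db_row['id'], 'body': db_row['body']})

# ...and the mutating methods look the row up again by id before touching it,
# e.g. QueueItem.get(QueueItem.id == item.id).
print(item.id, item.body)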