Upgrade Peewee to latest 3.x
This requires a number of small changes in the data model code, as well as additional testing.
parent 70b7ee4654 · commit d3d9cca182
26 changed files with 220 additions and 193 deletions
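Most of the diff below is a mechanical application of the Peewee 3 API renames. A minimal sketch of the mapping, using hypothetical models (not code from this commit):

    from peewee import SqliteDatabase, Model, CharField, ForeignKeyField, JOIN

    db = SqliteDatabase(':memory:')

    class Account(Model):
        # Peewee 2's db_column= is now column_name=
        email = CharField(column_name='email_address')

        class Meta:
            database = db

    class Repo(Model):
        # rel_model= is now model=; related_name= is now backref=
        owner = ForeignKeyField(model=Account, backref='repos')

        class Meta:
            database = db

    db.create_tables([Account, Repo])   # replaces create_model_tables(...)

    # The JOIN_LEFT_OUTER constant is now the JOIN.LEFT_OUTER enum member.
    query = Account.select(Account, Repo).join(Repo, JOIN.LEFT_OUTER)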
@@ -194,12 +194,19 @@ class BuildJobNotifier(object):
     with UseThenDisconnect(app.config):
       tags = self.build_config.get('docker_tags', ['latest'])
       trigger = self.repo_build.trigger
+
+      # TODO(bison): This is weird hack. Figure this out.
+      if trigger is not None and trigger.id is not None:
+        trigger_kind = trigger.service.name
+      else:
+        trigger_kind = None
+
       event_data = {
         'build_id': self.repo_build.uuid,
         'build_name': self.repo_build.display_name,
         'docker_tags': tags,
         'trigger_id': trigger.uuid if trigger is not None else None,
-        'trigger_kind': trigger.service.name if trigger is not None else None,
+        'trigger_kind': trigger_kind,
         'trigger_metadata': self.build_config.get('trigger_metadata', {})
       }
@@ -17,7 +17,7 @@ import toposort

 from enum import Enum
 from peewee import *
-from playhouse.shortcuts import RetryOperationalError
+from peewee import __exception_wrapper__, Function
 from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase, PooledSqliteDatabase

 from sqlalchemy.engine.url import make_url
@@ -121,27 +121,27 @@ def delete_instance_filtered(instance, model_class, delete_nullable, skip_transi
     # We only want to skip transitive deletes, which are done using subqueries in the form of
     # DELETE FROM <table> in <subquery>. If an op is not using a subquery, we allow it to be
     # applied directly.
-    if fk.model_class not in skip_transitive_deletes or query.op != 'in':
+    if fk.model not in skip_transitive_deletes or query.op.lower() != 'in':
       filtered_ops.append((query, fk))

-    if query.op == 'in':
-      dependencies[fk.model_class.__name__].add(query.rhs.model_class.__name__)
+    if query.op.lower() == 'in':
+      dependencies[fk.model.__name__].add(query.rhs.model.__name__)
     elif query.op == '=':
-      dependencies[fk.model_class.__name__].add(model_class.__name__)
+      dependencies[fk.model.__name__].add(model_class.__name__)
     else:
       raise RuntimeError('Unknown operator in recursive repository delete query')

   sorted_models = list(reversed(toposort.toposort_flatten(dependencies)))
   def sorted_model_key(query_fk_tuple):
     cmp_query, cmp_fk = query_fk_tuple
-    if cmp_query.op == 'in':
+    if cmp_query.op.lower() == 'in':
       return -1
-    return sorted_models.index(cmp_fk.model_class.__name__)
+    return sorted_models.index(cmp_fk.model.__name__)
   filtered_ops.sort(key=sorted_model_key)

   with db_transaction():
     for query, fk in filtered_ops:
-      _model = fk.model_class
+      _model = fk.model
       if fk.null and not delete_nullable:
         _model.update(**{fk.name: None}).where(query).execute()
       else:
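Peewee 3 reports expression operators in uppercase ('IN') where 2.x used lowercase, hence the .lower() normalization added above. A small hypothetical illustration:

    from peewee import Model, IntegerField

    class Widget(Model):
        value = IntegerField()

    expr = Widget.value << [1, 2, 3]   # containment builds an IN expression
    assert expr.op.lower() == 'in'     # 'IN' under peewee 3; 'in' under 2.x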
@@ -162,6 +162,24 @@ class CallableProxy(Proxy):
     return self.obj(*args, **kwargs)


+class RetryOperationalError(object):
+
+  def execute_sql(self, sql, params=None, commit=True):
+    try:
+      cursor = super(RetryOperationalError, self).execute_sql(sql, params, commit)
+    except OperationalError:
+      if not self.is_closed():
+        self.close()
+
+      with __exception_wrapper__:
+        cursor = self.cursor()
+        cursor.execute(sql, params or ())
+        if commit and not self.in_transaction():
+          self.commit()
+
+    return cursor
+
+
 class CloseForLongOperation(object):
   """ Helper object which disconnects the database then reconnects after the nested operation
       completes.
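Peewee 3 dropped playhouse.shortcuts.RetryOperationalError, so the commit inlines an equivalent mixin built on __exception_wrapper__. A hedged sketch of how such a mixin composes with a driver class (the composed class name is hypothetical, and the commit itself leaves the retry wrapping disabled further down):

    from playhouse.pool import PooledMySQLDatabase

    class RetryingPooledMySQLDatabase(RetryOperationalError, PooledMySQLDatabase):
        # The mixin comes first in the MRO: its execute_sql() delegates to the
        # real driver via super() and retries once after reconnecting.
        pass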
@@ -214,11 +232,11 @@ class TupleSelector(object):
   @classmethod
   def tuple_reference_key(cls, field):
     """ Returns a string key for referencing a field in a TupleSelector. """
-    if field._node_type == 'func':
+    if isinstance(field, Function):
       return field.name + ','.join([cls.tuple_reference_key(arg) for arg in field.arguments])

-    if field._node_type == 'field':
-      return field.name + ':' + field.model_class.__name__
+    if isinstance(field, Field):
+      return field.name + ':' + field.model.__name__

     raise Exception('Unknown field type %s in TupleSelector' % field._node_type)
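With _node_type gone, node kinds are distinguished by their classes in Peewee 3: fn.X(...) calls build Function nodes, and model attributes are Field instances. (Note the final raise above still reads field._node_type, which no longer exists in 3.x; it is only reached on the error path.) A small illustration:

    from peewee import CharField, Field, Function, fn

    name_field = CharField()      # standalone field, purely for illustration
    node = fn.Lower(name_field)   # function-call node
    assert isinstance(node, Function)
    assert isinstance(name_field, Field)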
@@ -268,6 +286,9 @@ def _db_from_url(url, db_kwargs, connect_timeout=DEFAULT_DB_CONNECT_TIMEOUT):
   if parsed_url.password:
     db_kwargs['password'] = parsed_url.password

+  # Remove threadlocals. It used to be required.
+  db_kwargs.pop('threadlocals', None)
+
   # Note: sqlite does not support connect_timeout.
   if parsed_url.drivername != 'sqlite':
     db_kwargs['connect_timeout'] = db_kwargs.get('connect_timeout', connect_timeout)
@@ -285,8 +306,9 @@ def _db_from_url(url, db_kwargs, connect_timeout=DEFAULT_DB_CONNECT_TIMEOUT):
     db_kwargs.pop('stale_timeout', None)
     db_kwargs.pop('max_connections', None)

-  wrapped_driver = _wrap_for_retry(driver)
-  return wrapped_driver(parsed_url.database, **db_kwargs)
+  # wrapped_driver = _wrap_for_retry(driver)
+  # return wrapped_driver(parsed_url.database, **db_kwargs)
+  return driver(parsed_url.database, **db_kwargs)


 def configure(config_object):
@@ -351,20 +373,20 @@ class QuayUserField(ForeignKeyField):
   def __init__(self, allows_robots=False, robot_null_delete=False, *args, **kwargs):
     self.allows_robots = allows_robots
     self.robot_null_delete = robot_null_delete
-    if 'rel_model' not in kwargs:
-      kwargs['rel_model'] = User
+    if 'model' not in kwargs:
+      kwargs['model'] = User
     super(QuayUserField, self).__init__(*args, **kwargs)


 class EnumField(ForeignKeyField):
   """ Create a cached python Enum from an EnumTable """
-  def __init__(self, rel_model, enum_key_field='name', *args, **kwargs):
+  def __init__(self, model, enum_key_field='name', *args, **kwargs):
     """
-    rel_model is the EnumTable model-class (see ForeignKeyField)
+    model is the EnumTable model-class (see ForeignKeyField)
     enum_key_field is the field from the EnumTable to use as the enum name
     """
     self.enum_key_field = enum_key_field
-    super(EnumField, self).__init__(rel_model, *args, **kwargs)
+    super(EnumField, self).__init__(model, *args, **kwargs)

   @property
   @lru_cache(maxsize=1)
@@ -412,7 +434,7 @@ class BaseModel(ReadSlaveModel):
     if name.endswith('_id'):
       field_name = name[0:len(name) - 3]
       if field_name in self._meta.fields:
-        return self._data.get(field_name)
+        return self.__data__.get(field_name)

     return super(BaseModel, self).__getattribute__(name)
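Peewee 3 renamed the per-instance row storage from _data to __data__; the override keeps Quay's foo_id convention of reading the raw foreign-key column without triggering a query. A sketch reusing a model from this diff:

    build = RepositoryBuild.get()
    build.trigger_id   # raw FK value read from build.__data__, no extra SELECT
    build.trigger      # descriptor access, resolves the related row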
@@ -449,7 +471,7 @@ class User(BaseModel):
     # For all the model dependencies, only delete those that allow robots.
     for query, fk in reversed(list(self.dependencies(search_nullable=True))):
       if isinstance(fk, QuayUserField) and fk.allows_robots:
-        _model = fk.model_class
+        _model = fk.model

         if fk.robot_null_delete:
           _model.update(**{fk.name: None}).where(query).execute()
@@ -551,7 +573,7 @@ class TeamMemberInvite(BaseModel):
   user = QuayUserField(index=True, null=True)
   email = CharField(null=True)
   team = ForeignKeyField(Team)
-  inviter = ForeignKeyField(User, related_name='inviter')
+  inviter = ForeignKeyField(User, backref='inviter')
   invite_token = CharField(default=urn_generator(['teaminvite']))
@@ -664,13 +686,13 @@ class RepositoryPermission(BaseModel):


 class PermissionPrototype(BaseModel):
-  org = QuayUserField(index=True, related_name='orgpermissionproto')
+  org = QuayUserField(index=True, backref='orgpermissionproto')
   uuid = CharField(default=uuid_generator)
   activating_user = QuayUserField(allows_robots=True, index=True, null=True,
-                                  related_name='userpermissionproto')
+                                  backref='userpermissionproto')
-  delegate_user = QuayUserField(allows_robots=True, related_name='receivingpermission',
+  delegate_user = QuayUserField(allows_robots=True, backref='receivingpermission',
                                 null=True)
-  delegate_team = ForeignKeyField(Team, related_name='receivingpermission',
+  delegate_team = ForeignKeyField(Team, backref='receivingpermission',
                                   null=True)
   role = ForeignKeyField(Role)
@@ -714,7 +736,7 @@ class RepositoryBuildTrigger(BaseModel):
   private_key = TextField(null=True)
   config = TextField(default='{}')
   write_token = ForeignKeyField(AccessToken, null=True)
-  pull_robot = QuayUserField(allows_robots=True, null=True, related_name='triggerpullrobot',
+  pull_robot = QuayUserField(allows_robots=True, null=True, backref='triggerpullrobot',
                              robot_null_delete=True)
   enabled = BooleanField(default=True)
   disabled_reason = EnumField(DisableReason, null=True)
@@ -789,9 +811,6 @@ class UserRegion(BaseModel):
   )


-_ImageProxy = Proxy()
-
-
 class Image(BaseModel):
   # This class is intentionally denormalized. Even though images are supposed
   # to be globally unique we can't treat them as such for permissions and
@@ -816,7 +835,7 @@ class Image(BaseModel):
   security_indexed_engine = IntegerField(default=IMAGE_NOT_SCANNED_ENGINE_VERSION, index=True)

   # We use a proxy here instead of 'self' in order to disable the foreign key constraint
-  parent = ForeignKeyField(_ImageProxy, null=True, related_name='children')
+  parent = DeferredForeignKey('Image', null=True, backref='children')

   class Meta:
     database = db
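DeferredForeignKey names its target as a string and resolves once that model class is declared, which replaces the Peewee 2 Proxy()/initialize() dance (the same cleanup is applied to ServiceKeyApproval below). A minimal sketch with a hypothetical model:

    from peewee import Model, CharField, DeferredForeignKey

    class Category(Model):
        name = CharField()
        # Resolves to a real ForeignKeyField once Category finishes declaring.
        parent = DeferredForeignKey('Category', null=True, backref='children')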
@@ -835,9 +854,6 @@ class Image(BaseModel):
     return map(int, self.ancestors.split('/')[1:-1])


-_ImageProxy.initialize(Image)
-
-
 class DerivedStorageForImage(BaseModel):
   source_image = ForeignKeyField(Image)
   derivative = ForeignKeyField(ImageStorage)
@@ -942,7 +958,7 @@ class RepositoryBuild(BaseModel):
   started = DateTimeField(default=datetime.now, index=True)
   display_name = CharField()
   trigger = ForeignKeyField(RepositoryBuildTrigger, null=True)
-  pull_robot = QuayUserField(null=True, related_name='buildpullrobot', allows_robots=True,
+  pull_robot = QuayUserField(null=True, backref='buildpullrobot', allows_robots=True,
                              robot_null_delete=True)
   logs_archived = BooleanField(default=False)
   queue_id = CharField(null=True, index=True)
@@ -962,9 +978,9 @@ class LogEntryKind(BaseModel):

 class LogEntry(BaseModel):
   kind = ForeignKeyField(LogEntryKind)
-  account = IntegerField(index=True, db_column='account_id')
-  performer = IntegerField(index=True, null=True, db_column='performer_id')
-  repository = IntegerField(index=True, null=True, db_column='repository_id')
+  account = IntegerField(index=True, column_name='account_id')
+  performer = IntegerField(index=True, null=True, column_name='performer_id')
+  repository = IntegerField(index=True, null=True, column_name='repository_id')
   datetime = DateTimeField(default=datetime.now, index=True)
   ip = CharField(null=True)
   metadata_json = TextField(default='{}')
@@ -1024,7 +1040,7 @@ class OAuthApplication(BaseModel):

   name = CharField()
   description = TextField(default='')
-  avatar_email = CharField(null=True, db_column='gravatar_email')
+  avatar_email = CharField(null=True, column_name='gravatar_email')


 class OAuthAuthorizationCode(BaseModel):
@@ -1163,15 +1179,12 @@ class ServiceKeyApprovalType(Enum):
   AUTOMATIC = 'Automatic'


-_ServiceKeyApproverProxy = Proxy()
 class ServiceKeyApproval(BaseModel):
-  approver = ForeignKeyField(_ServiceKeyApproverProxy, null=True)
+  approver = QuayUserField(null=True)
   approval_type = CharField(index=True)
   approved_date = DateTimeField(default=datetime.utcnow)
   notes = TextField(default='')

-_ServiceKeyApproverProxy.initialize(User)
-
-
 class ServiceKey(BaseModel):
   name = CharField()
@@ -1309,7 +1322,7 @@ class ApprTag(BaseModel):
   reverted = BooleanField(default=False)
   protected = BooleanField(default=False)
   tag_kind = EnumField(ApprTagKind)
-  linked_tag = ForeignKeyField('self', null=True, related_name='tag_parents')
+  linked_tag = ForeignKeyField('self', null=True, backref='tag_parents')

   class Meta:
     database = db
@@ -2,7 +2,7 @@ import base64
 import resumablehashlib
 import json

-from peewee import TextField, CharField, Clause
+from peewee import TextField, CharField
 from data.text import prefix_search
@@ -94,9 +94,7 @@ def filter_to_repos_for_user(query, user_id=None, namespace=None, repo_kind='ima
   queries = []

   if include_public:
-    queries.append(query
-                   .clone()
-                   .where(Repository.visibility == get_public_repo_visibility()))
+    queries.append(query.where(Repository.visibility == get_public_repo_visibility()))

   if user_id is not None:
     AdminTeam = Team.alias()
@@ -104,13 +102,11 @@ def filter_to_repos_for_user(query, user_id=None, namespace=None, repo_kind='ima

     # Add repositories in which the user has permission.
     queries.append(query
-                   .clone()
                    .switch(RepositoryPermission)
                    .where(RepositoryPermission.user == user_id))

     # Add repositories in which the user is a member of a team that has permission.
     queries.append(query
-                   .clone()
                    .switch(RepositoryPermission)
                    .join(Team)
                    .join(TeamMember)

@@ -118,7 +114,6 @@ def filter_to_repos_for_user(query, user_id=None, namespace=None, repo_kind='ima

     # Add repositories under namespaces in which the user is the org admin.
     queries.append(query
-                   .clone()
                    .switch(Repository)
                    .join(AdminTeam, on=(Repository.namespace_user == AdminTeam.organization))
                    .join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
@@ -2,7 +2,7 @@ import json
 import os
 from datetime import timedelta, datetime

-from peewee import JOIN_LEFT_OUTER
+from peewee import JOIN

 import features
 from data.database import (BuildTriggerService, RepositoryBuildTrigger, Repository, Namespace, User,

@@ -50,7 +50,7 @@ def get_build_trigger(trigger_uuid):
             .join(Repository)
             .join(Namespace, on=(Repository.namespace_user == Namespace.id))
             .switch(RepositoryBuildTrigger)
-            .join(User)
+            .join(User, on=(RepositoryBuildTrigger.connected_user == User.id))
             .where(RepositoryBuildTrigger.uuid == trigger_uuid)
             .get())
   except RepositoryBuildTrigger.DoesNotExist:
@@ -94,10 +94,10 @@ def _get_build_base_query():
            .join(Repository)
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .switch(RepositoryBuild)
-           .join(User, JOIN_LEFT_OUTER)
+           .join(User, JOIN.LEFT_OUTER)
            .switch(RepositoryBuild)
-           .join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
-           .join(BuildTriggerService, JOIN_LEFT_OUTER)
+           .join(RepositoryBuildTrigger, JOIN.LEFT_OUTER)
+           .join(BuildTriggerService, JOIN.LEFT_OUTER)
            .order_by(RepositoryBuild.started.desc()))
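The module-level JOIN_* constants became members of the JOIN namespace in Peewee 3, so the substitution is mechanical everywhere it appears:

    from peewee import JOIN

    JOIN.LEFT_OUTER   # was JOIN_LEFT_OUTER
    JOIN.INNER        # was JOIN_INNER (the default)
    JOIN.FULL         # was JOIN_FULL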
@@ -308,4 +308,3 @@ def update_trigger_disable_status(trigger, final_phase):
   else:
     # Save the trigger changes.
     trigger.save()
-
@@ -6,7 +6,7 @@ from collections import defaultdict
 from datetime import datetime
 import dateutil.parser

-from peewee import JOIN_LEFT_OUTER, IntegrityError, fn
+from peewee import JOIN, IntegrityError, fn

 from data.model import (DataModelException, db_transaction, _basequery, storage,
                         InvalidImageException)

@@ -273,7 +273,7 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
          .join(ImageStorage)
          .switch(Image)
          .join(Repository)
-         .join(RepositoryPermission, JOIN_LEFT_OUTER)
+         .join(RepositoryPermission, JOIN.LEFT_OUTER)
          .switch(Repository)
          .join(Namespace, on=(Repository.namespace_user == Namespace.id))
          .where(ImageStorage.uploading == False,
@@ -445,8 +445,8 @@ def get_image_with_storage_and_parent_base():
          .select(Image, ImageStorage, Parent, ParentImageStorage)
          .join(ImageStorage)
          .switch(Image)
-         .join(Parent, JOIN_LEFT_OUTER, on=(Image.parent == Parent.id))
-         .join(ParentImageStorage, JOIN_LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage)))
+         .join(Parent, JOIN.LEFT_OUTER, on=(Image.parent == Parent.id))
+         .join(ParentImageStorage, JOIN.LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage)))


 def set_secscan_status(image, indexed, version):
@@ -2,7 +2,7 @@ import json
 import logging

 from calendar import timegm
-from peewee import JOIN_LEFT_OUTER, fn, PeeweeException
+from peewee import JOIN, fn, PeeweeException
 from datetime import datetime, timedelta
 from cachetools import lru_cache

@@ -82,11 +82,11 @@ def get_logs_query(start_time, end_time, performer=None, repository=None, namesp

   query = _logs_query(selections, start_time, end_time, performer, repository, namespace, ignore,
                       model=model)
-  query = (query.switch(model).join(Performer, JOIN_LEFT_OUTER,
+  query = (query.switch(model).join(Performer, JOIN.LEFT_OUTER,
                                     on=(model.performer == Performer.id).alias('performer')))

   if namespace is None and repository is None:
-    query = (query.switch(model).join(Account, JOIN_LEFT_OUTER,
+    query = (query.switch(model).join(Account, JOIN.LEFT_OUTER,
                                       on=(model.account == Account.id).alias('account')))

   return query
@@ -37,7 +37,16 @@ def lookup_notifications_by_path_prefix(prefix):
 def list_notifications(user, kind_name=None, id_filter=None, include_dismissed=False,
                        page=None, limit=None):

-  base_query = Notification.select().join(NotificationKind)
+  base_query = (Notification
+                .select(Notification.id,
+                        Notification.uuid,
+                        Notification.kind,
+                        Notification.metadata_json,
+                        Notification.dismissed,
+                        Notification.lookup_path,
+                        Notification.created.alias('cd'),
+                        Notification.target)
+                .join(NotificationKind))

   if kind_name is not None:
     base_query = base_query.where(NotificationKind.name == kind_name)
@@ -73,7 +82,8 @@ def list_notifications(user, kind_name=None, id_filter=None, include_dismissed=F
   elif limit:
     query = query.limit(limit)

-  return query.order_by(base_query.c.created.desc())
+  from peewee import SQL
+  return query.order_by(SQL('cd desc'))


 def delete_all_notifications_by_path_prefix(prefix):
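Peewee 3 dropped the .c column shortcut this code relied on in 2.x, so the commit aliases created as cd in the SELECT list (previous hunk) and orders by the alias through raw SQL. The general pattern, reusing the Notification model from this diff:

    from peewee import SQL

    query = (Notification
             .select(Notification.id, Notification.created.alias('cd'))
             .order_by(SQL('cd desc')))   # refer to the alias, not query.c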
@@ -1,4 +1,4 @@
-from peewee import JOIN_LEFT_OUTER
+from peewee import JOIN

 from data.database import (RepositoryPermission, User, Repository, Visibility, Role, TeamMember,
                            PermissionPrototype, Team, TeamRole, Namespace)

@@ -112,13 +112,13 @@ def get_prototype_permissions(org):
   query = (PermissionPrototype
            .select()
            .where(PermissionPrototype.org == org)
-           .join(ActivatingUser, JOIN_LEFT_OUTER,
+           .join(ActivatingUser, JOIN.LEFT_OUTER,
                  on=(ActivatingUser.id == PermissionPrototype.activating_user))
-           .join(DelegateUser, JOIN_LEFT_OUTER,
+           .join(DelegateUser, JOIN.LEFT_OUTER,
                  on=(DelegateUser.id == PermissionPrototype.delegate_user))
-           .join(Team, JOIN_LEFT_OUTER,
+           .join(Team, JOIN.LEFT_OUTER,
                  on=(Team.id == PermissionPrototype.delegate_team))
-           .join(Role, JOIN_LEFT_OUTER, on=(Role.id == PermissionPrototype.role)))
+           .join(Role, JOIN.LEFT_OUTER, on=(Role.id == PermissionPrototype.role)))
   return query
@@ -3,8 +3,7 @@ import random

 from enum import Enum
 from datetime import timedelta, datetime
-from peewee import JOIN_LEFT_OUTER, fn, SQL, IntegrityError
-from playhouse.shortcuts import case
+from peewee import Case, JOIN, fn, SQL, IntegrityError
 from cachetools import ttl_cache

 from data.model import (
@@ -406,7 +405,7 @@ def get_visible_repositories(username, namespace=None, kind_filter='image', incl
   user_id = None
   if username:
     # Note: We only need the permissions table if we will filter based on a user's permissions.
-    query = query.switch(Repository).distinct().join(RepositoryPermission, JOIN_LEFT_OUTER)
+    query = query.switch(Repository).distinct().join(RepositoryPermission, JOIN.LEFT_OUTER)
     found_namespace = _get_namespace_user(username)
     if not found_namespace:
       return Repository.select(Repository.id.alias('rid')).where(Repository.id == -1)
@@ -552,7 +551,7 @@ def _get_sorted_matching_repositories(lookup_value, repo_kind='image', include_p
   if SEARCH_FIELDS.description.name in search_fields:
     clause = Repository.description.match(lookup_value) | clause
   cases = [(Repository.name.match(lookup_value), 100 * RepositorySearchScore.score),]
-  computed_score = case(None, cases, RepositorySearchScore.score).alias('score')
+  computed_score = Case(None, cases, RepositorySearchScore.score).alias('score')

   select_fields.append(computed_score)
   query = (Repository.select(*select_fields)
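playhouse.shortcuts.case moved into the core module as peewee.Case, keeping the (predicate, expression_tuples, default) shape, so only the import and capitalization change. Sketch with models from this diff:

    from peewee import Case

    cases = [(Repository.name.match('term'), 100 * RepositorySearchScore.score)]
    score = Case(None, cases, RepositorySearchScore.score).alias('score')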
@@ -2,7 +2,7 @@ import re

 from calendar import timegm
 from datetime import datetime, timedelta
-from peewee import JOIN_LEFT_OUTER
+from peewee import JOIN

 from Crypto.PublicKey import RSA
 from jwkest.jwk import RSAKey

@@ -165,7 +165,7 @@ def approve_service_key(kid, approver, approval_type, notes=''):

 def _list_service_keys_query(kid=None, service=None, approved_only=True, alive_only=True,
                              approval_type=None):
-  query = ServiceKey.select().join(ServiceKeyApproval, JOIN_LEFT_OUTER)
+  query = ServiceKey.select().join(ServiceKeyApproval, JOIN.LEFT_OUTER)

   if approved_only:
     query = query.where(~(ServiceKey.approval >> None))
@@ -50,8 +50,7 @@ def gen_sqlalchemy_metadata(peewee_model_list):
       alchemy_type = Integer
       all_indexes.add(((field.name, ), field.unique))
       if not field.deferred:
-        target_name = '%s.%s' % (field.to_field.model_class._meta.db_table,
-                                 field.to_field.db_column)
+        target_name = '%s.%s' % (field.rel_model._meta.table_name, field.rel_field.column_name)
         col_args.append(ForeignKey(target_name))
     elif isinstance(field, BigIntegerField):
       alchemy_type = BigInteger

@@ -74,19 +73,19 @@ def gen_sqlalchemy_metadata(peewee_model_list):
     if field.unique or field.index:
       all_indexes.add(((field.name, ), field.unique))

-    new_col = Column(field.db_column, alchemy_type, *col_args, **col_kwargs)
+    new_col = Column(field.column_name, alchemy_type, *col_args, **col_kwargs)
     columns.append(new_col)

-  new_table = Table(meta.db_table, metadata, *columns)
+  new_table = Table(meta.table_name, metadata, *columns)

   for col_prop_names, unique in all_indexes:
-    col_names = [meta.fields[prop_name].db_column for prop_name in col_prop_names]
-    index_name = '%s_%s' % (meta.db_table, '_'.join(col_names))
+    col_names = [meta.fields[prop_name].column_name for prop_name in col_prop_names]
+    index_name = '%s_%s' % (meta.table_name, '_'.join(col_names))
     col_refs = [getattr(new_table.c, col_name) for col_name in col_names]
     Index(index_name, *col_refs, unique=unique)

   for col_field_name in fulltext_indexes:
-    index_name = '%s_%s__fulltext' % (meta.db_table, col_field_name)
+    index_name = '%s_%s__fulltext' % (meta.table_name, col_field_name)
     col_ref = getattr(new_table.c, col_field_name)
     Index(index_name, col_ref, postgresql_ops={col_field_name: 'gin_trgm_ops'},
           postgresql_using='gin',
@@ -4,7 +4,7 @@ import time
 from calendar import timegm
 from uuid import uuid4

-from peewee import IntegrityError, JOIN_LEFT_OUTER, fn
+from peewee import IntegrityError, JOIN, fn
 from data.model import (image, db_transaction, DataModelException, _basequery,
                         InvalidManifestException, TagAlreadyCreatedException, StaleTagException,
                         config)

@@ -44,8 +44,8 @@ def get_tags_images_eligible_for_scan(clair_version):
          .join(Image, on=(RepositoryTag.image == Image.id))
          .join(ImageStorage, on=(Image.storage == ImageStorage.id))
          .switch(Image)
-         .join(Parent, JOIN_LEFT_OUTER, on=(Image.parent == Parent.id))
-         .join(ParentImageStorage, JOIN_LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage))
+         .join(Parent, JOIN.LEFT_OUTER, on=(Image.parent == Parent.id))
+         .join(ParentImageStorage, JOIN.LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage))
          .where(RepositoryTag.hidden == False)
          .where(Image.security_indexed_engine < clair_version))
@@ -71,7 +71,7 @@ def filter_tags_have_repository_event(query, event):
       lifetime_start_ts.
   """
   query = filter_has_repository_event(query, event)
-  query = query.switch(Image).join(ImageStorage)
+  query = query.switch(RepositoryTag).join(Image).join(ImageStorage)
   query = query.switch(RepositoryTag).order_by(RepositoryTag.lifetime_start_ts.desc())
   return query
@@ -146,12 +146,13 @@ def get_matching_tags_for_images(image_pairs, filter_images=None, filter_tags=No
   # Collect IDs of the tags found for each query.
   tags = {}
   for query in sharded_queries:
+    ImageAlias = Image.alias()
     tag_query = (_tag_alive(RepositoryTag
                             .select(*(selections or []))
                             .distinct()
-                            .join(Image)
+                            .join(ImageAlias)
                             .where(RepositoryTag.hidden == False)
-                            .where(Image.id << query)
+                            .where(ImageAlias.id << query)
                             .switch(RepositoryTag)))

     if filter_tags is not None:
@@ -210,7 +211,7 @@ def list_active_repo_tags(repo):
           .join(Image)
           .where(RepositoryTag.repository == repo, RepositoryTag.hidden == False)
           .switch(RepositoryTag)
-          .join(TagManifest, JOIN_LEFT_OUTER))
+          .join(TagManifest, JOIN.LEFT_OUTER))

   return query
@@ -148,12 +148,11 @@ def add_or_invite_to_team(inviter, team, user_obj=None, email=None, requires_inv
 def get_matching_user_teams(team_prefix, user_obj, limit=10):
   team_prefix_search = prefix_search(Team.name, team_prefix)
   query = (Team
-           .select()
+           .select(Team.id.distinct(), Team)
            .join(User)
            .switch(Team)
            .join(TeamMember)
            .where(TeamMember.user == user_obj, team_prefix_search)
-           .distinct(Team.id)
            .limit(limit))

   return query
@@ -179,12 +178,11 @@ def get_matching_admined_teams(team_prefix, user_obj, limit=10):
            .where(TeamRole.name == 'admin'))

   query = (Team
-           .select()
+           .select(Team.id.distinct(), Team)
            .join(User)
            .switch(Team)
            .join(TeamMember)
            .where(team_prefix_search, Team.organization << (admined_orgs))
-           .distinct(Team.id)
            .limit(limit))

   return query
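Peewee 3 changed the semantics of the query-level .distinct(column) form used here in 2.x, so the commit moves the DISTINCT onto the selected column expression instead. Sketch with models from this diff:

    query = (Team
             .select(Team.id.distinct(), Team)   # SELECT DISTINCT(team.id), ...
             .join(TeamMember)
             .limit(10))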
@@ -260,8 +258,9 @@ def get_user_teams_within_org(username, organization):
 def list_organization_members_by_teams(organization):
   query = (TeamMember
            .select(Team, User)
-           .annotate(Team)
-           .annotate(User)
+           .join(Team)
+           .switch(TeamMember)
+           .join(User)
            .where(Team.organization == organization))
   return query
@@ -1,6 +1,6 @@
 import pytest

-from peewee import JOIN_LEFT_OUTER
+from peewee import JOIN
 from playhouse.test_utils import assert_query_count

 from data.database import Repository, RepositoryPermission, TeamMember, Namespace

@@ -87,7 +87,7 @@ def test_filter_repositories(username, include_public, filter_to_namespace, repo
            .distinct()
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .switch(Repository)
-           .join(RepositoryPermission, JOIN_LEFT_OUTER))
+           .join(RepositoryPermission, JOIN.LEFT_OUTER))

   with assert_query_count(1):
     found = list(filter_to_repos_for_user(query, user.id,
@@ -8,6 +8,7 @@ from data.model.repository import create_repository, purge_repository, is_empty
 from data.model.repository import get_filtered_matching_repositories
 from test.fixtures import *


 def test_duplicate_repository_different_kinds(initialized_db):
   # Create an image repo.
   create_repository('devtable', 'somenewrepo', None, repo_kind='image')
@@ -1,6 +1,6 @@
 import logging

-from peewee import JOIN_LEFT_OUTER
+from peewee import JOIN

 from data.database import (AccessToken, AccessTokenKind, Repository, Namespace, Role,
                            RepositoryBuildTrigger, LogEntryKind)

@@ -38,7 +38,7 @@ def get_repository_delegate_tokens(namespace_name, repository_name):
           .switch(AccessToken)
           .join(Role)
           .switch(AccessToken)
-          .join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
+          .join(RepositoryBuildTrigger, JOIN.LEFT_OUTER)
           .where(Repository.name == repository_name, Namespace.username == namespace_name,
                  AccessToken.temporary == False, RepositoryBuildTrigger.uuid >> None))
@@ -4,7 +4,7 @@ import json
 import uuid
 from flask_login import UserMixin

-from peewee import JOIN_LEFT_OUTER, IntegrityError, fn
+from peewee import JOIN, IntegrityError, fn
 from uuid import uuid4
 from datetime import datetime, timedelta
 from enum import Enum
@@ -397,15 +397,18 @@ def _list_entity_robots(entity_name, include_metadata=True):
   """ Return the list of robots for the specified entity. This MUST return a query, not a
       materialized list so that callers can use db_for_update.
   """
-  query = (User
-           .select(User, FederatedLogin)
-           .join(FederatedLogin)
-           .where(User.robot == True, User.username ** (entity_name + '+%')))
-
   if include_metadata:
-    query = (query.switch(User)
-             .join(RobotAccountMetadata, JOIN_LEFT_OUTER)
-             .select(User, FederatedLogin, RobotAccountMetadata))
+    query = (User
+             .select(User, FederatedLogin, RobotAccountMetadata)
+             .join(FederatedLogin)
+             .switch(User)
+             .join(RobotAccountMetadata, JOIN.LEFT_OUTER)
+             .where(User.robot == True, User.username ** (entity_name + '+%')))
+  else:
+    query = (User
+             .select(User, FederatedLogin)
+             .join(FederatedLogin)
+             .where(User.robot == True, User.username ** (entity_name + '+%')))

   return query
@@ -417,12 +420,12 @@ def list_entity_robot_permission_teams(entity_name, limit=None, include_permissi
             RobotAccountMetadata.description, RobotAccountMetadata.unstructured_json]
   if include_permissions:
     query = (query
-             .join(RepositoryPermission, JOIN_LEFT_OUTER,
+             .join(RepositoryPermission, JOIN.LEFT_OUTER,
                    on=(RepositoryPermission.user == FederatedLogin.user))
-             .join(Repository, JOIN_LEFT_OUTER)
+             .join(Repository, JOIN.LEFT_OUTER)
              .switch(User)
-             .join(TeamMember, JOIN_LEFT_OUTER)
-             .join(Team, JOIN_LEFT_OUTER))
+             .join(TeamMember, JOIN.LEFT_OUTER)
+             .join(Team, JOIN.LEFT_OUTER))

     fields.append(Repository.name)
     fields.append(Team.name)
|
@ -684,7 +687,7 @@ def get_matching_user_namespaces(namespace_prefix, username, limit=10):
|
||||||
.select()
|
.select()
|
||||||
.distinct()
|
.distinct()
|
||||||
.join(Repository, on=(Repository.namespace_user == Namespace.id))
|
.join(Repository, on=(Repository.namespace_user == Namespace.id))
|
||||||
.join(RepositoryPermission, JOIN_LEFT_OUTER)
|
.join(RepositoryPermission, JOIN.LEFT_OUTER)
|
||||||
.where(namespace_search))
|
.where(namespace_search))
|
||||||
|
|
||||||
return _basequery.filter_to_repos_for_user(base_query, namespace_user_id).limit(limit)
|
return _basequery.filter_to_repos_for_user(base_query, namespace_user_id).limit(limit)
|
||||||
|
@@ -710,8 +713,8 @@ def get_matching_users(username_prefix, robot_namespace=None, organization=None,
   if organization:
     query = (query
              .select(User.id, User.username, User.email, User.robot, fn.Sum(Team.id))
-             .join(TeamMember, JOIN_LEFT_OUTER)
-             .join(Team, JOIN_LEFT_OUTER, on=((Team.id == TeamMember.team) &
+             .join(TeamMember, JOIN.LEFT_OUTER)
+             .join(Team, JOIN.LEFT_OUTER, on=((Team.id == TeamMember.team) &
                                               (Team.organization == organization)))
              .order_by(User.robot.desc()))
@@ -790,7 +793,7 @@ def verify_user(username_or_email, password):

 def get_all_repo_users(namespace_name, repository_name):
   return (RepositoryPermission
-          .select(User.username, User.email, User.robot, Role.name, RepositoryPermission)
+          .select(User, Role, RepositoryPermission)
           .join(User)
           .switch(RepositoryPermission)
           .join(Role)
data/text.py — 11 changes
@@ -1,4 +1,4 @@
-from peewee import Clause, SQL, fn, TextField, Field
+from peewee import NodeList, SQL, fn, TextField, Field

 def _escape_wildcard(search_query):
   """ Escapes the wildcards found in the given search query so that they are treated as *characters*

@@ -16,7 +16,7 @@ def prefix_search(field, prefix_query):
   """ Returns the wildcard match for searching for the given prefix query. """
   # Escape the known wildcard characters.
   prefix_query = _escape_wildcard(prefix_query)
-  return Field.__pow__(field, Clause(prefix_query + '%', SQL("ESCAPE '!'")))
+  return Field.__pow__(field, NodeList((prefix_query + '%', SQL("ESCAPE '!'"))))


 def match_mysql(field, search_query):

@@ -29,14 +29,13 @@ def match_mysql(field, search_query):
   # queries of the form `*` to raise a parsing error. If found, simply filter out.
   search_query = search_query.replace('*', '')

-  return Clause(fn.MATCH(SQL("`%s`" % field.name)), fn.AGAINST(SQL('%s', search_query)),
-                parens=True)
+  return NodeList((fn.MATCH(SQL("`%s`" % field.name)), fn.AGAINST(SQL('%s', [search_query]))),
+                  parens=True)


 def match_like(field, search_query):
   """ Generates a full-text match query using an ILIKE operation, which is needed for SQLite and
       Postgres.
   """
   escaped_query = _escape_wildcard(search_query)
-  clause = Clause('%' + escaped_query + '%', SQL("ESCAPE '!'"))
+  clause = NodeList(('%' + escaped_query + '%', SQL("ESCAPE '!'")))
   return Field.__pow__(field, clause)
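Peewee 2's Clause(*nodes) became NodeList(nodes) in 3.x, taking a single iterable rather than varargs, which is why the replacements above gain an inner tuple; the MySQL match also now passes the search term to SQL() as a parameter list. Minimal illustration:

    from peewee import NodeList, SQL

    node = NodeList(('foo%', SQL("ESCAPE '!'")))   # 'foo%' is bound as a parameter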
initdb.py — 15 changes
@@ -7,8 +7,7 @@ import os
 import argparse

 from datetime import datetime, timedelta, date
-from peewee import (SqliteDatabase, create_model_tables, drop_model_tables, savepoint_sqlite,
-                    savepoint)
+from peewee import SqliteDatabase
 from itertools import count
 from uuid import UUID, uuid4
 from threading import Event

@@ -210,6 +209,7 @@ def finished_database_for_testing(testcase):
       any changes should be discarded.
   """
   testcases[testcase]['savepoint'].__exit__(True, None, None)
+  testcases[testcase]['atomic'].__exit__(True, None, None)

 def setup_database_for_testing(testcase, with_storage=False, force_rebuild=False):
   """ Called when a testcase has started using the database, indicating that

@@ -242,15 +242,16 @@ def setup_database_for_testing(testcase, with_storage=False, force_rebuild=False
     db_initialized_for_testing.set()

   # Create a savepoint for the testcase.
-  test_savepoint = savepoint(db) if IS_TESTING_REAL_DATABASE else savepoint_sqlite(db)
-
   testcases[testcase] = {}
-  testcases[testcase]['savepoint'] = test_savepoint
+  testcases[testcase]['atomic'] = db.manual_commit()
+  testcases[testcase]['atomic'].__enter__()
+
+  testcases[testcase]['savepoint'] = db.savepoint()
   testcases[testcase]['savepoint'].__enter__()


 def initialize_database():
-  create_model_tables(all_models)
+  db.create_tables(all_models)

   Role.create(name='admin')
   Role.create(name='write')

@@ -447,7 +448,7 @@ def wipe_database():
   if not IS_TESTING_REAL_DATABASE and not isinstance(db.obj, SqliteDatabase):
     raise RuntimeError('Attempted to wipe production database!')

-  drop_model_tables(all_models, fail_silently=True)
+  db.drop_tables(all_models)


 def populate_database(minimal=False, with_storage=False):
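Schema and transaction helpers moved onto the Database object in Peewee 3: create_model_tables/drop_model_tables become db.create_tables/db.drop_tables, and the standalone savepoint()/savepoint_sqlite() helpers become db.savepoint(), here combined with manual-commit mode so each testcase can be rolled back. A hedged sketch of the lifecycle the harness now follows (db is the database proxy from this diff):

    atomic = db.manual_commit()        # take over commit/rollback handling
    atomic.__enter__()
    sp = db.savepoint()                # one savepoint per testcase
    sp.__enter__()
    # ... test body runs here ...
    sp.__exit__(True, None, None)      # truthy exc_type rolls the savepoint back
    atomic.__exit__(True, None, None)  # discard everything the test changed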
@@ -45,7 +45,7 @@ moto==0.4.25 # remove when 0.4.28+ is out
 namedlist
 netaddr
 pathvalidate
-peewee==2.8.1
+peewee
 psutil
 psycopg2
 pyasn1

@@ -78,4 +78,4 @@ xhtml2pdf
 recaptcha2
 mockredispy
 yapf
 pymemcache
@ -1,5 +1,5 @@
|
||||||
aiowsgi==0.6
|
aiowsgi==0.6
|
||||||
alembic==0.9.8
|
alembic==0.9.9
|
||||||
-e git+https://github.com/coreos/mockldap.git@59a46efbe8c7cd8146a87a7c4f2b09746b953e11#egg=mockldap
|
-e git+https://github.com/coreos/mockldap.git@59a46efbe8c7cd8146a87a7c4f2b09746b953e11#egg=mockldap
|
||||||
-e git+https://github.com/coreos/py-bitbucket.git@55a1ada645f2fb6369147996ec71edd7828d91c8#egg=py_bitbucket
|
-e git+https://github.com/coreos/py-bitbucket.git@55a1ada645f2fb6369147996ec71edd7828d91c8#egg=py_bitbucket
|
||||||
-e git+https://github.com/coreos/resumablehashlib.git@b1b631249589b07adf40e0ee545b323a501340b4#egg=resumablehashlib
|
-e git+https://github.com/coreos/resumablehashlib.git@b1b631249589b07adf40e0ee545b323a501340b4#egg=resumablehashlib
|
||||||
|
@ -9,12 +9,12 @@ alembic==0.9.8
 -e git+https://github.com/DevTable/container-cloud-config.git@8e8ae177c3d5cd6608f64250fcd8770022d61562#egg=container_cloud_config
 -e git+https://github.com/DevTable/python-etcd.git@f1168cb02a2a8c83bec1108c6fcd8615ef463b14#egg=python_etcd
 -e git+https://github.com/jarus/flask-testing.git@17f19d7fee0e1e176703fc7cb04917a77913ba1a#egg=Flask_Testing
--e git+https://github.com/jepcastelein/marketo-rest-python.git@6134d1129f2435b313c4301503a4b74974d79a42#egg=marketorestpython
+-e git+https://github.com/jepcastelein/marketo-rest-python.git@9eaad572b52a30bbb1dee17821acb197ecd2dcb8#egg=marketorestpython
 -e git+https://github.com/NateFerrero/oauth2lib.git@d161b010f8a596826050a09e5e94d59443cc12d9#egg=oauth2lib
 APScheduler==3.0.5
 asn1crypto==0.24.0
 autobahn==0.9.3.post3
-azure-common==1.1.8
+azure-common==1.1.10
 azure-nspkg==2.0.0
 azure-storage-blob==1.1.0
 azure-storage-common==1.1.0
@ -27,31 +27,31 @@ bintrees==2.0.7
 bitmath==1.3.1.2
 blinker==1.4
 boto==2.48.0
-boto3==1.5.36
-botocore==1.8.50
+boto3==1.7.12
+botocore==1.10.12
 cachetools==1.1.6
-certifi==2018.1.18
-cffi==1.11.4
+certifi==2018.4.16
+cffi==1.11.5
 chardet==3.0.4
 click==6.7
 contextlib2==0.5.5
-cryptography==2.1.4
+cryptography==2.2.2
 debtcollector==1.19.0
-decorator==4.2.1
+decorator==4.3.0
 enum34==1.1.6
-Flask-Cors==3.0.3
+Flask-Cors==3.0.4
 Flask-Login==0.4.1
 Flask-Mail==0.9.1
 Flask-Principal==0.4.0
 Flask-RESTful==0.3.6
-Flask==0.12.2
+Flask==1.0.2
 funcparserlib==0.3.6
 funcsigs==1.0.2
 functools32==3.2.3.post2
 furl==1.0.1
 future==0.16.0
 futures==3.2.0
-geoip2==2.7.0
+geoip2==2.8.0
 gevent==1.2.2
 gipc==0.6.0
 greenlet==0.4.13
@ -59,19 +59,19 @@ gunicorn==18.0
 hiredis==0.2.0
 html5lib==0.9999999
 httmock==1.2.6
-httplib2==0.10.3
+httplib2==0.11.3
 httpretty==0.8.10
 idna==2.6
-ipaddress==1.0.19
+ipaddress==1.0.22
 iso8601==0.1.12
 itsdangerous==0.24
 Jinja2==2.10
 jmespath==0.9.3
 jsonpath-rw==1.4.0
 jsonschema==2.6.0
-keystoneauth1==3.4.0
+keystoneauth1==3.5.0
 Mako==1.0.7
-marisa-trie==0.7.4
+marisa-trie==0.7.5
 MarkupSafe==1.0
 maxminddb==1.3.0
 mixpanel==4.3.2
@ -81,46 +81,47 @@ monotonic==1.4
|
||||||
moto==0.4.25
|
moto==0.4.25
|
||||||
msgpack==0.5.6
|
msgpack==0.5.6
|
||||||
namedlist==1.7
|
namedlist==1.7
|
||||||
ndg-httpsclient==0.4.4
|
ndg-httpsclient==0.5.0
|
||||||
netaddr==0.7.19
|
netaddr==0.7.19
|
||||||
netifaces==0.10.6
|
netifaces==0.10.7
|
||||||
oauthlib==2.0.6
|
oauthlib==2.0.7
|
||||||
orderedmultidict==0.7.11
|
orderedmultidict==0.7.11
|
||||||
oslo.config==5.2.0
|
oslo.config==6.2.0
|
||||||
oslo.i18n==3.19.0
|
oslo.i18n==3.20.0
|
||||||
oslo.serialization==2.24.0
|
oslo.serialization==2.25.0
|
||||||
oslo.utils==3.35.0
|
oslo.utils==3.36.1
|
||||||
pathvalidate==0.16.3
|
pathvalidate==0.17.1
|
||||||
pbr==3.1.1
|
pbr==4.0.2
|
||||||
peewee==2.8.1
|
peewee==3.3.3
|
||||||
Pillow==5.0.0
|
Pillow==5.1.0
|
||||||
ply==3.11
|
ply==3.11
|
||||||
psutil==5.4.3
|
psutil==5.4.5
|
||||||
psycopg2==2.7.4
|
psycopg2==2.7.4
|
||||||
pyasn1==0.4.2
|
pyasn1==0.4.2
|
||||||
|
pyasn1-modules==0.2.1
|
||||||
py-bcrypt==0.4
|
py-bcrypt==0.4
|
||||||
pycparser==2.18
|
pycparser==2.18
|
||||||
pycryptodome==3.4.11
|
pycryptodome==3.6.1
|
||||||
pycryptodomex==3.4.12
|
pycryptodomex==3.6.1
|
||||||
PyGithub==1.36
|
PyGithub==1.39
|
||||||
pygpgme==0.3
|
pygpgme==0.3
|
||||||
pyjwkest==1.4.0
|
pyjwkest==1.4.0
|
||||||
PyJWT==1.5.3
|
PyJWT==1.6.1
|
||||||
PyMySQL==0.6.7
|
PyMySQL==0.6.7
|
||||||
pyOpenSSL==17.5.0
|
pyOpenSSL==17.5.0
|
||||||
pymemcache==1.4.3
|
pymemcache==1.4.4
|
||||||
pyparsing==2.2.0
|
pyparsing==2.2.0
|
||||||
PyPDF2==1.26.0
|
PyPDF2==1.26.0
|
||||||
python-dateutil==2.6.1
|
python-dateutil==2.7.2
|
||||||
python-editor==1.0.3
|
python-editor==1.0.3
|
||||||
python-gitlab==1.4.0
|
python-gitlab==1.4.0
|
||||||
python-keystoneclient==3.15.0
|
python-keystoneclient==3.16.0
|
||||||
python-ldap==2.5.2
|
python-ldap==3.0.0
|
||||||
python-magic==0.4.15
|
python-magic==0.4.15
|
||||||
python-swiftclient==3.5.0
|
python-swiftclient==3.5.0
|
||||||
pytz==2018.3
|
pytz==2018.4
|
||||||
PyYAML==3.12
|
PyYAML==3.12
|
||||||
raven==6.5.0
|
raven==6.7.0
|
||||||
recaptcha2==0.1
|
recaptcha2==0.1
|
||||||
redis==2.10.6
|
redis==2.10.6
|
||||||
redlock==1.2.0
|
redlock==1.2.0
|
||||||
|
@ -130,20 +131,19 @@ requests[security]==2.18.4
 rfc3986==1.1.0
 s3transfer==0.1.13
 semantic-version==2.6.0
-simplejson==3.13.2
 six==1.11.0
 SQLAlchemy==1.1.5
 stevedore==1.28.0
 stringscore==0.1.0
-stripe==1.79.0
+stripe==1.80.0
 toposort==1.5
-trollius==2.1
+trollius==2.2
 tzlocal==1.5.1
 urllib3==1.22
 waitress==1.1.0
-WebOb==1.7.4
+WebOb==1.8.1
 Werkzeug==0.14.1
 wrapt==1.10.11
-xhtml2pdf==0.2.1
+xhtml2pdf==0.2.2
 xmltodict==0.11.0
-yapf==0.20.2
+yapf==0.21.0
@ -92,4 +92,4 @@ def test_storage_proxy_auth(storage, liveserver_app, liveserver_session, is_prox
   }
 
   resp = liveserver_session.get('_storage_proxy_auth', headers=headers)
-  assert resp.status_code == (404 if not is_proxying_enabled else 200)
+  assert resp.status_code == (500 if not is_proxying_enabled else 200)
@ -8,7 +8,7 @@ from flask import Flask, jsonify
 from flask_login import LoginManager
 from flask_principal import identity_loaded, Permission, Identity, identity_changed, Principal
 from flask_mail import Mail
-from peewee import SqliteDatabase, savepoint, InternalError
+from peewee import SqliteDatabase, InternalError
 
 from app import app as application
 from auth.permissions import on_identity_loaded
@ -147,19 +147,20 @@ def initialized_db(appconfig):
   # If under a test *real* database, setup a savepoint.
   under_test_real_database = bool(os.environ.get('TEST_DATABASE_URI'))
   if under_test_real_database:
-    test_savepoint = savepoint(db)
-    test_savepoint.__enter__()
+    with db.transaction():
+      test_savepoint = db.savepoint()
+      test_savepoint.__enter__()
 
     yield  # Run the test.
 
     try:
       test_savepoint.rollback()
       test_savepoint.__exit__(None, None, None)
     except InternalError:
       # If postgres fails with an exception (like IntegrityError) mid-transaction, it terminates
       # it immediately, so when we go to remove the savepoint, it complains. We can safely ignore
       # this case.
       pass
   else:
     yield
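The import change above is forced by the same API move: Peewee 3 no longer exports a `savepoint` class, so the fixture builds one from the database object, and the new `with db.transaction():` wrapper gives the savepoint an enclosing transaction to nest inside. A sketch of that nesting, assuming an illustrative model:

# Sketch of transaction/savepoint nesting under Peewee 3.x; Note and the
# in-memory database are illustrative assumptions.
from peewee import SqliteDatabase, Model, CharField

db = SqliteDatabase(':memory:')

class Note(Model):
  body = CharField()

  class Meta:
    database = db

db.connect()
db.create_tables([Note])

with db.transaction():
  Note.create(body='kept')        # survives: the outer transaction commits
  sp = db.savepoint()
  sp.__enter__()
  Note.create(body='discarded')   # undone by the rollback below
  sp.rollback()
  sp.__exit__(None, None, None)

assert [n.body for n in Note.select()] == ['kept']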
@ -161,7 +161,7 @@ class BuildLogsTestCase(EndpointTestCase):
     logs = ['log1', 'log2']
     with patch('endpoints.web.build_logs.get_log_entries', return_value=(None, logs) ):
       resp = self.getResponse('web.buildlogs', build_uuid=self.build_uuid, expected_code=200)
-      self.assertEquals('{"logs":["log1","log2"]}\n', resp)
+      self.assertEquals({"logs": logs}, py_json.loads(resp))
 
 class ArchivedLogsTestCase(EndpointTestCase):
   build_uuid = 'deadpork-dead-pork-dead-porkdeadpork'
@ -1,11 +1,11 @@
 from data.database import Image, ImageStorage
-from peewee import JOIN_LEFT_OUTER, fn
+from peewee import JOIN, fn
 from app import app
 
 orphaned = (ImageStorage
             .select()
             .where(ImageStorage.uploading == False)
-            .join(Image, JOIN_LEFT_OUTER)
+            .join(Image, JOIN.LEFT_OUTER)
             .group_by(ImageStorage)
             .having(fn.Count(Image.id) == 0))
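Peewee 3 folds the module-level join constants into a `JOIN` namespace, so `JOIN_LEFT_OUTER` becomes `JOIN.LEFT_OUTER` while the query shape is untouched. A runnable sketch of the same orphan-finding query, with hypothetical models standing in for `ImageStorage` and `Image`:

# Sketch of the JOIN namespace change; Storage and Picture are hypothetical
# stand-ins for ImageStorage and Image.
from peewee import SqliteDatabase, Model, CharField, ForeignKeyField, JOIN, fn

db = SqliteDatabase(':memory:')

class Storage(Model):
  path = CharField()

  class Meta:
    database = db

class Picture(Model):
  storage = ForeignKeyField(Storage, backref='pictures')

  class Meta:
    database = db

db.connect()
db.create_tables([Storage, Picture])

# Storages with no referencing Picture rows -- the 2.x spelling was
# .join(Picture, JOIN_LEFT_OUTER).
orphaned = (Storage
            .select()
            .join(Picture, JOIN.LEFT_OUTER)
            .group_by(Storage)
            .having(fn.Count(Picture.id) == 0))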