2017-01-11 19:53:14 +00:00
|
|
|
from peewee import fn
|
2015-07-15 21:25:41 +00:00
|
|
|
from cachetools import lru_cache
|
|
|
|
|
2016-01-05 17:14:52 +00:00
|
|
|
from data.model import DataModelException
|
2015-07-15 21:25:41 +00:00
|
|
|
from data.database import (Repository, User, Team, TeamMember, RepositoryPermission, TeamRole,
|
2017-03-22 18:30:13 +00:00
|
|
|
Namespace, Visibility, ImageStorage, Image, RepositoryKind,
|
|
|
|
db_for_update)
|
2015-07-15 21:25:41 +00:00
|
|
|
|
2017-05-02 19:38:25 +00:00
|
|
|
def reduce_as_tree(queries_to_reduce):
  """ This method will split a list of queries into halves recursively until we reach individual
      queries, at which point it will start unioning the queries, or the already unioned subqueries.
      This works around a bug in peewee SQL generation where reducing linearly generates a chain
      of queries that will exceed the recursion depth limit when it has around 80 queries.

      Raises IndexError if called with an empty list.
  """
  # Use floor division so the midpoint is always an int: plain `/` yields a float under
  # Python 3, which cannot be used as a slice index. `//` behaves identically on Python 2.
  mid = len(queries_to_reduce) // 2
  left = queries_to_reduce[:mid]
  right = queries_to_reduce[mid:]

  # `right` is always non-empty (mid < len for any non-empty input).
  to_reduce_right = right[0]
  if len(right) > 1:
    to_reduce_right = reduce_as_tree(right)

  if len(left) > 1:
    to_reduce_left = reduce_as_tree(left)
  elif len(left) == 1:
    to_reduce_left = left[0]
  else:
    # Single-element input: nothing on the left, so the reduced right side is the result.
    return to_reduce_right

  return to_reduce_left.union_all(to_reduce_right)
|
|
|
|
|
2015-07-15 21:25:41 +00:00
|
|
|
|
2017-03-22 18:30:13 +00:00
|
|
|
def get_existing_repository(namespace_name, repository_name, for_update=False, kind_filter=None):
  """ Returns the repository with the given name under the given namespace, raising
      Repository.DoesNotExist if no matching row is found. If kind_filter is given, the
      lookup is restricted to repositories of that kind; if for_update is True, the row
      is selected for update.
  """
  lookup = (Repository
            .select(Repository, Namespace)
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .where(Namespace.username == namespace_name,
                   Repository.name == repository_name))

  # Optionally restrict to a single repository kind (e.g. 'image' vs 'application').
  if kind_filter:
    lookup = (lookup
              .switch(Repository)
              .join(RepositoryKind)
              .where(RepositoryKind.name == kind_filter))

  return db_for_update(lookup).get() if for_update else lookup.get()
|
|
|
|
|
|
|
|
|
|
|
|
@lru_cache(maxsize=1)
def get_public_repo_visibility():
  """ Returns the Visibility row named 'public', cached for the lifetime of the process. """
  return Visibility.get(name='public')
|
|
|
|
|
|
|
|
|
2016-08-31 17:51:53 +00:00
|
|
|
def _lookup_team_role(name):
  """ Returns the TeamRole row with the given name; raises KeyError for unknown names. """
  roles_by_name = _lookup_team_roles()
  return roles_by_name[name]
|
|
|
|
|
|
|
|
|
|
|
|
@lru_cache(maxsize=1)
def _lookup_team_roles():
  """ Returns a process-cached mapping from team role name to its TeamRole row. """
  return dict((role.name, role) for role in TeamRole.select())
|
2016-08-31 17:51:53 +00:00
|
|
|
|
|
|
|
|
2017-03-20 23:05:25 +00:00
|
|
|
def filter_to_repos_for_user(query, username=None, namespace=None, repo_kind='image',
                             include_public=True, start_id=None):
  """ Filters the given repository query down to the repositories visible under the given
      constraints, returning the union of several visibility sub-queries.

      username: if given, include repositories visible to this user (direct permission,
                team permission, or org-admin access).
      namespace: if given, restrict all results to this namespace.
      repo_kind: if not None, restrict to repositories of this kind; raises
                 DataModelException for an unknown kind.
      include_public: whether to include publicly-visible repositories.
      start_id: if given, only repositories with id >= start_id are included.

      NOTE(review): `query` is assumed to already join Repository, Namespace and
      RepositoryPermission (the sub-queries below `.switch()` to those models) — confirm
      against callers.
  """
  # With no public repos and no user there is nothing visible; return a query that is
  # guaranteed to match no rows.
  if not include_public and not username:
    return Repository.select().where(Repository.id == '-1')

  # Filter on the type of repository.
  if repo_kind is not None:
    try:
      query = query.where(Repository.kind == Repository.kind.get_id(repo_kind))
    except RepositoryKind.DoesNotExist:
      raise DataModelException('Unknown repository kind')

  # Add the start ID if necessary.
  if start_id is not None:
    query = query.where(Repository.id >= start_id)

  # Build a set of queries that, when unioned together, return the full set of visible repositories
  # for the filters specified.
  queries = []

  # A namespace filter applied to every sub-query; `(True)` is a no-op clause when no
  # namespace is requested.
  where_clause = (True)
  if namespace:
    where_clause = (Namespace.username == namespace)

  if include_public:
    queries.append(query
                   .clone()
                   .where(Repository.visibility == get_public_repo_visibility(), where_clause))

  if username:
    # Model aliases so the same tables can be joined multiple times with distinct roles.
    UserThroughTeam = User.alias()
    Org = User.alias()
    AdminTeam = Team.alias()
    AdminTeamMember = TeamMember.alias()
    AdminUser = User.alias()

    # Add repositories in which the user has permission.
    queries.append(query
                   .clone()
                   .switch(RepositoryPermission)
                   .join(User)
                   .where(User.username == username, where_clause))

    # Add repositories in which the user is a member of a team that has permission.
    queries.append(query
                   .clone()
                   .switch(RepositoryPermission)
                   .join(Team)
                   .join(TeamMember)
                   .join(UserThroughTeam, on=(UserThroughTeam.id == TeamMember.user))
                   .where(UserThroughTeam.username == username, where_clause))

    # Add repositories under namespaces in which the user is the org admin.
    queries.append(query
                   .clone()
                   .switch(Repository)
                   .join(Org, on=(Repository.namespace_user == Org.id))
                   .join(AdminTeam, on=(Org.id == AdminTeam.organization))
                   .where(AdminTeam.role == _lookup_team_role('admin'))
                   .switch(AdminTeam)
                   .join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
                   .join(AdminUser, on=(AdminTeamMember.user == AdminUser.id))
                   .where(AdminUser.username == username, where_clause))

  # OR the sub-queries together (at most three here, so a linear reduce is safe).
  return reduce(lambda l, r: l | r, queries)
|
2015-07-15 21:25:41 +00:00
|
|
|
|
|
|
|
|
|
|
|
def get_user_organizations(username):
  """ Returns the distinct set of organizations in which the named user is a member of
      some team.
  """
  MemberUser = User.alias()
  membership = (User
                .select()
                .distinct()
                .join(Team)
                .join(TeamMember)
                .join(MemberUser, on=(MemberUser.id == TeamMember.user)))
  return membership.where(User.organization == True, MemberUser.username == username)
|
2016-01-05 17:14:52 +00:00
|
|
|
|
|
|
|
|
|
|
|
def calculate_image_aggregate_size(ancestors_str, image_size, parent_image):
  """ Returns the total size of the image and all of its ancestors, or None if it cannot
      be computed.

      ancestors_str: the image's ancestor path in '/id1/id2/.../' form; empty interior
                     means the image has no ancestors.
      image_size: the size of the image itself.
      parent_image: the image's direct parent row; raises DataModelException if None
                    while ancestors exist.
  """
  ancestor_ids = ancestors_str.split('/')[1:-1]
  if not ancestor_ids:
    # No ancestors: the aggregate size is just the image's own size.
    return image_size

  if parent_image is None:
    raise DataModelException('Could not load parent image')

  # Fast path: the parent already has an aggregate size computed, so just add our size.
  parent_aggregate = parent_image.aggregate_size
  if parent_aggregate is not None:
    return parent_aggregate + image_size

  # Fallback to a slower path if the parent doesn't have an aggregate size saved.
  # TODO: remove this code if/when we do a full backfill.
  summed_ancestors = (ImageStorage
                      .select(fn.Sum(ImageStorage.image_size))
                      .join(Image)
                      .where(Image.id << ancestor_ids)
                      .scalar())
  return None if summed_ancestors is None else summed_ancestors + image_size
|