# quay/data/model/legacy.py
import bcrypt
import logging
import dateutil.parser
import json
import time
from datetime import datetime, timedelta, date
from uuid import uuid4
from data.database import (User, Repository, Image, AccessToken, Role, RepositoryPermission,
Visibility, RepositoryTag, EmailConfirmation, FederatedLogin,
LoginService, RepositoryBuild, Team, TeamMember, TeamRole,
LogEntryKind, LogEntry, PermissionPrototype, ImageStorage,
BuildTriggerService, RepositoryBuildTrigger, NotificationKind,
Notification, ImageStorageLocation, ImageStoragePlacement,
ExternalNotificationEvent, ExternalNotificationMethod,
RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
DerivedImageStorage, ImageStorageTransformation, random_string_generator,
db, BUILD_PHASE, QuayUserField, ImageStorageSignature, QueueItem,
ImageStorageSignatureKind, validate_database_url, db_for_update,
AccessTokenKind, Star)
from peewee import JOIN_LEFT_OUTER, fn
from util.validation import (validate_username, validate_email, validate_password,
INVALID_PASSWORD_MESSAGE)
from util.names import format_robot_username, parse_robot_username
from util.backoff import exponential_backoff
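# Failed logins back off exponentially; this constant is the unit of delay used by verify_user below.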
EXPONENTIAL_BACKOFF_SCALE = timedelta(seconds=1)
PRESUMED_DEAD_BUILD_AGE = timedelta(days=15)
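# Repository namespaces are stored as User rows, so queries alias User as Namespace for readability.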
Namespace = User.alias()
logger = logging.getLogger(__name__)
class Config(object):
def __init__(self):
self.app_config = None
self.store = None
config = Config()
class DataModelException(Exception):
pass
class InvalidEmailAddressException(DataModelException):
pass
class InvalidUsernameException(DataModelException):
pass
class InvalidOrganizationException(DataModelException):
pass
class InvalidRobotException(DataModelException):
pass
class InvalidTeamException(DataModelException):
pass
class InvalidTeamMemberException(DataModelException):
pass
class InvalidPasswordException(DataModelException):
pass
class InvalidTokenException(DataModelException):
pass
class InvalidRepositoryBuildException(DataModelException):
pass
class InvalidNotificationException(DataModelException):
pass
class InvalidBuildTriggerException(DataModelException):
pass
class InvalidImageException(DataModelException):
pass
class TooManyUsersException(DataModelException):
pass
class UserAlreadyInTeam(DataModelException):
pass
class TooManyLoginAttemptsException(Exception):
def __init__(self, message, retry_after):
super(TooManyLoginAttemptsException, self).__init__(message)
self.retry_after = retry_after
def _get_repository(namespace_name, repository_name, for_update=False):
query = (Repository
.select(Repository, Namespace)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name))
if for_update:
query = db_for_update(query)
return query.get()
def hash_password(password, salt=None):
salt = salt or bcrypt.gensalt()
return bcrypt.hashpw(password.encode('utf-8'), salt)
def is_create_user_allowed():
return True
def create_user(username, password, email, auto_verify=False):
""" Creates a regular user, if allowed. """
if not validate_password(password):
raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE)
if not is_create_user_allowed():
raise TooManyUsersException()
created = _create_user(username, email)
created.password_hash = hash_password(password)
created.verified = auto_verify
created.save()
return created
def _create_user(username, email):
if not validate_email(email):
raise InvalidEmailAddressException('Invalid email address: %s' % email)
(username_valid, username_issue) = validate_username(username)
if not username_valid:
raise InvalidUsernameException('Invalid username %s: %s' % (username, username_issue))
try:
existing = User.get((User.username == username) | (User.email == email))
logger.info('Existing user with same username or email.')
# A user already exists with either the same username or email
if existing.username == username:
raise InvalidUsernameException('Username has already been taken: %s' %
username)
raise InvalidEmailAddressException('Email has already been used: %s' %
email)
except User.DoesNotExist:
# This is actually the happy path
logger.debug('Email and username are unique!')
pass
try:
return User.create(username=username, email=email)
except Exception as ex:
raise DataModelException(ex.message)
def is_username_unique(test_username):
try:
User.get((User.username == test_username))
return False
except User.DoesNotExist:
return True
def create_organization(name, email, creating_user):
try:
# Create the org
new_org = _create_user(name, email)
new_org.organization = True
new_org.save()
# Create a team for the owners
owners_team = create_team('owners', new_org, 'admin')
# Add the user who created the org to the owners team
add_user_to_team(creating_user, owners_team)
return new_org
except InvalidUsernameException:
msg = ('Invalid organization name: %s Organization names must consist ' +
'solely of lower case letters, numbers, and underscores. ' +
'[a-z0-9_]') % name
raise InvalidOrganizationException(msg)
def create_robot(robot_shortname, parent):
(username_valid, username_issue) = validate_username(robot_shortname)
if not username_valid:
raise InvalidRobotException('The name for the robot \'%s\' is invalid: %s' %
(robot_shortname, username_issue))
username = format_robot_username(parent.username, robot_shortname)
try:
User.get(User.username == username)
msg = 'Existing robot with name: %s' % username
logger.info(msg)
raise InvalidRobotException(msg)
except User.DoesNotExist:
pass
try:
created = User.create(username=username, robot=True)
service = LoginService.get(name='quayrobot')
password = created.email
FederatedLogin.create(user=created, service=service,
service_ident=password)
return created, password
except Exception as ex:
raise DataModelException(ex.message)
def get_robot(robot_shortname, parent):
robot_username = format_robot_username(parent.username, robot_shortname)
robot = lookup_robot(robot_username)
if not robot:
msg = ('Could not find robot with username: %s' %
robot_username)
raise InvalidRobotException(msg)
service = LoginService.get(name='quayrobot')
login = FederatedLogin.get(FederatedLogin.user == robot, FederatedLogin.service == service)
return robot, login.service_ident
def lookup_robot(robot_username):
joined = User.select().join(FederatedLogin).join(LoginService)
found = list(joined.where(LoginService.name == 'quayrobot',
User.username == robot_username))
if not found or len(found) < 1 or not found[0].robot:
return None
return found[0]
def verify_robot(robot_username, password):
joined = User.select().join(FederatedLogin).join(LoginService)
found = list(joined.where(FederatedLogin.service_ident == password,
LoginService.name == 'quayrobot',
User.username == robot_username))
if not found:
msg = ('Could not find robot with username: %s and supplied password.' %
robot_username)
raise InvalidRobotException(msg)
return found[0]
def regenerate_robot_token(robot_shortname, parent):
robot_username = format_robot_username(parent.username, robot_shortname)
robot = lookup_robot(robot_username)
if not robot:
raise InvalidRobotException('Could not find robot with username: %s' %
robot_username)
password = random_string_generator(length=64)()
robot.email = password
service = LoginService.get(name='quayrobot')
login = FederatedLogin.get(FederatedLogin.user == robot, FederatedLogin.service == service)
login.service_ident = password
login.save()
robot.save()
return robot, password
def delete_robot(robot_username):
try:
robot = User.get(username=robot_username, robot=True)
robot.delete_instance(recursive=True, delete_nullable=True)
except User.DoesNotExist:
raise InvalidRobotException('Could not find robot with username: %s' %
robot_username)
def _list_entity_robots(entity_name):
""" Return the list of robots for the specified entity. This MUST return a query, not a
materialized list so that callers can use db_for_update.
"""
return (User
.select()
.join(FederatedLogin)
.where(User.robot == True, User.username ** (entity_name + '+%')))
def list_entity_robot_tuples(entity_name):
return (_list_entity_robots(entity_name)
.select(User.username, FederatedLogin.service_ident)
.tuples())
def convert_user_to_organization(user, admin_user):
# Change the user to an organization.
user.organization = True
# disable this account for login.
user.password_hash = None
user.save()
# Clear any federated auth pointing to this user
FederatedLogin.delete().where(FederatedLogin.user == user).execute()
# Create a team for the owners
owners_team = create_team('owners', user, 'admin')
# Add the user who will admin the org to the owners team
add_user_to_team(admin_user, owners_team)
return user
def create_team(name, org, team_role_name, description=''):
(username_valid, username_issue) = validate_username(name)
if not username_valid:
raise InvalidTeamException('Invalid team name %s: %s' % (name, username_issue))
if not org.organization:
raise InvalidOrganizationException('User with name %s is not an org.' %
org.username)
team_role = TeamRole.get(TeamRole.name == team_role_name)
return Team.create(name=name, organization=org, role=team_role,
description=description)
def __get_user_admin_teams(org_name, teamname, username):
Org = User.alias()
user_teams = Team.select().join(TeamMember).join(User)
with_org = user_teams.switch(Team).join(Org,
on=(Org.id == Team.organization))
with_role = with_org.switch(Team).join(TeamRole)
admin_teams = with_role.where(User.username == username,
Org.username == org_name,
TeamRole.name == 'admin')
return admin_teams
def remove_team(org_name, team_name, removed_by_username):
joined = Team.select(Team, TeamRole).join(User).switch(Team).join(TeamRole)
found = list(joined.where(User.organization == True,
User.username == org_name,
Team.name == team_name))
if not found:
raise InvalidTeamException('Team \'%s\' is not a team in org \'%s\'' %
(team_name, org_name))
team = found[0]
if team.role.name == 'admin':
admin_teams = list(__get_user_admin_teams(org_name, team_name,
removed_by_username))
if len(admin_teams) <= 1:
# The team we are trying to remove is the only admin team for this user
msg = ('Deleting team \'%s\' would remove all admin from user \'%s\'' %
(team_name, removed_by_username))
raise DataModelException(msg)
team.delete_instance(recursive=True, delete_nullable=True)
def add_or_invite_to_team(inviter, team, user=None, email=None, requires_invite=True):
# If the user is a member of the organization, then we simply add the
# user directly to the team. Otherwise, an invite is created for the user/email.
# We return None if the user was directly added and the invite object if the user was invited.
if user and requires_invite:
orgname = team.organization.username
# If the user is part of the organization (or a robot), then no invite is required.
if user.robot:
requires_invite = False
if not user.username.startswith(orgname + '+'):
raise InvalidTeamMemberException('Cannot add the specified robot to this team, ' +
'as it is not a member of the organization')
else:
Org = User.alias()
found = User.select(User.username)
found = found.where(User.username == user.username).join(TeamMember).join(Team)
found = found.join(Org, on=(Org.username == orgname)).limit(1)
requires_invite = not any(found)
# If we have a valid user and no invite is required, simply add the user to the team.
if user and not requires_invite:
add_user_to_team(user, team)
return None
email_address = email if not user else None
return TeamMemberInvite.create(user=user, email=email_address, team=team, inviter=inviter)
def add_user_to_team(user, team):
try:
return TeamMember.create(user=user, team=team)
except Exception:
raise UserAlreadyInTeam('User \'%s\' is already a member of team \'%s\'' %
(user.username, team.name))
def remove_user_from_team(org_name, team_name, username, removed_by_username):
Org = User.alias()
joined = TeamMember.select().join(User).switch(TeamMember).join(Team)
with_role = joined.join(TeamRole)
with_org = with_role.switch(Team).join(Org,
on=(Org.id == Team.organization))
found = list(with_org.where(User.username == username,
Org.username == org_name,
Team.name == team_name))
if not found:
raise DataModelException('User %s does not belong to team %s' %
(username, team_name))
if username == removed_by_username:
admin_team_query = __get_user_admin_teams(org_name, team_name, username)
admin_team_names = {team.name for team in admin_team_query}
if team_name in admin_team_names and len(admin_team_names) <= 1:
msg = 'User cannot remove themselves from their only admin team.'
raise DataModelException(msg)
user_in_team = found[0]
user_in_team.delete_instance()
def get_team_org_role(team):
return TeamRole.get(TeamRole.id == team.role.id)
def set_team_org_permission(team, team_role_name, set_by_username):
if team.role.name == 'admin' and team_role_name != 'admin':
# We need to make sure we're not removing the users only admin role
user_admin_teams = __get_user_admin_teams(team.organization.username,
team.name, set_by_username)
admin_team_set = {admin_team.name for admin_team in user_admin_teams}
if team.name in admin_team_set and len(admin_team_set) <= 1:
msg = (('Cannot remove admin from team \'%s\' because calling user ' +
'would no longer have admin on org \'%s\'') %
(team.name, team.organization.username))
raise DataModelException(msg)
new_role = TeamRole.get(TeamRole.name == team_role_name)
team.role = new_role
team.save()
return team
def create_federated_user(username, email, service_name, service_id,
set_password_notification, metadata={}):
if not is_create_user_allowed():
raise TooManyUsersException()
new_user = _create_user(username, email)
new_user.verified = True
new_user.save()
service = LoginService.get(LoginService.name == service_name)
FederatedLogin.create(user=new_user, service=service,
service_ident=service_id,
metadata_json=json.dumps(metadata))
if set_password_notification:
create_notification('password_required', new_user)
return new_user
def attach_federated_login(user, service_name, service_id, metadata={}):
service = LoginService.get(LoginService.name == service_name)
FederatedLogin.create(user=user, service=service, service_ident=service_id,
metadata_json=json.dumps(metadata))
return user
def verify_federated_login(service_name, service_id):
try:
found = (FederatedLogin
.select(FederatedLogin, User)
.join(LoginService)
.switch(FederatedLogin).join(User)
.where(FederatedLogin.service_ident == service_id, LoginService.name == service_name)
.get())
return found.user
except FederatedLogin.DoesNotExist:
return None
def list_federated_logins(user):
selected = FederatedLogin.select(FederatedLogin.service_ident,
LoginService.name, FederatedLogin.metadata_json)
joined = selected.join(LoginService)
return joined.where(LoginService.name != 'quayrobot',
FederatedLogin.user == user)
def create_confirm_email_code(user, new_email=None):
if new_email:
if not validate_email(new_email):
raise InvalidEmailAddressException('Invalid email address: %s' %
new_email)
code = EmailConfirmation.create(user=user, email_confirm=True,
new_email=new_email)
return code
def confirm_user_email(code):
try:
code = EmailConfirmation.get(EmailConfirmation.code == code,
EmailConfirmation.email_confirm == True)
except EmailConfirmation.DoesNotExist:
raise DataModelException('Invalid email confirmation code.')
user = code.user
user.verified = True
old_email = None
new_email = code.new_email
if new_email and new_email != old_email:
if find_user_by_email(new_email):
raise DataModelException('E-mail address already used.')
old_email = user.email
user.email = new_email
user.save()
code.delete_instance()
return user, new_email, old_email
def create_reset_password_email_code(email):
try:
user = User.get(User.email == email)
except User.DoesNotExist:
    raise InvalidEmailAddressException('Email address was not found.')
if user.organization:
raise InvalidEmailAddressException('Organizations can not have passwords.')
code = EmailConfirmation.create(user=user, pw_reset=True)
return code
def validate_reset_code(code):
try:
code = EmailConfirmation.get(EmailConfirmation.code == code,
EmailConfirmation.pw_reset == True)
except EmailConfirmation.DoesNotExist:
return None
user = code.user
code.delete_instance()
return user
def find_user_by_email(email):
try:
return User.get(User.email == email)
except User.DoesNotExist:
return None
def get_user(username):
try:
return User.get(User.username == username, User.organization == False)
except User.DoesNotExist:
return None
def get_namespace_user(username):
try:
return User.get(User.username == username)
except User.DoesNotExist:
return None
def get_user_or_org(username):
try:
return User.get(User.username == username, User.robot == False)
except User.DoesNotExist:
return None
def get_user_by_id(user_db_id):
try:
return User.get(User.id == user_db_id, User.organization == False)
except User.DoesNotExist:
return None
def get_namespace_by_user_id(namespace_user_db_id):
try:
return User.get(User.id == namespace_user_db_id, User.robot == False).username
except User.DoesNotExist:
raise InvalidUsernameException('User with id does not exist: %s' % namespace_user_db_id)
def get_user_by_uuid(user_uuid):
try:
return User.get(User.uuid == user_uuid, User.organization == False)
except User.DoesNotExist:
return None
def get_user_or_org_by_customer_id(customer_id):
try:
return User.get(User.stripe_id == customer_id)
except User.DoesNotExist:
return None
def get_matching_teams(team_prefix, organization):
query = Team.select().where(Team.name ** (team_prefix + '%'),
Team.organization == organization)
return query.limit(10)
def get_matching_users(username_prefix, robot_namespace=None,
organization=None):
direct_user_query = (User.username ** (username_prefix + '%') &
(User.organization == False) & (User.robot == False))
if robot_namespace:
robot_prefix = format_robot_username(robot_namespace, username_prefix)
direct_user_query = (direct_user_query |
(User.username ** (robot_prefix + '%') &
(User.robot == True)))
query = (User
.select(User.username, User.robot)
.group_by(User.username, User.robot)
.where(direct_user_query))
if organization:
query = (query
.select(User.username, User.robot, fn.Sum(Team.id))
.join(TeamMember, JOIN_LEFT_OUTER)
.join(Team, JOIN_LEFT_OUTER, on=((Team.id == TeamMember.team) &
(Team.organization == organization))))
class MatchingUserResult(object):
def __init__(self, *args):
self.username = args[0]
self.is_robot = args[1]
if organization:
self.is_org_member = (args[2] != None)
else:
self.is_org_member = None
return (MatchingUserResult(*args) for args in query.tuples().limit(10))
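# Check a username (or email) and password pair. Repeated failures raise TooManyLoginAttemptsException
# with a retry-after delay computed from the exponential backoff constant above.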
def verify_user(username_or_email, password):
try:
fetched = User.get((User.username == username_or_email) |
(User.email == username_or_email))
except User.DoesNotExist:
return None
now = datetime.utcnow()
if fetched.invalid_login_attempts > 0:
can_retry_at = exponential_backoff(fetched.invalid_login_attempts, EXPONENTIAL_BACKOFF_SCALE,
fetched.last_invalid_login)
if can_retry_at > now:
retry_after = can_retry_at - now
raise TooManyLoginAttemptsException('Too many login attempts.', retry_after.total_seconds())
if (fetched.password_hash and
hash_password(password, fetched.password_hash) == fetched.password_hash):
if fetched.invalid_login_attempts > 0:
fetched.invalid_login_attempts = 0
fetched.save()
return fetched
fetched.invalid_login_attempts += 1
fetched.last_invalid_login = now
fetched.save()
# We weren't able to authorize the user
return None
def get_user_organizations(username):
UserAlias = User.alias()
all_teams = User.select().distinct().join(Team).join(TeamMember)
with_user = all_teams.join(UserAlias, on=(UserAlias.id == TeamMember.user))
return with_user.where(User.organization == True,
UserAlias.username == username)
def get_organization(name):
try:
return User.get(username=name, organization=True)
except User.DoesNotExist:
raise InvalidOrganizationException('Organization does not exist: %s' %
name)
def get_organization_team(orgname, teamname):
joined = Team.select().join(User)
query = joined.where(Team.name == teamname, User.organization == True,
User.username == orgname).limit(1)
result = list(query)
if not result:
    raise InvalidTeamException('Team does not exist: %s/%s' % (orgname, teamname))
return result[0]
def get_organization_members_with_teams(organization, membername=None):
joined = TeamMember.select().annotate(Team).annotate(User)
query = joined.where(Team.organization == organization)
if membername:
query = query.where(User.username == membername)
return query
def get_organization_team_members(teamid):
joined = User.select().join(TeamMember).join(Team)
query = joined.where(Team.id == teamid)
return query
def get_organization_team_member_invites(teamid):
joined = TeamMemberInvite.select().join(Team).join(User)
query = joined.where(Team.id == teamid)
return query
def get_organization_member_set(orgname):
Org = User.alias()
org_users = (User.select(User.username)
.join(TeamMember)
.join(Team)
.join(Org, on=(Org.id == Team.organization))
.where(Org.username == orgname)
.distinct())
return {user.username for user in org_users}
def get_teams_within_org(organization):
return Team.select().where(Team.organization == organization)
def get_user_teams_within_org(username, organization):
joined = Team.select().join(TeamMember).join(User)
return joined.where(Team.organization == organization,
User.username == username)
def get_visible_repository_count(username=None, include_public=True,
namespace=None):
query = _visible_repository_query(username=username,
include_public=include_public,
namespace=namespace)
return query.count()
def get_visible_repositories(username=None, include_public=True, page=None,
limit=None, sort=False, namespace=None):
query = _visible_repository_query(username=username, include_public=include_public, page=page,
limit=limit, namespace=namespace,
select_models=[Repository, Namespace, Visibility])
if sort:
query = query.order_by(Repository.description.desc())
if limit:
query = query.limit(limit)
return query
def _visible_repository_query(username=None, include_public=True, limit=None,
page=None, namespace=None, select_models=[]):
query = (Repository
           .select(*select_models)  # MySQL/RDS complains if there are selected models for counts.
.distinct()
.join(Visibility)
.switch(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Repository)
.join(RepositoryPermission, JOIN_LEFT_OUTER))
query = _filter_to_repos_for_user(query, username, namespace, include_public)
if page:
query = query.paginate(page, limit)
elif limit:
query = query.limit(limit)
return query
def _filter_to_repos_for_user(query, username=None, namespace=None, include_public=True):
if not include_public and not username:
return Repository.select().where(Repository.id == '-1')
where_clause = None
if username:
UserThroughTeam = User.alias()
Org = User.alias()
AdminTeam = Team.alias()
AdminTeamMember = TeamMember.alias()
AdminUser = User.alias()
query = (query
.switch(RepositoryPermission)
.join(User, JOIN_LEFT_OUTER)
.switch(RepositoryPermission)
.join(Team, JOIN_LEFT_OUTER)
.join(TeamMember, JOIN_LEFT_OUTER)
.join(UserThroughTeam, JOIN_LEFT_OUTER, on=(UserThroughTeam.id == TeamMember.user))
.switch(Repository)
.join(Org, JOIN_LEFT_OUTER, on=(Repository.namespace_user == Org.id))
.join(AdminTeam, JOIN_LEFT_OUTER, on=(Org.id == AdminTeam.organization))
.join(TeamRole, JOIN_LEFT_OUTER, on=(AdminTeam.role == TeamRole.id))
.switch(AdminTeam)
.join(AdminTeamMember, JOIN_LEFT_OUTER, on=(AdminTeam.id == AdminTeamMember.team))
.join(AdminUser, JOIN_LEFT_OUTER, on=(AdminTeamMember.user == AdminUser.id)))
where_clause = ((User.username == username) | (UserThroughTeam.username == username) |
((AdminUser.username == username) & (TeamRole.name == 'admin')))
if namespace:
where_clause = where_clause & (Namespace.username == namespace)
# TODO(jschorr, jake): Figure out why the old join on Visibility was so darn slow and
# remove this hack.
if include_public:
new_clause = (Repository.visibility == _get_public_repo_visibility())
if where_clause:
where_clause = where_clause | new_clause
else:
where_clause = new_clause
return query.where(where_clause)
_public_repo_visibility_cache = None
def _get_public_repo_visibility():
global _public_repo_visibility_cache
if not _public_repo_visibility_cache:
_public_repo_visibility_cache = Visibility.get(name='public')
return _public_repo_visibility_cache
def get_matching_repositories(repo_term, username=None):
namespace_term = repo_term
name_term = repo_term
visible = get_visible_repositories(username)
search_clauses = (Repository.name ** ('%' + name_term + '%') |
Namespace.username ** ('%' + namespace_term + '%'))
# Handle the case where the user has already entered a namespace path.
if repo_term.find('/') > 0:
parts = repo_term.split('/', 1)
namespace_term = '/'.join(parts[:-1])
name_term = parts[-1]
search_clauses = (Repository.name ** ('%' + name_term + '%') &
Namespace.username ** ('%' + namespace_term + '%'))
final = visible.where(search_clauses).limit(10)
return list(final)
def change_password(user, new_password):
if not validate_password(new_password):
raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE)
pw_hash = hash_password(new_password)
user.invalid_login_attempts = 0
user.password_hash = pw_hash
user.save()
# Remove any password required notifications for the user.
delete_notifications_by_kind(user, 'password_required')
def change_username(user_id, new_username):
(username_valid, username_issue) = validate_username(new_username)
if not username_valid:
raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Reload the user for update
user = db_for_update(User.select().where(User.id == user_id)).get()
# Rename the robots
for robot in db_for_update(_list_entity_robots(user.username)):
_, robot_shortname = parse_robot_username(robot.username)
new_robot_name = format_robot_username(new_username, robot_shortname)
robot.username = new_robot_name
robot.save()
# Rename the user
user.username = new_username
user.save()
def change_invoice_email(user, invoice_email):
user.invoice_email = invoice_email
user.save()
def change_user_tag_expiration(user, tag_expiration_s):
user.removed_tag_expiration_s = tag_expiration_s
user.save()
def update_email(user, new_email, auto_verify=False):
user.email = new_email
user.verified = auto_verify
user.save()
def get_all_user_permissions(user):
UserThroughTeam = User.alias()
return (RepositoryPermission
.select(RepositoryPermission, Role, Repository, Namespace)
.join(Role)
.switch(RepositoryPermission)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryPermission)
.join(User, JOIN_LEFT_OUTER)
.switch(RepositoryPermission)
.join(Team, JOIN_LEFT_OUTER).join(TeamMember, JOIN_LEFT_OUTER)
.join(UserThroughTeam, JOIN_LEFT_OUTER, on=(UserThroughTeam.id == TeamMember.user))
.where((User.id == user) | (UserThroughTeam.id == user)))
def delete_prototype_permission(org, uid):
found = get_prototype_permission(org, uid)
if not found:
return None
found.delete_instance()
return found
def get_prototype_permission(org, uid):
try:
return PermissionPrototype.get(PermissionPrototype.org == org,
PermissionPrototype.uuid == uid)
except PermissionPrototype.DoesNotExist:
return None
def get_prototype_permissions(org):
ActivatingUser = User.alias()
DelegateUser = User.alias()
query = (PermissionPrototype
.select()
.where(PermissionPrototype.org == org)
.join(ActivatingUser, JOIN_LEFT_OUTER,
on=(ActivatingUser.id == PermissionPrototype.activating_user))
.join(DelegateUser, JOIN_LEFT_OUTER,
on=(DelegateUser.id == PermissionPrototype.delegate_user))
.join(Team, JOIN_LEFT_OUTER,
on=(Team.id == PermissionPrototype.delegate_team))
.join(Role, JOIN_LEFT_OUTER, on=(Role.id == PermissionPrototype.role)))
return query
def update_prototype_permission(org, uid, role_name):
found = get_prototype_permission(org, uid)
if not found:
return None
new_role = Role.get(Role.name == role_name)
found.role = new_role
found.save()
return found
def add_prototype_permission(org, role_name, activating_user,
delegate_user=None, delegate_team=None):
new_role = Role.get(Role.name == role_name)
return PermissionPrototype.create(org=org, role=new_role,
activating_user=activating_user,
delegate_user=delegate_user, delegate_team=delegate_team)
def get_org_wide_permissions(user):
Org = User.alias()
team_with_role = Team.select(Team, Org, TeamRole).join(TeamRole)
with_org = team_with_role.switch(Team).join(Org, on=(Team.organization ==
Org.id))
with_user = with_org.switch(Team).join(TeamMember).join(User)
return with_user.where(User.id == user, Org.organization == True)
def get_all_repo_teams(namespace_name, repository_name):
return (RepositoryPermission.select(Team.name.alias('team_name'), Role.name, RepositoryPermission)
.join(Team)
.switch(RepositoryPermission)
.join(Role)
.switch(RepositoryPermission)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name))
def get_all_repo_users(namespace_name, repository_name):
return (RepositoryPermission.select(User.username, User.robot, Role.name, RepositoryPermission)
.join(User)
.switch(RepositoryPermission)
.join(Role)
.switch(RepositoryPermission)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name))
def get_all_repo_users_transitive_via_teams(namespace_name, repository_name):
return (User
.select()
.distinct()
.join(TeamMember)
.join(Team)
.join(RepositoryPermission)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name))
def get_all_repo_users_transitive(namespace_name, repository_name):
# Load the users found via teams and directly via permissions.
via_teams = get_all_repo_users_transitive_via_teams(namespace_name, repository_name)
directly = [perm.user for perm in get_all_repo_users(namespace_name, repository_name)]
# Filter duplicates.
user_set = set()
def check_add(u):
if u.username in user_set:
return False
user_set.add(u.username)
return True
return [user for user in list(directly) + list(via_teams) if check_add(user)]
def get_repository_for_resource(resource_key):
try:
return (Repository
.select(Repository, Namespace)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Repository)
.join(RepositoryBuild)
.where(RepositoryBuild.resource_key == resource_key)
.get())
except Repository.DoesNotExist:
return None
def lookup_repository(repo_id):
try:
return Repository.get(Repository.id == repo_id)
except Repository.DoesNotExist:
return None
def get_repository(namespace_name, repository_name):
try:
return _get_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
return None
def get_image(repo, dockerfile_id):
try:
return Image.get(Image.docker_image_id == dockerfile_id, Image.repository == repo)
except Image.DoesNotExist:
return None
def find_child_image(repo, parent_image, command):
try:
return (Image.select()
.join(ImageStorage)
.switch(Image)
             .where(Image.ancestors % ('%/' + str(parent_image.id) + '/%'),
ImageStorage.command == command)
.order_by(ImageStorage.created.desc())
.get())
except Image.DoesNotExist:
return None
def get_repo_image(namespace_name, repository_name, docker_image_id):
def limit_to_image_id(query):
return query.where(Image.docker_image_id == docker_image_id).limit(1)
query = _get_repository_images(namespace_name, repository_name, limit_to_image_id)
try:
return query.get()
except Image.DoesNotExist:
return None
def get_repo_image_extended(namespace_name, repository_name, docker_image_id):
def limit_to_image_id(query):
return query.where(Image.docker_image_id == docker_image_id).limit(1)
images = _get_repository_images_base(namespace_name, repository_name, limit_to_image_id)
if not images:
return None
return images[0]
def repository_is_public(namespace_name, repository_name):
try:
(Repository
.select()
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Repository)
.join(Visibility)
.where(Namespace.username == namespace_name, Repository.name == repository_name,
Visibility.name == 'public')
.get())
return True
except Repository.DoesNotExist:
return False
def set_repository_visibility(repo, visibility):
visibility_obj = Visibility.get(name=visibility)
if not visibility_obj:
return
repo.visibility = visibility_obj
repo.save()
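# Apply an organization's permission prototypes to a newly created repository. Prototypes tied to a
# specific activating user take precedence over org-wide prototypes for the same delegate.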
def __apply_default_permissions(repo, proto_query, name_property,
create_permission_func):
final_protos = {}
for proto in proto_query:
applies_to = proto.delegate_team or proto.delegate_user
name = getattr(applies_to, name_property)
# We will skip the proto if it is pre-empted by a more important proto
if name in final_protos and proto.activating_user is None:
continue
# By this point, it is either a user specific proto, or there is no
# proto yet, so we can safely assume it applies
final_protos[name] = (applies_to, proto.role)
for delegate, role in final_protos.values():
create_permission_func(delegate, repo, role)
def create_repository(namespace, name, creating_user, visibility='private'):
private = Visibility.get(name=visibility)
namespace_user = User.get(username=namespace)
repo = Repository.create(name=name, visibility=private, namespace_user=namespace_user)
admin = Role.get(name='admin')
if creating_user and not creating_user.organization:
RepositoryPermission.create(user=creating_user, repository=repo,
role=admin)
if creating_user.username != namespace:
# Permission prototypes only work for orgs
org = get_organization(namespace)
user_clause = ((PermissionPrototype.activating_user == creating_user) |
(PermissionPrototype.activating_user >> None))
team_protos = (PermissionPrototype
.select()
.where(PermissionPrototype.org == org, user_clause,
PermissionPrototype.delegate_user >> None))
def create_team_permission(team, repo, role):
RepositoryPermission.create(team=team, repository=repo, role=role)
__apply_default_permissions(repo, team_protos, 'name',
create_team_permission)
user_protos = (PermissionPrototype
.select()
.where(PermissionPrototype.org == org, user_clause,
PermissionPrototype.delegate_team >> None))
def create_user_permission(user, repo, role):
# The creating user always gets admin anyway
if user.username == creating_user.username:
return
RepositoryPermission.create(user=user, repository=repo, role=role)
__apply_default_permissions(repo, user_protos, 'username',
create_user_permission)
return repo
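# Image ancestry is stored as a '/'-delimited string of database ids (e.g. '/1/2/3/'). This rewrites an
# ancestry so every ancestor refers to an image that exists in (or is linked into) the target repository.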
def __translate_ancestry(old_ancestry, translations, repository, username, preferred_location):
if old_ancestry == '/':
return '/'
def translate_id(old_id, docker_image_id):
logger.debug('Translating id: %s', old_id)
if old_id not in translations:
image_in_repo = find_create_or_link_image(docker_image_id, repository, username,
translations, preferred_location)
translations[old_id] = image_in_repo.id
return translations[old_id]
# Select all the ancestor Docker IDs in a single query.
old_ids = [int(id_str) for id_str in old_ancestry.split('/')[1:-1]]
query = Image.select(Image.id, Image.docker_image_id).where(Image.id << old_ids)
old_images = {i.id: i.docker_image_id for i in query}
# Translate the old images into new ones.
new_ids = [str(translate_id(old_id, old_images[old_id])) for old_id in old_ids]
return '/%s/' % '/'.join(new_ids)
def _create_storage(location_name):
storage = ImageStorage.create()
location = ImageStorageLocation.get(name=location_name)
ImageStoragePlacement.create(location=location, storage=storage)
storage.locations = {location_name}
return storage
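# Link an image that already exists in another repository visible to the user into this repository,
# reusing its storage and translating its ancestry.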
def _find_or_link_image(existing_image, repository, username, translations, preferred_location):
# TODO(jake): This call is currently recursively done under a single transaction. Can we make
# it instead be done under a set of transactions?
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Check for an existing image, under the transaction, to make sure it doesn't already exist.
repo_image = get_repo_image(repository.namespace_user.username, repository.name,
existing_image.docker_image_id)
if repo_image:
return repo_image
# Make sure the existing base image still exists.
try:
to_copy = Image.select().join(ImageStorage).where(Image.id == existing_image.id).get()
msg = 'Linking image to existing storage with docker id: %s and uuid: %s'
logger.debug(msg, existing_image.docker_image_id, to_copy.storage.uuid)
new_image_ancestry = __translate_ancestry(to_copy.ancestors, translations, repository,
username, preferred_location)
storage = to_copy.storage
storage.locations = {placement.location.name
for placement in storage.imagestorageplacement_set}
new_image = Image.create(docker_image_id=existing_image.docker_image_id,
repository=repository, storage=storage,
ancestors=new_image_ancestry)
logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
translations[existing_image.id] = new_image.id
return new_image
except Image.DoesNotExist:
return None
def find_create_or_link_image(docker_image_id, repository, username, translations,
preferred_location):
# First check for the image existing in the repository. If found, we simply return it.
repo_image = get_repo_image(repository.namespace_user.username, repository.name,
docker_image_id)
if repo_image:
return repo_image
# We next check to see if there is an existing storage the new image can link to.
existing_image_query = (Image
.select(Image, ImageStorage)
.distinct()
.join(ImageStorage)
.switch(Image)
.join(Repository)
.join(RepositoryPermission, JOIN_LEFT_OUTER)
.switch(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(ImageStorage.uploading == False))
existing_image_query = (_filter_to_repos_for_user(existing_image_query, username)
.where(Image.docker_image_id == docker_image_id))
# If there is an existing image, we try to translate its ancestry and copy its storage.
new_image = None
try:
logger.debug('Looking up existing image for ID: %s', docker_image_id)
existing_image = existing_image_query.get()
logger.debug('Existing image %s found for ID: %s', existing_image.id, docker_image_id)
new_image = _find_or_link_image(existing_image, repository, username, translations,
preferred_location)
if new_image:
return new_image
except Image.DoesNotExist:
logger.debug('No existing image found for ID: %s', docker_image_id)
pass
# Otherwise, create a new storage directly.
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Final check for an existing image, under the transaction.
repo_image = get_repo_image(repository.namespace_user.username, repository.name,
docker_image_id)
if repo_image:
return repo_image
logger.debug('Creating new storage for docker id: %s', docker_image_id)
storage = _create_storage(preferred_location)
return Image.create(docker_image_id=docker_image_id,
repository=repository, storage=storage,
ancestors='/')
def find_or_create_storage_signature(storage, signature_kind):
found = lookup_storage_signature(storage, signature_kind)
if found is None:
kind = ImageStorageSignatureKind.get(name=signature_kind)
found = ImageStorageSignature.create(storage=storage, kind=kind)
return found
def lookup_storage_signature(storage, signature_kind):
kind = ImageStorageSignatureKind.get(name=signature_kind)
try:
return (ImageStorageSignature
.select()
.where(ImageStorageSignature.storage == storage,
ImageStorageSignature.kind == kind)
.get())
except ImageStorageSignature.DoesNotExist:
return None
def find_derived_storage(source, transformation_name):
try:
found = (ImageStorage
.select(ImageStorage, DerivedImageStorage)
.join(DerivedImageStorage, on=(ImageStorage.id == DerivedImageStorage.derivative))
.join(ImageStorageTransformation)
.where(DerivedImageStorage.source == source,
ImageStorageTransformation.name == transformation_name)
.get())
found.locations = {placement.location.name for placement in found.imagestorageplacement_set}
return found
except ImageStorage.DoesNotExist:
return None
def find_or_create_derived_storage(source, transformation_name, preferred_location):
existing = find_derived_storage(source, transformation_name)
if existing is not None:
return existing
  logger.debug('Creating storage derived from source: %s', source.uuid)
trans = ImageStorageTransformation.get(name=transformation_name)
new_storage = _create_storage(preferred_location)
DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
return new_storage
def delete_derived_storage_by_uuid(storage_uuid):
try:
image_storage = get_storage_by_uuid(storage_uuid)
except InvalidImageException:
return
try:
DerivedImageStorage.get(derivative=image_storage)
except DerivedImageStorage.DoesNotExist:
return
image_storage.delete_instance(recursive=True)
def get_storage_by_uuid(storage_uuid):
placements = list(ImageStoragePlacement
.select(ImageStoragePlacement, ImageStorage, ImageStorageLocation)
.join(ImageStorageLocation)
.switch(ImageStoragePlacement)
.join(ImageStorage)
.where(ImageStorage.uuid == storage_uuid))
if not placements:
    raise InvalidImageException('No storage found with uuid: %s' % storage_uuid)
found = placements[0].storage
found.locations = {placement.location.name for placement in placements}
return found
def set_image_size(docker_image_id, namespace_name, repository_name, image_size, uncompressed_size):
try:
image = (Image
.select(Image, ImageStorage)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name, Namespace.username == namespace_name,
Image.docker_image_id == docker_image_id)
.get())
except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository')
image.storage.image_size = image_size
image.storage.uncompressed_size = uncompressed_size
ancestors = image.ancestors.split('/')[1:-1]
if ancestors:
try:
# TODO(jschorr): Switch to this faster route once we have full ancestor aggregate_size
# parent_image = Image.get(Image.id == ancestors[-1])
# total_size = image_size + parent_image.storage.aggregate_size
      # .scalar() collapses the aggregate into a single summed value before adding this image's size.
      total_size = (ImageStorage.select(fn.Sum(ImageStorage.image_size))
                    .join(Image)
                    .where(Image.id << ancestors)
                    .scalar()) + image_size
image.storage.aggregate_size = total_size
except Image.DoesNotExist:
pass
else:
image.storage.aggregate_size = image_size
image.storage.save()
return image
def set_image_metadata(docker_image_id, namespace_name, repository_name, created_date_str, comment,
command, parent=None):
with config.app_config['DB_TRANSACTION_FACTORY'](db):
query = (Image
.select(Image, ImageStorage)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Image)
.join(ImageStorage)
.where(Repository.name == repository_name, Namespace.username == namespace_name,
Image.docker_image_id == docker_image_id))
try:
fetched = db_for_update(query).get()
except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository')
    # We clean up any old checksum in case it's a retry after a failure.
fetched.storage.checksum = None
fetched.storage.created = datetime.now()
if created_date_str is not None:
try:
fetched.storage.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
except:
# parse raises different exceptions, so we cannot use a specific kind of handler here.
pass
fetched.storage.comment = comment
fetched.storage.command = command
if parent:
fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
fetched.save()
fetched.storage.save()
return fetched
def _get_repository_images(namespace_name, repository_name, query_modifier):
query = (Image
.select()
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name, Namespace.username == namespace_name))
query = query_modifier(query)
return query
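# Load a repository's images along with their storage placements, collapsing the placement rows into a
# 'locations' set on each image's storage object.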
def _get_repository_images_base(namespace_name, repository_name, query_modifier):
query = (ImageStoragePlacement
.select(ImageStoragePlacement, Image, ImageStorage, ImageStorageLocation)
.join(ImageStorageLocation)
.switch(ImageStoragePlacement)
.join(ImageStorage, JOIN_LEFT_OUTER)
.join(Image)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name, Namespace.username == namespace_name))
query = query_modifier(query)
location_list = list(query)
images = {}
for location in location_list:
# Make sure we're always retrieving the same image object.
image = location.storage.image
# Set the storage to the one we got from the location, to prevent another query
image.storage = location.storage
    if image.id not in images:
images[image.id] = image
image.storage.locations = set()
else:
image = images[image.id]
# Add the location to the image's locations set.
image.storage.locations.add(location.location.name)
return images.values()
def lookup_repository_images(namespace_name, repository_name, docker_image_ids):
return (Image
.select()
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name, Namespace.username == namespace_name,
Image.docker_image_id << docker_image_ids))
def get_matching_repository_images(namespace_name, repository_name, docker_image_ids):
def modify_query(q):
return q.where(Image.docker_image_id << docker_image_ids)
return _get_repository_images_base(namespace_name, repository_name, modify_query)
def get_repository_images(namespace_name, repository_name):
return _get_repository_images_base(namespace_name, repository_name, lambda q: q)
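# A tag is considered alive when its lifetime has no end timestamp or the end timestamp is in the future.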
def _tag_alive(query):
return query.where((RepositoryTag.lifetime_end_ts >> None) |
(RepositoryTag.lifetime_end_ts > int(time.time())))
def list_repository_tags(namespace_name, repository_name, include_hidden=False):
query = _tag_alive(RepositoryTag
.select(RepositoryTag, Image)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryTag)
.join(Image)
.where(Repository.name == repository_name,
Namespace.username == namespace_name))
if not include_hidden:
query = query.where(RepositoryTag.hidden == False)
return query
def _garbage_collect_tags(namespace_name, repository_name):
# We do this without using a join to prevent holding read locks on the repository table
repo = _get_repository(namespace_name, repository_name)
now = int(time.time())
(RepositoryTag
.delete()
.where(RepositoryTag.repository == repo,
~(RepositoryTag.lifetime_end_ts >> None),
(RepositoryTag.lifetime_end_ts + repo.namespace_user.removed_tag_expiration_s) <= now)
.execute())
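# Delete images that are no longer referenced by any live tag in the repository, then garbage collect
# the storage records (and backing blobs) that become unreferenced as a result.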
def garbage_collect_repository(namespace_name, repository_name):
storage_id_whitelist = {}
_garbage_collect_tags(namespace_name, repository_name)
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# TODO (jake): We could probably select this and all the images in a single query using
# a different kind of join.
# Get a list of all images used by tags in the repository
tag_query = (RepositoryTag
.select(RepositoryTag, Image, ImageStorage)
.join(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.switch(RepositoryTag)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name, Namespace.username == namespace_name))
    referenced_ancestors = set()
    for tag in tag_query:
      # The ancestor list is in the format '/1/2/3/'; extract just the ids.
      ancestor_id_strings = tag.image.ancestors.split('/')[1:-1]
      ancestor_list = [int(img_id_str) for img_id_str in ancestor_id_strings]
      referenced_ancestors = referenced_ancestors.union(set(ancestor_list))
      referenced_ancestors.add(tag.image.id)
    all_repo_images = _get_repository_images(namespace_name, repository_name, lambda q: q)
    all_images = {int(img.id): img for img in all_repo_images}
    to_remove = set(all_images.keys()).difference(referenced_ancestors)
if len(to_remove) > 0:
logger.info('Cleaning up unreferenced images: %s', to_remove)
storage_id_whitelist = {all_images[to_remove_id].storage.id for to_remove_id in to_remove}
Image.delete().where(Image.id << list(to_remove)).execute()
if len(to_remove) > 0:
logger.info('Garbage collecting storage for images: %s', to_remove)
_garbage_collect_storage(storage_id_whitelist)
def _garbage_collect_storage(storage_id_whitelist):
if len(storage_id_whitelist) == 0:
return
def placements_query_to_paths_set(placements_query):
return {(placement.location.name, config.store.image_path(placement.storage.uuid))
for placement in placements_query}
def orphaned_storage_query(select_base_query, candidates, group_by):
return (select_base_query
.switch(ImageStorage)
.join(Image, JOIN_LEFT_OUTER)
.switch(ImageStorage)
.join(DerivedImageStorage, JOIN_LEFT_OUTER,
on=(ImageStorage.id == DerivedImageStorage.derivative))
.where(ImageStorage.id << list(candidates))
.group_by(*group_by)
.having((fn.Count(Image.id) == 0) & (fn.Count(DerivedImageStorage.id) == 0)))
# Note: We remove the derived image storage in its own transaction as a way to reduce the
  # time that the transaction holds on the database indices. This could result in a derived
# image storage being deleted for an image storage which is later reused during this time,
# but since these are caches anyway, it isn't terrible and worth the tradeoff (for now).
logger.debug('Garbage collecting derived storage from candidates: %s', storage_id_whitelist)
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Find out which derived storages will be removed, and add them to the whitelist
# The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence
orphaned_from_candidates = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
storage_id_whitelist,
(ImageStorage.id,)))
if len(orphaned_from_candidates) > 0:
derived_to_remove = (ImageStorage
.select(ImageStorage.id)
.join(DerivedImageStorage,
on=(ImageStorage.id == DerivedImageStorage.derivative))
.where(DerivedImageStorage.source << orphaned_from_candidates))
storage_id_whitelist.update({derived.id for derived in derived_to_remove})
      # Remove the derived image storages with sources of orphaned storages
(DerivedImageStorage
.delete()
.where(DerivedImageStorage.source << orphaned_from_candidates)
.execute())
# Note: Both of these deletes must occur in the same transaction (unfortunately) because a
# storage without any placement is invalid, and a placement cannot exist without a storage.
# TODO(jake): We might want to allow for null storages on placements, which would allow us to
# delete the storages, then delete the placements in a non-transaction.
logger.debug('Garbage collecting storages from candidates: %s', storage_id_whitelist)
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Track all of the data that should be removed from blob storage
placements_to_remove = orphaned_storage_query(ImageStoragePlacement
.select(ImageStoragePlacement,
ImageStorage,
ImageStorageLocation)
.join(ImageStorageLocation)
.switch(ImageStoragePlacement)
.join(ImageStorage),
storage_id_whitelist,
(ImageStorage, ImageStoragePlacement,
ImageStorageLocation))
paths_to_remove = placements_query_to_paths_set(placements_to_remove.clone())
# Remove the placements for orphaned storages
placements_subquery = (placements_to_remove
.clone()
.select(ImageStoragePlacement.id)
.alias('ps'))
inner = (ImageStoragePlacement
.select(placements_subquery.c.id)
.from_(placements_subquery))
placements_removed = (ImageStoragePlacement
.delete()
.where(ImageStoragePlacement.id << inner)
.execute())
logger.debug('Removed %s image storage placements', placements_removed)
# Remove all orphaned storages
# The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence
orphaned_storages = orphaned_storage_query(ImageStorage.select(ImageStorage.id),
storage_id_whitelist,
(ImageStorage.id,)).alias('osq')
orphaned_storage_inner = (ImageStorage
.select(orphaned_storages.c.id)
.from_(orphaned_storages))
storages_removed = (ImageStorage
.delete()
.where(ImageStorage.id << orphaned_storage_inner)
.execute())
logger.debug('Removed %s image storage records', storages_removed)
# We are going to make the conscious decision to not delete image storage blobs inside
# transactions.
# This may end up producing garbage in s3, trading off for higher availability in the database.
for location_name, image_path in paths_to_remove:
logger.debug('Removing %s from %s', image_path, location_name)
config.store.remove({location_name}, image_path)
def get_tag_image(namespace_name, repository_name, tag_name):
def limit_to_tag(query):
return _tag_alive(query
.switch(Image)
.join(RepositoryTag)
.where(RepositoryTag.name == tag_name))
images = _get_repository_images_base(namespace_name, repository_name, limit_to_tag)
if not images:
raise DataModelException('Unable to find image for tag.')
else:
return images[0]
def get_image_by_id(namespace_name, repository_name, docker_image_id):
image = get_repo_image_extended(namespace_name, repository_name, docker_image_id)
if not image:
raise DataModelException('Unable to find image \'%s\' for repo \'%s/%s\'' %
(docker_image_id, namespace_name, repository_name))
return image
def get_parent_images(namespace_name, repository_name, image_obj):
""" Returns a list of parent Image objects in chronilogical order. """
parents = image_obj.ancestors
parent_db_ids = parents.strip('/').split('/')
if parent_db_ids == ['']:
return []
def filter_to_parents(query):
return query.where(Image.id << parent_db_ids)
parents = _get_repository_images_base(namespace_name, repository_name, filter_to_parents)
id_to_image = {unicode(image.id): image for image in parents}
return [id_to_image[parent_id] for parent_id in parent_db_ids]
def create_or_update_tag(namespace_name, repository_name, tag_name,
tag_docker_image_id):
with config.app_config['DB_TRANSACTION_FACTORY'](db):
try:
repo = _get_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
try:
image = Image.get(Image.docker_image_id == tag_docker_image_id, Image.repository == repo)
except Image.DoesNotExist:
raise DataModelException('Invalid image with id: %s' % tag_docker_image_id)
now_ts = int(time.time())
created = RepositoryTag.create(repository=repo, image=image, name=tag_name,
lifetime_start_ts=now_ts)
try:
# When we move a tag, we really end the timeline of the old one and create a new one
query = _tag_alive(RepositoryTag
.select()
.where(RepositoryTag.repository == repo, RepositoryTag.name == tag_name,
RepositoryTag.id != created.id))
tag = query.get()
tag.lifetime_end_ts = now_ts
tag.save()
except RepositoryTag.DoesNotExist:
# No tag that needs to be ended
pass
return created
def delete_tag(namespace_name, repository_name, tag_name):
with config.app_config['DB_TRANSACTION_FACTORY'](db):
try:
query = _tag_alive(RepositoryTag
.select(RepositoryTag, Repository)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name,
Namespace.username == namespace_name,
RepositoryTag.name == tag_name))
found = db_for_update(query).get()
except RepositoryTag.DoesNotExist:
msg = ('Invalid repository tag \'%s\' on repository \'%s/%s\'' %
(tag_name, namespace_name, repository_name))
raise DataModelException(msg)
found.lifetime_end_ts = int(time.time())
found.save()
def create_temporary_hidden_tag(repo, image, expiration_s):
""" Create a tag with a defined timeline, that will not appear in the UI or CLI. Returns the name
of the temporary tag. """
now_ts = int(time.time())
expire_ts = now_ts + expiration_s
tag_name = str(uuid4())
RepositoryTag.create(repository=repo, image=image, name=tag_name, lifetime_start_ts=now_ts,
lifetime_end_ts=expire_ts, hidden=True)
return tag_name
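# A minimal usage sketch (hypothetical caller): hide an image behind a temporary tag for
# ten minutes while an upload completes:
#   create_temporary_hidden_tag(repo, image, 10 * 60)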
def purge_all_repository_tags(namespace_name, repository_name):
""" Immediately purge all repository tags without respecting the lifeline procedure """
try:
repo = _get_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
raise DataModelException('Invalid repository \'%s/%s\'' %
(namespace_name, repository_name))
RepositoryTag.delete().where(RepositoryTag.repository == repo.id).execute()
def __entity_permission_repo_query(entity_id, entity_table, entity_id_property, namespace_name,
repository_name):
""" This method works for both users and teams. """
return (RepositoryPermission
.select(entity_table, Repository, Namespace, Role, RepositoryPermission)
.join(entity_table)
.switch(RepositoryPermission)
.join(Role)
.switch(RepositoryPermission)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name, Namespace.username == namespace_name,
entity_id_property == entity_id))
def get_user_reponame_permission(username, namespace_name, repository_name):
fetched = list(__entity_permission_repo_query(username, User, User.username,
namespace_name,
repository_name))
if not fetched:
raise DataModelException('User does not have permission for repo.')
return fetched[0]
def get_team_reponame_permission(team_name, namespace_name, repository_name):
fetched = list(__entity_permission_repo_query(team_name, Team, Team.name,
namespace_name,
repository_name))
if not fetched:
raise DataModelException('Team does not have permission for repo.')
return fetched[0]
def delete_user_permission(username, namespace_name, repository_name):
if username == namespace_name:
raise DataModelException('Namespace owner must always be admin.')
fetched = list(__entity_permission_repo_query(username, User, User.username,
namespace_name,
repository_name))
if not fetched:
raise DataModelException('User does not have permission for repo.')
fetched[0].delete_instance()
def delete_team_permission(team_name, namespace_name, repository_name):
fetched = list(__entity_permission_repo_query(team_name, Team, Team.name,
namespace_name,
repository_name))
if not fetched:
raise DataModelException('Team does not have permission for repo.')
fetched[0].delete_instance()
def __set_entity_repo_permission(entity, permission_entity_property,
namespace_name, repository_name, role_name):
repo = _get_repository(namespace_name, repository_name)
new_role = Role.get(Role.name == role_name)
# Fetch any existing permission for this entity on the repo
try:
entity_attr = getattr(RepositoryPermission, permission_entity_property)
perm = RepositoryPermission.get(entity_attr == entity,
RepositoryPermission.repository == repo)
perm.role = new_role
perm.save()
return perm
except RepositoryPermission.DoesNotExist:
set_entity_kwargs = {permission_entity_property: entity}
new_perm = RepositoryPermission.create(repository=repo, role=new_role,
**set_entity_kwargs)
return new_perm
def set_user_repo_permission(username, namespace_name, repository_name,
role_name):
if username == namespace_name:
raise DataModelException('Namespace owner must always be admin.')
try:
user = User.get(User.username == username)
except User.DoesNotExist:
raise InvalidUsernameException('Invalid username: %s' % username)
return __set_entity_repo_permission(user, 'user', namespace_name,
repository_name, role_name)
def set_team_repo_permission(team_name, namespace_name, repository_name,
role_name):
team = list(Team.select().join(User).where(Team.name == team_name,
User.username == namespace_name))
if not team:
raise DataModelException('No team \'%s\' in organization \'%s\'.' %
(team_name, namespace_name))
return __set_entity_repo_permission(team[0], 'team', namespace_name,
repository_name, role_name)
def purge_repository(namespace_name, repository_name):
  # Delete all tags to allow GC to reclaim storage
  purge_all_repository_tags(namespace_name, repository_name)
  # Run GC to remove the images and storage
garbage_collect_repository(namespace_name, repository_name)
# Delete the rest of the repository metadata
fetched = _get_repository(namespace_name, repository_name)
fetched.delete_instance(recursive=True, delete_nullable=True)
def get_private_repo_count(username):
return (Repository
.select()
.join(Visibility)
.switch(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == username, Visibility.name == 'private')
.count())
def create_access_token(repository, role, kind=None, friendly_name=None):
role = Role.get(Role.name == role)
kind_ref = None
if kind is not None:
kind_ref = AccessTokenKind.get(AccessTokenKind.name == kind)
new_token = AccessToken.create(repository=repository, temporary=True,
role=role, kind=kind_ref, friendly_name=friendly_name)
return new_token
def create_delegate_token(namespace_name, repository_name, friendly_name,
role='read'):
read_only = Role.get(name=role)
repo = _get_repository(namespace_name, repository_name)
new_token = AccessToken.create(repository=repo, role=read_only,
friendly_name=friendly_name, temporary=False)
return new_token
def get_repository_delegate_tokens(namespace_name, repository_name):
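  # Tokens owned by a build trigger are excluded: the LEFT OUTER JOIN combined with the
  # 'RepositoryBuildTrigger.uuid >> None' (IS NULL) filter keeps only trigger-less tokens.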
return (AccessToken
.select(AccessToken, Role)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(AccessToken)
.join(Role)
.switch(AccessToken)
.join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name, Namespace.username == namespace_name,
AccessToken.temporary == False, RepositoryBuildTrigger.uuid >> None))
def get_repo_delegate_token(namespace_name, repository_name, code):
repo_query = get_repository_delegate_tokens(namespace_name, repository_name)
try:
return repo_query.where(AccessToken.code == code).get()
except AccessToken.DoesNotExist:
raise InvalidTokenException('Unable to find token with code: %s' % code)
def set_repo_delegate_token_role(namespace_name, repository_name, code, role):
token = get_repo_delegate_token(namespace_name, repository_name, code)
if role != 'read' and role != 'write':
raise DataModelException('Invalid role for delegate token: %s' % role)
new_role = Role.get(Role.name == role)
token.role = new_role
token.save()
return token
def delete_delegate_token(namespace_name, repository_name, code):
token = get_repo_delegate_token(namespace_name, repository_name, code)
token.delete_instance(recursive=True)
return token
def load_token_data(code):
""" Load the permissions for any token by code. """
try:
return (AccessToken
.select(AccessToken, Repository, Namespace, Role)
.join(Role)
.switch(AccessToken)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(AccessToken.code == code)
.get())
except AccessToken.DoesNotExist:
raise InvalidTokenException('Invalid delegate token code: %s' % code)
def _get_build_base_query():
return (RepositoryBuild
.select(RepositoryBuild, RepositoryBuildTrigger, BuildTriggerService, Repository,
Namespace)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryBuild)
.join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
.join(BuildTriggerService, JOIN_LEFT_OUTER)
.order_by(RepositoryBuild.started.desc()))
def get_repository_build(build_uuid):
try:
return _get_build_base_query().where(RepositoryBuild.uuid == build_uuid).get()
except RepositoryBuild.DoesNotExist:
msg = 'Unable to locate a build by id: %s' % build_uuid
raise InvalidRepositoryBuildException(msg)
def list_repository_builds(namespace_name, repository_name, limit,
include_inactive=True, since=None):
query = (_get_build_base_query()
.where(Repository.name == repository_name, Namespace.username == namespace_name)
.limit(limit))
if since is not None:
query = query.where(RepositoryBuild.started >= since)
if not include_inactive:
query = query.where(RepositoryBuild.phase != 'error',
RepositoryBuild.phase != 'complete')
return query
def get_recent_repository_build(namespace_name, repository_name):
query = list_repository_builds(namespace_name, repository_name, 1)
try:
return query.get()
except RepositoryBuild.DoesNotExist:
return None
def create_repository_build(repo, access_token, job_config_obj, dockerfile_id,
display_name, trigger=None, pull_robot_name=None):
pull_robot = None
if pull_robot_name:
pull_robot = lookup_robot(pull_robot_name)
return RepositoryBuild.create(repository=repo, access_token=access_token,
job_config=json.dumps(job_config_obj),
display_name=display_name, trigger=trigger,
resource_key=dockerfile_id,
pull_robot=pull_robot)
def get_pull_robot_name(trigger):
if not trigger.pull_robot:
return None
return trigger.pull_robot.username
def get_pull_credentials(robotname):
robot = lookup_robot(robotname)
if not robot:
return None
try:
login_info = FederatedLogin.get(user=robot)
except FederatedLogin.DoesNotExist:
return None
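  # Presumably the robot's token is stored as the FederatedLogin service_ident, so it is
  # returned as the registry password below.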
return {
'username': robot.username,
'password': login_info.service_ident,
'registry': '%s://%s/v1/' % (config.app_config['PREFERRED_URL_SCHEME'],
config.app_config['SERVER_HOSTNAME']),
}
def create_repo_notification(repo, event_name, method_name, config):
event = ExternalNotificationEvent.get(ExternalNotificationEvent.name == event_name)
method = ExternalNotificationMethod.get(ExternalNotificationMethod.name == method_name)
return RepositoryNotification.create(repository=repo, event=event, method=method,
config_json=json.dumps(config))
def get_repo_notification(uuid):
try:
return (RepositoryNotification
.select(RepositoryNotification, Repository, Namespace)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(RepositoryNotification.uuid == uuid)
.get())
except RepositoryNotification.DoesNotExist:
raise InvalidNotificationException('No repository notification found with id: %s' % uuid)
def delete_repo_notification(namespace_name, repository_name, uuid):
found = get_repo_notification(uuid)
if (found.repository.namespace_user.username != namespace_name or
found.repository.name != repository_name):
    raise InvalidNotificationException('No repository notification found with id: %s' % uuid)
found.delete_instance()
return found
def list_repo_notifications(namespace_name, repository_name, event_name=None):
query = (RepositoryNotification
.select(RepositoryNotification, Repository, Namespace)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name))
if event_name:
query = (query
.switch(RepositoryNotification)
.join(ExternalNotificationEvent)
.where(ExternalNotificationEvent.name == event_name))
return query
def list_logs(start_time, end_time, performer=None, repository=None, namespace=None):
Performer = User.alias()
joined = (LogEntry.select(LogEntry, LogEntryKind, User, Performer)
.join(User)
.switch(LogEntry)
.join(Performer, JOIN_LEFT_OUTER,
on=(LogEntry.performer == Performer.id).alias('performer'))
.switch(LogEntry)
.join(LogEntryKind)
.switch(LogEntry))
if repository:
joined = joined.where(LogEntry.repository == repository)
if performer:
joined = joined.where(LogEntry.performer == performer)
if namespace:
joined = joined.where(User.username == namespace)
return list(joined.where(
LogEntry.datetime >= start_time,
LogEntry.datetime < end_time).order_by(LogEntry.datetime.desc()))
def log_action(kind_name, user_or_organization_name, performer=None,
repository=None, access_token=None, ip=None, metadata={},
timestamp=None):
if not timestamp:
timestamp = datetime.today()
kind = LogEntryKind.get(LogEntryKind.name == kind_name)
account = User.get(User.username == user_or_organization_name)
LogEntry.create(kind=kind, account=account, performer=performer,
repository=repository, ip=ip, metadata_json=json.dumps(metadata),
datetime=timestamp)
def create_build_trigger(repo, service_name, auth_token, user, pull_robot=None):
service = BuildTriggerService.get(name=service_name)
trigger = RepositoryBuildTrigger.create(repository=repo, service=service,
auth_token=auth_token,
connected_user=user,
pull_robot=pull_robot)
return trigger
def get_build_trigger(trigger_uuid):
try:
return (RepositoryBuildTrigger
.select(RepositoryBuildTrigger, BuildTriggerService, Repository, Namespace)
.join(BuildTriggerService)
.switch(RepositoryBuildTrigger)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryBuildTrigger)
.join(User)
.where(RepositoryBuildTrigger.uuid == trigger_uuid)
.get())
except RepositoryBuildTrigger.DoesNotExist:
msg = 'No build trigger with uuid: %s' % trigger_uuid
raise InvalidBuildTriggerException(msg)
def list_build_triggers(namespace_name, repository_name):
return (RepositoryBuildTrigger
.select(RepositoryBuildTrigger, BuildTriggerService, Repository)
.join(BuildTriggerService)
.switch(RepositoryBuildTrigger)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name))
def list_trigger_builds(namespace_name, repository_name, trigger_uuid,
limit):
return (list_repository_builds(namespace_name, repository_name, limit)
.where(RepositoryBuildTrigger.uuid == trigger_uuid))
def create_notification(kind_name, target, metadata={}):
kind_ref = NotificationKind.get(name=kind_name)
notification = Notification.create(kind=kind_ref, target=target,
metadata_json=json.dumps(metadata))
return notification
def create_unique_notification(kind_name, target, metadata={}):
with config.app_config['DB_TRANSACTION_FACTORY'](db):
if list_notifications(target, kind_name, limit=1).count() == 0:
create_notification(kind_name, target, metadata)
def lookup_notification(user, uuid):
results = list(list_notifications(user, id_filter=uuid, include_dismissed=True, limit=1))
if not results:
return None
return results[0]
def list_notifications(user, kind_name=None, id_filter=None, include_dismissed=False,
page=None, limit=None):
Org = User.alias()
AdminTeam = Team.alias()
AdminTeamMember = TeamMember.alias()
AdminUser = User.alias()
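  # A notification targeted at an organization is also visible to members of that org's
  # teams whose role is 'admin'; the aliases above walk org -> admin team -> member -> user.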
query = (Notification.select()
.join(User)
.switch(Notification)
.join(Org, JOIN_LEFT_OUTER, on=(Org.id == Notification.target))
.join(AdminTeam, JOIN_LEFT_OUTER, on=(Org.id ==
AdminTeam.organization))
.join(TeamRole, JOIN_LEFT_OUTER, on=(AdminTeam.role == TeamRole.id))
.switch(AdminTeam)
.join(AdminTeamMember, JOIN_LEFT_OUTER, on=(AdminTeam.id ==
AdminTeamMember.team))
.join(AdminUser, JOIN_LEFT_OUTER, on=(AdminTeamMember.user ==
AdminUser.id))
.where((Notification.target == user) |
((AdminUser.id == user) & (TeamRole.name == 'admin')))
           .order_by(Notification.created.desc()))
if not include_dismissed:
query = query.switch(Notification).where(Notification.dismissed == False)
if kind_name:
query = (query
.switch(Notification)
.join(NotificationKind)
.where(NotificationKind.name == kind_name))
if id_filter:
query = (query
.switch(Notification)
.where(Notification.uuid == id_filter))
if page:
query = query.paginate(page, limit)
elif limit:
query = query.limit(limit)
return query
def delete_all_notifications_by_kind(kind_name):
kind_ref = NotificationKind.get(name=kind_name)
(Notification.delete()
.where(Notification.kind == kind_ref)
.execute())
def delete_notifications_by_kind(target, kind_name):
kind_ref = NotificationKind.get(name=kind_name)
Notification.delete().where(Notification.target == target,
Notification.kind == kind_ref).execute()
def delete_matching_notifications(target, kind_name, **kwargs):
kind_ref = NotificationKind.get(name=kind_name)
# Load all notifications for the user with the given kind.
notifications = Notification.select().where(
Notification.target == target,
Notification.kind == kind_ref)
# For each, match the metadata to the specified values.
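  # Matching happens in Python because the metadata is stored as an opaque JSON string
  # (metadata_json) rather than as queryable columns.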
for notification in notifications:
matches = True
try:
metadata = json.loads(notification.metadata_json)
    except ValueError:
continue
for (key, value) in kwargs.iteritems():
      if key not in metadata or metadata[key] != value:
matches = False
break
if not matches:
continue
notification.delete_instance()
def get_active_users():
return User.select().where(User.organization == False, User.robot == False)
def get_active_user_count():
return get_active_users().count()
def detach_external_login(user, service_name):
try:
    service = LoginService.get(name=service_name)
except LoginService.DoesNotExist:
return
FederatedLogin.delete().where(FederatedLogin.user == user,
FederatedLogin.service == service).execute()
def delete_user(user):
user.delete_instance(recursive=True, delete_nullable=True)
# TODO: also delete any repository data associated
def check_health(app_config):
  # Attempt to connect to the database first. If the DB is not responding,
  # validate_database_url will time out quickly, whereas a normal connect
  # would simply hang (and thus break the health check).
try:
validate_database_url(app_config['DB_URI'], connect_timeout=3)
except Exception:
logger.exception('Could not connect to the database')
return False
  # We can connect to the db; check that it contains at least one log entry kind.
try:
return bool(list(LogEntryKind.select().limit(1)))
  except Exception:
return False
def get_email_authorized_for_repo(namespace, repository, email):
try:
return (RepositoryAuthorizedEmail
.select(RepositoryAuthorizedEmail, Repository, Namespace)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace, Repository.name == repository,
RepositoryAuthorizedEmail.email == email)
.get())
except RepositoryAuthorizedEmail.DoesNotExist:
return None
def create_email_authorization_for_repo(namespace_name, repository_name, email):
try:
repo = _get_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
raise DataModelException('Invalid repository %s/%s' %
(namespace_name, repository_name))
return RepositoryAuthorizedEmail.create(repository=repo, email=email, confirmed=False)
def confirm_email_authorization_for_repo(code):
try:
found = (RepositoryAuthorizedEmail
.select(RepositoryAuthorizedEmail, Repository, Namespace)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(RepositoryAuthorizedEmail.code == code)
.get())
except RepositoryAuthorizedEmail.DoesNotExist:
raise DataModelException('Invalid confirmation code.')
found.confirmed = True
found.save()
return found
def delete_team_email_invite(team, email):
found = TeamMemberInvite.get(TeamMemberInvite.email == email, TeamMemberInvite.team == team)
found.delete_instance()
def delete_team_user_invite(team, user):
try:
found = TeamMemberInvite.get(TeamMemberInvite.user == user, TeamMemberInvite.team == team)
except TeamMemberInvite.DoesNotExist:
return False
found.delete_instance()
return True
def lookup_team_invites(user):
return TeamMemberInvite.select().where(TeamMemberInvite.user == user)
def lookup_team_invite(code, user=None):
# Lookup the invite code.
try:
found = TeamMemberInvite.get(TeamMemberInvite.invite_token == code)
except TeamMemberInvite.DoesNotExist:
raise DataModelException('Invalid confirmation code.')
if user and found.user != user:
raise DataModelException('Invalid confirmation code.')
return found
def delete_team_invite(code, user=None):
found = lookup_team_invite(code, user)
team = found.team
inviter = found.inviter
found.delete_instance()
return (team, inviter)
def confirm_team_invite(code, user):
found = lookup_team_invite(code)
# If the invite is for a specific user, we have to confirm that here.
if found.user is not None and found.user != user:
message = """This invite is intended for user "%s".
Please login to that account and try again.""" % found.user.username
raise DataModelException(message)
# Add the user to the team.
try:
add_user_to_team(user, found.team)
except UserAlreadyInTeam:
# Ignore.
pass
# Delete the invite and return the team.
team = found.team
inviter = found.inviter
found.delete_instance()
return (team, inviter)
def cancel_repository_build(build, work_queue):
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Reload the build for update.
try:
build = db_for_update(RepositoryBuild.select().where(RepositoryBuild.id == build.id)).get()
except RepositoryBuild.DoesNotExist:
return False
if build.phase != BUILD_PHASE.WAITING or not build.queue_id:
return False
# Try to cancel the queue item.
if not work_queue.cancel(build.queue_id):
return False
# Delete the build row.
build.delete_instance()
return True
def get_repository_pushes(repository, time_delta):
since = date.today() - time_delta
  push_repo = LogEntryKind.get(name='push_repo')
return (LogEntry.select()
.where(LogEntry.repository == repository)
.where(LogEntry.kind == push_repo)
.where(LogEntry.datetime >= since)
.count())
def get_repository_pulls(repository, time_delta):
since = date.today() - time_delta
  repo_pull = LogEntryKind.get(name='pull_repo')
  repo_verb = LogEntryKind.get(name='repo_verb')
return (LogEntry.select()
.where(LogEntry.repository == repository)
.where((LogEntry.kind == repo_pull) | (LogEntry.kind == repo_verb))
.where(LogEntry.datetime >= since)
.count())
def get_repository_usage():
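  # Intended to count distinct (ip, repository) pairs with pull activity over roughly the
  # last month, as a coarse measure of repository usage.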
one_month_ago = date.today() - timedelta(weeks=4)
  repo_pull = LogEntryKind.get(name='pull_repo')
  repo_verb = LogEntryKind.get(name='repo_verb')
return (LogEntry.select(LogEntry.ip, LogEntry.repository)
.where((LogEntry.kind == repo_pull) | (LogEntry.kind == repo_verb))
.where(~(LogEntry.repository >> None))
.where(LogEntry.datetime >= one_month_ago)
.group_by(LogEntry.ip, LogEntry.repository)
.count())
def archivable_buildlogs_query():
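  # Selects builds whose logs can be archived: finished builds (complete or error) and
  # builds presumed dead (started before PRESUMED_DEAD_BUILD_AGE ago), not yet archived.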
presumed_dead_date = datetime.utcnow() - PRESUMED_DEAD_BUILD_AGE
return (RepositoryBuild.select()
.where((RepositoryBuild.phase == BUILD_PHASE.COMPLETE) |
(RepositoryBuild.phase == BUILD_PHASE.ERROR) |
(RepositoryBuild.started < presumed_dead_date), RepositoryBuild.logs_archived == False))
def star_repository(user, repository):
""" Stars a repository. """
  Star.create(user=user.id, repository=repository.id)
def unstar_repository(user, repository):
""" Unstars a repository. """
  rows_deleted = (Star
                  .delete()
                  .where(Star.repository == repository.id, Star.user == user.id)
                  .execute())
  if rows_deleted == 0:
    raise DataModelException('Star not found.')
def get_user_starred_repositories(user, limit=None, page=None):
""" Retrieves all of the repositories a user has starred. """
query = (Repository
.select()
.join(Star)
.join(User)
.where(User.id == user.id)
.order_by(Star.created))
if page and limit:
query = query.paginate(page, limit)
elif limit:
query = query.limit(limit)
return query
def repository_is_starred(user, repository):
""" Determines whether a user has starred a repository or not. """
try:
(Star
.select()
.where(Star.repository == repository.id, Star.user == user.id)
.get())
return True
except Star.DoesNotExist:
return False