Merge branch 'master' into star
commit 917dd6b674
229 changed files with 10807 additions and 3003 deletions
@@ -280,6 +280,23 @@ require_user_read = require_user_permission(UserReadPermission, scopes.READ_USER
 require_user_admin = require_user_permission(UserAdminPermission, None)
+require_fresh_user_admin = require_user_permission(UserAdminPermission, None)
+
+
+def verify_not_prod(func):
+  @add_method_metadata('enterprise_only', True)
+  @wraps(func)
+  def wrapped(*args, **kwargs):
+    # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail.
+    # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be
+    # absolutely sure.
+    if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0:
+      logger.error('!!! Super user method called IN PRODUCTION !!!')
+      raise NotFound()
+
+    return func(*args, **kwargs)
+
+  return wrapped
+
+
 def require_fresh_login(func):
   @add_method_metadata('requires_fresh_login', True)
   @wraps(func)
@@ -317,7 +334,11 @@ def validate_json_request(schema_name):
     def wrapped(self, *args, **kwargs):
       schema = self.schemas[schema_name]
       try:
-        validate(request.get_json(), schema)
+        json_data = request.get_json()
+        if json_data is None:
+          raise InvalidRequest('Missing JSON body')
+
+        validate(json_data, schema)
         return func(self, *args, **kwargs)
       except ValidationError as ex:
         raise InvalidRequest(ex.message)
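The reworked decorator above rejects a request with no JSON body before handing the payload to jsonschema. A minimal standalone sketch of the same guard-then-validate pattern (assuming only the jsonschema package; the schema and payloads here are hypothetical):

from jsonschema import validate, ValidationError

schema = {'type': 'object', 'required': ['name']}

def check(json_data):
  # Reject a missing body first; jsonschema would otherwise accept None
  # or fail with a less useful error.
  if json_data is None:
    raise ValueError('Missing JSON body')
  try:
    validate(json_data, schema)
  except ValidationError as ex:
    raise ValueError(ex.message)

check({'name': 'quay'})   # passes
try:
  check(None)
except ValueError as err:
  print(err)              # 'Missing JSON body', before jsonschema ever runs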
@@ -385,8 +406,10 @@ import endpoints.api.repoemail
 import endpoints.api.repotoken
 import endpoints.api.robot
 import endpoints.api.search
+import endpoints.api.suconfig
 import endpoints.api.superuser
 import endpoints.api.tag
 import endpoints.api.team
 import endpoints.api.trigger
 import endpoints.api.user

@@ -9,7 +9,7 @@ from app import app, userfiles as user_files, build_logs, log_archive
 from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
                            require_repo_read, require_repo_write, validate_json_request,
                            ApiResource, internal_only, format_date, api, Unauthorized, NotFound,
-                           path_param)
+                           path_param, InvalidRequest, require_repo_admin)
 from endpoints.common import start_build
 from endpoints.trigger import BuildTrigger
 from data import model, database
@@ -70,10 +70,17 @@ def build_status_view(build_obj, can_write=False):
 
   # If the status contains a heartbeat, then check to see if has been written in the last few
   # minutes. If not, then the build timed out.
-  if status is not None and 'heartbeat' in status and status['heartbeat']:
-    heartbeat = datetime.datetime.fromtimestamp(status['heartbeat'])
-    if datetime.datetime.now() - heartbeat > datetime.timedelta(minutes=1):
-      phase = database.BUILD_PHASE.INTERNAL_ERROR
+  if phase != database.BUILD_PHASE.COMPLETE and phase != database.BUILD_PHASE.ERROR:
+    if status is not None and 'heartbeat' in status and status['heartbeat']:
+      heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
+      if datetime.datetime.utcnow() - heartbeat > datetime.timedelta(minutes=1):
+        phase = database.BUILD_PHASE.INTERNAL_ERROR
+
+  # If the phase is internal error, return 'error' instead of the number if retries
+  # on the queue item is 0.
+  if phase == database.BUILD_PHASE.INTERNAL_ERROR:
+    if build_obj.queue_item is None or build_obj.queue_item.retries_remaining == 0:
+      phase = database.BUILD_PHASE.ERROR
 
   logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config)
   resp = {
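The change above only marks a build as errored when its phase is non-terminal and the worker's heartbeat (stored as a UTC epoch timestamp) is more than a minute old; comparing against utcnow() rather than now() is the bug being fixed. A self-contained sketch of the staleness test (standard library only; the sample timestamps are hypothetical):

import datetime
import time

def heartbeat_is_stale(heartbeat_epoch, max_age=datetime.timedelta(minutes=1)):
  # The heartbeat is a UTC epoch value, so it must be decoded with
  # utcfromtimestamp() and compared against utcnow().
  heartbeat = datetime.datetime.utcfromtimestamp(heartbeat_epoch)
  return datetime.datetime.utcnow() - heartbeat > max_age

print(heartbeat_is_stale(time.time()))        # False: heartbeat just written
print(heartbeat_is_stale(time.time() - 120))  # True: two minutes old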
@@ -86,7 +93,7 @@ def build_status_view(build_obj, can_write=False):
     'is_writer': can_write,
     'trigger': trigger_view(build_obj.trigger),
     'resource_key': build_obj.resource_key,
-    'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None,
+    'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None
   }
 
   if can_write:
@@ -200,6 +207,31 @@ class RepositoryBuildList(RepositoryParamResource):
     return resp, 201, headers
 
 
+@resource('/v1/repository/<repopath:repository>/build/<build_uuid>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+class RepositoryBuildResource(RepositoryParamResource):
+  """ Resource for dealing with repository builds. """
+  @require_repo_admin
+  @nickname('cancelRepoBuild')
+  def delete(self, namespace, repository, build_uuid):
+    """ Cancels a repository build if it has not yet been picked up by a build worker. """
+    try:
+      build = model.get_repository_build(build_uuid)
+    except model.InvalidRepositoryBuildException:
+      raise NotFound()
+
+    if build.repository.name != repository or build.repository.namespace_user.username != namespace:
+      raise NotFound()
+
+    if model.cancel_repository_build(build):
+      return 'Okay', 201
+    else:
+      raise InvalidRequest('Build is currently running or has finished')
+
+
 @resource('/v1/repository/<repopath:repository>/build/<build_uuid>/status')
 @path_param('repository', 'The full path of the repository. e.g. namespace/name')
 @path_param('build_uuid', 'The UUID of the build')
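Once merged, the new cancel endpoint can be exercised with a plain DELETE; a sketch using the requests library (the hostname, token, and build UUID are placeholders):

import requests

resp = requests.delete(
    'https://quay.example.com/api/v1/repository/myorg/myrepo/build/<build-uuid>',
    headers={'Authorization': 'Bearer <token>'})

if resp.status_code == 201:
  print('Build cancelled')
else:
  # The endpoint raises InvalidRequest when the build is already
  # running or finished.
  print('Cancel refused: %s' % resp.status_code)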
@@ -116,6 +116,11 @@ class Organization(ApiResource):
           'type': 'boolean',
           'description': 'Whether the organization desires to receive emails for invoices',
         },
+        'tag_expiration': {
+          'type': 'integer',
+          'maximum': 2592000,
+          'minimum': 0,
+        },
       },
     },
   }
@@ -161,6 +166,10 @@ class Organization(ApiResource):
         logger.debug('Changing email address for organization: %s', org.username)
         model.update_email(org, new_email)
 
+      if 'tag_expiration' in org_data:
+        logger.debug('Changing organization tag expiration to: %ss', org_data['tag_expiration'])
+        model.change_user_tag_expiration(org, org_data['tag_expiration'])
+
       teams = model.get_teams_within_org(org)
       return org_view(org, teams)
     raise Unauthorized()

endpoints/api/suconfig.py (new file, +362)
@@ -0,0 +1,362 @@
+import logging
+import os
+import json
+import signal
+
+from flask import abort, Response
+from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if,
+                           require_fresh_login, request, validate_json_request, verify_not_prod)
+
+from endpoints.common import common_login
+from app import app, CONFIG_PROVIDER, superusers
+from data import model
+from data.database import configure
+from auth.permissions import SuperUserPermission
+from auth.auth_context import get_authenticated_user
+from data.database import User
+from util.config.configutil import add_enterprise_config_defaults
+from util.config.validator import validate_service_for_config, SSL_FILENAMES
+from data.runmigration import run_alembic_migration
+
+import features
+
+logger = logging.getLogger(__name__)
+
+def database_is_valid():
+  """ Returns whether the database, as configured, is valid. """
+  if app.config['TESTING']:
+    return False
+
+  try:
+    list(User.select().limit(1))
+    return True
+  except:
+    return False
+
+
+def database_has_users():
+  """ Returns whether the database has any users defined. """
+  return bool(list(User.select().limit(1)))
+
+
+@resource('/v1/superuser/registrystatus')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserRegistryStatus(ApiResource):
+  """ Resource for determining the status of the registry, such as if config exists,
+      if a database is configured, and if it has any defined users.
+  """
+  @nickname('scRegistryStatus')
+  @verify_not_prod
+  def get(self):
+    """ Returns the status of the registry. """
+
+    # If there is no conf/stack volume, then report that status.
+    if not CONFIG_PROVIDER.volume_exists():
+      return {
+        'status': 'missing-config-dir'
+      }
+
+    # If there is no config file, we need to setup the database.
+    if not CONFIG_PROVIDER.yaml_exists():
+      return {
+        'status': 'config-db'
+      }
+
+    # If the database isn't yet valid, then we need to set it up.
+    if not database_is_valid():
+      return {
+        'status': 'setup-db'
+      }
+
+    # If we have SETUP_COMPLETE, then we're ready to go!
+    if app.config.get('SETUP_COMPLETE', False):
+      return {
+        'requires_restart': CONFIG_PROVIDER.requires_restart(app.config),
+        'status': 'ready'
+      }
+
+    return {
+      'status': 'create-superuser' if not database_has_users() else 'config'
+    }
+
+
+class _AlembicLogHandler(logging.Handler):
+  def __init__(self):
+    super(_AlembicLogHandler, self).__init__()
+    self.records = []
+
+  def emit(self, record):
+    self.records.append({
+      'level': record.levelname,
+      'message': record.getMessage()
+    })
+
+@resource('/v1/superuser/setupdb')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSetupDatabase(ApiResource):
+  """ Resource for invoking alembic to setup the database. """
+  @verify_not_prod
+  @nickname('scSetupDatabase')
+  def get(self):
+    """ Invokes the alembic upgrade process. """
+    # Note: This method is called after the database configured is saved, but before the
+    # database has any tables. Therefore, we only allow it to be run in that unique case.
+    if CONFIG_PROVIDER.yaml_exists() and not database_is_valid():
+      # Note: We need to reconfigure the database here as the config has changed.
+      combined = dict(**app.config)
+      combined.update(CONFIG_PROVIDER.get_yaml())
+
+      configure(combined)
+      app.config['DB_URI'] = combined['DB_URI']
+
+      log_handler = _AlembicLogHandler()
+
+      try:
+        run_alembic_migration(log_handler)
+      except Exception as ex:
+        return {
+          'error': str(ex)
+        }
+
+      return {
+        'logs': log_handler.records
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/shutdown')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserShutdown(ApiResource):
+  """ Resource for sending a shutdown signal to the container. """
+
+  @verify_not_prod
+  @nickname('scShutdownContainer')
+  def post(self):
+    """ Sends a signal to the phusion init system to shut down the container. """
+    # Note: This method is called to set the database configuration before super users exists,
+    # so we also allow it to be called if there is no valid registry configuration setup.
+    if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can():
+      # Note: We skip if debugging locally.
+      if app.config.get('DEBUGGING') == True:
+        return {}
+
+      os.kill(1, signal.SIGINT)
+      return {}
+
+    abort(403)
+
+
+@resource('/v1/superuser/config')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfig(ApiResource):
+  """ Resource for fetching and updating the current configuration, if any. """
+  schemas = {
+    'UpdateConfig': {
+      'id': 'UpdateConfig',
+      'type': 'object',
+      'description': 'Updates the YAML config file',
+      'required': [
+        'config',
+        'hostname'
+      ],
+      'properties': {
+        'config': {
+          'type': 'object'
+        },
+        'hostname': {
+          'type': 'string'
+        }
+      },
+    },
+  }
+
+  @require_fresh_login
+  @verify_not_prod
+  @nickname('scGetConfig')
+  def get(self):
+    """ Returns the currently defined configuration, if any. """
+    if SuperUserPermission().can():
+      config_object = CONFIG_PROVIDER.get_yaml()
+      return {
+        'config': config_object
+      }
+
+    abort(403)
+
+  @nickname('scUpdateConfig')
+  @verify_not_prod
+  @validate_json_request('UpdateConfig')
+  def put(self):
+    """ Updates the config.yaml file. """
+    # Note: This method is called to set the database configuration before super users exists,
+    # so we also allow it to be called if there is no valid registry configuration setup.
+    if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can():
+      config_object = request.get_json()['config']
+      hostname = request.get_json()['hostname']
+
+      # Add any enterprise defaults missing from the config.
+      add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname)
+
+      # Write the configuration changes to the YAML file.
+      CONFIG_PROVIDER.save_yaml(config_object)
+
+      return {
+        'exists': True,
+        'config': config_object
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/config/file/<filename>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfigFile(ApiResource):
+  """ Resource for fetching the status of config files and overriding them. """
+  @nickname('scConfigFileExists')
+  @verify_not_prod
+  def get(self, filename):
+    """ Returns whether the configuration file with the given name exists. """
+    if not filename in SSL_FILENAMES:
+      abort(404)
+
+    if SuperUserPermission().can():
+      return {
+        'exists': CONFIG_PROVIDER.volume_file_exists(filename)
+      }
+
+    abort(403)
+
+  @nickname('scUpdateConfigFile')
+  @verify_not_prod
+  def post(self, filename):
+    """ Updates the configuration file with the given name. """
+    if not filename in SSL_FILENAMES:
+      abort(404)
+
+    if SuperUserPermission().can():
+      uploaded_file = request.files['file']
+      if not uploaded_file:
+        abort(400)
+
+      CONFIG_PROVIDER.save_volume_file(filename, uploaded_file)
+      return {
+        'status': True
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/config/createsuperuser')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserCreateInitialSuperUser(ApiResource):
+  """ Resource for creating the initial super user. """
+  schemas = {
+    'CreateSuperUser': {
+      'id': 'CreateSuperUser',
+      'type': 'object',
+      'description': 'Information for creating the initial super user',
+      'required': [
+        'username',
+        'password',
+        'email'
+      ],
+      'properties': {
+        'username': {
+          'type': 'string',
+          'description': 'The username for the superuser'
+        },
+        'password': {
+          'type': 'string',
+          'description': 'The password for the superuser'
+        },
+        'email': {
+          'type': 'string',
+          'description': 'The e-mail address for the superuser'
+        },
+      },
+    },
+  }
+
+  @nickname('scCreateInitialSuperuser')
+  @verify_not_prod
+  @validate_json_request('CreateSuperUser')
+  def post(self):
+    """ Creates the initial super user, updates the underlying configuration and
+        sets the current session to have that super user. """
+
+    # Special security check: This method is only accessible when:
+    #   - There is a valid config YAML file.
+    #   - There are currently no users in the database (clean install)
+    #
+    # We do this special security check because at the point this method is called, the database
+    # is clean but does not (yet) have any super users for our permissions code to check against.
+    if CONFIG_PROVIDER.yaml_exists() and not database_has_users():
+      data = request.get_json()
+      username = data['username']
+      password = data['password']
+      email = data['email']
+
+      # Create the user in the database.
+      superuser = model.create_user(username, password, email, auto_verify=True)
+
+      # Add the user to the config.
+      config_object = CONFIG_PROVIDER.get_yaml()
+      config_object['SUPER_USERS'] = [username]
+      CONFIG_PROVIDER.save_yaml(config_object)
+
+      # Update the in-memory config for the new superuser.
+      superusers.register_superuser(username)
+
+      # Conduct login with that user.
+      common_login(superuser)
+
+      return {
+        'status': True
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/config/validate/<service>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfigValidate(ApiResource):
+  """ Resource for validating a block of configuration against an external service. """
+  schemas = {
+    'ValidateConfig': {
+      'id': 'ValidateConfig',
+      'type': 'object',
+      'description': 'Validates configuration',
+      'required': [
+        'config'
+      ],
+      'properties': {
+        'config': {
+          'type': 'object'
+        }
+      },
+    },
+  }
+
+  @nickname('scValidateConfig')
+  @verify_not_prod
+  @validate_json_request('ValidateConfig')
+  def post(self, service):
+    """ Validates the given config for the given service. """
+    # Note: This method is called to validate the database configuration before super users exists,
+    # so we also allow it to be called if there is no valid registry configuration setup. Note that
+    # this is also safe since this method does not access any information not given in the request.
+    if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can():
+      config = request.get_json()['config']
+      return validate_service_for_config(service, config)
+
+    abort(403)

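The registry status endpoint in the new file effectively walks a first-boot state machine: missing-config-dir, config-db, setup-db, create-superuser, config, and finally ready. A sketch of a setup client polling it (the base URL is a placeholder, and this is not part of the commit):

import time
import requests

def wait_until_ready(base_url):
  while True:
    # Each poll reports the next setup step still outstanding.
    status = requests.get(base_url + '/api/v1/superuser/registrystatus').json()['status']
    print('registry status: %s' % status)
    if status == 'ready':
      return
    time.sleep(2)

wait_until_ready('http://localhost:8080')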
@@ -1,15 +1,16 @@
 import string
 import logging
 import json
 import os
 
 from random import SystemRandom
-from app import app
+from app import app, avatar, superusers
 from flask import request
 
 from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error,
                            log_action, internal_only, NotFound, require_user_admin, format_date,
                            InvalidToken, require_scope, format_date, hide_if, show_if, parse_args,
-                           query_param, abort, require_fresh_login, path_param)
+                           query_param, abort, require_fresh_login, path_param, verify_not_prod)
 
 from endpoints.api.logs import get_logs
+
@@ -22,18 +23,76 @@ import features
 
 logger = logging.getLogger(__name__)
 
+def get_immediate_subdirectories(directory):
+  return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]
+
+def get_services():
+  services = set(get_immediate_subdirectories(app.config['SYSTEM_SERVICES_PATH']))
+  services = services - set(app.config['SYSTEM_SERVICE_BLACKLIST'])
+  return services
+
+
+@resource('/v1/superuser/systemlogs/<service>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserGetLogsForService(ApiResource):
+  """ Resource for fetching the kinds of system logs in the system. """
+  @require_fresh_login
+  @verify_not_prod
+  @nickname('getSystemLogs')
+  def get(self, service):
+    """ Returns the logs for the specific service. """
+    if SuperUserPermission().can():
+      if not service in get_services():
+        abort(404)
+
+      try:
+        with open(app.config['SYSTEM_SERVICE_LOGS_PATH'] % service, 'r') as f:
+          logs = f.read()
+      except Exception as ex:
+        logger.exception('Cannot read logs')
+        abort(400)
+
+      return {
+        'logs': logs
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/systemlogs/')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSystemLogServices(ApiResource):
+  """ Resource for fetching the kinds of system logs in the system. """
+  @require_fresh_login
+  @verify_not_prod
+  @nickname('listSystemLogServices')
+  def get(self):
+    """ List the system logs for the current system. """
+    if SuperUserPermission().can():
+      return {
+        'services': list(get_services())
+      }
+
+    abort(403)
+
+
+
 @resource('/v1/superuser/logs')
 @internal_only
 @show_if(features.SUPER_USERS)
 class SuperUserLogs(ApiResource):
   """ Resource for fetching all logs in the system. """
+  @require_fresh_login
+  @verify_not_prod
   @nickname('listAllLogs')
   @parse_args
   @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
   @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
   @query_param('performer', 'Username for which to filter logs.', type=str)
   def get(self, args):
-    """ List the logs for the current system. """
+    """ List the usage logs for the current system. """
     if SuperUserPermission().can():
       performer_name = args['performer']
       start_time = args['starttime']
@@ -49,7 +108,8 @@ def user_view(user):
     'username': user.username,
     'email': user.email,
     'verified': user.verified,
-    'super_user': user.username in app.config['SUPER_USERS']
+    'avatar': avatar.compute_hash(user.email, name=user.username),
+    'super_user': superusers.is_superuser(user.username)
   }
 
 @resource('/v1/superuser/usage/')
@@ -58,6 +118,7 @@ def user_view(user):
 class UsageInformation(ApiResource):
   """ Resource for returning the usage information for enterprise customers. """
   @require_fresh_login
+  @verify_not_prod
   @nickname('getSystemUsage')
   def get(self):
     """ Returns the number of repository handles currently held. """
@@ -96,6 +157,7 @@ class SuperUserList(ApiResource):
   }
 
   @require_fresh_login
+  @verify_not_prod
   @nickname('listAllUsers')
   def get(self):
     """ Returns a list of all users in the system. """
@@ -109,6 +171,7 @@ class SuperUserList(ApiResource):
 
 
   @require_fresh_login
+  @verify_not_prod
   @nickname('createInstallUser')
   @validate_json_request('CreateInstallUser')
   def post(self):
@@ -146,6 +209,7 @@ class SuperUserList(ApiResource):
 class SuperUserSendRecoveryEmail(ApiResource):
   """ Resource for sending a recovery user on behalf of a user. """
   @require_fresh_login
+  @verify_not_prod
   @nickname('sendInstallUserRecoveryEmail')
   def post(self, username):
     if SuperUserPermission().can():
@@ -153,7 +217,7 @@ class SuperUserSendRecoveryEmail(ApiResource):
       if not user or user.organization or user.robot:
         abort(404)
 
-      if username in app.config['SUPER_USERS']:
+      if superusers.is_superuser(username):
         abort(403)
 
       code = model.create_reset_password_email_code(user.email)
@@ -190,6 +254,7 @@ class SuperUserManagement(ApiResource):
   }
 
   @require_fresh_login
+  @verify_not_prod
   @nickname('getInstallUser')
   def get(self, username):
     """ Returns information about the specified user. """
@@ -203,6 +268,7 @@ class SuperUserManagement(ApiResource):
     abort(403)
 
   @require_fresh_login
+  @verify_not_prod
   @nickname('deleteInstallUser')
   def delete(self, username):
     """ Deletes the specified user. """
@@ -211,7 +277,7 @@ class SuperUserManagement(ApiResource):
       if not user or user.organization or user.robot:
         abort(404)
 
-      if username in app.config['SUPER_USERS']:
+      if superusers.is_superuser(username):
         abort(403)
 
       model.delete_user(user)
@@ -220,6 +286,7 @@ class SuperUserManagement(ApiResource):
     abort(403)
 
   @require_fresh_login
+  @verify_not_prod
   @nickname('changeInstallUser')
   @validate_json_request('UpdateUser')
   def put(self, username):
@@ -229,7 +296,7 @@ class SuperUserManagement(ApiResource):
       if not user or user.organization or user.robot:
         abort(404)
 
-      if username in app.config['SUPER_USERS']:
+      if superusers.is_superuser(username):
         abort(403)
 
       user_data = request.get_json()

@@ -54,8 +54,8 @@ class RepositoryTag(RepositoryParamResource):
 
     username = get_authenticated_user().username
     log_action('move_tag' if original_image_id else 'create_tag', namespace,
-               { 'username': username, 'repo': repository, 'tag': tag,
-                 'image': image_id, 'original_image': original_image_id },
+               {'username': username, 'repo': repository, 'tag': tag,
+                'image': image_id, 'original_image': original_image_id},
                repo=model.get_repository(namespace, repository))
 
     return 'Updated', 201

@@ -415,13 +415,13 @@ class ActivateBuildTrigger(RepositoryParamResource):
       try:
         run_parameters = request.get_json()
         specs = handler.manual_start(trigger.auth_token, config_dict, run_parameters=run_parameters)
-        dockerfile_id, tags, name, subdir = specs
+        dockerfile_id, tags, name, subdir, metadata = specs
 
         repo = model.get_repository(namespace, repository)
         pull_robot_name = model.get_pull_robot_name(trigger)
 
         build_request = start_build(repo, dockerfile_id, tags, name, subdir, True,
-                                    pull_robot_name=pull_robot_name)
+                                    pull_robot_name=pull_robot_name, trigger_metadata=metadata)
       except TriggerStartException as tse:
         raise InvalidRequest(tse.message)
 

@@ -73,6 +73,7 @@ def user_view(user):
       'can_create_repo': True,
       'invoice_email': user.invoice_email,
       'preferred_namespace': not (user.stripe_id is None),
+      'tag_expiration': user.removed_tag_expiration_s,
     })
 
   if features.SUPER_USERS:
@@ -144,6 +145,11 @@ class User(ApiResource):
         'type': 'string',
         'description': 'The user\'s email address',
       },
+      'tag_expiration': {
+        'type': 'integer',
+        'maximum': 2592000,
+        'minimum': 0,
+      },
       'username': {
         'type': 'string',
         'description': 'The user\'s username',
@@ -227,6 +233,10 @@ class User(ApiResource):
         logger.debug('Changing invoice_email for user: %s', user.username)
         model.change_invoice_email(user, user_data['invoice_email'])
 
+      if 'tag_expiration' in user_data:
+        logger.debug('Changing user tag expiration to: %ss', user_data['tag_expiration'])
+        model.change_user_tag_expiration(user, user_data['tag_expiration'])
+
       if 'email' in user_data and user_data['email'] != user.email:
         new_email = user_data['email']
         if model.find_user_by_email(new_email):
@@ -248,7 +258,8 @@ class User(ApiResource):
         # Username already used
         raise request_error(message='Username is already in use')
 
-      model.change_username(user, new_username)
+      model.change_username(user.id, new_username)
 
     except model.InvalidPasswordException, ex:
       raise request_error(exception=ex)

@@ -3,15 +3,19 @@ import urlparse
 import json
 import string
 import datetime
+import os
 
+# Register the various exceptions via decorators.
+import endpoints.decorated
+
 from flask import make_response, render_template, request, abort, session
-from flask.ext.login import login_user, UserMixin
+from flask.ext.login import login_user
 from flask.ext.principal import identity_changed
 from random import SystemRandom
 
 from data import model
 from data.database import db
-from app import app, login_manager, dockerfile_build_queue, notification_queue, oauth_apps
+from app import app, oauth_apps, dockerfile_build_queue, LoginWrappedDBUser
 
 from auth.permissions import QuayDeferredPermissionUser
 from auth import scopes
@@ -21,15 +25,30 @@ from functools import wraps
 from config import getFrontendVisibleConfig
 from external_libraries import get_external_javascript, get_external_css
 from endpoints.notificationhelper import spawn_notification
-from util.useremails import CannotSendEmailException
 
 import features
 
 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')
 
 route_data = None
 
+CACHE_BUSTERS_JSON = 'static/dist/cachebusters.json'
+CACHE_BUSTERS = None
+
+def get_cache_busters():
+  """ Retrieves the cache busters hashes. """
+  global CACHE_BUSTERS
+  if CACHE_BUSTERS is not None:
+    return CACHE_BUSTERS
+
+  if not os.path.exists(CACHE_BUSTERS_JSON):
+    return {}
+
+  with open(CACHE_BUSTERS_JSON, 'r') as f:
+    CACHE_BUSTERS = json.loads(f.read())
+    return CACHE_BUSTERS
+
+
 class RepoPathConverter(BaseConverter):
   regex = '[\.a-zA-Z0-9_\-]+/[\.a-zA-Z0-9_\-]+'
   weight = 200
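get_cache_busters() lazily loads a JSON map from static asset paths to content hashes produced at build time; add_cachebusters (added later in this file) falls back to a random string when an asset is missing from the map. The expected shape of cachebusters.json, expressed as a Python literal with hypothetical values:

# Hypothetical contents of static/dist/cachebusters.json as loaded by
# get_cache_busters(); the hashes are illustrative only.
CACHE_BUSTERS_EXAMPLE = {
  'dist/quay-frontend.min.js': 'a1b2c3d4',
  'dist/quay-frontend.css': 'e5f6a7b8',
}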
@@ -84,34 +103,8 @@ def param_required(param_name):
   return wrapper
 
 
-@login_manager.user_loader
-def load_user(user_uuid):
-  logger.debug('User loader loading deferred user with uuid: %s' % user_uuid)
-  return _LoginWrappedDBUser(user_uuid)
-
-
-class _LoginWrappedDBUser(UserMixin):
-  def __init__(self, user_uuid, db_user=None):
-    self._uuid = user_uuid
-    self._db_user = db_user
-
-  def db_user(self):
-    if not self._db_user:
-      self._db_user = model.get_user_by_uuid(self._uuid)
-    return self._db_user
-
-  def is_authenticated(self):
-    return self.db_user() is not None
-
-  def is_active(self):
-    return self.db_user().verified
-
-  def get_id(self):
-    return unicode(self._uuid)
-
-
 def common_login(db_user):
-  if login_user(_LoginWrappedDBUser(db_user.uuid, db_user)):
+  if login_user(LoginWrappedDBUser(db_user.uuid, db_user)):
     logger.debug('Successfully signed in as: %s (%s)' % (db_user.username, db_user.uuid))
     new_identity = QuayDeferredPermissionUser(db_user.uuid, 'user_uuid', {scopes.DIRECT_LOGIN})
     identity_changed.send(app, identity=new_identity)
@@ -121,17 +114,6 @@ def common_login(db_user):
     logger.debug('User could not be logged in, inactive?.')
     return False
 
-
-@app.errorhandler(model.DataModelException)
-def handle_dme(ex):
-  logger.exception(ex)
-  return make_response(json.dumps({'message': ex.message}), 400)
-
-@app.errorhandler(CannotSendEmailException)
-def handle_emailexception(ex):
-  message = 'Could not send email. Please contact an administrator and report this problem.'
-  return make_response(json.dumps({'message': message}), 400)
-
 def random_string():
   random = SystemRandom()
   return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(8)])
@@ -148,17 +130,15 @@ def list_files(path, extension):
   filepath = 'static/' + path
   return [join_path(dp, f) for dp, dn, files in os.walk(filepath) for f in files if matches(f)]
 
-SAVED_CACHE_STRING = random_string()
-
 def render_page_template(name, **kwargs):
-  if app.config.get('DEBUGGING', False):
+  debugging = app.config.get('DEBUGGING', False)
+  if debugging:
     # If DEBUGGING is enabled, then we load the full set of individual JS and CSS files
     # from the file system.
     library_styles = list_files('lib', 'css')
     main_styles = list_files('css', 'css')
     library_scripts = list_files('lib', 'js')
    main_scripts = list_files('js', 'js')
-    cache_buster = 'debugging'
 
     file_lists = [library_styles, main_styles, library_scripts, main_scripts]
     for file_list in file_lists:
@@ -168,7 +148,6 @@ def render_page_template(name, **kwargs):
     main_styles = ['dist/quay-frontend.css']
     library_scripts = []
     main_scripts = ['dist/quay-frontend.min.js']
-    cache_buster = SAVED_CACHE_STRING
 
   use_cdn = app.config.get('USE_CDN', True)
   if request.args.get('use_cdn') is not None:
@@ -177,6 +156,12 @@ def render_page_template(name, **kwargs):
   external_styles = get_external_css(local=not use_cdn)
   external_scripts = get_external_javascript(local=not use_cdn)
 
+  def add_cachebusters(filenames):
+    cachebusters = get_cache_busters()
+    for filename in filenames:
+      cache_buster = cachebusters.get(filename, random_string()) if not debugging else 'debugging'
+      yield (filename, cache_buster)
+
   def get_oauth_config():
     oauth_config = {}
     for oauth_app in oauth_apps:
@@ -188,13 +173,14 @@ def render_page_template(name, **kwargs):
   if len(app.config.get('CONTACT_INFO', [])) == 1:
     contact_href = app.config['CONTACT_INFO'][0]
 
-  resp = make_response(render_template(name, route_data=json.dumps(get_route_data()),
+  resp = make_response(render_template(name,
+                                       route_data=json.dumps(get_route_data()),
                                        external_styles=external_styles,
                                        external_scripts=external_scripts,
-                                       main_styles=main_styles,
-                                       library_styles=library_styles,
-                                       main_scripts=main_scripts,
-                                       library_scripts=library_scripts,
+                                       main_styles=add_cachebusters(main_styles),
+                                       library_styles=add_cachebusters(library_styles),
+                                       main_scripts=add_cachebusters(main_scripts),
+                                       library_scripts=add_cachebusters(library_scripts),
                                        feature_set=json.dumps(features.get_features()),
                                        config_set=json.dumps(getFrontendVisibleConfig(app.config)),
                                        oauth_set=json.dumps(get_oauth_config()),
@@ -204,9 +190,10 @@ def render_page_template(name, **kwargs):
                                        sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
                                        is_debug=str(app.config.get('DEBUGGING', False)).lower(),
                                        show_chat=features.OLARK_CHAT,
-                                       cache_buster=cache_buster,
                                        has_billing=features.BILLING,
                                        contact_href=contact_href,
+                                       hostname=app.config['SERVER_HOSTNAME'],
+                                       preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
                                        **kwargs))
 
   resp.headers['X-FRAME-OPTIONS'] = 'DENY'
@@ -224,18 +211,20 @@ def check_repository_usage(user_or_org, plan_found):
 
 
 def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
-                trigger=None, pull_robot_name=None):
+                trigger=None, pull_robot_name=None, trigger_metadata=None):
   host = urlparse.urlparse(request.url).netloc
   repo_path = '%s/%s/%s' % (host, repository.namespace_user.username, repository.name)
 
-  token = model.create_access_token(repository, 'write')
+  token = model.create_access_token(repository, 'write', kind='build-worker',
+                                    friendly_name='Repository Build Token')
   logger.debug('Creating build %s with repo %s tags %s and dockerfile_id %s',
                build_name, repo_path, tags, dockerfile_id)
 
   job_config = {
     'docker_tags': tags,
     'registry': host,
-    'build_subdir': subdir
+    'build_subdir': subdir,
+    'trigger_metadata': trigger_metadata or {}
   }
 
   with app.config['DB_TRANSACTION_FACTORY'](db):
@@ -243,10 +232,17 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
                                             dockerfile_id, build_name,
                                             trigger, pull_robot_name=pull_robot_name)
 
-  dockerfile_build_queue.put([repository.namespace_user.username, repository.name], json.dumps({
+  json_data = json.dumps({
     'build_uuid': build_request.uuid,
     'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
-  }), retries_remaining=1)
+  })
+
+  queue_item = dockerfile_build_queue.put([repository.namespace_user.username, repository.name],
+                                          json_data,
+                                          retries_remaining=3)
+
+  build_request.queue_item = queue_item
+  build_request.save()
 
   # Add the build to the repo's log.
   metadata = {
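The queue change keeps a handle to the returned queue item on the build request, which is what lets build_status_view (earlier in this diff) inspect retries_remaining, and it raises the retry budget from 1 to 3. A reduced, self-contained sketch of the pattern; FakeQueue and FakeQueueItem are hypothetical stand-ins for Quay's work queue, not its real API:

import json

class FakeQueueItem(object):
  def __init__(self, body, retries_remaining):
    self.body = body
    self.retries_remaining = retries_remaining

class FakeQueue(object):
  # put() returns the stored item so the caller can keep a handle to it,
  # mirroring the change above.
  def put(self, canonical_name, body, retries_remaining=1):
    return FakeQueueItem(body, retries_remaining)

queue = FakeQueue()
item = queue.put(['namespace', 'repo'], json.dumps({'build_uuid': 'uuid'}), retries_remaining=3)
print(item.retries_remaining)  # 3; the status view reports 'error' once this hits 0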
@@ -265,7 +261,7 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
                               metadata=metadata, repository=repository)
 
   # Add notifications for the build queue.
-  profile.debug('Adding notifications for repository')
+  logger.debug('Adding notifications for repository')
   event_data = {
     'build_id': build_request.uuid,
     'build_name': build_name,

@@ -19,19 +19,21 @@ def generate_csrf_token():
 
   return session['_csrf_token']
 
+def verify_csrf():
+  token = session.get('_csrf_token', None)
+  found_token = request.values.get('_csrf_token', None)
+
+  if not token or token != found_token:
+    msg = 'CSRF Failure. Session token was %s and request token was %s'
+    logger.error(msg, token, found_token)
+    abort(403, message='CSRF token was invalid or missing.')
+
 def csrf_protect(func):
   @wraps(func)
   def wrapper(*args, **kwargs):
     oauth_token = get_validated_oauth_token()
     if oauth_token is None and request.method != "GET" and request.method != "HEAD":
-      token = session.get('_csrf_token', None)
-      found_token = request.values.get('_csrf_token', None)
-
-      if not token or token != found_token:
-        msg = 'CSRF Failure. Session token was %s and request token was %s'
-        logger.error(msg, token, found_token)
-        abort(403, message='CSRF token was invalid or missing.')
+      verify_csrf()
 
     return func(*args, **kwargs)
   return wrapper

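Extracting verify_csrf() lets call sites outside the decorator enforce the same session-token check. A self-contained sketch of the double-submit comparison it performs (the tokens are illustrative; the real function reads them from the Flask session and request):

def verify_csrf(session_token, request_token):
  # Compare the token stored in the session against the one echoed back by
  # the client; reject when either is missing or they differ.
  if not session_token or session_token != request_token:
    raise ValueError('CSRF token was invalid or missing.')

verify_csrf('abc123', 'abc123')  # ok
try:
  verify_csrf('abc123', None)
except ValueError as err:
  print(err)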
endpoints/decorated.py (new file, +19)
@@ -0,0 +1,19 @@
+import logging
+import json
+
+from flask import make_response
+from app import app
+from util.useremails import CannotSendEmailException
+from data import model
+
+logger = logging.getLogger(__name__)
+
+@app.errorhandler(model.DataModelException)
+def handle_dme(ex):
+  logger.exception(ex)
+  return make_response(json.dumps({'message': ex.message}), 400)
+
+@app.errorhandler(CannotSendEmailException)
+def handle_emailexception(ex):
+  message = 'Could not send email. Please contact an administrator and report this problem.'
+  return make_response(json.dumps({'message': message}), 400)

@@ -23,7 +23,6 @@ from endpoints.notificationhelper import spawn_notification
 import features
 
 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')
 
 index = Blueprint('index', __name__)
 
@@ -51,7 +50,7 @@ def generate_headers(role='read'):
     if has_token_request:
       repo = model.get_repository(namespace, repository)
       if repo:
-        token = model.create_access_token(repo, role)
+        token = model.create_access_token(repo, role, 'pushpull-token')
         token_str = 'signature=%s' % token.code
         response.headers['WWW-Authenticate'] = token_str
         response.headers['X-Docker-Token'] = token_str
@@ -120,7 +119,7 @@ def create_user():
 
   else:
     # New user case
-    profile.debug('Creating user')
+    logger.debug('Creating user')
     new_user = None
 
     try:
@@ -128,10 +127,10 @@ def create_user():
     except model.TooManyUsersException as ex:
       abort(402, 'Seat limit has been reached for this license', issue='seat-limit')
 
-    profile.debug('Creating email code for user')
+    logger.debug('Creating email code for user')
     code = model.create_confirm_email_code(new_user)
 
-    profile.debug('Sending email code to user')
+    logger.debug('Sending email code to user')
    send_confirmation_email(new_user.username, new_user.email, code.code)
 
    return make_response('Created', 201)
@@ -168,12 +167,12 @@ def update_user(username):
     update_request = request.get_json()
 
     if 'password' in update_request:
-      profile.debug('Updating user password')
+      logger.debug('Updating user password')
       model.change_password(get_authenticated_user(),
                             update_request['password'])
 
     if 'email' in update_request:
-      profile.debug('Updating user email')
+      logger.debug('Updating user email')
       model.update_email(get_authenticated_user(), update_request['email'])
 
     return jsonify({
@@ -189,13 +188,13 @@ def update_user(username):
 @parse_repository_name
 @generate_headers(role='write')
 def create_repository(namespace, repository):
-  profile.debug('Parsing image descriptions')
+  logger.debug('Parsing image descriptions')
   image_descriptions = json.loads(request.data.decode('utf8'))
 
-  profile.debug('Looking up repository')
+  logger.debug('Looking up repository')
   repo = model.get_repository(namespace, repository)
 
-  profile.debug('Repository looked up')
+  logger.debug('Repository looked up')
   if not repo and get_authenticated_user() is None:
     logger.debug('Attempt to create new repository without user auth.')
     abort(401,
@@ -219,36 +218,10 @@ def create_repository(namespace, repository):
           issue='no-create-permission',
           namespace=namespace)
 
-  profile.debug('Creaing repository with owner: %s', get_authenticated_user().username)
+  logger.debug('Creaing repository with owner: %s', get_authenticated_user().username)
   repo = model.create_repository(namespace, repository,
                                  get_authenticated_user())
 
-  profile.debug('Determining already added images')
-  added_images = OrderedDict([(desc['id'], desc) for desc in image_descriptions])
-  new_repo_images = dict(added_images)
-
-  # Optimization: Lookup any existing images in the repository with matching docker IDs and
-  # remove them from the added dict, so we don't need to look them up one-by-one.
-  def chunks(l, n):
-    for i in xrange(0, len(l), n):
-      yield l[i:i+n]
-
-  # Note: We do this in chunks in an effort to not hit the SQL query size limit.
-  for chunk in chunks(new_repo_images.keys(), 50):
-    existing_images = model.lookup_repository_images(namespace, repository, chunk)
-    for existing in existing_images:
-      added_images.pop(existing.docker_image_id)
-
-  profile.debug('Creating/Linking necessary images')
-  username = get_authenticated_user() and get_authenticated_user().username
-  translations = {}
-  for image_description in added_images.values():
-    model.find_create_or_link_image(image_description['id'], repo, username,
-                                    translations, storage.preferred_locations[0])
-
-
-  profile.debug('Created images')
   track_and_log('push_repo', repo)
   return make_response('Created', 201)
 
-
@@ -260,14 +233,14 @@ def update_images(namespace, repository):
   permission = ModifyRepositoryPermission(namespace, repository)
 
   if permission.can():
-    profile.debug('Looking up repository')
+    logger.debug('Looking up repository')
     repo = model.get_repository(namespace, repository)
     if not repo:
       # Make sure the repo actually exists.
       abort(404, message='Unknown repository', issue='unknown-repo')
 
     if get_authenticated_user():
-      profile.debug('Publishing push event')
+      logger.debug('Publishing push event')
       username = get_authenticated_user().username
 
       # Mark that the user has pushed the repo.
@@ -280,17 +253,17 @@ def update_images(namespace, repository):
       event = userevents.get_event(username)
       event.publish_event_data('docker-cli', user_data)
 
-    profile.debug('GCing repository')
-    num_removed = model.garbage_collect_repository(namespace, repository)
+    logger.debug('GCing repository')
+    model.garbage_collect_repository(namespace, repository)
 
     # Generate a job for each notification that has been added to this repo
-    profile.debug('Adding notifications for repository')
+    logger.debug('Adding notifications for repository')
 
     updated_tags = session.get('pushed_tags', {})
     event_data = {
       'updated_tags': updated_tags,
-      'pruned_image_count': num_removed
     }
     track_and_log('push_repo', repo)
     spawn_notification(repo, 'repo_push', event_data)
     return make_response('Updated', 204)
@@ -305,17 +278,15 @@ def get_repository_images(namespace, repository):
   permission = ReadRepositoryPermission(namespace, repository)
 
   # TODO invalidate token?
-  profile.debug('Looking up public status of repository')
-  is_public = model.repository_is_public(namespace, repository)
-  if permission.can() or is_public:
+  if permission.can() or model.repository_is_public(namespace, repository):
     # We can't rely on permissions to tell us if a repo exists anymore
-    profile.debug('Looking up repository')
+    logger.debug('Looking up repository')
     repo = model.get_repository(namespace, repository)
     if not repo:
       abort(404, message='Unknown repository', issue='unknown-repo')
 
     all_images = []
-    profile.debug('Retrieving repository images')
+    logger.debug('Retrieving repository images')
     for image in model.get_repository_images(namespace, repository):
       new_image_view = {
         'id': image.docker_image_id,
@@ -323,7 +294,7 @@ def get_repository_images(namespace, repository):
       }
       all_images.append(new_image_view)
 
-    profile.debug('Building repository image response')
+    logger.debug('Building repository image response')
     resp = make_response(json.dumps(all_images), 200)
     resp.mimetype = 'application/json'
 
@@ -382,6 +353,11 @@ def get_search():
   resp.mimetype = 'application/json'
   return resp
 
+# Note: This is *not* part of the Docker index spec. This is here for our own health check,
+# since we have nginx handle the _ping below.
+@index.route('/_internal_ping')
+def internal_ping():
+  return make_response('true', 200)
+
 @index.route('/_ping')

@@ -1,14 +1,10 @@
 import logging
-import io
-import os.path
-import tarfile
-import base64
 import json
 import requests
 import re
 
 from flask.ext.mail import Message
-from app import mail, app, get_app_url
+from app import mail, app
 from data import model
 from workers.worker import JobException
 
@@ -363,11 +359,8 @@ class SlackMethod(NotificationMethod):
     return 'slack'
 
   def validate(self, repository, config_data):
-    if not config_data.get('token', ''):
-      raise CannotValidateNotificationMethodException('Missing Slack Token')
-
-    if not config_data.get('subdomain', '').isalnum():
-      raise CannotValidateNotificationMethodException('Missing Slack Subdomain Name')
+    if not config_data.get('url', ''):
+      raise CannotValidateNotificationMethodException('Missing Slack Callback URL')
 
   def format_for_slack(self, message):
     message = message.replace('\n', '')
@@ -378,10 +371,8 @@ class SlackMethod(NotificationMethod):
   def perform(self, notification, event_handler, notification_data):
     config_data = json.loads(notification.config_json)
 
-    token = config_data.get('token', '')
-    subdomain = config_data.get('subdomain', '')
-
-    if not token or not subdomain:
+    url = config_data.get('url', '')
+    if not url:
       return
 
     owner = model.get_user_or_org(notification.repository.namespace_user.username)
@@ -389,8 +380,6 @@ class SlackMethod(NotificationMethod):
       # Something went wrong.
       return
 
-    url = 'https://%s.slack.com/services/hooks/incoming-webhook?token=%s' % (subdomain, token)
-
     level = event_handler.get_level(notification_data['event_data'], notification_data)
     color = {
       'info': '#ffffff',
@@ -426,5 +415,5 @@ class SlackMethod(NotificationMethod):
         raise NotificationMethodPerformException(error_message)
 
     except requests.exceptions.RequestException as ex:
-      logger.exception('Slack method was unable to be sent: %s' % ex.message)
+      logger.exception('Slack method was unable to be sent: %s', ex.message)
       raise NotificationMethodPerformException(ex.message)

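With these changes the Slack notifier posts straight to a user-supplied incoming-webhook URL instead of assembling one from a token and subdomain. A sketch of such a post with the requests library (the webhook URL and payload are placeholders):

import json
import requests

payload = {'text': 'Build complete', 'username': 'quaybot'}
resp = requests.post('https://hooks.slack.com/services/T000/B000/XXXX',
                     data=json.dumps(payload),
                     headers={'Content-Type': 'application/json'})
resp.raise_for_status()  # Slack returns non-2xx when the webhook is invalid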
@@ -4,13 +4,62 @@ import json
 from flask import request, Blueprint, abort, Response
 from flask.ext.login import current_user
 from auth.auth import require_session_login
-from app import userevents
+from endpoints.common import route_show_if
+from app import app, userevents
+from auth.permissions import SuperUserPermission
 
 import features
+import psutil
+import time
 
 logger = logging.getLogger(__name__)
 
 realtime = Blueprint('realtime', __name__)
 
 
+@realtime.route("/ps")
+@route_show_if(features.SUPER_USERS)
+@require_session_login
+def ps():
+  if not SuperUserPermission().can():
+    abort(403)
+
+  def generator():
+    while True:
+      build_status = {}
+      try:
+        builder_data = app.config['HTTPCLIENT'].get('http://localhost:8686/status', timeout=1)
+        if builder_data.status_code == 200:
+          build_status = json.loads(builder_data.text)
+      except:
+        pass
+
+      try:
+        data = {
+          'count': {
+            'cpu': psutil.cpu_percent(interval=1, percpu=True),
+            'virtual_mem': psutil.virtual_memory(),
+            'swap_mem': psutil.swap_memory(),
+            'connections': len(psutil.net_connections()),
+            'processes': len(psutil.pids()),
+            'network': psutil.net_io_counters()
+          },
+          'build': build_status
+        }
+      except psutil.AccessDenied:
+        data = {}
+
+      json_string = json.dumps(data)
+      yield 'data: %s\n\n' % json_string
+      time.sleep(1)
+
+  try:
+    return Response(generator(), mimetype="text/event-stream")
+  except:
+    pass
+
+
 @realtime.route("/user/")
 @require_session_login
 def index():

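The new /ps route streams one JSON sample per second using server-sent-events framing ('data: ...' followed by a blank line). A sketch of a line-oriented client for it (the URL and session cookie are placeholders):

import json
import requests

resp = requests.get('http://localhost:8080/realtime/ps', stream=True,
                    cookies={'session': '<session-cookie>'})

for line in resp.iter_lines():
  # SSE payload lines start with 'data: '; blank lines separate events.
  if line and line.startswith(b'data: '):
    sample = json.loads(line[len(b'data: '):])
    print(sample.get('count', {}).get('connections'))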
@@ -9,6 +9,7 @@ from time import time
 
 from app import storage as store, image_diff_queue, app
 from auth.auth import process_auth, extract_namespace_repo_from_session
+from auth.auth_context import get_authenticated_user
 from util import checksums, changes
 from util.http import abort, exact_abort
 from auth.permissions import (ReadRepositoryPermission,
@@ -20,7 +21,6 @@ from util import gzipstream
 registry = Blueprint('registry', __name__)
 
 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')
 
 class SocketReader(object):
   def __init__(self, fp):
@@ -100,12 +100,12 @@ def set_cache_headers(f):
 def head_image_layer(namespace, repository, image_id, headers):
   permission = ReadRepositoryPermission(namespace, repository)
 
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   if permission.can() or model.repository_is_public(namespace, repository):
-    profile.debug('Looking up repo image')
+    logger.debug('Looking up repo image')
     repo_image = model.get_repo_image_extended(namespace, repository, image_id)
     if not repo_image:
-      profile.debug('Image not found')
+      logger.debug('Image not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)
 
@@ -114,7 +114,7 @@ def head_image_layer(namespace, repository, image_id, headers):
     # Add the Accept-Ranges header if the storage engine supports resumable
     # downloads.
     if store.get_supports_resumable_downloads(repo_image.storage.locations):
-      profile.debug('Storage supports resumable downloads')
+      logger.debug('Storage supports resumable downloads')
       extra_headers['Accept-Ranges'] = 'bytes'
 
     resp = make_response('')
@@ -133,31 +133,35 @@ def get_image_layer(namespace, repository, image_id, headers):
 def get_image_layer(namespace, repository, image_id, headers):
   permission = ReadRepositoryPermission(namespace, repository)
 
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   if permission.can() or model.repository_is_public(namespace, repository):
-    profile.debug('Looking up repo image')
+    logger.debug('Looking up repo image')
     repo_image = model.get_repo_image_extended(namespace, repository, image_id)
     if not repo_image:
       logger.debug('Image not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)
 
-    profile.debug('Looking up the layer path')
+    logger.debug('Looking up the layer path')
     try:
       path = store.image_layer_path(repo_image.storage.uuid)
 
-      profile.debug('Looking up the direct download URL')
+      logger.debug('Looking up the direct download URL')
       direct_download_url = store.get_direct_download_url(repo_image.storage.locations, path)
 
       if direct_download_url:
-        profile.debug('Returning direct download URL')
+        logger.debug('Returning direct download URL')
         resp = redirect(direct_download_url)
         return resp
 
-      profile.debug('Streaming layer data')
+      logger.debug('Streaming layer data')
+
+      # Close the database handle here for this process before we send the long download.
+      database.close_db_filter(None)
+
       return Response(store.stream_read(repo_image.storage.locations, path), headers=headers)
     except (IOError, AttributeError):
-      profile.debug('Image not found')
+      logger.exception('Image layer data not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)
 
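Closing the database handle before streaming matters because a layer download can run for minutes while otherwise holding a pooled connection. A minimal sketch of the idea with a Flask streaming response; close_connection is a hypothetical stand-in for database.close_db_filter(None):

from flask import Flask, Response

app = Flask(__name__)

def close_connection():
  # Hypothetical stand-in: release the request's DB connection before the
  # long-running stream begins, so it is not held for the whole download.
  pass

@app.route('/layer')
def layer():
  close_connection()
  def stream():
    for chunk in [b'chunk1', b'chunk2']:
      yield chunk
  return Response(stream(), mimetype='application/octet-stream')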
@@ -168,29 +172,30 @@ def get_image_layer(namespace, repository, image_id, headers):
 @process_auth
 @extract_namespace_repo_from_session
 def put_image_layer(namespace, repository, image_id):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ModifyRepositoryPermission(namespace, repository)
   if not permission.can():
     abort(403)

-  profile.debug('Retrieving image')
+  logger.debug('Retrieving image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   try:
-    profile.debug('Retrieving image data')
+    logger.debug('Retrieving image data')
     uuid = repo_image.storage.uuid
     json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
   except (IOError, AttributeError):
+    logger.exception('Exception when retrieving image data')
     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
           image_id=image_id)

-  profile.debug('Retrieving image path info')
+  logger.debug('Retrieving image path info')
   layer_path = store.image_layer_path(uuid)

   if (store.exists(repo_image.storage.locations, layer_path) and not
       image_is_uploading(repo_image)):
     exact_abort(409, 'Image already exists')

-  profile.debug('Storing layer data')
+  logger.debug('Storing layer data')

   input_stream = request.stream
   if request.headers.get('transfer-encoding') == 'chunked':
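Note: a chunked upload carries no Content-Length, so the body can only be consumed incrementally. A generic sketch of reading such a stream in bounded pieces while hashing it (this is the idea, not this codebase's SocketReader plumbing):

    import hashlib

    def copy_and_hash(input_stream, output_file, chunk_size=64 * 1024):
      # Read fixed-size pieces until EOF; works with or without Content-Length.
      digest = hashlib.sha256()
      while True:
        chunk = input_stream.read(chunk_size)
        if not chunk:
          break
        digest.update(chunk)
        output_file.write(chunk)
      return digest.hexdigest()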
@@ -257,7 +262,7 @@ def put_image_layer(namespace, repository, image_id):

   # The layer is ready for download, send a job to the work queue to
   # process it.
-  profile.debug('Adding layer to diff queue')
+  logger.debug('Adding layer to diff queue')
   repo = model.get_repository(namespace, repository)
   image_diff_queue.put([repo.namespace_user.username, repository, image_id], json.dumps({
     'namespace_user_id': repo.namespace_user.id,
@@ -272,7 +277,7 @@ def put_image_layer(namespace, repository, image_id):
 @process_auth
 @extract_namespace_repo_from_session
 def put_image_checksum(namespace, repository, image_id):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ModifyRepositoryPermission(namespace, repository)
   if not permission.can():
     abort(403)
@@ -298,23 +303,23 @@ def put_image_checksum(namespace, repository, image_id):
     abort(400, 'Checksum not found in Cookie for image %(image_id)s',
           issue='missing-checksum-cookie', image_id=image_id)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   if not repo_image or not repo_image.storage:
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

   uuid = repo_image.storage.uuid

-  profile.debug('Looking up repo layer data')
+  logger.debug('Looking up repo layer data')
   if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)):
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

-  profile.debug('Marking image path')
+  logger.debug('Marking image path')
   if not image_is_uploading(repo_image):
     abort(409, 'Cannot set checksum for image %(image_id)s',
           issue='image-write-error', image_id=image_id)

-  profile.debug('Storing image checksum')
+  logger.debug('Storing image checksum')
   err = store_checksum(repo_image.storage, checksum)
   if err:
     abort(400, err)
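Note: store_checksum only persists the client-supplied value; verification happens against a digest computed from the uploaded bytes. A generic illustration of the comparison step (the exact string format Docker's v1 checksums use is not shown in this hunk):

    import hmac

    def checksums_match(computed, supplied):
      # Constant-time comparison, so the check does not leak how many
      # leading characters of the checksum matched.
      return hmac.compare_digest(str(computed), str(supplied))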
@@ -331,7 +336,7 @@ def put_image_checksum(namespace, repository, image_id):

   # The layer is ready for download, send a job to the work queue to
   # process it.
-  profile.debug('Adding layer to diff queue')
+  logger.debug('Adding layer to diff queue')
   repo = model.get_repository(namespace, repository)
   image_diff_queue.put([repo.namespace_user.username, repository, image_id], json.dumps({
     'namespace_user_id': repo.namespace_user.id,
@@ -348,23 +353,23 @@ def put_image_checksum(namespace, repository, image_id):
 @require_completion
 @set_cache_headers
 def get_image_json(namespace, repository, image_id, headers):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ReadRepositoryPermission(namespace, repository)
   if not permission.can() and not model.repository_is_public(namespace,
                                                              repository):
     abort(403)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)

-  profile.debug('Looking up repo layer data')
+  logger.debug('Looking up repo layer data')
   try:
     uuid = repo_image.storage.uuid
     data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
   except (IOError, AttributeError):
     flask_abort(404)

-  profile.debug('Looking up repo layer size')
+  logger.debug('Looking up repo layer size')
   size = repo_image.storage.image_size
   headers['X-Docker-Size'] = str(size)
@@ -379,16 +384,16 @@ def get_image_json(namespace, repository, image_id, headers):
 @require_completion
 @set_cache_headers
 def get_image_ancestry(namespace, repository, image_id, headers):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ReadRepositoryPermission(namespace, repository)
   if not permission.can() and not model.repository_is_public(namespace,
                                                              repository):
     abort(403)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)

-  profile.debug('Looking up image data')
+  logger.debug('Looking up image data')
   try:
     uuid = repo_image.storage.uuid
     data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
@@ -396,11 +401,11 @@ def get_image_ancestry(namespace, repository, image_id, headers):
     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
           image_id=image_id)

-  profile.debug('Converting to <-> from JSON')
+  logger.debug('Converting to <-> from JSON')
   response = make_response(json.dumps(json.loads(data)), 200)
   response.headers.extend(headers)

-  profile.debug('Done')
+  logger.debug('Done')
   return response
@@ -430,12 +435,12 @@ def store_checksum(image_storage, checksum):
 @process_auth
 @extract_namespace_repo_from_session
 def put_image_json(namespace, repository, image_id):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ModifyRepositoryPermission(namespace, repository)
   if not permission.can():
     abort(403)

-  profile.debug('Parsing image JSON')
+  logger.debug('Parsing image JSON')
   try:
     data = json.loads(request.data.decode('utf8'))
   except ValueError:
@@ -449,12 +454,22 @@ def put_image_json(namespace, repository, image_id):
     abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
           issue='invalid-request', image_id=image_id)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   if not repo_image:
-    profile.debug('Image not found')
-    abort(404, 'Image %(image_id)s not found', issue='unknown-image',
-          image_id=image_id)
+    logger.debug('Image not found, creating image')
+    repo = model.get_repository(namespace, repository)
+    if repo is None:
+      abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
+            namespace=namespace, repository=repository)
+
+    username = get_authenticated_user() and get_authenticated_user().username
+    repo_image = model.find_create_or_link_image(image_id, repo, username, {},
+                                                 store.preferred_locations[0])
+
+    # Create a temporary tag to prevent this image from getting garbage collected while the push
+    # is in progress.
+    model.create_temporary_hidden_tag(repo, repo_image, app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])

   uuid = repo_image.storage.uuid
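Note: the temporary hidden tag created above exists only to keep the half-pushed image from being garbage collected before the client finishes uploading its layers. The idea, sketched independently of Quay's data model (all names here are hypothetical):

    import time

    _gc_guards = {}  # image_id -> expiration timestamp

    def guard_image(image_id, expiration_sec):
      # Hold a short-lived reference while a push is in flight.
      _gc_guards[image_id] = time.time() + expiration_sec

    def is_garbage(image_id, referenced_by_tags):
      guard = _gc_guards.get(image_id)
      if guard is not None and guard > time.time():
        return False  # still protected by an in-flight push
      return not referenced_by_tags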
@@ -466,24 +481,24 @@ def put_image_json(namespace, repository, image_id):

   parent_image = None
   if parent_id:
-    profile.debug('Looking up parent image')
+    logger.debug('Looking up parent image')
     parent_image = model.get_repo_image_extended(namespace, repository, parent_id)

   parent_uuid = parent_image and parent_image.storage.uuid
   parent_locations = parent_image and parent_image.storage.locations

   if parent_id:
-    profile.debug('Looking up parent image data')
+    logger.debug('Looking up parent image data')

     if (parent_id and not
         store.exists(parent_locations, store.image_json_path(parent_uuid))):
       abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
             issue='invalid-request', image_id=image_id, parent_id=parent_id)

-  profile.debug('Looking up image storage paths')
+  logger.debug('Looking up image storage paths')
   json_path = store.image_json_path(uuid)

-  profile.debug('Checking if image already exists')
+  logger.debug('Checking if image already exists')
   if (store.exists(repo_image.storage.locations, json_path) and not
       image_is_uploading(repo_image)):
     exact_abort(409, 'Image already exists')
@@ -496,24 +511,24 @@ def put_image_json(namespace, repository, image_id):
   command_list = data.get('container_config', {}).get('Cmd', None)
   command = json.dumps(command_list) if command_list else None

-  profile.debug('Setting image metadata')
+  logger.debug('Setting image metadata')
   model.set_image_metadata(image_id, namespace, repository,
                            data.get('created'), data.get('comment'), command,
                            parent_image)

-  profile.debug('Putting json path')
+  logger.debug('Putting json path')
   store.put_content(repo_image.storage.locations, json_path, request.data)

-  profile.debug('Generating image ancestry')
+  logger.debug('Generating image ancestry')

   try:
     generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid,
                       parent_locations)
   except IOError as ioe:
-    profile.debug('Error when generating ancestry: %s' % ioe.message)
+    logger.debug('Error when generating ancestry: %s' % ioe.message)
     abort(404)

-  profile.debug('Done')
+  logger.debug('Done')
   return make_response('true', 200)
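Note: generate_ancestry, invoked in the hunk above, maintains the flattened parent chain that get_image_ancestry later serves verbatim. The underlying recurrence is simple: an image's ancestry is itself prepended to its parent's ancestry. Sketched without the storage plumbing (build_ancestry is a hypothetical helper):

    import json

    def build_ancestry(image_id, parent_ancestry_json=None):
      # Ancestry is a JSON list ordered leaf-first: [image, parent, ..., base].
      parent_ancestry = json.loads(parent_ancestry_json) if parent_ancestry_json else []
      return json.dumps([image_id] + parent_ancestry)

    # e.g. build_ancestry('abc', '["def", "ghi"]') == '["abc", "def", "ghi"]'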
@@ -6,7 +6,6 @@ from flask import request
 from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token

 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

 def track_and_log(event_name, repo, **kwargs):
   repository = repo.name
@@ -19,20 +18,27 @@ def track_and_log(event_name, repo, **kwargs):

   analytics_id = 'anonymous'

-  profile.debug('Logging the %s to Mixpanel and the log system', event_name)
-  if get_validated_oauth_token():
-    oauth_token = get_validated_oauth_token()
-    metadata['oauth_token_id'] = oauth_token.id
-    metadata['oauth_token_application_id'] = oauth_token.application.client_id
-    metadata['oauth_token_application'] = oauth_token.application.name
-    analytics_id = 'oauth:' + oauth_token.id
-  elif get_authenticated_user():
-    metadata['username'] = get_authenticated_user().username
-    analytics_id = get_authenticated_user().username
-  elif get_validated_token():
-    metadata['token'] = get_validated_token().friendly_name
-    metadata['token_code'] = get_validated_token().code
-    analytics_id = 'token:' + get_validated_token().code
+  authenticated_oauth_token = get_validated_oauth_token()
+  authenticated_user = get_authenticated_user()
+  authenticated_token = get_validated_token() if not authenticated_user else None
+
+  logger.debug('Logging the %s to Mixpanel and the log system', event_name)
+  if authenticated_oauth_token:
+    metadata['oauth_token_id'] = authenticated_oauth_token.id
+    metadata['oauth_token_application_id'] = authenticated_oauth_token.application.client_id
+    metadata['oauth_token_application'] = authenticated_oauth_token.application.name
+    analytics_id = 'oauth:' + authenticated_oauth_token.id
+  elif authenticated_user:
+    metadata['username'] = authenticated_user.username
+    analytics_id = authenticated_user.username
+  elif authenticated_token:
+    metadata['token'] = authenticated_token.friendly_name
+    metadata['token_code'] = authenticated_token.code
+
+    if authenticated_token.kind:
+      metadata['token_type'] = authenticated_token.kind.name
+
+    analytics_id = 'token:' + authenticated_token.code
   else:
     metadata['public'] = True
     analytics_id = 'anonymous'
@@ -42,21 +48,27 @@ def track_and_log(event_name, repo, **kwargs):
   }

   # Publish the user event (if applicable)
-  if get_authenticated_user():
-    logger.debug('Checking publishing %s to the user events system', event_name)
+  if authenticated_user:
+    logger.debug('Publishing %s to the user events system', event_name)
     user_event_data = {
       'action': event_name,
       'repository': repository,
       'namespace': namespace
     }

-    event = userevents.get_event(get_authenticated_user().username)
+    event = userevents.get_event(authenticated_user.username)
     event.publish_event_data('docker-cli', user_event_data)

   # Save the action to mixpanel.
   logger.debug('Logging the %s to Mixpanel', event_name)
   analytics.track(analytics_id, event_name, extra_params)

   # Log the action to the database.
   logger.debug('Logging the %s to logs system', event_name)
   model.log_action(event_name, namespace,
-                   performer=get_authenticated_user(),
+                   performer=authenticated_user,
                    ip=request.remote_addr, metadata=metadata,
                    repository=repo)

   logger.debug('Track and log of %s complete', event_name)

@@ -226,7 +226,7 @@ class GithubBuildTrigger(BuildTrigger):
         'personal': False,
         'repos': repo_list,
         'info': {
-          'name': org.name,
+          'name': org.name or org.login,
           'avatar_url': org.avatar_url
         }
       })
@@ -345,8 +345,10 @@ class GithubBuildTrigger(BuildTrigger):
     # compute the tag(s)
     branch = ref.split('/')[-1]
     tags = {branch}
+
     if branch == repo.default_branch:
       tags.add('latest')
+
     logger.debug('Pushing to tags: %s' % tags)

     # compute the subdir
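Note: a quick worked example of the tag computation above, assuming a push to refs/heads/master on a repository whose default branch is master:

    ref = 'refs/heads/master'
    branch = ref.split('/')[-1]   # 'master'
    tags = {branch}
    if branch == 'master':        # repo.default_branch in the real code
      tags.add('latest')
    print(sorted(tags))           # ['latest', 'master']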
@@ -354,7 +356,14 @@ class GithubBuildTrigger(BuildTrigger):
     joined_subdir = os.path.join(tarball_subdir, repo_subdir)
     logger.debug('Final subdir: %s' % joined_subdir)

-    return dockerfile_id, list(tags), build_name, joined_subdir
+    # compute the metadata
+    metadata = {
+      'commit_sha': commit_sha,
+      'ref': ref,
+      'default_branch': repo.default_branch
+    }
+
+    return dockerfile_id, list(tags), build_name, joined_subdir, metadata

   @staticmethod
   def get_display_name(sha):

@@ -2,11 +2,10 @@ import logging
 import json
 import hashlib

-from flask import redirect, Blueprint, abort, send_file, request
+from flask import redirect, Blueprint, abort, send_file, make_response

-from app import app
+from app import app, signer
 from auth.auth import process_auth
-from auth.auth_context import get_authenticated_user
 from auth.permissions import ReadRepositoryPermission
 from data import model
 from data import database
@@ -15,13 +14,16 @@ from storage import Storage

 from util.queuefile import QueueFile
 from util.queueprocess import QueueProcess
-from util.gzipwrap import GzipWrap
-from util.dockerloadformat import build_docker_load_stream
+from formats.squashed import SquashedDockerImage
+from formats.aci import ACIImage


+# pylint: disable=invalid-name
 verbs = Blueprint('verbs', __name__)
 logger = logging.getLogger(__name__)

-def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, image_id_list):
+def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json,
+                 image_id_list):
   store = Storage(app)

   # For performance reasons, we load the full image list here, cache it, then disconnect from
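Note: _open_stream now takes a formatter, so the squash and ACI code paths share a single streaming pipeline. A sketch of the interface this implies; the real classes live in formats/squashed.py and formats/aci.py, which are not part of this excerpt, so the base class below is an assumption:

    class TarImageFormatter(object):
      # Hypothetical base: subclasses emit a tar stream for one synthetic
      # image in their own on-disk format (docker-load tar, ACI, ...).
      def build_stream(self, namespace, repository, tag, synthetic_image_id,
                       layer_json, get_image_iterator, get_layer_iterator):
        raise NotImplementedError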
@@ -42,20 +44,43 @@ def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, image_id_list):
                                            current_image_path)

     current_image_id = current_image_entry.id
-    logger.debug('Returning image layer %s: %s' % (current_image_id, current_image_path))
+    logger.debug('Returning image layer %s: %s', current_image_id, current_image_path)
     yield current_image_stream

-  stream = build_docker_load_stream(namespace, repository, tag, synthetic_image_id, image_json,
-                                    get_next_image, get_next_layer)
+  stream = formatter.build_stream(namespace, repository, tag, synthetic_image_id, image_json,
+                                  get_next_image, get_next_layer)

   return stream.read


-def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, queue_file):
+def _sign_sythentic_image(verb, linked_storage_uuid, queue_file):
+  signature = None
+  try:
+    signature = signer.detached_sign(queue_file)
+  except:
+    logger.exception('Exception when signing %s image %s', verb, linked_storage_uuid)
+    return
+
+  # Setup the database (since this is a new process) and then disconnect immediately
+  # once the operation completes.
+  if not queue_file.raised_exception:
+    with database.UseThenDisconnect(app.config):
+      try:
+        derived = model.get_storage_by_uuid(linked_storage_uuid)
+      except model.InvalidImageException:
+        return
+
+      signature_entry = model.find_or_create_storage_signature(derived, signer.name)
+      signature_entry.signature = signature
+      signature_entry.uploading = False
+      signature_entry.save()
+
+
+def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_locations, queue_file):
   store = Storage(app)

   def handle_exception(ex):
-    logger.debug('Exception when building squashed image %s: %s', linked_storage_uuid, ex)
+    logger.debug('Exception when building %s image %s: %s', verb, linked_storage_uuid, ex)

     with database.UseThenDisconnect(app.config):
       model.delete_derived_storage_by_uuid(linked_storage_uuid)
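Note: _sign_sythentic_image is simply a third consumer of the generated stream: one QueueProcess builds the image once, and separate QueueFile readers feed the client response, the storage writer, and (for ACIs) the signer. A standalone sketch of that fan-out idea with multiprocessing primitives (not the QueueFile implementation itself):

    from multiprocessing import Process, Queue

    def produce(queues):
      # Generate the payload once; copy every chunk to each consumer.
      for chunk in [b'chunk-1', b'chunk-2']:
        for q in queues:
          q.put(chunk)
      for q in queues:
        q.put(None)  # EOF marker

    def consume(name, q):
      while True:
        chunk = q.get()
        if chunk is None:
          break
        print('%s got %r' % (name, chunk))

    if __name__ == '__main__':
      client_q, storage_q = Queue(), Queue()
      Process(target=produce, args=([client_q, storage_q],)).start()
      consume('client', client_q)
      consume('storage', storage_q)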
@@ -67,86 +92,193 @@ def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, queue_file):
   queue_file.close()

   if not queue_file.raised_exception:
     # Setup the database (since this is a new process) and then disconnect immediately
     # once the operation completes.
     with database.UseThenDisconnect(app.config):
       done_uploading = model.get_storage_by_uuid(linked_storage_uuid)
       done_uploading.uploading = False
       done_uploading.save()


-@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
-@process_auth
-def get_squashed_tag(namespace, repository, tag):
+# pylint: disable=too-many-locals
+def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
   permission = ReadRepositoryPermission(namespace, repository)
-  if permission.can() or model.repository_is_public(namespace, repository):
-    # Lookup the requested tag.
-    try:
-      tag_image = model.get_tag_image(namespace, repository, tag)
-    except model.DataModelException:
-      abort(404)
-
-    # Lookup the tag's image and storage.
-    repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
-    if not repo_image:
-      abort(404)
+  # pylint: disable=no-member
+  if not permission.can() and not model.repository_is_public(namespace, repository):
+    abort(403)

-    # Log the action.
-    track_and_log('repo_verb', repo_image.repository, tag=tag, verb='squash')
+  # Lookup the requested tag.
+  try:
+    tag_image = model.get_tag_image(namespace, repository, tag)
+  except model.DataModelException:
+    abort(404)

-    store = Storage(app)
-    derived = model.find_or_create_derived_storage(repo_image.storage, 'squash',
-                                                   store.preferred_locations[0])
-    if not derived.uploading:
-      logger.debug('Derived image %s exists in storage', derived.uuid)
-      derived_layer_path = store.image_layer_path(derived.uuid)
-      download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
-      if download_url:
-        logger.debug('Redirecting to download URL for derived image %s', derived.uuid)
-        return redirect(download_url)
+  # Lookup the tag's image and storage.
+  repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
+  if not repo_image:
+    abort(404)

-      # Close the database handle here for this process before we send the long download.
-      database.close_db_filter(None)
+  # If there is a data checker, call it first.
+  uuid = repo_image.storage.uuid
+  image_json = None

-      logger.debug('Sending cached derived image %s', derived.uuid)
-      return send_file(store.stream_read_file(derived.locations, derived_layer_path))
-
-    # Load the ancestry for the image.
-    logger.debug('Building and returning derived image %s', derived.uuid)
-    uuid = repo_image.storage.uuid
-    ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
-    full_image_list = json.loads(ancestry_data)
-
-    # Load the image's JSON layer.
+  if checker is not None:
     image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
     image_json = json.loads(image_json_data)

-    # Calculate a synthetic image ID.
-    synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':squash').hexdigest()
+    if not checker(image_json):
+      logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
+      abort(404)

-    # Create a queue process to generate the data. The queue files will read from the process
-    # and send the results to the client and storage.
-    def _cleanup():
-      # Close any existing DB connection once the process has exited.
-      database.close_db_filter(None)
+  return (repo_image, tag_image, image_json)

-    args = (namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
-    queue_process = QueueProcess(_open_stream,
-                                 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
-                                 args, finished=_cleanup)

-    client_queue_file = QueueFile(queue_process.create_queue(), 'client')
-    storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
+# pylint: disable=too-many-locals
+def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result

-    # Start building.
-    queue_process.run()
+  # Lookup the derived image storage for the verb.
+  derived = model.find_derived_storage(repo_image.storage, verb)
+  if derived is None or derived.uploading:
+    abort(404)

-    # Start the storage saving.
-    storage_args = (derived.uuid, derived.locations, storage_queue_file)
-    QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
+  # Check if we have a valid signer configured.
+  if not signer.name:
+    abort(404)
+
+  # Lookup the signature for the verb.
+  signature_entry = model.lookup_storage_signature(derived, signer.name)
+  if signature_entry is None:
+    abort(404)
+
+  # Return the signature.
+  return make_response(signature_entry.signature)
+
+
+# pylint: disable=too-many-locals
+def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Log the action.
+  track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)
+
+  # Lookup/create the derived image storage for the verb.
+  derived = model.find_or_create_derived_storage(repo_image.storage, verb,
+                                                 store.preferred_locations[0])
+
+  if not derived.uploading:
+    logger.debug('Derived %s image %s exists in storage', verb, derived.uuid)
+    derived_layer_path = store.image_layer_path(derived.uuid)
+    download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
+    if download_url:
+      logger.debug('Redirecting to download URL for derived %s image %s', verb, derived.uuid)
+      return redirect(download_url)

     # Close the database handle here for this process before we send the long download.
     database.close_db_filter(None)

-    # Return the client's data.
-    return send_file(client_queue_file)
+    logger.debug('Sending cached derived %s image %s', verb, derived.uuid)
+    return send_file(store.stream_read_file(derived.locations, derived_layer_path))

-  abort(403)
+  # Load the ancestry for the image.
+  uuid = repo_image.storage.uuid
+
+  logger.debug('Building and returning derived %s image %s', verb, derived.uuid)
+  ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
+  full_image_list = json.loads(ancestry_data)
+
+  # Load the image's JSON layer.
+  if not image_json:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)
+
+  # Calculate a synthetic image ID.
+  synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest()
+
+  def _cleanup():
+    # Close any existing DB connection once the process has exited.
+    database.close_db_filter(None)
+
+  # Create a queue process to generate the data. The queue files will read from the process
+  # and send the results to the client and storage.
+  args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
+  queue_process = QueueProcess(_open_stream,
+                               8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
+                               args, finished=_cleanup)
+
+  client_queue_file = QueueFile(queue_process.create_queue(), 'client')
+  storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
+
+  # If signing is required, add a QueueFile for signing the image as we stream it out.
+  signing_queue_file = None
+  if sign and signer.name:
+    signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
+
+  # Start building.
+  queue_process.run()
+
+  # Start the storage saving.
+  storage_args = (verb, derived.uuid, derived.locations, storage_queue_file)
+  QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
+
+  if sign and signer.name:
+    signing_args = (verb, derived.uuid, signing_queue_file)
+    QueueProcess.run_process(_sign_sythentic_image, signing_args, finished=_cleanup)
+
+  # Close the database handle here for this process before we send the long download.
+  database.close_db_filter(None)
+
+  # Return the client's data.
+  return send_file(client_queue_file)
+
+
+def os_arch_checker(os, arch):
+  def checker(image_json):
+    # Verify the architecture and os.
+    operating_system = image_json.get('os', 'linux')
+    if operating_system != os:
+      return False
+
+    architecture = image_json.get('architecture', 'amd64')
+
+    # Note: Some older Docker images have 'x86_64' rather than 'amd64'.
+    # We allow the conversion here.
+    if architecture == 'x86_64' and operating_system == 'linux':
+      architecture = 'amd64'
+
+    if architecture != arch:
+      return False
+
+    return True
+
+  return checker
+
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_signature(server, namespace, repository, tag, os, arch):
+  return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
+                              os=os, arch=arch)
+
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_image(server, namespace, repository, tag, os, arch):
+  return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
+                    sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)
+
+
+@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
+@process_auth
+def get_squashed_tag(namespace, repository, tag):
+  return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImage())
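Note: a usage example for os_arch_checker as defined above; the returned closure validates an image's JSON against the requested platform, with the historical x86_64 spelling normalized to amd64:

    checker = os_arch_checker('linux', 'amd64')
    checker({'os': 'linux', 'architecture': 'amd64'})   # True
    checker({'os': 'linux', 'architecture': 'x86_64'})  # True (normalized)
    checker({'os': 'linux', 'architecture': 'arm'})     # False
    checker({})                                         # True (defaults apply)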
@@ -1,32 +1,38 @@
 import logging

 from flask import (abort, redirect, request, url_for, make_response, Response,
-                   Blueprint, send_from_directory, jsonify)
+                   Blueprint, send_from_directory, jsonify, send_file)

 from avatar_generator import Avatar
 from flask.ext.login import current_user
 from urlparse import urlparse
-from health.healthcheck import HealthCheck
+from health.healthcheck import get_healthchecker

 from data import model
 from data.model.oauth import DatabaseAuthorizationProvider
-from app import app, billing as stripe, build_logs, avatar
+from app import app, billing as stripe, build_logs, avatar, signer
 from auth.auth import require_session_login, process_oauth
-from auth.permissions import AdministerOrganizationPermission, ReadRepositoryPermission
+from auth.permissions import (AdministerOrganizationPermission, ReadRepositoryPermission,
+                              SuperUserPermission)

 from util.invoice import renderInvoiceToPdf
 from util.seo import render_snapshot
 from util.cache import no_cache
 from endpoints.common import common_login, render_page_template, route_show_if, param_required
-from endpoints.csrf import csrf_protect, generate_csrf_token
+from endpoints.csrf import csrf_protect, generate_csrf_token, verify_csrf
 from endpoints.registry import set_cache_headers
-from util.names import parse_repository_name
+from util.names import parse_repository_name, parse_repository_name_and_tag
 from util.useremails import send_email_changed
+from util.systemlogs import build_logs_archive
 from auth import scopes

 import features

 logger = logging.getLogger(__name__)

 # Capture the unverified SSL errors.
 logging.captureWarnings(True)

 web = Blueprint('web', __name__)

 STATUS_TAGS = app.config['STATUS_TAGS']
@@ -57,6 +63,14 @@ def snapshot(path = ''):
   abort(404)


+@web.route('/aci-signing-key')
+@no_cache
+def aci_signing_key():
+  if not signer.name:
+    abort(404)
+
+  return send_file(signer.public_key_path)
+
+
 @web.route('/plans/')
 @no_cache
 @route_show_if(features.BILLING)
@@ -95,6 +109,7 @@ def organizations():
 def user():
   return index('')

+
 @web.route('/superuser/')
 @no_cache
 @route_show_if(features.SUPER_USERS)
@@ -102,6 +117,13 @@ def superuser():
   return index('')


+@web.route('/setup/')
+@no_cache
+@route_show_if(features.SUPER_USERS)
+def setup():
+  return index('')
+
+
 @web.route('/signin/')
 @no_cache
 def signin(redirect=None):
@@ -158,33 +180,27 @@ def v1():
   return index('')


+# TODO(jschorr): Remove this mirrored endpoint once we migrate ELB.
 @web.route('/health', methods=['GET'])
+@web.route('/health/instance', methods=['GET'])
 @no_cache
-def health():
-  db_healthy = model.check_health()
-  buildlogs_healthy = build_logs.check_health()
-
-  check = HealthCheck.get_check(app.config['HEALTH_CHECKER'][0], app.config['HEALTH_CHECKER'][1])
-  (data, is_healthy) = check.conduct_healthcheck(db_healthy, buildlogs_healthy)
-
-  response = jsonify(dict(data = data, is_healthy = is_healthy))
-  response.status_code = 200 if is_healthy else 503
+def instance_health():
+  checker = get_healthchecker(app)
+  (data, status_code) = checker.check_instance()
+  response = jsonify(dict(data=data, status_code=status_code))
+  response.status_code = status_code
   return response


+# TODO(jschorr): Remove this mirrored endpoint once we migrate pingdom.
 @web.route('/status', methods=['GET'])
+@web.route('/health/endtoend', methods=['GET'])
 @no_cache
-def status():
-  db_healthy = model.check_health()
-  buildlogs_healthy = build_logs.check_health()
-
-  response = jsonify({
-    'db_healthy': db_healthy,
-    'buildlogs_healthy': buildlogs_healthy,
-    'is_testing': app.config['TESTING'],
-  })
-  response.status_code = 200 if db_healthy and buildlogs_healthy else 503
-
+def endtoend_health():
+  checker = get_healthchecker(app)
+  (data, status_code) = checker.check_endtoend()
+  response = jsonify(dict(data=data, status_code=status_code))
+  response.status_code = status_code
   return response
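Note: both endpoints now delegate to a checker object with two entry points that each return (data, status_code). The health/healthcheck.py module is not part of this excerpt, so the following is only a sketch of the shape get_healthchecker implies:

    class LocalHealthCheck(object):
      # Hypothetical checker: instance checks cover only this machine,
      # while end-to-end checks also exercise shared services.
      def check_instance(self):
        data = {'services': {'registry_gunicorn': True, 'web_gunicorn': True}}
        return (data, 200 if all(data['services'].values()) else 503)

      def check_endtoend(self):
        data = {'services': {'database': True, 'buildlogs': True}}
        return (data, 200 if all(data['services'].values()) else 503)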
@@ -229,14 +245,14 @@ def robots():
 @web.route('/<path:repository>')
 @no_cache
 @process_oauth
-@parse_repository_name
-def redirect_to_repository(namespace, reponame):
+@parse_repository_name_and_tag
+def redirect_to_repository(namespace, reponame, tag):
   permission = ReadRepositoryPermission(namespace, reponame)
   is_public = model.repository_is_public(namespace, reponame)

   if permission.can() or is_public:
     repository_name = '/'.join([namespace, reponame])
-    return redirect(url_for('web.repository', path=repository_name))
+    return redirect(url_for('web.repository', path=repository_name, tag=tag))

   abort(404)
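Note: parse_repository_name_and_tag lets a URL such as quay.io/namespace/repo:tag land on the right view of the repository page. The split it has to perform, sketched here because the real decorator in util/names.py is not part of this excerpt:

    def split_repo_and_tag(path, default_tag='latest'):
      # 'ns/repo:v1' -> ('ns/repo', 'v1'); 'ns/repo' -> ('ns/repo', 'latest')
      if ':' in path:
        repo, tag = path.rsplit(':', 1)
        return repo, tag
      return path, default_tag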
@@ -471,3 +487,21 @@ def exchange_code_for_token():

   provider = FlaskAuthorizationProvider()
   return provider.get_token(grant_type, client_id, client_secret, redirect_uri, code, scope=scope)
+
+
+@web.route('/systemlogsarchive', methods=['GET'])
+@process_oauth
+@route_show_if(features.SUPER_USERS)
+@no_cache
+def download_logs_archive():
+  # Note: We cannot use the decorator here because this is a GET method. That being said, this
+  # information is sensitive enough that we want the extra protection.
+  verify_csrf()
+
+  if SuperUserPermission().can():
+    archive_data = build_logs_archive(app)
+    return Response(archive_data,
+                    mimetype="application/octet-stream",
+                    headers={"Content-Disposition": "attachment;filename=erlogs.tar.gz"})
+
+  abort(403)
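Note: because this download is a plain GET, the POST-oriented @csrf_protect decorator does not apply, so the handler calls verify_csrf directly. The endpoints/csrf.py implementation is not shown here; generically, such a check amounts to something like:

    import hmac
    from flask import abort, request, session

    def verify_csrf_token():
      # Compare the token echoed by the client with the one in the session,
      # in constant time; reject the request if they differ.
      expected = session.get('_csrf_token', '')
      provided = request.args.get('_csrf_token', '')
      if not expected or not hmac.compare_digest(expected, provided):
        abort(403)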
@@ -91,7 +91,7 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
   try:
     specs = handler.handle_trigger_request(request, trigger.auth_token,
                                            config_dict)
-    dockerfile_id, tags, name, subdir = specs
+    dockerfile_id, tags, name, subdir, metadata = specs

   except ValidationRequestException:
     # This was just a validation request, we don't need to build anything
@@ -104,7 +104,7 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
   pull_robot_name = model.get_pull_robot_name(trigger)
   repo = model.get_repository(namespace, repository)
   start_build(repo, dockerfile_id, tags, name, subdir, False, trigger,
-              pull_robot_name=pull_robot_name)
+              pull_robot_name=pull_robot_name, trigger_metadata=metadata)

   return make_response('Okay')