Merge branch 'master' into quark

This commit is contained in: commit fbdbc21eb1

137 changed files with 8691 additions and 2414 deletions
@@ -280,6 +280,23 @@ require_user_read = require_user_permission(UserReadPermission, scopes.READ_USER)
 require_user_admin = require_user_permission(UserAdminPermission, None)
+require_fresh_user_admin = require_user_permission(UserAdminPermission, None)
+
+
+def verify_not_prod(func):
+  @add_method_metadata('enterprise_only', True)
+  @wraps(func)
+  def wrapped(*args, **kwargs):
+    # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail.
+    # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be
+    # absolutely sure.
+    if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0:
+      logger.error('!!! Super user method called IN PRODUCTION !!!')
+      raise NotFound()
+
+    return func(*args, **kwargs)
+
+  return wrapped
+
+
 def require_fresh_login(func):
   @add_method_metadata('requires_fresh_login', True)
   @wraps(func)
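Note: verify_not_prod stacks with the existing API decorators. A minimal sketch of how a super-user endpoint combines them (the resource name and nickname below are illustrative, not part of this commit):

@resource('/v1/superuser/example')
@internal_only
@show_if(features.SUPER_USERS)
class ExampleSuperUserResource(ApiResource):
  @require_fresh_login
  @verify_not_prod  # raises NotFound() when running against a quay.io hostname
  @nickname('exampleSuperUserCall')
  def get(self):
    # The permission check still applies on top of the feature flag and prod guard.
    if SuperUserPermission().can():
      return {'ok': True}
    abort(403)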
@@ -385,8 +402,10 @@ import endpoints.api.repoemail
 import endpoints.api.repotoken
 import endpoints.api.robot
 import endpoints.api.search
+import endpoints.api.suconfig
 import endpoints.api.superuser
 import endpoints.api.tag
 import endpoints.api.team
 import endpoints.api.trigger
 import endpoints.api.user
@@ -9,7 +9,7 @@ from app import app, userfiles as user_files, build_logs, log_archive
 from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
                            require_repo_read, require_repo_write, validate_json_request,
                            ApiResource, internal_only, format_date, api, Unauthorized, NotFound,
-                           path_param)
+                           path_param, InvalidRequest, require_repo_admin)
 from endpoints.common import start_build
 from endpoints.trigger import BuildTrigger
 from data import model, database
@@ -72,10 +72,16 @@ def build_status_view(build_obj, can_write=False):
   # minutes. If not, then the build timed out.
   if phase != database.BUILD_PHASE.COMPLETE and phase != database.BUILD_PHASE.ERROR:
     if status is not None and 'heartbeat' in status and status['heartbeat']:
-      heartbeat = datetime.datetime.fromtimestamp(status['heartbeat'])
-      if datetime.datetime.now() - heartbeat > datetime.timedelta(minutes=1):
+      heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
+      if datetime.datetime.utcnow() - heartbeat > datetime.timedelta(minutes=1):
         phase = database.BUILD_PHASE.INTERNAL_ERROR

+  # If the phase is internal error, return 'error' instead of the number if retries
+  # on the queue item is 0.
+  if phase == database.BUILD_PHASE.INTERNAL_ERROR:
+    if build_obj.queue_item is None or build_obj.queue_item.retries_remaining == 0:
+      phase = database.BUILD_PHASE.ERROR
+
   logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config)
   resp = {
     'id': build_obj.uuid,
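Note: the heartbeat check above now renders both the stored epoch timestamp and the current time in UTC. A plausible motivation, sketched below with stdlib calls: fromtimestamp()/now() work in server-local time, so a DST transition between the heartbeat and the check can shift the computed age by a full hour, while the naive-UTC pair is immune to that.

import datetime

epoch = 1400000000  # illustrative heartbeat value, written as a UTC epoch timestamp

local = datetime.datetime.fromtimestamp(epoch)   # naive, server-local wall clock
utc = datetime.datetime.utcfromtimestamp(epoch)  # naive, UTC wall clock

# The two naive datetimes differ by the server's UTC offset; around a DST
# change the local pair can misjudge the one-minute heartbeat window by an hour.
print(utc - local)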
@@ -87,7 +93,7 @@ def build_status_view(build_obj, can_write=False):
     'is_writer': can_write,
     'trigger': trigger_view(build_obj.trigger),
     'resource_key': build_obj.resource_key,
-    'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None,
+    'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None
   }

   if can_write:
@@ -201,6 +207,31 @@ class RepositoryBuildList(RepositoryParamResource):
     return resp, 201, headers


+@resource('/v1/repository/<repopath:repository>/build/<build_uuid>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+class RepositoryBuildResource(RepositoryParamResource):
+  """ Resource for dealing with repository builds. """
+  @require_repo_admin
+  @nickname('cancelRepoBuild')
+  def delete(self, namespace, repository, build_uuid):
+    """ Cancels a repository build if it has not yet been picked up by a build worker. """
+    try:
+      build = model.get_repository_build(build_uuid)
+    except model.InvalidRepositoryBuildException:
+      raise NotFound()
+
+    if build.repository.name != repository or build.repository.namespace_user.username != namespace:
+      raise NotFound()
+
+    if model.cancel_repository_build(build):
+      return 'Okay', 201
+    else:
+      raise InvalidRequest('Build is currently running or has finished')
+
+
 @resource('/v1/repository/<repopath:repository>/build/<build_uuid>/status')
 @path_param('repository', 'The full path of the repository. e.g. namespace/name')
 @path_param('build_uuid', 'The UUID of the build')
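Note: the new cancel endpoint can be exercised directly over HTTP. A hypothetical client call (host, credentials, and build UUID are placeholders; the /api prefix depends on where the API blueprint is mounted in a given deployment):

import requests

resp = requests.delete(
  'https://quay.example.com/api/v1/repository/myorg/myrepo/build/<build_uuid>',
  headers={'Authorization': 'Bearer <access token>'})

if resp.status_code == 201:
  print('Build cancelled')  # the endpoint returns 'Okay', 201
else:
  print('Too late: build is already running or finished')  # InvalidRequest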
361 endpoints/api/suconfig.py Normal file
@@ -0,0 +1,361 @@
+import logging
+import os
+import json
+import signal
+
+from flask import abort, Response
+from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if,
+                           require_fresh_login, request, validate_json_request, verify_not_prod)
+
+from endpoints.common import common_login
+from app import app, CONFIG_PROVIDER, superusers
+from data import model
+from data.database import configure
+from auth.permissions import SuperUserPermission
+from auth.auth_context import get_authenticated_user
+from data.database import User
+from util.config.configutil import add_enterprise_config_defaults
+from util.config.validator import validate_service_for_config, SSL_FILENAMES
+from data.runmigration import run_alembic_migration
+
+import features
+
+logger = logging.getLogger(__name__)
+
+def database_is_valid():
+  """ Returns whether the database, as configured, is valid. """
+  if app.config['TESTING']:
+    return False
+
+  try:
+    list(User.select().limit(1))
+    return True
+  except:
+    return False
+
+
+def database_has_users():
+  """ Returns whether the database has any users defined. """
+  return bool(list(User.select().limit(1)))
+
+
+@resource('/v1/superuser/registrystatus')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserRegistryStatus(ApiResource):
+  """ Resource for determining the status of the registry, such as if config exists,
+      if a database is configured, and if it has any defined users.
+  """
+  @nickname('scRegistryStatus')
+  @verify_not_prod
+  def get(self):
+    """ Returns the status of the registry. """
+    # If there is no conf/stack volume, then report that status.
+    if not CONFIG_PROVIDER.volume_exists():
+      return {
+        'status': 'missing-config-dir'
+      }
+
+    # If there is no config file, we need to setup the database.
+    if not CONFIG_PROVIDER.yaml_exists():
+      return {
+        'status': 'config-db'
+      }
+
+    # If the database isn't yet valid, then we need to set it up.
+    if not database_is_valid():
+      return {
+        'status': 'setup-db'
+      }
+
+    # If we have SETUP_COMPLETE, then we're ready to go!
+    if app.config.get('SETUP_COMPLETE', False):
+      return {
+        'requires_restart': CONFIG_PROVIDER.requires_restart(app.config),
+        'status': 'ready'
+      }
+
+    return {
+      'status': 'create-superuser' if not database_has_users() else 'config'
+    }
+
+
+class _AlembicLogHandler(logging.Handler):
+  def __init__(self):
+    super(_AlembicLogHandler, self).__init__()
+    self.records = []
+
+  def emit(self, record):
+    self.records.append({
+      'level': record.levelname,
+      'message': record.getMessage()
+    })
+
+@resource('/v1/superuser/setupdb')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSetupDatabase(ApiResource):
+  """ Resource for invoking alembic to setup the database. """
+  @verify_not_prod
+  @nickname('scSetupDatabase')
+  def get(self):
+    """ Invokes the alembic upgrade process. """
+    # Note: This method is called after the database configured is saved, but before the
+    # database has any tables. Therefore, we only allow it to be run in that unique case.
+    if CONFIG_PROVIDER.yaml_exists() and not database_is_valid():
+      # Note: We need to reconfigure the database here as the config has changed.
+      combined = dict(**app.config)
+      combined.update(CONFIG_PROVIDER.get_yaml())
+
+      configure(combined)
+      app.config['DB_URI'] = combined['DB_URI']
+
+      log_handler = _AlembicLogHandler()
+
+      try:
+        run_alembic_migration(log_handler)
+      except Exception as ex:
+        return {
+          'error': str(ex)
+        }
+
+      return {
+        'logs': log_handler.records
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/shutdown')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserShutdown(ApiResource):
+  """ Resource for sending a shutdown signal to the container. """
+
+  @verify_not_prod
+  @nickname('scShutdownContainer')
+  def post(self):
+    """ Sends a signal to the phusion init system to shut down the container. """
+    # Note: This method is called to set the database configuration before super users exists,
+    # so we also allow it to be called if there is no valid registry configuration setup.
+    if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can():
+      # Note: We skip if debugging locally.
+      if app.config.get('DEBUGGING') == True:
+        return {}
+
+      os.kill(1, signal.SIGINT)
+      return {}
+
+    abort(403)
+
+
+@resource('/v1/superuser/config')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfig(ApiResource):
+  """ Resource for fetching and updating the current configuration, if any. """
+  schemas = {
+    'UpdateConfig': {
+      'id': 'UpdateConfig',
+      'type': 'object',
+      'description': 'Updates the YAML config file',
+      'required': [
+        'config',
+        'hostname'
+      ],
+      'properties': {
+        'config': {
+          'type': 'object'
+        },
+        'hostname': {
+          'type': 'string'
+        }
+      },
+    },
+  }
+
+  @require_fresh_login
+  @verify_not_prod
+  @nickname('scGetConfig')
+  def get(self):
+    """ Returns the currently defined configuration, if any. """
+    if SuperUserPermission().can():
+      config_object = CONFIG_PROVIDER.get_yaml()
+      return {
+        'config': config_object
+      }
+
+    abort(403)
+
+  @nickname('scUpdateConfig')
+  @verify_not_prod
+  @validate_json_request('UpdateConfig')
+  def put(self):
+    """ Updates the config.yaml file. """
+    # Note: This method is called to set the database configuration before super users exists,
+    # so we also allow it to be called if there is no valid registry configuration setup.
+    if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can():
+      config_object = request.get_json()['config']
+      hostname = request.get_json()['hostname']
+
+      # Add any enterprise defaults missing from the config.
+      add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname)
+
+      # Write the configuration changes to the YAML file.
+      CONFIG_PROVIDER.save_yaml(config_object)
+
+      return {
+        'exists': True,
+        'config': config_object
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/config/file/<filename>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfigFile(ApiResource):
+  """ Resource for fetching the status of config files and overriding them. """
+  @nickname('scConfigFileExists')
+  @verify_not_prod
+  def get(self, filename):
+    """ Returns whether the configuration file with the given name exists. """
+    if not filename in SSL_FILENAMES:
+      abort(404)
+
+    if SuperUserPermission().can():
+      return {
+        'exists': CONFIG_PROVIDER.volume_file_exists(filename)
+      }
+
+    abort(403)
+
+  @nickname('scUpdateConfigFile')
+  @verify_not_prod
+  def post(self, filename):
+    """ Updates the configuration file with the given name. """
+    if not filename in SSL_FILENAMES:
+      abort(404)
+
+    if SuperUserPermission().can():
+      uploaded_file = request.files['file']
+      if not uploaded_file:
+        abort(400)
+
+      CONFIG_PROVIDER.save_volume_file(filename, uploaded_file)
+      return {
+        'status': True
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/config/createsuperuser')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserCreateInitialSuperUser(ApiResource):
+  """ Resource for creating the initial super user. """
+  schemas = {
+    'CreateSuperUser': {
+      'id': 'CreateSuperUser',
+      'type': 'object',
+      'description': 'Information for creating the initial super user',
+      'required': [
+        'username',
+        'password',
+        'email'
+      ],
+      'properties': {
+        'username': {
+          'type': 'string',
+          'description': 'The username for the superuser'
+        },
+        'password': {
+          'type': 'string',
+          'description': 'The password for the superuser'
+        },
+        'email': {
+          'type': 'string',
+          'description': 'The e-mail address for the superuser'
+        },
+      },
+    },
+  }
+
+  @nickname('scCreateInitialSuperuser')
+  @verify_not_prod
+  @validate_json_request('CreateSuperUser')
+  def post(self):
+    """ Creates the initial super user, updates the underlying configuration and
+        sets the current session to have that super user. """
+
+    # Special security check: This method is only accessible when:
+    #   - There is a valid config YAML file.
+    #   - There are currently no users in the database (clean install)
+    #
+    # We do this special security check because at the point this method is called, the database
+    # is clean but does not (yet) have any super users for our permissions code to check against.
+    if CONFIG_PROVIDER.yaml_exists() and not database_has_users():
+      data = request.get_json()
+      username = data['username']
+      password = data['password']
+      email = data['email']
+
+      # Create the user in the database.
+      superuser = model.create_user(username, password, email, auto_verify=True)
+
+      # Add the user to the config.
+      config_object = CONFIG_PROVIDER.get_yaml()
+      config_object['SUPER_USERS'] = [username]
+      CONFIG_PROVIDER.save_yaml(config_object)
+
+      # Update the in-memory config for the new superuser.
+      superusers.register_superuser(username)
+
+      # Conduct login with that user.
+      common_login(superuser)
+
+      return {
+        'status': True
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/config/validate/<service>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfigValidate(ApiResource):
+  """ Resource for validating a block of configuration against an external service. """
+  schemas = {
+    'ValidateConfig': {
+      'id': 'ValidateConfig',
+      'type': 'object',
+      'description': 'Validates configuration',
+      'required': [
+        'config'
+      ],
+      'properties': {
+        'config': {
+          'type': 'object'
+        }
+      },
+    },
+  }
+
+  @nickname('scValidateConfig')
+  @verify_not_prod
+  @validate_json_request('ValidateConfig')
+  def post(self, service):
+    """ Validates the given config for the given service. """
+    # Note: This method is called to validate the database configuration before super users exists,
+    # so we also allow it to be called if there is no valid registry configuration setup. Note that
+    # this is also safe since this method does not access any information not given in the request.
+    if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can():
+      config = request.get_json()['config']
+      return validate_service_for_config(service, config)
+
+    abort(403)
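Note: the status values returned by SuperUserRegistryStatus drive the first-run setup flow. A rough sketch of a client walking those states (the base URL is a placeholder; the paths come from the file above):

import requests

base = 'http://localhost:8080/api'  # placeholder deployment URL

status = requests.get(base + '/v1/superuser/registrystatus').json()['status']
if status == 'missing-config-dir':
  raise Exception('mount the conf/stack volume first')
elif status == 'config-db':
  # POST the database configuration via /v1/superuser/config, then re-poll.
  pass
elif status == 'setup-db':
  # Triggers the alembic migration and returns its log records.
  print(requests.get(base + '/v1/superuser/setupdb').json()['logs'])
elif status == 'create-superuser':
  # POST username/password/email to /v1/superuser/config/createsuperuser.
  pass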
@@ -1,15 +1,16 @@
 import string
 import logging
 import json
 import os

 from random import SystemRandom
-from app import app
+from app import app, avatar, superusers
 from flask import request

 from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error,
                            log_action, internal_only, NotFound, require_user_admin, format_date,
                            InvalidToken, require_scope, format_date, hide_if, show_if, parse_args,
-                           query_param, abort, require_fresh_login, path_param)
+                           query_param, abort, require_fresh_login, path_param, verify_not_prod)

 from endpoints.api.logs import get_logs
@@ -22,18 +23,76 @@ import features

 logger = logging.getLogger(__name__)

+def get_immediate_subdirectories(directory):
+  return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]
+
+def get_services():
+  services = set(get_immediate_subdirectories(app.config['SYSTEM_SERVICES_PATH']))
+  services = services - set(app.config['SYSTEM_SERVICE_BLACKLIST'])
+  return services
+
+
+@resource('/v1/superuser/systemlogs/<service>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserGetLogsForService(ApiResource):
+  """ Resource for fetching the kinds of system logs in the system. """
+  @require_fresh_login
+  @verify_not_prod
+  @nickname('getSystemLogs')
+  def get(self, service):
+    """ Returns the logs for the specific service. """
+    if SuperUserPermission().can():
+      if not service in get_services():
+        abort(404)
+
+      try:
+        with open(app.config['SYSTEM_SERVICE_LOGS_PATH'] % service, 'r') as f:
+          logs = f.read()
+      except Exception as ex:
+        logger.exception('Cannot read logs')
+        abort(400)
+
+      return {
+        'logs': logs
+      }
+
+    abort(403)
+
+
+@resource('/v1/superuser/systemlogs/')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSystemLogServices(ApiResource):
+  """ Resource for fetching the kinds of system logs in the system. """
+  @require_fresh_login
+  @verify_not_prod
+  @nickname('listSystemLogServices')
+  def get(self):
+    """ List the system logs for the current system. """
+    if SuperUserPermission().can():
+      return {
+        'services': list(get_services())
+      }
+
+    abort(403)
+
+
 @resource('/v1/superuser/logs')
 @internal_only
 @show_if(features.SUPER_USERS)
 class SuperUserLogs(ApiResource):
   """ Resource for fetching all logs in the system. """
   @require_fresh_login
+  @verify_not_prod
   @nickname('listAllLogs')
   @parse_args
   @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
   @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
   @query_param('performer', 'Username for which to filter logs.', type=str)
   def get(self, args):
-    """ List the logs for the current system. """
+    """ List the usage logs for the current system. """
     if SuperUserPermission().can():
       performer_name = args['performer']
       start_time = args['starttime']
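Note: a hypothetical client for the two system-log endpoints above (the base URL and session authentication are placeholders; the decorators require a fresh super-user login):

import requests

base = 'http://localhost:8080/api'  # placeholder
session = requests.Session()        # assumed to carry a fresh super-user login

services = session.get(base + '/v1/superuser/systemlogs/').json()['services']
for service in services:
  logs = session.get(base + '/v1/superuser/systemlogs/%s' % service).json()['logs']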
@@ -49,7 +108,8 @@ def user_view(user):
     'username': user.username,
     'email': user.email,
     'verified': user.verified,
-    'super_user': user.username in app.config['SUPER_USERS']
+    'avatar': avatar.compute_hash(user.email, name=user.username),
+    'super_user': superusers.is_superuser(user.username)
   }

 @resource('/v1/superuser/usage/')
@@ -58,6 +118,7 @@ def user_view(user):
 class UsageInformation(ApiResource):
   """ Resource for returning the usage information for enterprise customers. """
   @require_fresh_login
+  @verify_not_prod
   @nickname('getSystemUsage')
   def get(self):
     """ Returns the number of repository handles currently held. """
@@ -96,6 +157,7 @@ class SuperUserList(ApiResource):
   }

   @require_fresh_login
+  @verify_not_prod
   @nickname('listAllUsers')
   def get(self):
     """ Returns a list of all users in the system. """
@@ -109,6 +171,7 @@ class SuperUserList(ApiResource):


   @require_fresh_login
+  @verify_not_prod
   @nickname('createInstallUser')
   @validate_json_request('CreateInstallUser')
   def post(self):
@@ -146,6 +209,7 @@ class SuperUserList(ApiResource):
 class SuperUserSendRecoveryEmail(ApiResource):
   """ Resource for sending a recovery user on behalf of a user. """
   @require_fresh_login
+  @verify_not_prod
   @nickname('sendInstallUserRecoveryEmail')
   def post(self, username):
     if SuperUserPermission().can():
@@ -153,7 +217,7 @@ class SuperUserSendRecoveryEmail(ApiResource):
       if not user or user.organization or user.robot:
         abort(404)

-      if username in app.config['SUPER_USERS']:
+      if superusers.is_superuser(username):
         abort(403)

       code = model.create_reset_password_email_code(user.email)
@@ -190,6 +254,7 @@ class SuperUserManagement(ApiResource):
   }

   @require_fresh_login
+  @verify_not_prod
   @nickname('getInstallUser')
   def get(self, username):
     """ Returns information about the specified user. """
@@ -203,6 +268,7 @@ class SuperUserManagement(ApiResource):
     abort(403)

   @require_fresh_login
+  @verify_not_prod
   @nickname('deleteInstallUser')
   def delete(self, username):
     """ Deletes the specified user. """
@@ -211,7 +277,7 @@ class SuperUserManagement(ApiResource):
       if not user or user.organization or user.robot:
         abort(404)

-      if username in app.config['SUPER_USERS']:
+      if superusers.is_superuser(username):
         abort(403)

       model.delete_user(user)
@@ -220,6 +286,7 @@ class SuperUserManagement(ApiResource):
     abort(403)

   @require_fresh_login
+  @verify_not_prod
   @nickname('changeInstallUser')
   @validate_json_request('UpdateUser')
   def put(self, username):
@@ -229,7 +296,7 @@ class SuperUserManagement(ApiResource):
       if not user or user.organization or user.robot:
         abort(404)

-      if username in app.config['SUPER_USERS']:
+      if superusers.is_superuser(username):
         abort(403)

       user_data = request.get_json()
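Note: replacing the repeated "username in app.config['SUPER_USERS']" checks with superusers.is_superuser(username) centralizes the lookup, which is what lets SuperUserCreateInitialSuperUser register a new super user at runtime via superusers.register_superuser(). The superusers object itself is defined in app.py and is not shown in this diff; a minimal sketch of what such a registry might look like:

class SuperUserManager(object):
  # Illustrative only; the real implementation is not part of this diff.
  def __init__(self, config):
    self._usernames = set(config.get('SUPER_USERS', []))

  def is_superuser(self, username):
    return username in self._usernames

  def register_superuser(self, username):
    # Allows the setup flow to grant the initial super user without a restart.
    self._usernames.add(username)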
@@ -246,7 +246,7 @@ class User(ApiResource):
           # Username already used
           raise request_error(message='Username is already in use')

-        model.change_username(user, new_username)
+        model.change_username(user.id, new_username)

       except model.InvalidPasswordException, ex:
         raise request_error(exception=ex)
@@ -3,6 +3,7 @@ import urlparse
 import json
 import string
 import datetime
+import os

 # Register the various exceptions via decorators.
 import endpoints.decorated
@@ -28,10 +29,26 @@ from endpoints.notificationhelper import spawn_notification
 import features

 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

 route_data = None

+CACHE_BUSTERS_JSON = 'static/dist/cachebusters.json'
+CACHE_BUSTERS = None
+
+def get_cache_busters():
+  """ Retrieves the cache busters hashes. """
+  global CACHE_BUSTERS
+  if CACHE_BUSTERS is not None:
+    return CACHE_BUSTERS
+
+  if not os.path.exists(CACHE_BUSTERS_JSON):
+    return {}
+
+  with open(CACHE_BUSTERS_JSON, 'r') as f:
+    CACHE_BUSTERS = json.loads(f.read())
+    return CACHE_BUSTERS
+
+
 class RepoPathConverter(BaseConverter):
   regex = '[\.a-zA-Z0-9_\-]+/[\.a-zA-Z0-9_\-]+'
   weight = 200
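Note: get_cache_busters() reads a static JSON map of asset filename to content hash; such a file is normally emitted by the frontend build. The generator is not part of this commit; a hedged sketch of one:

import hashlib
import json
import os

def write_cache_busters(filenames, out_path='static/dist/cachebusters.json'):
  # Hypothetical build step: hash each bundled asset so templates can append
  # a version token that changes only when the file content changes.
  busters = {}
  for filename in filenames:
    with open(os.path.join('static', filename), 'rb') as f:
      busters[filename] = hashlib.md5(f.read()).hexdigest()[:12]
  with open(out_path, 'w') as f:
    json.dump(busters, f)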
@@ -113,17 +130,15 @@ def list_files(path, extension):
   filepath = 'static/' + path
   return [join_path(dp, f) for dp, dn, files in os.walk(filepath) for f in files if matches(f)]

-SAVED_CACHE_STRING = random_string()
-
 def render_page_template(name, **kwargs):
-  if app.config.get('DEBUGGING', False):
+  debugging = app.config.get('DEBUGGING', False)
+  if debugging:
     # If DEBUGGING is enabled, then we load the full set of individual JS and CSS files
     # from the file system.
     library_styles = list_files('lib', 'css')
     main_styles = list_files('css', 'css')
     library_scripts = list_files('lib', 'js')
     main_scripts = list_files('js', 'js')
-    cache_buster = 'debugging'

     file_lists = [library_styles, main_styles, library_scripts, main_scripts]
     for file_list in file_lists:
@@ -133,7 +148,6 @@ def render_page_template(name, **kwargs):
     main_styles = ['dist/quay-frontend.css']
     library_scripts = []
     main_scripts = ['dist/quay-frontend.min.js']
-    cache_buster = SAVED_CACHE_STRING

   use_cdn = app.config.get('USE_CDN', True)
   if request.args.get('use_cdn') is not None:
@@ -142,6 +156,12 @@ def render_page_template(name, **kwargs):
   external_styles = get_external_css(local=not use_cdn)
   external_scripts = get_external_javascript(local=not use_cdn)

+  def add_cachebusters(filenames):
+    cachebusters = get_cache_busters()
+    for filename in filenames:
+      cache_buster = cachebusters.get(filename, random_string()) if not debugging else 'debugging'
+      yield (filename, cache_buster)
+
   def get_oauth_config():
     oauth_config = {}
     for oauth_app in oauth_apps:
@@ -153,13 +173,14 @@ def render_page_template(name, **kwargs):
   if len(app.config.get('CONTACT_INFO', [])) == 1:
     contact_href = app.config['CONTACT_INFO'][0]

-  resp = make_response(render_template(name, route_data=json.dumps(get_route_data()),
+  resp = make_response(render_template(name,
+                                       route_data=json.dumps(get_route_data()),
                                        external_styles=external_styles,
                                        external_scripts=external_scripts,
-                                       main_styles=main_styles,
-                                       library_styles=library_styles,
-                                       main_scripts=main_scripts,
-                                       library_scripts=library_scripts,
+                                       main_styles=add_cachebusters(main_styles),
+                                       library_styles=add_cachebusters(library_styles),
+                                       main_scripts=add_cachebusters(main_scripts),
+                                       library_scripts=add_cachebusters(library_scripts),
                                        feature_set=json.dumps(features.get_features()),
                                        config_set=json.dumps(getFrontendVisibleConfig(app.config)),
                                        oauth_set=json.dumps(get_oauth_config()),
@@ -169,9 +190,10 @@ def render_page_template(name, **kwargs):
                                        sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
                                        is_debug=str(app.config.get('DEBUGGING', False)).lower(),
                                        show_chat=features.OLARK_CHAT,
-                                       cache_buster=cache_buster,
                                        has_billing=features.BILLING,
                                        contact_href=contact_href,
+                                       hostname=app.config['SERVER_HOSTNAME'],
+                                       preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
                                        **kwargs))

   resp.headers['X-FRAME-OPTIONS'] = 'DENY'
@@ -208,10 +230,17 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
                                            dockerfile_id, build_name,
                                            trigger, pull_robot_name=pull_robot_name)

-  dockerfile_build_queue.put([repository.namespace_user.username, repository.name], json.dumps({
+  json_data = json.dumps({
     'build_uuid': build_request.uuid,
     'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
-  }), retries_remaining=1)
+  })
+
+  queue_item = dockerfile_build_queue.put([repository.namespace_user.username, repository.name],
+                                          json_data,
+                                          retries_remaining=3)
+
+  build_request.queue_item = queue_item
+  build_request.save()

   # Add the build to the repo's log.
   metadata = {
@@ -230,7 +259,7 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
                   metadata=metadata, repository=repository)

   # Add notifications for the build queue.
-  profile.debug('Adding notifications for repository')
+  logger.debug('Adding notifications for repository')
   event_data = {
     'build_id': build_request.uuid,
     'build_name': build_name,
@@ -19,19 +19,21 @@ def generate_csrf_token():

   return session['_csrf_token']

+def verify_csrf():
+  token = session.get('_csrf_token', None)
+  found_token = request.values.get('_csrf_token', None)
+
+  if not token or token != found_token:
+    msg = 'CSRF Failure. Session token was %s and request token was %s'
+    logger.error(msg, token, found_token)
+    abort(403, message='CSRF token was invalid or missing.')
+
 def csrf_protect(func):
   @wraps(func)
   def wrapper(*args, **kwargs):
     oauth_token = get_validated_oauth_token()
     if oauth_token is None and request.method != "GET" and request.method != "HEAD":
-      token = session.get('_csrf_token', None)
-      found_token = request.values.get('_csrf_token', None)
-
-      if not token or token != found_token:
-        msg = 'CSRF Failure. Session token was %s and request token was %s'
-        logger.error(msg, token, found_token)
-        abort(403, message='CSRF token was invalid or missing.')
+      verify_csrf()

     return func(*args, **kwargs)
   return wrapper
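Note: extracting verify_csrf() from the csrf_protect decorator lets endpoints that cannot use the decorator run the same session-vs-request token check inline. A hypothetical standalone use:

@app.route('/some/form/handler', methods=['POST'])  # illustrative route
def handle_form_post():
  verify_csrf()  # aborts with 403 if the tokens are missing or differ
  return 'ok'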
@@ -23,7 +23,6 @@ from endpoints.notificationhelper import spawn_notification
 import features

 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

 index = Blueprint('index', __name__)
@@ -120,7 +119,7 @@ def create_user():

   else:
     # New user case
-    profile.debug('Creating user')
+    logger.debug('Creating user')
     new_user = None

     try:
@@ -128,10 +127,10 @@ def create_user():
     except model.TooManyUsersException as ex:
       abort(402, 'Seat limit has been reached for this license', issue='seat-limit')

-    profile.debug('Creating email code for user')
+    logger.debug('Creating email code for user')
     code = model.create_confirm_email_code(new_user)

-    profile.debug('Sending email code to user')
+    logger.debug('Sending email code to user')
    send_confirmation_email(new_user.username, new_user.email, code.code)

     return make_response('Created', 201)
@@ -168,12 +167,12 @@ def update_user(username):
     update_request = request.get_json()

     if 'password' in update_request:
-      profile.debug('Updating user password')
+      logger.debug('Updating user password')
       model.change_password(get_authenticated_user(),
                             update_request['password'])

     if 'email' in update_request:
-      profile.debug('Updating user email')
+      logger.debug('Updating user email')
       model.update_email(get_authenticated_user(), update_request['email'])

     return jsonify({
@@ -189,13 +188,13 @@ def update_user(username):
 @parse_repository_name
 @generate_headers(role='write')
 def create_repository(namespace, repository):
-  profile.debug('Parsing image descriptions')
+  logger.debug('Parsing image descriptions')
   image_descriptions = json.loads(request.data.decode('utf8'))

-  profile.debug('Looking up repository')
+  logger.debug('Looking up repository')
   repo = model.get_repository(namespace, repository)

-  profile.debug('Repository looked up')
+  logger.debug('Repository looked up')
   if not repo and get_authenticated_user() is None:
     logger.debug('Attempt to create new repository without user auth.')
     abort(401,
@@ -219,11 +218,11 @@ def create_repository(namespace, repository):
           issue='no-create-permission',
           namespace=namespace)

-  profile.debug('Creaing repository with owner: %s', get_authenticated_user().username)
+  logger.debug('Creaing repository with owner: %s', get_authenticated_user().username)
   repo = model.create_repository(namespace, repository,
                                  get_authenticated_user())

-  profile.debug('Determining already added images')
+  logger.debug('Determining already added images')
   added_images = OrderedDict([(desc['id'], desc) for desc in image_descriptions])
   new_repo_images = dict(added_images)
@@ -239,7 +238,7 @@ def create_repository(namespace, repository):
   for existing in existing_images:
     added_images.pop(existing.docker_image_id)

-  profile.debug('Creating/Linking necessary images')
+  logger.debug('Creating/Linking necessary images')
   username = get_authenticated_user() and get_authenticated_user().username
   translations = {}
   for image_description in added_images.values():
@@ -247,7 +246,7 @@ def create_repository(namespace, repository):
                            translations, storage.preferred_locations[0])

-  profile.debug('Created images')
+  logger.debug('Created images')
   track_and_log('push_repo', repo)
   return make_response('Created', 201)
@@ -260,14 +259,14 @@ def update_images(namespace, repository):
   permission = ModifyRepositoryPermission(namespace, repository)

   if permission.can():
-    profile.debug('Looking up repository')
+    logger.debug('Looking up repository')
     repo = model.get_repository(namespace, repository)
     if not repo:
       # Make sure the repo actually exists.
       abort(404, message='Unknown repository', issue='unknown-repo')

     if get_authenticated_user():
-      profile.debug('Publishing push event')
+      logger.debug('Publishing push event')
       username = get_authenticated_user().username

       # Mark that the user has pushed the repo.
@@ -280,11 +279,11 @@ def update_images(namespace, repository):
       event = userevents.get_event(username)
       event.publish_event_data('docker-cli', user_data)

-    profile.debug('GCing repository')
+    logger.debug('GCing repository')
     num_removed = model.garbage_collect_repository(namespace, repository)

     # Generate a job for each notification that has been added to this repo
-    profile.debug('Adding notifications for repository')
+    logger.debug('Adding notifications for repository')

     updated_tags = session.get('pushed_tags', {})
     event_data = {
@@ -307,13 +306,13 @@ def get_repository_images(namespace, repository):
   # TODO invalidate token?
   if permission.can() or model.repository_is_public(namespace, repository):
     # We can't rely on permissions to tell us if a repo exists anymore
-    profile.debug('Looking up repository')
+    logger.debug('Looking up repository')
     repo = model.get_repository(namespace, repository)
     if not repo:
       abort(404, message='Unknown repository', issue='unknown-repo')

     all_images = []
-    profile.debug('Retrieving repository images')
+    logger.debug('Retrieving repository images')
     for image in model.get_repository_images(namespace, repository):
       new_image_view = {
         'id': image.docker_image_id,
@@ -321,7 +320,7 @@ def get_repository_images(namespace, repository):
       }
       all_images.append(new_image_view)

-    profile.debug('Building repository image response')
+    logger.debug('Building repository image response')
     resp = make_response(json.dumps(all_images), 200)
     resp.mimetype = 'application/json'
|
@ -20,7 +20,6 @@ from util import gzipstream
|
|||
registry = Blueprint('registry', __name__)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
profile = logging.getLogger('application.profiler')
|
||||
|
||||
class SocketReader(object):
|
||||
def __init__(self, fp):
|
||||
|
@@ -100,12 +99,12 @@ def set_cache_headers(f):
 def head_image_layer(namespace, repository, image_id, headers):
   permission = ReadRepositoryPermission(namespace, repository)

-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   if permission.can() or model.repository_is_public(namespace, repository):
-    profile.debug('Looking up repo image')
+    logger.debug('Looking up repo image')
     repo_image = model.get_repo_image_extended(namespace, repository, image_id)
     if not repo_image:
-      profile.debug('Image not found')
+      logger.debug('Image not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)
@@ -114,7 +113,7 @@ def head_image_layer(namespace, repository, image_id, headers):
     # Add the Accept-Ranges header if the storage engine supports resumable
     # downloads.
     if store.get_supports_resumable_downloads(repo_image.storage.locations):
-      profile.debug('Storage supports resumable downloads')
+      logger.debug('Storage supports resumable downloads')
       extra_headers['Accept-Ranges'] = 'bytes'

     resp = make_response('')
@@ -133,31 +132,35 @@ def get_image_layer(namespace, repository, image_id, headers):
 def get_image_layer(namespace, repository, image_id, headers):
   permission = ReadRepositoryPermission(namespace, repository)

-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   if permission.can() or model.repository_is_public(namespace, repository):
-    profile.debug('Looking up repo image')
+    logger.debug('Looking up repo image')
     repo_image = model.get_repo_image_extended(namespace, repository, image_id)
     if not repo_image:
       logger.debug('Image not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)

-    profile.debug('Looking up the layer path')
+    logger.debug('Looking up the layer path')
     try:
       path = store.image_layer_path(repo_image.storage.uuid)

-      profile.debug('Looking up the direct download URL')
+      logger.debug('Looking up the direct download URL')
       direct_download_url = store.get_direct_download_url(repo_image.storage.locations, path)

       if direct_download_url:
-        profile.debug('Returning direct download URL')
+        logger.debug('Returning direct download URL')
         resp = redirect(direct_download_url)
         return resp

-      profile.debug('Streaming layer data')
+      logger.debug('Streaming layer data')
+
+      # Close the database handle here for this process before we send the long download.
+      database.close_db_filter(None)
+
       return Response(store.stream_read(repo_image.storage.locations, path), headers=headers)
     except (IOError, AttributeError):
-      profile.debug('Image not found')
+      logger.exception('Image layer data not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)
@@ -168,29 +171,30 @@ def get_image_layer(namespace, repository, image_id, headers):
 @process_auth
 @extract_namespace_repo_from_session
 def put_image_layer(namespace, repository, image_id):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ModifyRepositoryPermission(namespace, repository)
   if not permission.can():
     abort(403)

-  profile.debug('Retrieving image')
+  logger.debug('Retrieving image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   try:
-    profile.debug('Retrieving image data')
+    logger.debug('Retrieving image data')
     uuid = repo_image.storage.uuid
     json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
   except (IOError, AttributeError):
+    logger.exception('Exception when retrieving image data')
     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
           image_id=image_id)

-  profile.debug('Retrieving image path info')
+  logger.debug('Retrieving image path info')
   layer_path = store.image_layer_path(uuid)

   if (store.exists(repo_image.storage.locations, layer_path) and not
       image_is_uploading(repo_image)):
     exact_abort(409, 'Image already exists')

-  profile.debug('Storing layer data')
+  logger.debug('Storing layer data')

   input_stream = request.stream
   if request.headers.get('transfer-encoding') == 'chunked':
@@ -257,7 +261,7 @@ def put_image_layer(namespace, repository, image_id):

   # The layer is ready for download, send a job to the work queue to
   # process it.
-  profile.debug('Adding layer to diff queue')
+  logger.debug('Adding layer to diff queue')
   repo = model.get_repository(namespace, repository)
   image_diff_queue.put([repo.namespace_user.username, repository, image_id], json.dumps({
     'namespace_user_id': repo.namespace_user.id,
@@ -272,7 +276,7 @@ def put_image_layer(namespace, repository, image_id):
 @process_auth
 @extract_namespace_repo_from_session
 def put_image_checksum(namespace, repository, image_id):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ModifyRepositoryPermission(namespace, repository)
   if not permission.can():
     abort(403)
@@ -298,23 +302,23 @@ def put_image_checksum(namespace, repository, image_id):
     abort(400, 'Checksum not found in Cookie for image %(image_id)s',
           issue='missing-checksum-cookie', image_id=image_id)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   if not repo_image or not repo_image.storage:
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

   uuid = repo_image.storage.uuid

-  profile.debug('Looking up repo layer data')
+  logger.debug('Looking up repo layer data')
   if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)):
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

-  profile.debug('Marking image path')
+  logger.debug('Marking image path')
   if not image_is_uploading(repo_image):
     abort(409, 'Cannot set checksum for image %(image_id)s',
           issue='image-write-error', image_id=image_id)

-  profile.debug('Storing image checksum')
+  logger.debug('Storing image checksum')
   err = store_checksum(repo_image.storage, checksum)
   if err:
     abort(400, err)
@@ -331,7 +335,7 @@ def put_image_checksum(namespace, repository, image_id):

   # The layer is ready for download, send a job to the work queue to
   # process it.
-  profile.debug('Adding layer to diff queue')
+  logger.debug('Adding layer to diff queue')
   repo = model.get_repository(namespace, repository)
   image_diff_queue.put([repo.namespace_user.username, repository, image_id], json.dumps({
     'namespace_user_id': repo.namespace_user.id,
@@ -348,23 +352,23 @@ def put_image_checksum(namespace, repository, image_id):
 @require_completion
 @set_cache_headers
 def get_image_json(namespace, repository, image_id, headers):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ReadRepositoryPermission(namespace, repository)
   if not permission.can() and not model.repository_is_public(namespace,
                                                              repository):
     abort(403)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)

-  profile.debug('Looking up repo layer data')
+  logger.debug('Looking up repo layer data')
   try:
     uuid = repo_image.storage.uuid
     data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
   except (IOError, AttributeError):
     flask_abort(404)

-  profile.debug('Looking up repo layer size')
+  logger.debug('Looking up repo layer size')
   size = repo_image.storage.image_size
   headers['X-Docker-Size'] = str(size)
@@ -379,16 +383,16 @@ def get_image_json(namespace, repository, image_id, headers):
 @require_completion
 @set_cache_headers
 def get_image_ancestry(namespace, repository, image_id, headers):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ReadRepositoryPermission(namespace, repository)
   if not permission.can() and not model.repository_is_public(namespace,
                                                              repository):
     abort(403)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)

-  profile.debug('Looking up image data')
+  logger.debug('Looking up image data')
   try:
     uuid = repo_image.storage.uuid
     data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
@@ -396,11 +400,11 @@ def get_image_ancestry(namespace, repository, image_id, headers):
     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
           image_id=image_id)

-  profile.debug('Converting to <-> from JSON')
+  logger.debug('Converting to <-> from JSON')
   response = make_response(json.dumps(json.loads(data)), 200)
   response.headers.extend(headers)

-  profile.debug('Done')
+  logger.debug('Done')
   return response
@@ -430,12 +434,12 @@ def store_checksum(image_storage, checksum):
 @process_auth
 @extract_namespace_repo_from_session
 def put_image_json(namespace, repository, image_id):
-  profile.debug('Checking repo permissions')
+  logger.debug('Checking repo permissions')
   permission = ModifyRepositoryPermission(namespace, repository)
   if not permission.can():
     abort(403)

-  profile.debug('Parsing image JSON')
+  logger.debug('Parsing image JSON')
   try:
     data = json.loads(request.data.decode('utf8'))
   except ValueError:
@@ -449,10 +453,10 @@ def put_image_json(namespace, repository, image_id):
     abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
           issue='invalid-request', image_id=image_id)

-  profile.debug('Looking up repo image')
+  logger.debug('Looking up repo image')
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   if not repo_image:
-    profile.debug('Image not found')
+    logger.debug('Image not found')
     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
           image_id=image_id)
@@ -466,24 +470,24 @@ def put_image_json(namespace, repository, image_id):

   parent_image = None
   if parent_id:
-    profile.debug('Looking up parent image')
+    logger.debug('Looking up parent image')
     parent_image = model.get_repo_image_extended(namespace, repository, parent_id)

   parent_uuid = parent_image and parent_image.storage.uuid
   parent_locations = parent_image and parent_image.storage.locations

   if parent_id:
-    profile.debug('Looking up parent image data')
+    logger.debug('Looking up parent image data')

   if (parent_id and not
       store.exists(parent_locations, store.image_json_path(parent_uuid))):
     abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
           issue='invalid-request', image_id=image_id, parent_id=parent_id)

-  profile.debug('Looking up image storage paths')
+  logger.debug('Looking up image storage paths')
   json_path = store.image_json_path(uuid)

-  profile.debug('Checking if image already exists')
+  logger.debug('Checking if image already exists')
   if (store.exists(repo_image.storage.locations, json_path) and not
       image_is_uploading(repo_image)):
     exact_abort(409, 'Image already exists')
@@ -496,24 +500,24 @@ def put_image_json(namespace, repository, image_id):
   command_list = data.get('container_config', {}).get('Cmd', None)
   command = json.dumps(command_list) if command_list else None

-  profile.debug('Setting image metadata')
+  logger.debug('Setting image metadata')
   model.set_image_metadata(image_id, namespace, repository,
                            data.get('created'), data.get('comment'), command,
                            parent_image)

-  profile.debug('Putting json path')
+  logger.debug('Putting json path')
   store.put_content(repo_image.storage.locations, json_path, request.data)

-  profile.debug('Generating image ancestry')
+  logger.debug('Generating image ancestry')

   try:
     generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid,
                       parent_locations)
   except IOError as ioe:
-    profile.debug('Error when generating ancestry: %s' % ioe.message)
+    logger.debug('Error when generating ancestry: %s' % ioe.message)
     abort(404)

-  profile.debug('Done')
+  logger.debug('Done')
   return make_response('true', 200)
@@ -6,7 +6,6 @@ from flask import request
 from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token

 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

 def track_and_log(event_name, repo, **kwargs):
   repository = repo.name
@@ -23,7 +22,7 @@ def track_and_log(event_name, repo, **kwargs):
   authenticated_user = get_authenticated_user()
   authenticated_token = get_validated_token() if not authenticated_user else None

-  profile.debug('Logging the %s to Mixpanel and the log system', event_name)
+  logger.debug('Logging the %s to Mixpanel and the log system', event_name)
   if authenticated_oauth_token:
     metadata['oauth_token_id'] = authenticated_oauth_token.id
     metadata['oauth_token_application_id'] = authenticated_oauth_token.application.client_id
@@ -45,9 +44,9 @@ def track_and_log(event_name, repo, **kwargs):
   }

   # Publish the user event (if applicable)
-  profile.debug('Checking publishing %s to the user events system', event_name)
+  logger.debug('Checking publishing %s to the user events system', event_name)
   if authenticated_user:
-    profile.debug('Publishing %s to the user events system', event_name)
+    logger.debug('Publishing %s to the user events system', event_name)
     user_event_data = {
       'action': event_name,
       'repository': repository,
@@ -58,14 +57,14 @@ def track_and_log(event_name, repo, **kwargs):
     event.publish_event_data('docker-cli', user_event_data)

   # Save the action to mixpanel.
-  profile.debug('Logging the %s to Mixpanel', event_name)
+  logger.debug('Logging the %s to Mixpanel', event_name)
   analytics.track(analytics_id, event_name, extra_params)

   # Log the action to the database.
-  profile.debug('Logging the %s to logs system', event_name)
+  logger.debug('Logging the %s to logs system', event_name)
   model.log_action(event_name, namespace,
                    performer=authenticated_user,
                    ip=request.remote_addr, metadata=metadata,
                    repository=repo)

-  profile.debug('Track and log of %s complete', event_name)
+  logger.debug('Track and log of %s complete', event_name)
@@ -2,11 +2,10 @@ import logging
 import json
 import hashlib

-from flask import redirect, Blueprint, abort, send_file, request
+from flask import redirect, Blueprint, abort, send_file, make_response

-from app import app
+from app import app, signer
 from auth.auth import process_auth
 from auth.auth_context import get_authenticated_user
 from auth.permissions import ReadRepositoryPermission
 from data import model
 from data import database
@@ -15,13 +14,16 @@ from storage import Storage

 from util.queuefile import QueueFile
 from util.queueprocess import QueueProcess
 from util.gzipwrap import GzipWrap
-from util.dockerloadformat import build_docker_load_stream
+from formats.squashed import SquashedDockerImage
+from formats.aci import ACIImage


+# pylint: disable=invalid-name
 verbs = Blueprint('verbs', __name__)
 logger = logging.getLogger(__name__)

-def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, image_id_list):
+def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json,
+                 image_id_list):
   store = Storage(app)

   # For performance reasons, we load the full image list here, cache it, then disconnect from
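Note: _open_stream now receives a formatter instead of calling build_docker_load_stream directly, so the same layer-streaming plumbing can emit either a squashed Docker image or an ACI. The call sites imply a small interface for formatters (the base class lives under formats/ and is not shown in this diff); a sketch of that implied contract:

class TarImageFormatter(object):
  # Implied by the call sites above; SquashedDockerImage and ACIImage
  # would each provide their own build_stream.
  def build_stream(self, namespace, repository, tag, synthetic_image_id,
                   layer_json, get_image_iterator, get_layer_iterator):
    raise NotImplementedError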
@@ -42,20 +44,43 @@ def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, ima
                                          current_image_path)

     current_image_id = current_image_entry.id
-    logger.debug('Returning image layer %s: %s' % (current_image_id, current_image_path))
+    logger.debug('Returning image layer %s: %s', current_image_id, current_image_path)
     yield current_image_stream

-  stream = build_docker_load_stream(namespace, repository, tag, synthetic_image_id, image_json,
-                                    get_next_image, get_next_layer)
+  stream = formatter.build_stream(namespace, repository, tag, synthetic_image_id, image_json,
+                                  get_next_image, get_next_layer)

   return stream.read


-def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, queue_file):
+def _sign_sythentic_image(verb, linked_storage_uuid, queue_file):
+  signature = None
+  try:
+    signature = signer.detached_sign(queue_file)
+  except:
+    logger.exception('Exception when signing %s image %s', verb, linked_storage_uuid)
+    return
+
+  # Setup the database (since this is a new process) and then disconnect immediately
+  # once the operation completes.
+  if not queue_file.raised_exception:
+    with database.UseThenDisconnect(app.config):
+      try:
+        derived = model.get_storage_by_uuid(linked_storage_uuid)
+      except model.InvalidImageException:
+        return
+
+      signature_entry = model.find_or_create_storage_signature(derived, signer.name)
+      signature_entry.signature = signature
+      signature_entry.uploading = False
+      signature_entry.save()
+
+
+def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_locations, queue_file):
   store = Storage(app)

   def handle_exception(ex):
-    logger.debug('Exception when building squashed image %s: %s', linked_storage_uuid, ex)
+    logger.debug('Exception when building %s image %s: %s', verb, linked_storage_uuid, ex)

     with database.UseThenDisconnect(app.config):
       model.delete_derived_storage_by_uuid(linked_storage_uuid)
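The new formatter argument threaded through _open_stream implies a shared interface for SquashedDockerImage and ACIImage. A minimal sketch of what that interface appears to look like, inferred only from the formatter.build_stream(...) call site above — the class name and docstring here are assumptions, not code from this commit:

# Assumed shape of the formatter interface; inferred from the call site, not from formats/.
class ImageFormatter(object):
  """Base for formatters that synthesize a layer stream for a verb (squash, aci, ...)."""

  def build_stream(self, namespace, repository, tag, synthetic_image_id, layer_json,
                   get_image_iterator, get_layer_iterator):
    # Implementations return an object whose read() yields the synthesized
    # image data, built lazily from the image and layer iterators.
    raise NotImplementedError('formatter subclasses must implement build_stream')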
@@ -67,86 +92,193 @@ def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, que
   queue_file.close()

   if not queue_file.raised_exception:
     # Setup the database (since this is a new process) and then disconnect immediately
     # once the operation completes.
     with database.UseThenDisconnect(app.config):
       done_uploading = model.get_storage_by_uuid(linked_storage_uuid)
       done_uploading.uploading = False
       done_uploading.save()


-@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
-@process_auth
-def get_squashed_tag(namespace, repository, tag):
-  permission = ReadRepositoryPermission(namespace, repository)
-  if permission.can() or model.repository_is_public(namespace, repository):
-    # Lookup the requested tag.
-    try:
-      tag_image = model.get_tag_image(namespace, repository, tag)
-    except model.DataModelException:
-      abort(404)
-
-    # Lookup the tag's image and storage.
-    repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
-    if not repo_image:
-      abort(404)
-
-    # Log the action.
-    track_and_log('repo_verb', repo_image.repository, tag=tag, verb='squash')
-
-    store = Storage(app)
-    derived = model.find_or_create_derived_storage(repo_image.storage, 'squash',
-                                                   store.preferred_locations[0])
-    if not derived.uploading:
-      logger.debug('Derived image %s exists in storage', derived.uuid)
-      derived_layer_path = store.image_layer_path(derived.uuid)
-      download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
-      if download_url:
-        logger.debug('Redirecting to download URL for derived image %s', derived.uuid)
-        return redirect(download_url)
-
-      # Close the database handle here for this process before we send the long download.
-      database.close_db_filter(None)
-
-      logger.debug('Sending cached derived image %s', derived.uuid)
-      return send_file(store.stream_read_file(derived.locations, derived_layer_path))
-
-    # Load the ancestry for the image.
-    logger.debug('Building and returning derived image %s', derived.uuid)
-    uuid = repo_image.storage.uuid
-    ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
-    full_image_list = json.loads(ancestry_data)
-
-    # Load the image's JSON layer.
-    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
-    image_json = json.loads(image_json_data)
-
-    # Calculate a synthetic image ID.
-    synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':squash').hexdigest()
-
-    # Create a queue process to generate the data. The queue files will read from the process
-    # and send the results to the client and storage.
-    def _cleanup():
-      # Close any existing DB connection once the process has exited.
-      database.close_db_filter(None)
-
-    args = (namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
-    queue_process = QueueProcess(_open_stream,
-                                 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
-                                 args, finished=_cleanup)
-
-    client_queue_file = QueueFile(queue_process.create_queue(), 'client')
-    storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
-
-    # Start building.
-    queue_process.run()
-
-    # Start the storage saving.
-    storage_args = (derived.uuid, derived.locations, storage_queue_file)
-    QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
-
-    # Close the database handle here for this process before we send the long download.
-    database.close_db_filter(None)
-
-    # Return the client's data.
-    return send_file(client_queue_file)
-
-  abort(403)
+# pylint: disable=too-many-locals
+def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
+  permission = ReadRepositoryPermission(namespace, repository)
+
+  # pylint: disable=no-member
+  if not permission.can() and not model.repository_is_public(namespace, repository):
+    abort(403)
+
+  # Lookup the requested tag.
+  try:
+    tag_image = model.get_tag_image(namespace, repository, tag)
+  except model.DataModelException:
+    abort(404)
+
+  # Lookup the tag's image and storage.
+  repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
+  if not repo_image:
+    abort(404)
+
+  # If there is a data checker, call it first.
+  uuid = repo_image.storage.uuid
+  image_json = None
+
+  if checker is not None:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)
+
+    if not checker(image_json):
+      logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
+      abort(404)
+
+  return (repo_image, tag_image, image_json)
+
+
+# pylint: disable=too-many-locals
+def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Lookup the derived image storage for the verb.
+  derived = model.find_derived_storage(repo_image.storage, verb)
+  if derived is None or derived.uploading:
+    abort(404)
+
+  # Check if we have a valid signer configured.
+  if not signer.name:
+    abort(404)
+
+  # Lookup the signature for the verb.
+  signature_entry = model.lookup_storage_signature(derived, signer.name)
+  if signature_entry is None:
+    abort(404)
+
+  # Return the signature.
+  return make_response(signature_entry.signature)
+
+
+# pylint: disable=too-many-locals
+def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Log the action.
+  track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)
+
+  # Lookup/create the derived image storage for the verb.
+  derived = model.find_or_create_derived_storage(repo_image.storage, verb,
+                                                 store.preferred_locations[0])
+
+  if not derived.uploading:
+    logger.debug('Derived %s image %s exists in storage', verb, derived.uuid)
+    derived_layer_path = store.image_layer_path(derived.uuid)
+    download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
+    if download_url:
+      logger.debug('Redirecting to download URL for derived %s image %s', verb, derived.uuid)
+      return redirect(download_url)
+
+    # Close the database handle here for this process before we send the long download.
+    database.close_db_filter(None)
+
+    logger.debug('Sending cached derived %s image %s', verb, derived.uuid)
+    return send_file(store.stream_read_file(derived.locations, derived_layer_path))
+
+  # Load the ancestry for the image.
+  uuid = repo_image.storage.uuid
+
+  logger.debug('Building and returning derived %s image %s', verb, derived.uuid)
+  ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
+  full_image_list = json.loads(ancestry_data)
+
+  # Load the image's JSON layer.
+  if not image_json:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)
+
+  # Calculate a synthetic image ID.
+  synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest()
+
+  def _cleanup():
+    # Close any existing DB connection once the process has exited.
+    database.close_db_filter(None)
+
+  # Create a queue process to generate the data. The queue files will read from the process
+  # and send the results to the client and storage.
+  args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
+  queue_process = QueueProcess(_open_stream,
+                               8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
+                               args, finished=_cleanup)
+
+  client_queue_file = QueueFile(queue_process.create_queue(), 'client')
+  storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
+
+  # If signing is required, add a QueueFile for signing the image as we stream it out.
+  signing_queue_file = None
+  if sign and signer.name:
+    signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
+
+  # Start building.
+  queue_process.run()
+
+  # Start the storage saving.
+  storage_args = (verb, derived.uuid, derived.locations, storage_queue_file)
+  QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
+
+  if sign and signer.name:
+    signing_args = (verb, derived.uuid, signing_queue_file)
+    QueueProcess.run_process(_sign_sythentic_image, signing_args, finished=_cleanup)
+
+  # Close the database handle here for this process before we send the long download.
+  database.close_db_filter(None)
+
+  # Return the client's data.
+  return send_file(client_queue_file)
+
+
+def os_arch_checker(os, arch):
+  def checker(image_json):
+    # Verify the architecture and os.
+    operating_system = image_json.get('os', 'linux')
+    if operating_system != os:
+      return False
+
+    architecture = image_json.get('architecture', 'amd64')
+
+    # Note: Some older Docker images have 'x86_64' rather than 'amd64'.
+    # We allow the conversion here.
+    if architecture == 'x86_64' and operating_system == 'linux':
+      architecture = 'amd64'
+
+    if architecture != arch:
+      return False
+
+    return True
+
+  return checker
+
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_signature(server, namespace, repository, tag, os, arch):
+  return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
+                              os=os, arch=arch)
+
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_image(server, namespace, repository, tag, os, arch):
+  return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
+                    sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)
+
+
+@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
+@process_auth
+def get_squashed_tag(namespace, repository, tag):
+  return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImage())
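Since os_arch_checker just closes over the expected values and returns a predicate on the image JSON, its behavior is easy to spot-check; the sample metadata below is made up:

# Spot-checking the predicate returned by os_arch_checker (defined above).
check = os_arch_checker('linux', 'amd64')
print(check({'os': 'linux', 'architecture': 'amd64'}))    # True
print(check({'architecture': 'x86_64'}))                  # True; x86_64 is normalized to amd64
print(check({'os': 'windows', 'architecture': 'amd64'}))  # False; os mismatch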
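_repo_verb fans a single generated stream out to several consumers — the HTTP client, the storage writer, and optionally the signer — each reading through its own QueueFile. A rough stand-alone sketch of that one-producer/many-consumer pattern using only the standard library; the real QueueProcess/QueueFile classes in util/ differ in buffering and error handling:

# Illustrative stand-in for the QueueProcess/QueueFile fan-out used above.
import multiprocessing

def _produce(queues, chunks):
  # One producer pushes every chunk to every consumer queue, then signals EOF.
  for chunk in chunks:
    for q in queues:
      q.put(chunk)
  for q in queues:
    q.put(None)

def _consume(q, label):
  while True:
    chunk = q.get()
    if chunk is None:
      break
    print('%s got %d bytes' % (label, len(chunk)))

if __name__ == '__main__':
  client_q, storage_q, signing_q = (multiprocessing.Queue() for _ in range(3))
  producer = multiprocessing.Process(
      target=_produce, args=([client_q, storage_q, signing_q], [b'layer1', b'layer2']))
  consumers = [multiprocessing.Process(target=_consume, args=(q, name))
               for q, name in [(client_q, 'client'), (storage_q, 'storage'),
                               (signing_q, 'signing')]]
  producer.start()
  for c in consumers:
    c.start()
  producer.join()
  for c in consumers:
    c.join()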
endpoints/web.py

@@ -1,7 +1,7 @@
 import logging

 from flask import (abort, redirect, request, url_for, make_response, Response,
-                   Blueprint, send_from_directory, jsonify)
+                   Blueprint, send_from_directory, jsonify, send_file)

 from avatar_generator import Avatar
 from flask.ext.login import current_user
@@ -10,17 +10,20 @@ from health.healthcheck import get_healthchecker

 from data import model
 from data.model.oauth import DatabaseAuthorizationProvider
-from app import app, billing as stripe, build_logs, avatar
+from app import app, billing as stripe, build_logs, avatar, signer
 from auth.auth import require_session_login, process_oauth
-from auth.permissions import AdministerOrganizationPermission, ReadRepositoryPermission
+from auth.permissions import (AdministerOrganizationPermission, ReadRepositoryPermission,
+                              SuperUserPermission)
+
 from util.invoice import renderInvoiceToPdf
 from util.seo import render_snapshot
 from util.cache import no_cache
 from endpoints.common import common_login, render_page_template, route_show_if, param_required
-from endpoints.csrf import csrf_protect, generate_csrf_token
+from endpoints.csrf import csrf_protect, generate_csrf_token, verify_csrf
 from endpoints.registry import set_cache_headers
 from util.names import parse_repository_name, parse_repository_name_and_tag
 from util.useremails import send_email_changed
+from util.systemlogs import build_logs_archive
 from auth import scopes

 import features
@@ -60,6 +63,14 @@ def snapshot(path = ''):
   abort(404)


+@web.route('/aci-signing-key')
+@no_cache
+def aci_signing_key():
+  if not signer.name:
+    abort(404)
+
+  return send_file(signer.public_key_path)
+
+
 @web.route('/plans/')
 @no_cache
 @route_show_if(features.BILLING)
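With this endpoint, clients can fetch the instance's public signing key to validate ACIs later. A hypothetical client call, assuming a deployment at quay.example.com and the requests library; only the /aci-signing-key path comes from this diff:

# Hypothetical client fetch of the ACI signing key served above.
import requests

resp = requests.get('https://quay.example.com/aci-signing-key')
if resp.status_code == 404:
  print('no signer configured on this instance')
else:
  with open('quay-aci.pub', 'wb') as key_file:
    key_file.write(resp.content)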
@@ -98,6 +109,7 @@ def organizations():
 def user():
   return index('')


+@web.route('/superuser/')
 @no_cache
 @route_show_if(features.SUPER_USERS)
@@ -105,6 +117,13 @@ def superuser():
   return index('')


+@web.route('/setup/')
+@no_cache
+@route_show_if(features.SUPER_USERS)
+def setup():
+  return index('')
+
+
 @web.route('/signin/')
 @no_cache
 def signin(redirect=None):
@@ -463,3 +482,21 @@ def exchange_code_for_token():

   provider = FlaskAuthorizationProvider()
   return provider.get_token(grant_type, client_id, client_secret, redirect_uri, code, scope=scope)
+
+
+@web.route('/systemlogsarchive', methods=['GET'])
+@process_oauth
+@route_show_if(features.SUPER_USERS)
+@no_cache
+def download_logs_archive():
+  # Note: We cannot use the decorator here because this is a GET method. That being said, this
+  # information is sensitive enough that we want the extra protection.
+  verify_csrf()
+
+  if SuperUserPermission().can():
+    archive_data = build_logs_archive(app)
+    return Response(archive_data,
+                    mimetype="application/octet-stream",
+                    headers={"Content-Disposition": "attachment;filename=erlogs.tar.gz"})
+
+  abort(403)
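Taken together, the verbs and web changes let a client verify an ACI end to end: download the image, its detached signature, and the instance key, then check them with gpg. A hedged sketch — the host, repository names, file names, and gpg invocation are illustrative, any blueprint URL prefix is omitted, and only the route shapes come from this diff:

# Illustrative end-to-end verification of an ACI and its detached signature.
import subprocess
import requests

HOST = 'quay.example.com'  # assumed deployment
BASE = 'https://%s/aci/%s/myorg/myrepo/latest' % (HOST, HOST)

for url, filename in [(BASE + '/aci/linux/amd64/', 'app.aci'),
                      (BASE + '/sig/linux/amd64/', 'app.aci.asc'),
                      ('https://%s/aci-signing-key' % HOST, 'signing.pub')]:
  with open(filename, 'wb') as fh:
    fh.write(requests.get(url).content)

subprocess.check_call(['gpg', '--import', 'signing.pub'])
subprocess.check_call(['gpg', '--verify', 'app.aci.asc', 'app.aci'])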