Change spacing from 4 spaces to 2 spaces

Sam Chow 2018-08-15 15:32:24 -04:00
parent ec14007268
commit efa66d84e4
28 changed files with 936 additions and 913 deletions

View file

@@ -2,7 +2,6 @@ import os
import re
import subprocess
# Note: this currently points to the directory above, since we're in the quay config_app dir
# TODO(config_extract): revert to root directory rather than the one above
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -15,7 +14,6 @@ TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')
IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ
def _get_version_number_changelog():
try:
with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f:
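For illustration, the double os.path.dirname call above walks two levels up from the file's absolute path; a minimal sketch, with a hypothetical path standing in for os.path.abspath(__file__):

import os

path = '/quay/config_app/config.py'  # hypothetical stand-in for os.path.abspath(__file__)
root = os.path.dirname(os.path.dirname(path))  # up two levels: config_app/, then the repo root
print(root)  # -> /quay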

View file

@@ -27,14 +27,16 @@ config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml',
testing=is_testing)
if is_testing:
from test.testconfig import TestConfig
logger.debug('Loading test config.')
app.config.from_object(TestConfig())
from test.testconfig import TestConfig
logger.debug('Loading test config.')
app.config.from_object(TestConfig())
else:
from config import DefaultConfig
logger.debug('Loading default config.')
app.config.from_object(DefaultConfig())
app.teardown_request(database.close_db_filter)
from config import DefaultConfig
logger.debug('Loading default config.')
app.config.from_object(DefaultConfig())
app.teardown_request(database.close_db_filter)
# Load the override config via the provider.
config_provider.update_app_config(app.config)
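The test/default split above is a standard Flask pattern: seed app.config from a config object, then overlay the provider's parsed values. A minimal sketch, where DefaultConfig and the override dict are illustrative stand-ins rather than Quay's real settings:

from flask import Flask

app = Flask(__name__)

class DefaultConfig(object):
  DEBUG = False
  REGISTRY_TITLE = 'Quay'  # illustrative value

app.config.from_object(DefaultConfig())

# update_app_config ultimately merges the provider's parsed config on top:
override = {'REGISTRY_TITLE': 'My Registry'}  # hypothetical provider contents
app.config.update(override)
print(app.config['REGISTRY_TITLE'])  # -> My Registry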

View file

@@ -3,7 +3,6 @@ from config_app.c_app import app as application
# Bind all of the blueprints
import config_web
if __name__ == '__main__':
logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)
application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')
logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)
application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')

View file

@@ -31,13 +31,13 @@ api = ApiExceptionHandlingApi()
api.init_app(api_bp)
def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None):
if not metadata:
metadata = {}
if not metadata:
metadata = {}
if repo:
repo_name = repo.name
if repo:
repo_name = repo.name
model.log.log_action(kind, user_or_orgname, repo_name, user_or_orgname, request.remote_addr, metadata)
model.log.log_action(kind, user_or_orgname, repo_name, user_or_orgname, request.remote_addr, metadata)
def format_date(date):
""" Output an RFC822 date format. """

View file

@@ -7,247 +7,248 @@ from config_app.c_app import app
from config_app.config_endpoints.api import method_metadata
from config_app.config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER
logger = logging.getLogger(__name__)
def generate_route_data():
include_internal = True
compact = True
include_internal = True
compact = True
def swagger_parameter(name, description, kind='path', param_type='string', required=True,
enum=None, schema=None):
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject
parameter_info = {
'name': name,
'in': kind,
'required': required
def swagger_parameter(name, description, kind='path', param_type='string', required=True,
enum=None, schema=None):
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject
parameter_info = {
'name': name,
'in': kind,
'required': required
}
if schema:
parameter_info['schema'] = {
'$ref': '#/definitions/%s' % schema
}
else:
parameter_info['type'] = param_type
if enum is not None and len(list(enum)) > 0:
parameter_info['enum'] = list(enum)
return parameter_info
paths = {}
models = {}
tags = []
tags_added = set()
operation_ids = set()
for rule in app.url_map.iter_rules():
endpoint_method = app.view_functions[rule.endpoint]
# Verify that we have a view class for this API method.
if not 'view_class' in dir(endpoint_method):
continue
view_class = endpoint_method.view_class
# Hide the class if it is internal.
internal = method_metadata(view_class, 'internal')
if not include_internal and internal:
continue
# Build the tag.
parts = fully_qualified_name(view_class).split('.')
tag_name = parts[-2]
if not tag_name in tags_added:
tags_added.add(tag_name)
tags.append({
'name': tag_name,
'description': (sys.modules[view_class.__module__].__doc__ or '').strip()
})
# Build the Swagger data for the path.
swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule)
full_name = fully_qualified_name(view_class)
path_swagger = {
'x-name': full_name,
'x-path': swagger_path,
'x-tag': tag_name
}
related_user_res = method_metadata(view_class, 'related_user_resource')
if related_user_res is not None:
path_swagger['x-user-related'] = fully_qualified_name(related_user_res)
paths[swagger_path] = path_swagger
# Add any global path parameters.
param_data_map = view_class.__api_path_params if '__api_path_params' in dir(
view_class) else {}
if param_data_map:
path_parameters_swagger = []
for path_parameter in param_data_map:
description = param_data_map[path_parameter].get('description')
path_parameters_swagger.append(swagger_parameter(path_parameter, description))
path_swagger['parameters'] = path_parameters_swagger
# Add the individual HTTP operations.
method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
for method_name in method_names:
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object
method = getattr(view_class, method_name.lower(), None)
if method is None:
logger.debug('Unable to find method for %s in class %s', method_name, view_class)
continue
operationId = method_metadata(method, 'nickname')
operation_swagger = {
'operationId': operationId,
'parameters': [],
}
if operationId is None:
continue
if operationId in operation_ids:
raise Exception('Duplicate operation Id: %s' % operationId)
operation_ids.add(operationId)
# Mark the method as internal.
internal = method_metadata(method, 'internal')
if internal is not None:
operation_swagger['x-internal'] = True
if include_internal:
requires_fresh_login = method_metadata(method, 'requires_fresh_login')
if requires_fresh_login is not None:
operation_swagger['x-requires-fresh-login'] = True
# Add the path parameters.
if rule.arguments:
for path_parameter in rule.arguments:
description = param_data_map.get(path_parameter, {}).get('description')
operation_swagger['parameters'].append(
swagger_parameter(path_parameter, description))
# Add the query parameters.
if '__api_query_params' in dir(method):
for query_parameter_info in method.__api_query_params:
name = query_parameter_info['name']
description = query_parameter_info['help']
param_type = TYPE_CONVERTER[query_parameter_info['type']]
required = query_parameter_info['required']
operation_swagger['parameters'].append(
swagger_parameter(name, description, kind='query',
param_type=param_type,
required=required,
enum=query_parameter_info['choices']))
# Add the OAuth security block.
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject
scope = method_metadata(method, 'oauth2_scope')
if scope and not compact:
operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}]
# Add the responses block.
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject
response_schema_name = method_metadata(method, 'response_schema')
if not compact:
if response_schema_name:
models[response_schema_name] = view_class.schemas[response_schema_name]
models['ApiError'] = {
'type': 'object',
'properties': {
'status': {
'type': 'integer',
'description': 'Status code of the response.'
},
'type': {
'type': 'string',
'description': 'Reference to the type of the error.'
},
'detail': {
'type': 'string',
'description': 'Details about the specific instance of the error.'
},
'title': {
'type': 'string',
'description': 'Unique error code to identify the type of error.'
},
'error_message': {
'type': 'string',
'description': 'Deprecated; alias for detail'
},
'error_type': {
'type': 'string',
'description': 'Deprecated; alias for detail'
}
},
'required': [
'status',
'type',
'title',
]
}
if schema:
parameter_info['schema'] = {
'$ref': '#/definitions/%s' % schema
}
responses = {
'400': {
'description': 'Bad Request',
},
'401': {
'description': 'Session required',
},
'403': {
'description': 'Unauthorized access',
},
'404': {
'description': 'Not found',
},
}
for _, body in responses.items():
body['schema'] = {'$ref': '#/definitions/ApiError'}
if method_name == 'DELETE':
responses['204'] = {
'description': 'Deleted'
}
elif method_name == 'POST':
responses['201'] = {
'description': 'Successful creation'
}
else:
parameter_info['type'] = param_type
responses['200'] = {
'description': 'Successful invocation'
}
if enum is not None and len(list(enum)) > 0:
parameter_info['enum'] = list(enum)
return parameter_info
paths = {}
models = {}
tags = []
tags_added = set()
operation_ids = set()
for rule in app.url_map.iter_rules():
endpoint_method = app.view_functions[rule.endpoint]
# Verify that we have a view class for this API method.
if not 'view_class' in dir(endpoint_method):
continue
view_class = endpoint_method.view_class
# Hide the class if it is internal.
internal = method_metadata(view_class, 'internal')
if not include_internal and internal:
continue
# Build the tag.
parts = fully_qualified_name(view_class).split('.')
tag_name = parts[-2]
if not tag_name in tags_added:
tags_added.add(tag_name)
tags.append({
'name': tag_name,
'description': (sys.modules[view_class.__module__].__doc__ or '').strip()
})
# Build the Swagger data for the path.
swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule)
full_name = fully_qualified_name(view_class)
path_swagger = {
'x-name': full_name,
'x-path': swagger_path,
'x-tag': tag_name
}
related_user_res = method_metadata(view_class, 'related_user_resource')
if related_user_res is not None:
path_swagger['x-user-related'] = fully_qualified_name(related_user_res)
paths[swagger_path] = path_swagger
# Add any global path parameters.
param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {}
if param_data_map:
path_parameters_swagger = []
for path_parameter in param_data_map:
description = param_data_map[path_parameter].get('description')
path_parameters_swagger.append(swagger_parameter(path_parameter, description))
path_swagger['parameters'] = path_parameters_swagger
# Add the individual HTTP operations.
method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
for method_name in method_names:
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object
method = getattr(view_class, method_name.lower(), None)
if method is None:
logger.debug('Unable to find method for %s in class %s', method_name, view_class)
continue
operationId = method_metadata(method, 'nickname')
operation_swagger = {
'operationId': operationId,
'parameters': [],
if response_schema_name:
responses['200']['schema'] = {
'$ref': '#/definitions/%s' % response_schema_name
}
if operationId is None:
continue
operation_swagger['responses'] = responses
if operationId in operation_ids:
raise Exception('Duplicate operation Id: %s' % operationId)
# Add the request block.
request_schema_name = method_metadata(method, 'request_schema')
if request_schema_name and not compact:
models[request_schema_name] = view_class.schemas[request_schema_name]
operation_ids.add(operationId)
operation_swagger['parameters'].append(
swagger_parameter('body', 'Request body contents.', kind='body',
schema=request_schema_name))
# Mark the method as internal.
internal = method_metadata(method, 'internal')
if internal is not None:
operation_swagger['x-internal'] = True
# Add the operation to the parent path.
if not internal or (internal and include_internal):
path_swagger[method_name.lower()] = operation_swagger
if include_internal:
requires_fresh_login = method_metadata(method, 'requires_fresh_login')
if requires_fresh_login is not None:
operation_swagger['x-requires-fresh-login'] = True
tags.sort(key=lambda t: t['name'])
paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag']))
# Add the path parameters.
if rule.arguments:
for path_parameter in rule.arguments:
description = param_data_map.get(path_parameter, {}).get('description')
operation_swagger['parameters'].append(swagger_parameter(path_parameter, description))
# Add the query parameters.
if '__api_query_params' in dir(method):
for query_parameter_info in method.__api_query_params:
name = query_parameter_info['name']
description = query_parameter_info['help']
param_type = TYPE_CONVERTER[query_parameter_info['type']]
required = query_parameter_info['required']
operation_swagger['parameters'].append(
swagger_parameter(name, description, kind='query',
param_type=param_type,
required=required,
enum=query_parameter_info['choices']))
# Add the OAuth security block.
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject
scope = method_metadata(method, 'oauth2_scope')
if scope and not compact:
operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}]
# Add the responses block.
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject
response_schema_name = method_metadata(method, 'response_schema')
if not compact:
if response_schema_name:
models[response_schema_name] = view_class.schemas[response_schema_name]
models['ApiError'] = {
'type': 'object',
'properties': {
'status': {
'type': 'integer',
'description': 'Status code of the response.'
},
'type': {
'type': 'string',
'description': 'Reference to the type of the error.'
},
'detail': {
'type': 'string',
'description': 'Details about the specific instance of the error.'
},
'title': {
'type': 'string',
'description': 'Unique error code to identify the type of error.'
},
'error_message': {
'type': 'string',
'description': 'Deprecated; alias for detail'
},
'error_type': {
'type': 'string',
'description': 'Deprecated; alias for detail'
}
},
'required': [
'status',
'type',
'title',
]
}
responses = {
'400': {
'description': 'Bad Request',
},
'401': {
'description': 'Session required',
},
'403': {
'description': 'Unauthorized access',
},
'404': {
'description': 'Not found',
},
}
for _, body in responses.items():
body['schema'] = {'$ref': '#/definitions/ApiError'}
if method_name == 'DELETE':
responses['204'] = {
'description': 'Deleted'
}
elif method_name == 'POST':
responses['201'] = {
'description': 'Successful creation'
}
else:
responses['200'] = {
'description': 'Successful invocation'
}
if response_schema_name:
responses['200']['schema'] = {
'$ref': '#/definitions/%s' % response_schema_name
}
operation_swagger['responses'] = responses
# Add the request block.
request_schema_name = method_metadata(method, 'request_schema')
if request_schema_name and not compact:
models[request_schema_name] = view_class.schemas[request_schema_name]
operation_swagger['parameters'].append(
swagger_parameter('body', 'Request body contents.', kind='body',
schema=request_schema_name))
# Add the operation to the parent path.
if not internal or (internal and include_internal):
path_swagger[method_name.lower()] = operation_swagger
tags.sort(key=lambda t: t['name'])
paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag']))
if compact:
return {'paths': paths}
if compact:
return {'paths': paths}
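For reference, the swagger_parameter helper in this hunk distills to the following self-contained form; running it shows the Swagger 2.0 parameterObject dict it emits ('repository' is an example name, not taken from this diff):

def swagger_parameter(name, description, kind='path', param_type='string',
                      required=True, enum=None, schema=None):
  parameter_info = {'name': name, 'in': kind, 'required': required}
  if schema:
    parameter_info['schema'] = {'$ref': '#/definitions/%s' % schema}
  else:
    parameter_info['type'] = param_type
  if enum is not None and len(list(enum)) > 0:
    parameter_info['enum'] = list(enum)
  return parameter_info

print(swagger_parameter('repository', 'The full path of the repository.'))
# -> {'name': 'repository', 'in': 'path', 'required': True, 'type': 'string'}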

View file

@@ -3,7 +3,8 @@ import logging
from flask import abort, request
from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model
from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request, kubernetes_only
from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request, \
kubernetes_only
from config_app.c_app import (app, config_provider, superusers, ip_resolver,
instance_keys, INIT_SCRIPTS_LOCATION)
from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
@@ -11,7 +12,8 @@ from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
from data.database import configure
from data.runmigration import run_alembic_migration
from util.config.configutil import add_enterprise_config_defaults
from util.config.validator import validate_service_for_config, ValidatorContext, is_valid_config_upload_filename
from util.config.validator import validate_service_for_config, ValidatorContext, \
is_valid_config_upload_filename
logger = logging.getLogger(__name__)
@@ -85,6 +87,7 @@ class SuperUserRegistryStatus(ApiResource):
""" Resource for determining the status of the registry, such as if config exists,
if a database is configured, and if it has any defined users.
"""
@nickname('scRegistryStatus')
def get(self):
""" Returns the status of the registry. """
@@ -121,6 +124,7 @@ class _AlembicLogHandler(logging.Handler):
@resource('/v1/superuser/setupdb')
class SuperUserSetupDatabase(ApiResource):
""" Resource for invoking alembic to setup the database. """
@nickname('scSetupDatabase')
def get(self):
""" Invokes the alembic upgrade process. """
@@ -251,7 +255,8 @@ class SuperUserConfigValidate(ApiResource):
# so we also allow it to be called if there is no valid registry configuration setup. Note that
# this is also safe since this method does not access any information not given in the request.
config = request.get_json()['config']
validator_context = ValidatorContext.from_app(app, config, request.get_json().get('password', ''),
validator_context = ValidatorContext.from_app(app, config,
request.get_json().get('password', ''),
instance_keys=instance_keys,
ip_resolver=ip_resolver,
config_provider=config_provider,
@@ -294,6 +299,7 @@ class SuperUserKubernetesDeployment(ApiResource):
@resource('/v1/superuser/config/kubernetes')
class SuperUserKubernetesConfiguration(ApiResource):
""" Resource for saving the config files to kubernetes secrets. """
@kubernetes_only
@nickname('scDeployConfiguration')
def post(self):
@@ -303,6 +309,7 @@ class SuperUserKubernetesConfiguration(ApiResource):
@resource('/v1/superuser/config/file/<filename>')
class SuperUserConfigFile(ApiResource):
""" Resource for fetching the status of config files and overriding them. """
@nickname('scConfigFileExists')
def get(self, filename):
""" Returns whether the configuration file with the given name exists. """
@@ -313,7 +320,6 @@ class SuperUserConfigFile(ApiResource):
'exists': config_provider.volume_file_exists(filename)
}
@nickname('scUpdateConfigFile')
def post(self, filename):
""" Updates the configuration file with the given name. """

View file

@@ -4,36 +4,36 @@ from six import add_metaclass
@add_metaclass(ABCMeta)
class SuperuserConfigDataInterface(object):
"""
Interface that represents all data store interactions required by the superuser config API.
"""
@abstractmethod
def is_valid(self):
"""
Interface that represents all data store interactions required by the superuser config API.
Returns true if the configured database is valid.
"""
@abstractmethod
def is_valid(self):
"""
Returns true if the configured database is valid.
"""
@abstractmethod
def has_users(self):
"""
Returns true if there are any users defined.
"""
@abstractmethod
def has_users(self):
"""
Returns true if there are any users defined.
"""
@abstractmethod
def create_superuser(self, username, password, email):
"""
Creates a new superuser with the given username, password and email. Returns the user's UUID.
"""
@abstractmethod
def create_superuser(self, username, password, email):
"""
Creates a new superuser with the given username, password and email. Returns the user's UUID.
"""
@abstractmethod
def has_federated_login(self, username, service_name):
"""
Returns true if the matching user has a federated login under the matching service.
"""
@abstractmethod
def has_federated_login(self, username, service_name):
"""
Returns true if the matching user has a federated login under the matching service.
"""
@abstractmethod
def attach_federated_login(self, username, service_name, federated_username):
"""
Attaches a federated login to the matching user, under the given service.
"""
@abstractmethod
def attach_federated_login(self, username, service_name, federated_username):
"""
Attaches a federated login to the matching user, under the given service.
"""

View file

@@ -4,34 +4,34 @@ from config_app.config_endpoints.api.suconfig_models_interface import SuperuserC
class PreOCIModel(SuperuserConfigDataInterface):
# Note: this method is different from has_users: the user select will throw if the user
# table does not exist, whereas has_users assumes the table is valid
def is_valid(self):
try:
list(User.select().limit(1))
return True
except:
return False
# Note: this method is different from has_users: the user select will throw if the user
# table does not exist, whereas has_users assumes the table is valid
def is_valid(self):
try:
list(User.select().limit(1))
return True
except:
return False
def has_users(self):
return bool(list(User.select().limit(1)))
def has_users(self):
return bool(list(User.select().limit(1)))
def create_superuser(self, username, password, email):
return model.user.create_user(username, password, email, auto_verify=True).uuid
def create_superuser(self, username, password, email):
return model.user.create_user(username, password, email, auto_verify=True).uuid
def has_federated_login(self, username, service_name):
user = model.user.get_user(username)
if user is None:
return False
def has_federated_login(self, username, service_name):
user = model.user.get_user(username)
if user is None:
return False
return bool(model.user.lookup_federated_login(user, service_name))
return bool(model.user.lookup_federated_login(user, service_name))
def attach_federated_login(self, username, service_name, federated_username):
user = model.user.get_user(username)
if user is None:
return False
def attach_federated_login(self, username, service_name, federated_username):
user = model.user.get_user(username)
if user is None:
return False
model.user.attach_federated_login(user, service_name, federated_username)
model.user.attach_federated_login(user, service_name, federated_username)
pre_oci_model = PreOCIModel()
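The is_valid probe above relies on a cheap SELECT that throws when the schema is missing; the same pattern with the stdlib sqlite3 module, so it can run standalone:

import sqlite3

def is_valid(conn):
  try:
    conn.execute('SELECT 1 FROM user LIMIT 1').fetchone()
    return True
  except sqlite3.Error:
    return False

conn = sqlite3.connect(':memory:')
print(is_valid(conn))  # -> False, the user table does not exist yet
conn.execute('CREATE TABLE user (id INTEGER)')
print(is_valid(conn))  # -> True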

View file

@@ -6,165 +6,168 @@ from config_app.config_endpoints.api import format_date
def user_view(user):
return {
'name': user.username,
'kind': 'user',
'is_robot': user.robot,
}
return {
'name': user.username,
'kind': 'user',
'is_robot': user.robot,
}
class RepositoryBuild(namedtuple('RepositoryBuild',
['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name',
'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name',
'started', 'job_config', 'phase', 'status', 'error', 'archive_url'])):
"""
RepositoryBuild represents a build associated with a repository
:type uuid: string
:type logs_archived: boolean
:type repository_namespace_user_username: string
:type repository_name: string
:type can_write: boolean
:type can_read: boolean
:type pull_robot: User
:type resource_key: string
:type trigger: Trigger
:type display_name: string
:type started: boolean
:type job_config: {Any -> Any}
:type phase: string
:type status: string
:type error: string
:type archive_url: string
"""
['uuid', 'logs_archived', 'repository_namespace_user_username',
'repository_name',
'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger',
'display_name',
'started', 'job_config', 'phase', 'status', 'error',
'archive_url'])):
"""
RepositoryBuild represents a build associated with a repository
:type uuid: string
:type logs_archived: boolean
:type repository_namespace_user_username: string
:type repository_name: string
:type can_write: boolean
:type can_read: boolean
:type pull_robot: User
:type resource_key: string
:type trigger: Trigger
:type display_name: string
:type started: boolean
:type job_config: {Any -> Any}
:type phase: string
:type status: string
:type error: string
:type archive_url: string
"""
def to_dict(self):
def to_dict(self):
resp = {
'id': self.uuid,
'phase': self.phase,
'started': format_date(self.started),
'display_name': self.display_name,
'status': self.status or {},
'subdirectory': self.job_config.get('build_subdir', ''),
'dockerfile_path': self.job_config.get('build_subdir', ''),
'context': self.job_config.get('context', ''),
'tags': self.job_config.get('docker_tags', []),
'manual_user': self.job_config.get('manual_user', None),
'is_writer': self.can_write,
'trigger': self.trigger.to_dict(),
'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None,
'resource_key': self.resource_key,
'pull_robot': user_view(self.pull_robot) if self.pull_robot else None,
'repository': {
'namespace': self.repository_namespace_user_username,
'name': self.repository_name
},
'error': self.error,
}
resp = {
'id': self.uuid,
'phase': self.phase,
'started': format_date(self.started),
'display_name': self.display_name,
'status': self.status or {},
'subdirectory': self.job_config.get('build_subdir', ''),
'dockerfile_path': self.job_config.get('build_subdir', ''),
'context': self.job_config.get('context', ''),
'tags': self.job_config.get('docker_tags', []),
'manual_user': self.job_config.get('manual_user', None),
'is_writer': self.can_write,
'trigger': self.trigger.to_dict(),
'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None,
'resource_key': self.resource_key,
'pull_robot': user_view(self.pull_robot) if self.pull_robot else None,
'repository': {
'namespace': self.repository_namespace_user_username,
'name': self.repository_name
},
'error': self.error,
}
if self.can_write:
if self.resource_key is not None:
resp['archive_url'] = self.archive_url
elif self.job_config.get('archive_url', None):
resp['archive_url'] = self.job_config['archive_url']
if self.can_write:
if self.resource_key is not None:
resp['archive_url'] = self.archive_url
elif self.job_config.get('archive_url', None):
resp['archive_url'] = self.job_config['archive_url']
return resp
return resp
class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])):
"""
Approval represents whether a key has been approved or not
:type approver: User
:type approval_type: string
:type approved_date: Date
:type notes: string
"""
"""
Approval represents whether a key has been approved or not
:type approver: User
:type approval_type: string
:type approved_date: Date
:type notes: string
"""
def to_dict(self):
return {
'approver': self.approver.to_dict() if self.approver else None,
'approval_type': self.approval_type,
'approved_date': self.approved_date,
'notes': self.notes,
}
def to_dict(self):
return {
'approver': self.approver.to_dict() if self.approver else None,
'approval_type': self.approval_type,
'approved_date': self.approved_date,
'notes': self.notes,
}
class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date',
'expiration_date', 'rotation_duration', 'approval'])):
"""
ServiceKey is an apostille signing key
:type name: string
:type kid: int
:type service: string
:type jwk: string
:type metadata: string
:type created_date: Date
:type expiration_date: Date
:type rotation_duration: Date
:type approval: Approval
class ServiceKey(
namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date',
'expiration_date', 'rotation_duration', 'approval'])):
"""
ServiceKey is an apostille signing key
:type name: string
:type kid: int
:type service: string
:type jwk: string
:type metadata: string
:type created_date: Date
:type expiration_date: Date
:type rotation_duration: Date
:type approval: Approval
"""
"""
def to_dict(self):
return {
'name': self.name,
'kid': self.kid,
'service': self.service,
'jwk': self.jwk,
'metadata': self.metadata,
'created_date': self.created_date,
'expiration_date': self.expiration_date,
'rotation_duration': self.rotation_duration,
'approval': self.approval.to_dict() if self.approval is not None else None,
}
def to_dict(self):
return {
'name': self.name,
'kid': self.kid,
'service': self.service,
'jwk': self.jwk,
'metadata': self.metadata,
'created_date': self.created_date,
'expiration_date': self.expiration_date,
'rotation_duration': self.rotation_duration,
'approval': self.approval.to_dict() if self.approval is not None else None,
}
class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])):
"""
User represents a single user.
:type username: string
:type email: string
:type verified: boolean
:type enabled: boolean
:type robot: User
"""
"""
User represents a single user.
:type username: string
:type email: string
:type verified: boolean
:type enabled: boolean
:type robot: User
"""
def to_dict(self):
user_data = {
'kind': 'user',
'name': self.username,
'username': self.username,
'email': self.email,
'verified': self.verified,
'enabled': self.enabled,
}
def to_dict(self):
user_data = {
'kind': 'user',
'name': self.username,
'username': self.username,
'email': self.email,
'verified': self.verified,
'enabled': self.enabled,
}
return user_data
return user_data
class Organization(namedtuple('Organization', ['username', 'email'])):
"""
Organization represents a single org.
:type username: string
:type email: string
"""
def to_dict(self):
return {
'name': self.username,
'email': self.email,
}
"""
Organization represents a single org.
:type username: string
:type email: string
"""
def to_dict(self):
return {
'name': self.username,
'email': self.email,
}
@add_metaclass(ABCMeta)
class SuperuserDataInterface(object):
"""
Interface that represents all data store interactions required by a superuser api.
"""
@abstractmethod
def list_all_service_keys(self):
"""
Interface that represents all data store interactions required by a superuser api.
Returns a list of service keys
"""
@abstractmethod
def list_all_service_keys(self):
"""
Returns a list of service keys
"""

View file

@@ -1,6 +1,8 @@
from data import model
from config_app.config_endpoints.api.superuser_models_interface import SuperuserDataInterface, User, ServiceKey, Approval
from config_app.config_endpoints.api.superuser_models_interface import (SuperuserDataInterface, User, ServiceKey,
Approval)
def _create_user(user):
if user is None:
@@ -11,12 +13,15 @@ def _create_user(user):
def _create_key(key):
approval = None
if key.approval is not None:
approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, key.approval.approved_date,
approval = Approval(_create_user(key.approval.approver), key.approval.approval_type,
key.approval.approved_date,
key.approval.notes)
return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date,
return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date,
key.expiration_date,
key.rotation_duration, approval)
class ServiceKeyDoesNotExist(Exception):
pass
@@ -30,6 +35,7 @@ class PreOCIModel(SuperuserDataInterface):
PreOCIModel implements the data model for the SuperUser using a database schema
before it was changed to support the OCI specification.
"""
def list_all_service_keys(self):
keys = model.service_keys.list_all_keys()
return [_create_key(key) for key in keys]
@@ -43,8 +49,10 @@ class PreOCIModel(SuperuserDataInterface):
except model.ServiceKeyAlreadyApproved:
raise ServiceKeyAlreadyApproved
def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
(private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name)
def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None,
rotation_duration=None):
(private_key, key) = model.service_keys.generate_service_key(service, expiration_date,
metadata=metadata, name=name)
return private_key, key.kid

View file

@@ -10,50 +10,51 @@ from config_app.c_app import app, config_provider
from config_app.config_endpoints.api import resource, ApiResource, nickname
from config_app.config_util.tar import tarinfo_filter_partial, strip_absolute_path_and_add_trailing_dir
@resource('/v1/configapp/initialization')
class ConfigInitialization(ApiResource):
"""
Resource for dealing with any initialization logic for the config app
"""
"""
Resource for dealing with any initialization logic for the config app
"""
@nickname('scStartNewConfig')
def post(self):
config_provider.new_config_dir()
return make_response('OK')
@nickname('scStartNewConfig')
def post(self):
config_provider.new_config_dir()
return make_response('OK')
@resource('/v1/configapp/tarconfig')
class TarConfigLoader(ApiResource):
"""
Resource for dealing with configuration as a tarball,
including loading and generating functions
"""
"""
Resource for dealing with configuration as a tarball,
including loading and generating functions
"""
@nickname('scGetConfigTarball')
def get(self):
config_path = config_provider.get_config_dir_path()
tar_dir_prefix = strip_absolute_path_and_add_trailing_dir(config_path)
temp = tempfile.NamedTemporaryFile()
@nickname('scGetConfigTarball')
def get(self):
config_path = config_provider.get_config_dir_path()
tar_dir_prefix = strip_absolute_path_and_add_trailing_dir(config_path)
temp = tempfile.NamedTemporaryFile()
tar = tarfile.open(temp.name, mode="w|gz")
for name in os.listdir(config_path):
tar.add(os.path.join(config_path, name), filter=tarinfo_filter_partial(tar_dir_prefix))
tar = tarfile.open(temp.name, mode="w|gz")
for name in os.listdir(config_path):
tar.add(os.path.join(config_path, name), filter=tarinfo_filter_partial(tar_dir_prefix))
tar.close()
return send_file(temp.name, mimetype='application/gzip')
tar.close()
return send_file(temp.name, mimetype='application/gzip')
@nickname('scUploadTarballConfig')
def put(self):
""" Loads tarball config into the config provider """
# Generate a new empty dir to load the config into
config_provider.new_config_dir()
input_stream = request.stream
with tarfile.open(mode="r|gz", fileobj=input_stream) as tar_stream:
tar_stream.extractall(config_provider.get_config_dir_path())
@nickname('scUploadTarballConfig')
def put(self):
""" Loads tarball config into the config provider """
# Generate a new empty dir to load the config into
config_provider.new_config_dir()
input_stream = request.stream
with tarfile.open(mode="r|gz", fileobj=input_stream) as tar_stream:
tar_stream.extractall(config_provider.get_config_dir_path())
# now try to connect to the db provided in their config to validate it works
combined = dict(**app.config)
combined.update(config_provider.get_config())
configure(combined)
# now try to connect to the db provided in their config to validate it works
combined = dict(**app.config)
combined.update(config_provider.get_config())
configure(combined)
return make_response('OK')
return make_response('OK')
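scGetConfigTarball above streams the config directory back as a gzipped tarball; a simplified standalone sketch (arcname replaces the repo's tarinfo_filter_partial helper, and the directory contents are fabricated for the demo):

import os
import tarfile
import tempfile

config_path = tempfile.mkdtemp()
with open(os.path.join(config_path, 'config.yaml'), 'w') as f:
  f.write('REGISTRY_TITLE: example\n')  # illustrative config contents

temp = tempfile.NamedTemporaryFile(suffix='.tar.gz', delete=False)
with tarfile.open(temp.name, mode='w|gz') as tar:
  for name in os.listdir(config_path):
    tar.add(os.path.join(config_path, name), arcname=name)
print(temp.name)  # path of the generated tarball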

View file

@@ -5,15 +5,14 @@ from config_app.config_endpoints.api.superuser_models_interface import user_view
@resource('/v1/user/')
class User(ApiResource):
""" Operations related to users. """
""" Operations related to users. """
@nickname('getLoggedInUser')
def get(self):
""" Get user information for the authenticated user. """
user = get_authenticated_user()
# TODO(config): figure out if we need user validation
# if user is None or user.organization or not UserReadPermission(user.username).can():
# raise InvalidToken("Requires authentication", payload={'session_required': False})
return user_view(user)
@nickname('getLoggedInUser')
def get(self):
""" Get user information for the authenticated user. """
user = get_authenticated_user()
# TODO(config): figure out if we need user validation
# if user is None or user.organization or not UserReadPermission(user.username).can():
# raise InvalidToken("Requires authentication", payload={'session_required': False})
return user_view(user)

View file

@@ -13,52 +13,50 @@ from config_app.config_util.k8sconfig import get_k8s_namespace
def truthy_bool(param):
return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'}
return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'}
DEFAULT_JS_BUNDLE_NAME = 'configapp'
PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>')
logger = logging.getLogger(__name__)
TYPE_CONVERTER = {
truthy_bool: 'boolean',
str: 'string',
basestring: 'string',
reqparse.text_type: 'string',
int: 'integer',
truthy_bool: 'boolean',
str: 'string',
basestring: 'string',
reqparse.text_type: 'string',
int: 'integer',
}
def _list_files(path, extension, contains=""):
""" Returns a list of all the files with the given extension found under the given path. """
""" Returns a list of all the files with the given extension found under the given path. """
def matches(f):
return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0]
def matches(f):
return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0]
def join_path(dp, f):
# Remove the static/ prefix. It is added in the template.
return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):]
def join_path(dp, f):
# Remove the static/ prefix. It is added in the template.
return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):]
filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path)
return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)]
filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path)
return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)]
def render_page_template(name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs):
""" Renders the page template with the given name as the response and returns its contents. """
main_scripts = _list_files('build', 'js', js_bundle_name)
""" Renders the page template with the given name as the response and returns its contents. """
main_scripts = _list_files('build', 'js', js_bundle_name)
contents = render_template(name,
route_data=route_data,
main_scripts=main_scripts,
config_set=frontend_visible_config(app.config),
kubernetes_namespace=IS_KUBERNETES and get_k8s_namespace(),
**kwargs)
contents = render_template(name,
route_data=route_data,
main_scripts=main_scripts,
config_set=frontend_visible_config(app.config),
kubernetes_namespace=IS_KUBERNETES and get_k8s_namespace(),
**kwargs)
resp = make_response(contents)
resp.headers['X-FRAME-OPTIONS'] = 'DENY'
return resp
resp = make_response(contents)
resp.headers['X-FRAME-OPTIONS'] = 'DENY'
return resp
def fully_qualified_name(method_view_class):
return '%s.%s' % (method_view_class.__module__, method_view_class.__name__)
return '%s.%s' % (method_view_class.__module__, method_view_class.__name__)
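fully_qualified_name above is simply module path plus class name; run as a script, a locally defined class reports __main__:

def fully_qualified_name(method_view_class):
  return '%s.%s' % (method_view_class.__module__, method_view_class.__name__)

class Example(object):
  pass

print(fully_qualified_name(Example))  # -> __main__.Example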

View file

@@ -5,63 +5,62 @@ from werkzeug.exceptions import HTTPException
class ApiErrorType(Enum):
invalid_request = 'invalid_request'
invalid_request = 'invalid_request'
class ApiException(HTTPException):
"""
Represents an error in the application/problem+json format.
"""
Represents an error in the application/problem+json format.
See: https://tools.ietf.org/html/rfc7807
See: https://tools.ietf.org/html/rfc7807
- "type" (string) - A URI reference that identifies the
problem type.
- "type" (string) - A URI reference that identifies the
problem type.
- "title" (string) - A short, human-readable summary of the problem
type. It SHOULD NOT change from occurrence to occurrence of the
problem, except for purposes of localization
- "title" (string) - A short, human-readable summary of the problem
type. It SHOULD NOT change from occurrence to occurrence of the
problem, except for purposes of localization
- "status" (number) - The HTTP status code
- "status" (number) - The HTTP status code
- "detail" (string) - A human-readable explanation specific to this
occurrence of the problem.
- "detail" (string) - A human-readable explanation specific to this
occurrence of the problem.
- "instance" (string) - A URI reference that identifies the specific
occurrence of the problem. It may or may not yield further
information if dereferenced.
"""
- "instance" (string) - A URI reference that identifies the specific
occurrence of the problem. It may or may not yield further
information if dereferenced.
"""
def __init__(self, error_type, status_code, error_description, payload=None):
Exception.__init__(self)
self.error_description = error_description
self.code = status_code
self.payload = payload
self.error_type = error_type
self.data = self.to_dict()
def __init__(self, error_type, status_code, error_description, payload=None):
Exception.__init__(self)
self.error_description = error_description
self.code = status_code
self.payload = payload
self.error_type = error_type
self.data = self.to_dict()
super(ApiException, self).__init__(error_description, None)
super(ApiException, self).__init__(error_description, None)
def to_dict(self):
rv = dict(self.payload or ())
def to_dict(self):
rv = dict(self.payload or ())
if self.error_description is not None:
rv['detail'] = self.error_description
rv['error_message'] = self.error_description # TODO: deprecate
if self.error_description is not None:
rv['detail'] = self.error_description
rv['error_message'] = self.error_description # TODO: deprecate
rv['error_type'] = self.error_type.value # TODO: deprecate
rv['title'] = self.error_type.value
rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True)
rv['status'] = self.code
return rv
rv['error_type'] = self.error_type.value # TODO: deprecate
rv['title'] = self.error_type.value
rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True)
rv['status'] = self.code
return rv
class InvalidRequest(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload)
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload)
class InvalidResponse(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload)
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload)
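For context, to_dict() above assembles an RFC 7807 problem body of roughly this shape (values illustrative; 'type' is normally built with url_for against the running app):

problem = {
  'type': 'https://quay.example/api/v1/error/invalid_request',  # hypothetical URL
  'title': 'invalid_request',
  'detail': 'Missing required field: name',
  'status': 400,
  'error_message': 'Missing required field: name',  # deprecated alias for detail
  'error_type': 'invalid_request',                  # deprecated alias for title
}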

View file

@@ -5,22 +5,19 @@ from config_app.config_endpoints.common import render_page_template
from config_app.config_endpoints.api.discovery import generate_route_data
from config_app.config_endpoints.api import no_cache
setup_web = Blueprint('setup_web', __name__, template_folder='templates')
@lru_cache(maxsize=1)
def _get_route_data():
return generate_route_data()
return generate_route_data()
def render_page_template_with_routedata(name, *args, **kwargs):
return render_page_template(name, _get_route_data(), *args, **kwargs)
return render_page_template(name, _get_route_data(), *args, **kwargs)
@no_cache
@setup_web.route('/', methods=['GET'], defaults={'path': ''})
def index(path, **kwargs):
return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs)
return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs)

View file

@@ -6,41 +6,42 @@ from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
class TransientDirectoryProvider(FileConfigProvider):
""" Implementation of the config provider that reads and writes the data
from/to the file system, only using temporary directories,
deleting old dirs and creating new ones as requested.
""" Implementation of the config provider that reads and writes the data
from/to the file system, only using temporary directories,
deleting old dirs and creating new ones as requested.
"""
def __init__(self, config_volume, yaml_filename, py_filename):
# Create a temp directory that will be cleaned up when we change the config path
# This should ensure we have no "pollution" of different configs:
# no uploaded config should ever affect subsequent config modifications/creations
temp_dir = TemporaryDirectory()
self.temp_dir = temp_dir
super(TransientDirectoryProvider, self).__init__(temp_dir.name, yaml_filename, py_filename)
@property
def provider_id(self):
return 'transient'
def new_config_dir(self):
"""
def __init__(self, config_volume, yaml_filename, py_filename):
# Create a temp directory that will be cleaned up when we change the config path
# This should ensure we have no "pollution" of different configs:
# no uploaded config should ever affect subsequent config modifications/creations
temp_dir = TemporaryDirectory()
self.temp_dir = temp_dir
super(TransientDirectoryProvider, self).__init__(temp_dir.name, yaml_filename, py_filename)
Update the path with a new temporary directory, deleting the old one in the process
"""
self.temp_dir.cleanup()
temp_dir = TemporaryDirectory()
@property
def provider_id(self):
return 'transient'
self.config_volume = temp_dir.name
self.temp_dir = temp_dir
self.yaml_path = os.path.join(temp_dir.name, self.yaml_filename)
def new_config_dir(self):
"""
Update the path with a new temporary directory, deleting the old one in the process
"""
self.temp_dir.cleanup()
temp_dir = TemporaryDirectory()
def get_config_dir_path(self):
return self.config_volume
self.config_volume = temp_dir.name
self.temp_dir = temp_dir
self.yaml_path = os.path.join(temp_dir.name, self.yaml_filename)
def save_configuration_to_kubernetes(self):
config_path = self.get_config_dir_path()
def get_config_dir_path(self):
return self.config_volume
for name in os.listdir(config_path):
file_path = os.path.join(self.config_volume, name)
KubernetesAccessorSingleton.get_instance().save_file_as_secret(name, file_path)
def save_configuration_to_kubernetes(self):
config_path = self.get_config_dir_path()
for name in os.listdir(config_path):
file_path = os.path.join(self.config_volume, name)
KubernetesAccessorSingleton.get_instance().save_file_as_secret(name, file_path)
return 200
return 200
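new_config_dir() above is the whole trick of this provider in two steps: discard the scratch directory, then point every path at a fresh one. A minimal sketch using Python 3's tempfile.TemporaryDirectory (the code above presumably relies on a backport on Python 2):

import os
from tempfile import TemporaryDirectory

temp_dir = TemporaryDirectory()
old_path = temp_dir.name

temp_dir.cleanup()               # delete the old directory and everything in it
temp_dir = TemporaryDirectory()  # start clean so prior uploads cannot leak through

print(os.path.exists(old_path), os.path.exists(temp_dir.name))  # -> False True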

View file

@@ -4,9 +4,9 @@ from config_app.config_util.config.TransientDirectoryProvider import TransientDi
def get_config_provider(config_volume, yaml_filename, py_filename, testing=False):
""" Loads and returns the config provider for the current environment. """
""" Loads and returns the config provider for the current environment. """
if testing:
return TestConfigProvider()
if testing:
return TestConfigProvider()
return TransientDirectoryProvider(config_volume, yaml_filename, py_filename)
return TransientDirectoryProvider(config_volume, yaml_filename, py_filename)

View file

@@ -8,64 +8,65 @@ logger = logging.getLogger(__name__)
class BaseFileProvider(BaseProvider):
""" Base implementation of the config provider that reads the data from the file system. """
def __init__(self, config_volume, yaml_filename, py_filename):
self.config_volume = config_volume
self.yaml_filename = yaml_filename
self.py_filename = py_filename
""" Base implementation of the config provider that reads the data from the file system. """
self.yaml_path = os.path.join(config_volume, yaml_filename)
self.py_path = os.path.join(config_volume, py_filename)
def __init__(self, config_volume, yaml_filename, py_filename):
self.config_volume = config_volume
self.yaml_filename = yaml_filename
self.py_filename = py_filename
def update_app_config(self, app_config):
if os.path.exists(self.py_path):
logger.debug('Applying config file: %s', self.py_path)
app_config.from_pyfile(self.py_path)
self.yaml_path = os.path.join(config_volume, yaml_filename)
self.py_path = os.path.join(config_volume, py_filename)
if os.path.exists(self.yaml_path):
logger.debug('Applying config file: %s', self.yaml_path)
import_yaml(app_config, self.yaml_path)
def update_app_config(self, app_config):
if os.path.exists(self.py_path):
logger.debug('Applying config file: %s', self.py_path)
app_config.from_pyfile(self.py_path)
def get_config(self):
if not self.config_exists():
return None
if os.path.exists(self.yaml_path):
logger.debug('Applying config file: %s', self.yaml_path)
import_yaml(app_config, self.yaml_path)
config_obj = {}
import_yaml(config_obj, self.yaml_path)
return config_obj
def get_config(self):
if not self.config_exists():
return None
def config_exists(self):
return self.volume_file_exists(self.yaml_filename)
config_obj = {}
import_yaml(config_obj, self.yaml_path)
return config_obj
def volume_exists(self):
return os.path.exists(self.config_volume)
def config_exists(self):
return self.volume_file_exists(self.yaml_filename)
def volume_file_exists(self, filename):
return os.path.exists(os.path.join(self.config_volume, filename))
def volume_exists(self):
return os.path.exists(self.config_volume)
def get_volume_file(self, filename, mode='r'):
return open(os.path.join(self.config_volume, filename), mode=mode)
def volume_file_exists(self, filename):
return os.path.exists(os.path.join(self.config_volume, filename))
def get_volume_path(self, directory, filename):
return os.path.join(directory, filename)
def get_volume_file(self, filename, mode='r'):
return open(os.path.join(self.config_volume, filename), mode=mode)
def list_volume_directory(self, path):
dirpath = os.path.join(self.config_volume, path)
if not os.path.exists(dirpath):
return None
def get_volume_path(self, directory, filename):
return os.path.join(directory, filename)
if not os.path.isdir(dirpath):
return None
def list_volume_directory(self, path):
dirpath = os.path.join(self.config_volume, path)
if not os.path.exists(dirpath):
return None
return os.listdir(dirpath)
if not os.path.isdir(dirpath):
return None
def requires_restart(self, app_config):
file_config = self.get_config()
if not file_config:
return False
return os.listdir(dirpath)
for key in file_config:
if app_config.get(key) != file_config[key]:
return True
def requires_restart(self, app_config):
file_config = self.get_config()
if not file_config:
return False
return False
for key in file_config:
if app_config.get(key) != file_config[key]:
return True
return False
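requires_restart above reduces to a dict comparison: restart only when some key in the on-disk config differs from what the running app loaded. Distilled:

def requires_restart(app_config, file_config):
  return any(app_config.get(key) != file_config[key] for key in file_config)

print(requires_restart({'DEBUG': True}, {'DEBUG': True}))   # -> False
print(requires_restart({'DEBUG': True}, {'DEBUG': False}))  # -> True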

View file

@@ -4,57 +4,57 @@ import logging
from config_app.config_util.config.baseprovider import export_yaml, CannotWriteConfigException
from config_app.config_util.config.basefileprovider import BaseFileProvider
logger = logging.getLogger(__name__)
def _ensure_parent_dir(filepath):
""" Ensures that the parent directory of the given file path exists. """
try:
parentpath = os.path.abspath(os.path.join(filepath, os.pardir))
if not os.path.isdir(parentpath):
os.makedirs(parentpath)
except IOError as ioe:
raise CannotWriteConfigException(str(ioe))
""" Ensures that the parent directory of the given file path exists. """
try:
parentpath = os.path.abspath(os.path.join(filepath, os.pardir))
if not os.path.isdir(parentpath):
os.makedirs(parentpath)
except IOError as ioe:
raise CannotWriteConfigException(str(ioe))
class FileConfigProvider(BaseFileProvider):
""" Implementation of the config provider that reads and writes the data
from/to the file system. """
def __init__(self, config_volume, yaml_filename, py_filename):
super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)
""" Implementation of the config provider that reads and writes the data
from/to the file system. """
@property
def provider_id(self):
return 'file'
def __init__(self, config_volume, yaml_filename, py_filename):
super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)
def save_config(self, config_obj):
export_yaml(config_obj, self.yaml_path)
@property
def provider_id(self):
return 'file'
def write_volume_file(self, filename, contents):
filepath = os.path.join(self.config_volume, filename)
_ensure_parent_dir(filepath)
def save_config(self, config_obj):
export_yaml(config_obj, self.yaml_path)
try:
with open(filepath, mode='w') as f:
f.write(contents)
except IOError as ioe:
raise CannotWriteConfigException(str(ioe))
def write_volume_file(self, filename, contents):
filepath = os.path.join(self.config_volume, filename)
_ensure_parent_dir(filepath)
return filepath
try:
with open(filepath, mode='w') as f:
f.write(contents)
except IOError as ioe:
raise CannotWriteConfigException(str(ioe))
def remove_volume_file(self, filename):
filepath = os.path.join(self.config_volume, filename)
os.remove(filepath)
return filepath
def save_volume_file(self, filename, flask_file):
filepath = os.path.join(self.config_volume, filename)
_ensure_parent_dir(filepath)
def remove_volume_file(self, filename):
filepath = os.path.join(self.config_volume, filename)
os.remove(filepath)
# Write the file.
try:
flask_file.save(filepath)
except IOError as ioe:
raise CannotWriteConfigException(str(ioe))
def save_volume_file(self, filename, flask_file):
filepath = os.path.join(self.config_volume, filename)
_ensure_parent_dir(filepath)
return filepath
# Write the file.
try:
flask_file.save(filepath)
except IOError as ioe:
raise CannotWriteConfigException(str(ioe))
return filepath

View file

@@ -1,7 +1,6 @@
import json
import io
import os
from datetime import datetime, timedelta
from config_app.config_util.config.baseprovider import BaseProvider
@@ -9,73 +8,73 @@ REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', '
class TestConfigProvider(BaseProvider):
""" Implementation of the config provider for testing. Everything is kept in-memory instead on
the real file system. """
def __init__(self):
self.clear()
""" Implementation of the config provider for testing. Everything is kept in-memory instead on
the real file system. """
def clear(self):
self.files = {}
self._config = {}
def __init__(self):
self.clear()
@property
def provider_id(self):
return 'test'
def clear(self):
self.files = {}
self._config = {}
def update_app_config(self, app_config):
self._config = app_config
@property
def provider_id(self):
return 'test'
def get_config(self):
if not 'config.yaml' in self.files:
return None
def update_app_config(self, app_config):
self._config = app_config
return json.loads(self.files.get('config.yaml', '{}'))
def get_config(self):
if not 'config.yaml' in self.files:
return None
def save_config(self, config_obj):
self.files['config.yaml'] = json.dumps(config_obj)
return json.loads(self.files.get('config.yaml', '{}'))
def config_exists(self):
return 'config.yaml' in self.files
def save_config(self, config_obj):
self.files['config.yaml'] = json.dumps(config_obj)
def volume_exists(self):
return True
def config_exists(self):
return 'config.yaml' in self.files
def volume_file_exists(self, filename):
if filename in REAL_FILES:
return True
def volume_exists(self):
return True
return filename in self.files
def volume_file_exists(self, filename):
if filename in REAL_FILES:
return True
def save_volume_file(self, filename, flask_file):
self.files[filename] = flask_file.read()
return filename in self.files
def write_volume_file(self, filename, contents):
self.files[filename] = contents
def save_volume_file(self, filename, flask_file):
self.files[filename] = flask_file.read()
def get_volume_file(self, filename, mode='r'):
if filename in REAL_FILES:
return open(filename, mode=mode)
def write_volume_file(self, filename, contents):
self.files[filename] = contents
return io.BytesIO(self.files[filename])
def get_volume_file(self, filename, mode='r'):
if filename in REAL_FILES:
return open(filename, mode=mode)
def remove_volume_file(self, filename):
self.files.pop(filename, None)
return io.BytesIO(self.files[filename])
def list_volume_directory(self, path):
paths = []
for filename in self.files:
if filename.startswith(path):
paths.append(filename[len(path)+1:])
def remove_volume_file(self, filename):
self.files.pop(filename, None)
return paths
def list_volume_directory(self, path):
paths = []
for filename in self.files:
if filename.startswith(path):
paths.append(filename[len(path) + 1:])
def requires_restart(self, app_config):
return False
return paths
def reset_for_test(self):
self._config['SUPER_USERS'] = ['devtable']
self.files = {}
def requires_restart(self, app_config):
return False
def get_volume_path(self, directory, filename):
return os.path.join(directory, filename)
def reset_for_test(self):
self._config['SUPER_USERS'] = ['devtable']
self.files = {}
def get_volume_path(self, directory, filename):
return os.path.join(directory, filename)
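The test provider above swaps the filesystem for a plain dict; reading a stored "file" back is just wrapping its bytes in io.BytesIO, e.g.:

import io

files = {'config.yaml': b'{"SUPER_USERS": ["devtable"]}'}  # mirrors self.files
print(io.BytesIO(files['config.yaml']).read())  # -> b'{"SUPER_USERS": ["devtable"]}'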

View file

@@ -11,135 +11,135 @@ logger = logging.getLogger(__name__)
QE_DEPLOYMENT_LABEL = 'quay-enterprise-component'
class KubernetesAccessorSingleton(object):
  """ Singleton allowing access to kubernetes operations """
  _instance = None

  def __init__(self, kube_config=None):
    self.kube_config = kube_config
    if kube_config is None:
      self.kube_config = KubernetesConfig.from_env()

    KubernetesAccessorSingleton._instance = self

  @classmethod
  def get_instance(cls, kube_config=None):
    """
    Singleton getter implementation, returns the instance if one exists, otherwise creates the
    instance and ties it to the class.
    :return: KubernetesAccessorSingleton
    """
    if cls._instance is None:
      return cls(kube_config)

    return cls._instance

  def save_file_as_secret(self, name, file_path):
    with open(file_path) as f:
      value = f.read()
      self._update_secret_file(name, value)

  def get_qe_deployments(self):
    """
    Returns all deployments matching the label selector provided in the KubeConfig
    """
    deployment_selector_url = 'namespaces/%s/deployments?labelSelector=%s%%3D%s' % (
      self.kube_config.qe_namespace, QE_DEPLOYMENT_LABEL, self.kube_config.qe_deployment_selector
    )

    response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/extensions/v1beta1')
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def cycle_qe_deployments(self, deployment_names):
    """
    Triggers a rollout of all desired deployments in the qe namespace
    """

    for name in deployment_names:
      logger.debug('Cycling deployment %s', name)
      deployment_url = 'namespaces/%s/deployments/%s' % (self.kube_config.qe_namespace, name)

      # There is currently no command to simply rolling restart all the pods: https://github.com/kubernetes/kubernetes/issues/13488
      # Instead, we modify the template of the deployment with a dummy env variable to trigger a cycle of the pods
      # (based off this comment: https://github.com/kubernetes/kubernetes/issues/13488#issuecomment-240393845)
      self._assert_success(self._execute_k8s_api('PATCH', deployment_url, {
        'spec': {
          'template': {
            'spec': {
              'containers': [{
                'name': 'quay-enterprise-app', 'env': [{
                  'name': 'RESTART_TIME',
                  'value': str(datetime.datetime.now())
                }]
              }]
            }
          }
        }
      }, api_prefix='apis/extensions/v1beta1', content_type='application/strategic-merge-patch+json'))

  def _assert_success(self, response):
    if response.status_code != 200:
      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
                   response.text)
      raise Exception('Kubernetes API call failed: %s' % response.text)

  def _update_secret_file(self, relative_file_path, value=None):
    if '/' in relative_file_path:
      raise Exception('Expected path from get_volume_path, but found slashes')

    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace
      raise Exception(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
    secret = self._lookup_secret()
    if secret is None:
      self._assert_success(self._execute_k8s_api('POST', secret_url, {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": self.kube_config.qe_config_secret
        },
        "data": {}
      }))

      # Re-read the secret we just created, so the update below operates on a real
      # object instead of the None returned by the lookup above.
      secret = self._lookup_secret()

    # Update the secret to reflect the file change.
    secret['data'] = secret.get('data', {})

    if value is not None:
      secret['data'][relative_file_path] = base64.b64encode(value)
    else:
      secret['data'].pop(relative_file_path)

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

  def _lookup_secret(self):
    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
    response = self._execute_k8s_api('GET', secret_url)
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def _execute_k8s_api(self, method, relative_url, data=None, api_prefix='api/v1', content_type='application/json'):
    headers = {
      'Authorization': 'Bearer ' + self.kube_config.service_account_token
    }

    if data:
      headers['Content-Type'] = content_type

    data = json.dumps(data) if data else None
    session = Session()
    url = 'https://%s/%s/%s' % (self.kube_config.api_host, api_prefix, relative_url)

    request = Request(method, url, data=data, headers=headers)
    return session.send(request.prepare(), verify=False, timeout=2)
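Taken together, the accessor supports the config app's restart flow: list the labeled deployments, then patch each one to cycle its pods. An illustrative sketch (the error handling and variable names here are not from the module):

# Illustrative flow only; assumes an in-cluster environment or an explicit KubernetesConfig.
accessor = KubernetesAccessorSingleton.get_instance()

deployments = accessor.get_qe_deployments()
if deployments is None:
  raise Exception('Could not list quay-enterprise deployments')

# The list response follows the standard Kubernetes shape: items[].metadata.name
names = [d['metadata']['name'] for d in deployments.get('items', [])]
accessor.cycle_qe_deployments(names)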

View file

@ -8,9 +8,11 @@ DEFAULT_QE_CONFIG_SECRET = 'quay-enterprise-config-secret'
# The name of the quay enterprise deployment (not the config app) that is used to query & roll out
DEFAULT_QE_DEPLOYMENT_SELECTOR = 'app'


def get_k8s_namespace():
  return os.environ.get('QE_K8S_NAMESPACE', DEFAULT_QE_NAMESPACE)


class KubernetesConfig(object):
  def __init__(self, api_host='', service_account_token=SERVICE_ACCOUNT_TOKEN_PATH,
               qe_namespace=DEFAULT_QE_NAMESPACE,
@ -31,7 +33,7 @@ class KubernetesConfig(object):
      with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
        service_token = f.read()

    api_host = os.environ.get('KUBERNETES_SERVICE_HOST', '')
    port = os.environ.get('KUBERNETES_SERVICE_PORT')
    if port:
      api_host += ':' + port
@ -42,6 +44,3 @@ class KubernetesConfig(object):
    return cls(api_host=api_host, service_account_token=service_token, qe_namespace=qe_namespace,
               qe_config_secret=qe_config_secret, qe_deployment_selector=qe_deployment_selector)
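For reference, the host assembly in from_env above condenses to the following (the env vars are the standard in-cluster Kubernetes service variables shown in the hunk; the example value is invented):

import os

# Condensed sketch of the from_env() host assembly shown in the hunk above.
api_host = os.environ.get('KUBERNETES_SERVICE_HOST', '')
port = os.environ.get('KUBERNETES_SERVICE_PORT')
if port:
  api_host += ':' + port  # e.g. '10.96.0.1:443'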

View file

@ -3,45 +3,45 @@ from config_app._init_config import CONF_DIR
def logfile_path(jsonfmt=False, debug=False):
  """
  Returns the logfile conf path following these rules:
  - conf/logging_debug_json.conf # jsonfmt=true, debug=true
  - conf/logging_json.conf # jsonfmt=true, debug=false
  - conf/logging_debug.conf # jsonfmt=false, debug=true
  - conf/logging.conf # jsonfmt=false, debug=false
  Can be parametrized via envvars: JSONLOG=true, DEBUGLOG=true
  """
  _json = ""
  _debug = ""

  if jsonfmt or os.getenv('JSONLOG', 'false').lower() == 'true':
    _json = "_json"

  if debug or os.getenv('DEBUGLOG', 'false').lower() == 'true':
    _debug = "_debug"

  return os.path.join(CONF_DIR, "logging%s%s.conf" % (_debug, _json))


def filter_logs(values, filtered_fields):
  """
  Takes a dict and a list of keys to filter.
  eg:
  with filtered_fields:
  [{'key': ['k1', 'k2'], 'fn': lambda x: 'filtered'}]
  and values:
  {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'}
  the returned dict is:
  {'k1': {'k2': 'filtered'}, 'k3': 'some-value'}
  """
  for field in filtered_fields:
    cdict = values

    for key in field['key'][:-1]:
      if key in cdict:
        cdict = cdict[key]

    last_key = field['key'][-1]

    if last_key in cdict and cdict[last_key]:
      cdict[last_key] = field['fn'](cdict[last_key])
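filter_logs mutates the dict in place along each key path, which makes it cheap to sanitize a config before logging it. A quick behavioral check (values invented for illustration):

# Invented values; filter_logs mutates `config` in place.
config = {'DB_URI': 'mysql://user:secret@host/db', 'FEATURE_FOO': True}
filter_logs(config, [{'key': ['DB_URI'], 'fn': lambda x: '[FILTERED]'}])
assert config == {'DB_URI': '[FILTERED]', 'FEATURE_FOO': True}

# logfile_path composes the file name from the two flags, e.g.:
# logfile_path(jsonfmt=True, debug=True) -> CONF_DIR/logging_debug_json.conf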

View file

@ -2,10 +2,12 @@ from fnmatch import fnmatch
import OpenSSL
class CertInvalidException(Exception):
  """ Exception raised when a certificate could not be parsed/loaded. """
  pass


class KeyInvalidException(Exception):
  """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. """
  pass
@ -24,8 +26,10 @@ def load_certificate(cert_contents):
_SUBJECT_ALT_NAME = 'subjectAltName'


class SSLCertificate(object):
  """ Helper class for easier working with SSL certificates. """
  def __init__(self, openssl_cert):
    self.openssl_cert = openssl_cert
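The load_certificate hunk referenced above is truncated here; a plausible sketch of its shape, assuming it wraps pyOpenSSL's PEM loader and raises the exception type defined above (the real error handling may differ):

import OpenSSL

def load_certificate(cert_contents):
  """ Sketch only: parse PEM contents into an SSLCertificate wrapper. """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents)
    return SSLCertificate(cert)
  except OpenSSL.crypto.Error as ex:
    raise CertInvalidException(str(ex))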

View file

@ -1,20 +1,22 @@
from util.config.validator import EXTRA_CA_DIRECTORY
def strip_absolute_path_and_add_trailing_dir(path):
  """
  Removes the leading / from the prefix path and adds a trailing / to mark it as a directory
  """
  return path[1:] + '/'


def tarinfo_filter_partial(prefix):
  def tarinfo_filter(tarinfo):
    # remove leading directory info
    tarinfo.name = tarinfo.name.replace(prefix, '')

    # ignore any directory that isn't the specified extra ca one:
    if tarinfo.isdir() and tarinfo.name != EXTRA_CA_DIRECTORY:
      return None

    return tarinfo

  return tarinfo_filter
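These helpers build the filter= callback for tarfile when packaging the config directory: the prefix is stripped from every member name, and any directory other than the extra CA one is dropped. A usage sketch (the paths are invented; each top-level entry is added separately so the filter never sees the root directory itself):

import os
import tarfile

config_dir = '/conf/stack'  # invented path
prefix = strip_absolute_path_and_add_trailing_dir(config_dir)  # 'conf/stack/'

with tarfile.open('quay-config.tar.gz', mode='w:gz') as tar:
  for name in os.listdir(config_dir):
    # Member names arrive as 'conf/stack/...'; the filter flattens the prefix.
    tar.add(os.path.join(config_dir, name), filter=tarinfo_filter_partial(prefix))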

View file

@ -1,23 +1,25 @@
import pytest
import re
from httmock import urlmatch, HTTMock, response
from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
from config_app.config_util.k8sconfig import KubernetesConfig
@pytest.mark.parametrize('kube_config, expected_api, expected_query', [
  ({'api_host': 'www.customhost.com'},
   '/apis/extensions/v1beta1/namespaces/quay-enterprise/deployments',
   'labelSelector=quay-enterprise-component%3Dapp'),
  ({'api_host': 'www.customhost.com', 'qe_deployment_selector': 'custom-selector'},
   '/apis/extensions/v1beta1/namespaces/quay-enterprise/deployments',
   'labelSelector=quay-enterprise-component%3Dcustom-selector'),
  ({'api_host': 'www.customhost.com', 'qe_namespace': 'custom-namespace'},
   '/apis/extensions/v1beta1/namespaces/custom-namespace/deployments',
   'labelSelector=quay-enterprise-component%3Dapp'),
  ({'api_host': 'www.customhost.com', 'qe_namespace': 'custom-namespace', 'qe_deployment_selector': 'custom-selector'},
   '/apis/extensions/v1beta1/namespaces/custom-namespace/deployments',
   'labelSelector=quay-enterprise-component%3Dcustom-selector'),
])
def test_get_qe_deployments(kube_config, expected_api, expected_query):
  config = KubernetesConfig(**kube_config)
@ -36,12 +38,15 @@ def test_get_qe_deployments(kube_config, expected_api, expected_query):
assert url_hit[0]
@pytest.mark.parametrize('kube_config, deployment_names, expected_api_hits', [
  ({'api_host': 'www.customhost.com'}, [], []),
  ({'api_host': 'www.customhost.com'}, ['myDeployment'],
   ['/apis/extensions/v1beta1/namespaces/quay-enterprise/deployments/myDeployment']),
  ({'api_host': 'www.customhost.com', 'qe_namespace': 'custom-namespace'},
   ['myDeployment', 'otherDeployment'],
   ['/apis/extensions/v1beta1/namespaces/custom-namespace/deployments/myDeployment',
    '/apis/extensions/v1beta1/namespaces/custom-namespace/deployments/otherDeployment']),
])
def test_cycle_qe_deployments(kube_config, deployment_names, expected_api_hits):
  KubernetesAccessorSingleton._instance = None
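The assertions elided by the hunks rely on httmock intercepting the accessor's HTTP calls; the pattern presumably resembles this sketch (the handler body and the url_hit bookkeeping are assumptions, not copied from the test):

# Sketch of the httmock pattern these tests use; handler details are assumed.
config = KubernetesConfig(api_host='www.customhost.com')
url_hit = [False]

@urlmatch(netloc=r'www\.customhost\.com')
def handler(url, request):
  url_hit[0] = True
  return response(200, '{"items": []}')

with HTTMock(handler):
  KubernetesAccessorSingleton._instance = None
  KubernetesAccessorSingleton.get_instance(config).get_qe_deployments()

assert url_hit[0]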

View file

@ -6,24 +6,27 @@ from util.config.validator import EXTRA_CA_DIRECTORY
from test.fixtures import *
class MockTarInfo:
  def __init__(self, name, isdir):
    self.name = name
    self.isdir = lambda: isdir

  def __eq__(self, other):
    return other is not None and self.name == other.name


@pytest.mark.parametrize('prefix,tarinfo,expected', [
  # It should handle simple files
  ('Users/sam/', MockTarInfo('Users/sam/config.yaml', False), MockTarInfo('config.yaml', False)),
  # It should allow the extra CA dir
  ('Users/sam/', MockTarInfo('Users/sam/%s' % EXTRA_CA_DIRECTORY, True), MockTarInfo('%s' % EXTRA_CA_DIRECTORY, True)),
  # it should allow a file in that extra dir
  ('Users/sam/', MockTarInfo('Users/sam/%s/cert.crt' % EXTRA_CA_DIRECTORY, False),
   MockTarInfo('%s/cert.crt' % EXTRA_CA_DIRECTORY, False)),
  # it should not allow a directory that isn't the CA dir
  ('Users/sam/', MockTarInfo('Users/sam/dirignore', True), None),
])
def test_tarinfo_filter(prefix, tarinfo, expected):
  partial = tarinfo_filter_partial(prefix)
  assert partial(tarinfo) == expected

View file

@ -2,7 +2,5 @@ from config_app.c_app import app as application
from config_app.config_endpoints.api import api_bp
from config_app.config_endpoints.setup_web import setup_web
application.register_blueprint(setup_web)
application.register_blueprint(api_bp, url_prefix='/api')