initial import for Open Source 🎉
This commit is contained in: parent 1898c361f3, commit 9c0dd3b722
2048 changed files with 218743 additions and 0 deletions
13  util/config/provider/__init__.py  Normal file
@@ -0,0 +1,13 @@
from util.config.provider.fileprovider import FileConfigProvider
from util.config.provider.testprovider import TestConfigProvider
from util.config.provider.k8sprovider import KubernetesConfigProvider


def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False):
  """ Loads and returns the config provider for the current environment. """
  if testing:
    return TestConfigProvider()

  if kubernetes:
    return KubernetesConfigProvider(config_volume, yaml_filename, py_filename)

  return FileConfigProvider(config_volume, yaml_filename, py_filename)
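Usage sketch (not part of this commit): a minimal illustration of how get_config_provider might be wired into application startup. The volume path and Flask app object here are illustrative assumptions, not values defined by this module.

from flask import Flask

from util.config.provider import get_config_provider

app = Flask(__name__)

# Pick the provider based on the environment: in-memory for tests, a Kubernetes
# secret when running under Kubernetes, otherwise the plain file system.
config_provider = get_config_provider('conf/stack', 'config.yaml', 'config.py',
                                      testing=False, kubernetes=False)
config_provider.update_app_config(app.config)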
62  util/config/provider/basefileprovider.py  Normal file
@@ -0,0 +1,62 @@
import os
import logging

from util.config.provider.baseprovider import (BaseProvider, import_yaml, export_yaml,
                                                CannotWriteConfigException)

logger = logging.getLogger(__name__)

class BaseFileProvider(BaseProvider):
  """ Base implementation of the config provider that reads the data from the file system. """
  def __init__(self, config_volume, yaml_filename, py_filename):
    self.config_volume = config_volume
    self.yaml_filename = yaml_filename
    self.py_filename = py_filename

    self.yaml_path = os.path.join(config_volume, yaml_filename)
    self.py_path = os.path.join(config_volume, py_filename)

  def update_app_config(self, app_config):
    if os.path.exists(self.py_path):
      logger.debug('Applying config file: %s', self.py_path)
      app_config.from_pyfile(self.py_path)

    if os.path.exists(self.yaml_path):
      logger.debug('Applying config file: %s', self.yaml_path)
      import_yaml(app_config, self.yaml_path)

  def get_config(self):
    if not self.config_exists():
      return None

    config_obj = {}
    import_yaml(config_obj, self.yaml_path)
    return config_obj

  def config_exists(self):
    return self.volume_file_exists(self.yaml_filename)

  def volume_exists(self):
    return os.path.exists(self.config_volume)

  def volume_file_exists(self, relative_file_path):
    return os.path.exists(os.path.join(self.config_volume, relative_file_path))

  def get_volume_file(self, relative_file_path, mode='r'):
    return open(os.path.join(self.config_volume, relative_file_path), mode=mode)

  def get_volume_path(self, directory, relative_file_path):
    return os.path.join(directory, relative_file_path)

  def list_volume_directory(self, path):
    dirpath = os.path.join(self.config_volume, path)
    if not os.path.exists(dirpath):
      return None

    if not os.path.isdir(dirpath):
      return None

    return os.listdir(dirpath)

  def get_config_root(self):
    return self.config_volume
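A quick illustration (not from this commit) of the layering that update_app_config performs: the Python file is applied first with Flask's from_pyfile, then the YAML override is merged on top, so uppercase keys in config.yaml take precedence. The directory layout and hostname below are hypothetical.

# Hypothetical layout, for illustration only:
#   /conf/stack/config.py    ->  SERVER_HOSTNAME = 'localhost:5000'
#   /conf/stack/config.yaml  ->  SERVER_HOSTNAME: quay.example.com
from flask import Flask

from util.config.provider.fileprovider import FileConfigProvider

app = Flask(__name__)
provider = FileConfigProvider('/conf/stack', 'config.yaml', 'config.py')
provider.update_app_config(app.config)

# config.py is applied first, then config.yaml, so the YAML value wins here.
print(app.config.get('SERVER_HOSTNAME'))  # -> 'quay.example.com'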
123  util/config/provider/baseprovider.py  Normal file
@@ -0,0 +1,123 @@
import logging
import yaml

from abc import ABCMeta, abstractmethod
from six import add_metaclass

from jsonschema import validate, ValidationError

from util.config.schema import CONFIG_SCHEMA

logger = logging.getLogger(__name__)


class CannotWriteConfigException(Exception):
  """ Exception raised when the config cannot be written. """
  pass


class SetupIncompleteException(Exception):
  """ Exception raised when attempting to verify config that has not yet been setup. """
  pass


def import_yaml(config_obj, config_file):
  with open(config_file) as f:
    c = yaml.safe_load(f)
    if not c:
      logger.debug('Empty YAML config file')
      return

    if isinstance(c, str):
      raise Exception('Invalid YAML config file: ' + str(c))

    for key in c.iterkeys():
      if key.isupper():
        config_obj[key] = c[key]

  if config_obj.get('SETUP_COMPLETE', True):
    try:
      validate(config_obj, CONFIG_SCHEMA)
    except ValidationError:
      # TODO: Change this into a real error
      logger.exception('Could not validate config schema')
  else:
    logger.debug('Skipping config schema validation because setup is not complete')

  return config_obj


def get_yaml(config_obj):
  return yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True)


def export_yaml(config_obj, config_file):
  try:
    with open(config_file, 'w') as f:
      f.write(get_yaml(config_obj))
  except IOError as ioe:
    raise CannotWriteConfigException(str(ioe))


@add_metaclass(ABCMeta)
class BaseProvider(object):
  """ A configuration provider helps to load, save, and handle config override in the application.
  """

  @property
  def provider_id(self):
    raise NotImplementedError

  @abstractmethod
  def update_app_config(self, app_config):
    """ Updates the given application config object with the loaded override config. """

  @abstractmethod
  def get_config(self):
    """ Returns the contents of the config override file, or None if none. """

  @abstractmethod
  def save_config(self, config_object):
    """ Updates the contents of the config override file to those given. """

  @abstractmethod
  def config_exists(self):
    """ Returns true if a config override file exists in the config volume. """

  @abstractmethod
  def volume_exists(self):
    """ Returns whether the config override volume exists. """

  @abstractmethod
  def volume_file_exists(self, relative_file_path):
    """ Returns whether the file with the given relative path exists under the config override
        volume. """

  @abstractmethod
  def get_volume_file(self, relative_file_path, mode='r'):
    """ Returns a Python file referring to the given path under the config override volume. """

  @abstractmethod
  def remove_volume_file(self, relative_file_path):
    """ Removes the config override volume file with the given path. """

  @abstractmethod
  def list_volume_directory(self, path):
    """ Returns a list of strings representing the names of the files found in the config override
        directory under the given path. If the path doesn't exist, returns None.
    """

  @abstractmethod
  def save_volume_file(self, flask_file, relative_file_path):
    """ Saves the given flask file to the config override volume, with the given
        relative path.
    """

  @abstractmethod
  def get_volume_path(self, directory, filename):
    """ Helper for constructing relative file paths, which may differ between providers.
        For example, kubernetes can't have subfolders in configmaps """

  @abstractmethod
  def get_config_root(self):
    """ Returns the config root directory. """
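For reference (not part of the commit), import_yaml only copies uppercase top-level keys into the target object, which is why lowercase keys in a config.yaml are silently ignored. A minimal sketch against this (Python 2) module, using a throwaway file path as an assumption:

from util.config.provider.baseprovider import import_yaml

# Write a small YAML file with one uppercase and one lowercase key.
with open('/tmp/example.yaml', 'w') as f:
  f.write('FEATURE_FOO: true\nignored_key: bar\n')

config = {}
import_yaml(config, '/tmp/example.yaml')

# Only the uppercase key survives; schema validation failures are logged, not raised.
assert config == {'FEATURE_FOO': True}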
47  util/config/provider/fileprovider.py  Normal file
@@ -0,0 +1,47 @@
import os
import logging

from util.config.provider.baseprovider import export_yaml, CannotWriteConfigException
from util.config.provider.basefileprovider import BaseFileProvider


logger = logging.getLogger(__name__)

def _ensure_parent_dir(filepath):
  """ Ensures that the parent directory of the given file path exists. """
  try:
    parentpath = os.path.abspath(os.path.join(filepath, os.pardir))
    if not os.path.isdir(parentpath):
      os.makedirs(parentpath)
  except IOError as ioe:
    raise CannotWriteConfigException(str(ioe))


class FileConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes the data
      from/to the file system. """
  def __init__(self, config_volume, yaml_filename, py_filename):
    super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)

  @property
  def provider_id(self):
    return 'file'

  def save_config(self, config_obj):
    export_yaml(config_obj, self.yaml_path)

  def remove_volume_file(self, relative_file_path):
    filepath = os.path.join(self.config_volume, relative_file_path)
    os.remove(filepath)

  def save_volume_file(self, flask_file, relative_file_path):
    filepath = os.path.join(self.config_volume, relative_file_path)
    _ensure_parent_dir(filepath)

    # Write the file.
    try:
      flask_file.save(filepath)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

    return filepath
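A short usage sketch (not from this commit) of FileConfigProvider's write path: save_volume_file creates missing parent directories under the volume and surfaces I/O failures as CannotWriteConfigException. The FakeUpload class and paths below are hypothetical stand-ins for a Flask FileStorage upload.

from util.config.provider.fileprovider import FileConfigProvider
from util.config.provider.baseprovider import CannotWriteConfigException

class FakeUpload(object):
  """ Hypothetical stand-in for a Flask FileStorage object. """
  def save(self, dst):
    with open(dst, 'w') as f:
      f.write('-----BEGIN CERTIFICATE-----\n...')

provider = FileConfigProvider('/tmp/quay-config', 'config.yaml', 'config.py')
try:
  # Parent directories under the volume are created on demand.
  path = provider.save_volume_file(FakeUpload(), 'extra_ca_certs/example.crt')
except CannotWriteConfigException as cwce:
  print('Could not write config: %s' % cwce)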
188  util/config/provider/k8sprovider.py  Normal file
@@ -0,0 +1,188 @@
import os
import logging
import json
import base64
import time

from cStringIO import StringIO
from requests import Request, Session

from util.config.provider.baseprovider import CannotWriteConfigException, get_yaml
from util.config.provider.basefileprovider import BaseFileProvider


logger = logging.getLogger(__name__)

KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
port = os.environ.get('KUBERNETES_SERVICE_PORT')
if port:
  KUBERNETES_API_HOST += ':' + port

SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')

class KubernetesConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes configuration
      data from a Kubernetes Secret. """
  def __init__(self, config_volume, yaml_filename, py_filename, api_host=None,
               service_account_token_path=None):
    super(KubernetesConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)
    service_account_token_path = service_account_token_path or SERVICE_ACCOUNT_TOKEN_PATH
    api_host = api_host or KUBERNETES_API_HOST

    # Load the service account token from the local store.
    if not os.path.exists(service_account_token_path):
      raise Exception('Cannot load Kubernetes service account token')

    with open(service_account_token_path, 'r') as f:
      self._service_token = f.read()

    self._api_host = api_host

  @property
  def provider_id(self):
    return 'k8s'

  def get_volume_path(self, directory, filename):
    # NOTE: Overridden to ensure we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    return "_".join([directory.rstrip('/'), filename])

  def volume_exists(self):
    secret = self._lookup_secret()
    return secret is not None

  def volume_file_exists(self, relative_file_path):
    if '/' in relative_file_path:
      raise Exception('Expected path from get_volume_path, but found slashes')

    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()
    if not secret or not secret.get('data'):
      return False
    return relative_file_path in secret['data']

  def list_volume_directory(self, path):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()

    if not secret:
      return []

    paths = []
    for filename in secret.get('data', {}):
      if filename.startswith(path):
        paths.append(filename[len(path) + 1:])
    return paths

  def save_config(self, config_obj):
    self._update_secret_file(self.yaml_filename, get_yaml(config_obj))

  def remove_volume_file(self, relative_file_path):
    try:
      self._update_secret_file(relative_file_path, None)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def save_volume_file(self, flask_file, relative_file_path):
    # Write the file to a temp location.
    buf = StringIO()
    try:
      try:
        flask_file.save(buf)
      except IOError as ioe:
        raise CannotWriteConfigException(str(ioe))

      self._update_secret_file(relative_file_path, buf.getvalue())
    finally:
      buf.close()

  def _assert_success(self, response):
    if response.status_code != 200:
      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
                   response.text)
      raise CannotWriteConfigException('Kubernetes API call failed: %s' % response.text)

  def _update_secret_file(self, relative_file_path, value=None):
    if '/' in relative_file_path:
      raise Exception('Expected path from get_volume_path, but found slashes')

    # Check first that the namespace for Red Hat Quay exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (QE_NAMESPACE)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % QE_NAMESPACE
      raise CannotWriteConfigException(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    secret = self._lookup_secret()
    if secret is None:
      self._assert_success(self._execute_k8s_api('POST', secret_url, {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": QE_CONFIG_SECRET
        },
        "data": {}
      }))

    # Update the secret to reflect the file change.
    secret['data'] = secret.get('data', {})

    if value is not None:
      secret['data'][relative_file_path] = base64.b64encode(value)
    else:
      secret['data'].pop(relative_file_path)

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

    # Wait until the local mounted copy of the secret has been updated, as
    # this is an eventual consistency operation, but the caller expects immediate
    # consistency.
    while True:
      matching_files = set()
      for secret_filename, encoded_value in secret['data'].iteritems():
        expected_value = base64.b64decode(encoded_value)
        try:
          with self.get_volume_file(secret_filename) as f:
            contents = f.read()

          if contents == expected_value:
            matching_files.add(secret_filename)
        except IOError:
          continue

      if matching_files == set(secret['data'].keys()):
        break

      # Sleep for a second and then try again.
      time.sleep(1)

  def _lookup_secret(self):
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    response = self._execute_k8s_api('GET', secret_url)
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def _execute_k8s_api(self, method, relative_url, data=None):
    headers = {
      'Authorization': 'Bearer ' + self._service_token
    }

    if data:
      headers['Content-Type'] = 'application/json'

    data = json.dumps(data) if data else None
    session = Session()
    url = 'https://%s/api/v1/%s' % (self._api_host, relative_url)

    request = Request(method, url, data=data, headers=headers)
    return session.send(request.prepare(), verify=False, timeout=2)
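For orientation (not part of the commit): inside a cluster the provider is typically constructed with the defaults, so the API host and bearer token come from the pod's service account, every config file becomes a base64-encoded key of a single secret, and writes block until the mounted copy catches up. The volume path and hostname below are assumptions for illustration.

# Illustration only; assumes the pod runs in the quay-enterprise namespace with
# a service account token mounted at the default path.
from util.config.provider.k8sprovider import KubernetesConfigProvider

provider = KubernetesConfigProvider('/conf/stack', 'config.yaml', 'config.py')

# The YAML is base64-encoded and stored under the 'config.yaml' key of the
# quay-enterprise-config-secret secret; save_config then polls the mounted
# volume until the kubelet has synced the change.
provider.save_config({'SERVER_HOSTNAME': 'quay.example.com'})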
29  util/config/provider/test/test_fileprovider.py  Normal file
@@ -0,0 +1,29 @@
import pytest

from util.config.provider import FileConfigProvider

from test.fixtures import *


class TestFileConfigProvider(FileConfigProvider):
  def __init__(self):
    self.yaml_filename = 'yaml_filename'
    self._service_token = 'service_token'
    self.config_volume = 'config_volume'
    self.py_filename = 'py_filename'
    self.yaml_path = os.path.join(self.config_volume, self.yaml_filename)
    self.py_path = os.path.join(self.config_volume, self.py_filename)


@pytest.mark.parametrize('directory,filename,expected', [
  ("directory", "file", "directory/file"),
  ("directory/dir", "file", "directory/dir/file"),
  ("directory/dir/", "file", "directory/dir/file"),
  ("directory", "file/test", "directory/file/test"),
])
def test_get_volume_path(directory, filename, expected):
  provider = TestFileConfigProvider()

  assert expected == provider.get_volume_path(directory, filename)
138  util/config/provider/test/test_k8sprovider.py  Normal file
@@ -0,0 +1,138 @@
import base64
import os
import json
import uuid

import pytest

from contextlib import contextmanager
from collections import namedtuple
from httmock import urlmatch, HTTMock

from util.config.provider import KubernetesConfigProvider

def normalize_path(path):
  return path.replace('/', '_')

@contextmanager
def fake_kubernetes_api(tmpdir_factory, files=None):
  hostname = 'kubapi'
  service_account_token_path = str(tmpdir_factory.mktemp("k8s").join("serviceaccount"))
  auth_header = str(uuid.uuid4())

  with open(service_account_token_path, 'w') as f:
    f.write(auth_header)

  global secret
  secret = {
    'data': {}
  }

  def write_file(config_dir, filepath, value):
    normalized_path = normalize_path(filepath)
    absolute_path = str(config_dir.join(normalized_path))
    try:
      os.makedirs(os.path.dirname(absolute_path))
    except OSError:
      pass

    with open(absolute_path, 'w') as f:
      f.write(value)

  config_dir = tmpdir_factory.mktemp("config")
  if files:
    for filepath, value in files.iteritems():
      normalized_path = normalize_path(filepath)
      write_file(config_dir, filepath, value)
      secret['data'][normalized_path] = base64.b64encode(value)

  @urlmatch(netloc=hostname,
            path='/api/v1/namespaces/quay-enterprise/secrets/quay-enterprise-config-secret$',
            method='get')
  def get_secret(_, __):
    return {'status_code': 200, 'content': json.dumps(secret)}

  @urlmatch(netloc=hostname,
            path='/api/v1/namespaces/quay-enterprise/secrets/quay-enterprise-config-secret$',
            method='put')
  def put_secret(_, request):
    updated_secret = json.loads(request.body)
    for filepath, value in updated_secret['data'].iteritems():
      if filepath not in secret['data']:
        # Add
        write_file(config_dir, filepath, base64.b64decode(value))

    for filepath in secret['data']:
      if filepath not in updated_secret['data']:
        # Remove.
        normalized_path = normalize_path(filepath)
        os.remove(str(config_dir.join(normalized_path)))

    secret['data'] = updated_secret['data']
    return {'status_code': 200, 'content': json.dumps(secret)}

  @urlmatch(netloc=hostname, path='/api/v1/namespaces/quay-enterprise$')
  def get_namespace(_, __):
    return {'status_code': 200, 'content': json.dumps({})}

  @urlmatch(netloc=hostname)
  def catch_all(url, _):
    print url
    return {'status_code': 404, 'content': '{}'}

  with HTTMock(get_secret, put_secret, get_namespace, catch_all):
    provider = KubernetesConfigProvider(str(config_dir), 'config.yaml', 'config.py',
                                        api_host=hostname,
                                        service_account_token_path=service_account_token_path)

    # Validate all the files.
    for filepath, value in files.iteritems():
      normalized_path = normalize_path(filepath)
      assert provider.volume_file_exists(normalized_path)
      with provider.get_volume_file(normalized_path) as f:
        assert f.read() == value

    yield provider


def test_basic_config(tmpdir_factory):
  basic_files = {
    'config.yaml': 'FOO: bar',
  }

  with fake_kubernetes_api(tmpdir_factory, files=basic_files) as provider:
    assert provider.config_exists()
    assert provider.get_config() is not None
    assert provider.get_config()['FOO'] == 'bar'


@pytest.mark.parametrize('filepath', [
  'foo',
  'foo/meh',
  'foo/bar/baz',
])
def test_remove_file(filepath, tmpdir_factory):
  basic_files = {
    filepath: 'foo',
  }

  with fake_kubernetes_api(tmpdir_factory, files=basic_files) as provider:
    normalized_path = normalize_path(filepath)
    assert provider.volume_file_exists(normalized_path)
    provider.remove_volume_file(normalized_path)
    assert not provider.volume_file_exists(normalized_path)


class TestFlaskFile(object):
  def save(self, buf):
    buf.write('hello world!')


def test_save_file(tmpdir_factory):
  basic_files = {}

  with fake_kubernetes_api(tmpdir_factory, files=basic_files) as provider:
    assert not provider.volume_file_exists('testfile')
    flask_file = TestFlaskFile()
    provider.save_volume_file(flask_file, 'testfile')
    assert provider.volume_file_exists('testfile')
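The fixture above relies on httmock to intercept the provider's HTTPS calls so no real cluster is needed. A stripped-down sketch (not from this commit) of the same interception pattern, using the same hostname and namespace path as the tests:

import json

import requests
from httmock import urlmatch, HTTMock

@urlmatch(netloc='kubapi', path='/api/v1/namespaces/quay-enterprise$')
def fake_namespace(url, request):
  # Any requests call matching the host and path above is answered locally.
  return {'status_code': 200, 'content': json.dumps({})}

with HTTMock(fake_namespace):
  resp = requests.get('https://kubapi/api/v1/namespaces/quay-enterprise')
  assert resp.status_code == 200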
77  util/config/provider/testprovider.py  Normal file
@@ -0,0 +1,77 @@
import json
import io
import os
from datetime import datetime, timedelta

from util.config.provider.baseprovider import BaseProvider

REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem']

class TestConfigProvider(BaseProvider):
  """ Implementation of the config provider for testing. Everything is kept in-memory instead on
      the real file system. """

  def get_config_root(self):
    raise Exception('Test Config does not have a config root')

  def __init__(self):
    self.clear()

  def clear(self):
    self.files = {}
    self._config = {}

  @property
  def provider_id(self):
    return 'test'

  def update_app_config(self, app_config):
    self._config = app_config

  def get_config(self):
    if not 'config.yaml' in self.files:
      return None

    return json.loads(self.files.get('config.yaml', '{}'))

  def save_config(self, config_obj):
    self.files['config.yaml'] = json.dumps(config_obj)

  def config_exists(self):
    return 'config.yaml' in self.files

  def volume_exists(self):
    return True

  def volume_file_exists(self, filename):
    if filename in REAL_FILES:
      return True

    return filename in self.files

  def save_volume_file(self, flask_file, filename):
    self.files[filename] = flask_file.read()

  def get_volume_file(self, filename, mode='r'):
    if filename in REAL_FILES:
      return open(filename, mode=mode)

    return io.BytesIO(self.files[filename])

  def remove_volume_file(self, filename):
    self.files.pop(filename, None)

  def list_volume_directory(self, path):
    paths = []
    for filename in self.files:
      if filename.startswith(path):
        paths.append(filename[len(path)+1:])

    return paths

  def reset_for_test(self):
    self._config['SUPER_USERS'] = ['devtable']
    self.files = {}

  def get_volume_path(self, directory, filename):
    return os.path.join(directory, filename)
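A small sketch (not part of the commit) of how the in-memory TestConfigProvider can stand in for the real provider in unit tests; the filenames are the same ones get_config_provider is normally called with, and the config content here is illustrative:

from util.config.provider import get_config_provider

provider = get_config_provider('config_volume', 'config.yaml', 'config.py', testing=True)

# Everything is held in an in-memory dict rather than on disk.
provider.save_config({'SUPER_USERS': ['devtable']})
assert provider.config_exists()
assert provider.get_config() == {'SUPER_USERS': ['devtable']}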