initial import for Open Source 🎉
parent 1898c361f3
commit 9c0dd3b722
2048 changed files with 218743 additions and 0 deletions
0  config_app/config_util/__init__.py  Normal file
62  config_app/config_util/config/TransientDirectoryProvider.py  Normal file
@@ -0,0 +1,62 @@
import os

from shutil import copytree
from backports.tempfile import TemporaryDirectory

from config_app.config_util.config.fileprovider import FileConfigProvider

OLD_CONFIG_SUBDIR = 'old/'


class TransientDirectoryProvider(FileConfigProvider):
  """ Implementation of the config provider that reads and writes the data
      from/to the file system, only using temporary directories,
      deleting old dirs and creating new ones as requested.
  """

  def __init__(self, config_volume, yaml_filename, py_filename):
    # Create a temp directory that will be cleaned up when we change the config path.
    # This should ensure we have no "pollution" of different configs:
    # no uploaded config should ever affect subsequent config modifications/creations.
    temp_dir = TemporaryDirectory()
    self.temp_dir = temp_dir
    self.old_config_dir = None
    super(TransientDirectoryProvider, self).__init__(temp_dir.name, yaml_filename, py_filename)

  @property
  def provider_id(self):
    return 'transient'

  def new_config_dir(self):
    """
    Update the path with a new temporary directory, deleting the old one in the process
    """
    self.temp_dir.cleanup()
    temp_dir = TemporaryDirectory()

    self.config_volume = temp_dir.name
    self.temp_dir = temp_dir
    self.yaml_path = os.path.join(temp_dir.name, self.yaml_filename)

  def create_copy_of_config_dir(self):
    """
    Create a directory to store loaded/populated configuration (for rollback if necessary)
    """
    if self.old_config_dir is not None:
      self.old_config_dir.cleanup()

    temp_dir = TemporaryDirectory()
    self.old_config_dir = temp_dir

    # Python 2.7's shutil.copytree() requires that the destination directory not already
    # exist, so when saving off the old config we copy into a subdirectory of the new
    # temp dir instead of into the temp dir itself.
    copytree(self.config_volume, os.path.join(temp_dir.name, OLD_CONFIG_SUBDIR))

  def get_config_dir_path(self):
    return self.config_volume

  def get_old_config_dir(self):
    if self.old_config_dir is None:
      raise Exception('Cannot return an old configuration; no old configuration was saved')

    return os.path.join(self.old_config_dir.name, OLD_CONFIG_SUBDIR)
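For context, a minimal sketch of the snapshot/rollback flow this provider enables (hypothetical driver code, not part of this commit; the tests further below exercise the same flow):

provider = TransientDirectoryProvider('', 'config.yaml', 'config.py')
provider.write_volume_file('config.yaml', 'SETUP_COMPLETE: false')
provider.create_copy_of_config_dir()                 # snapshot current state under old/
provider.write_volume_file('config.yaml', 'SETUP_COMPLETE: true')
old_dir = provider.get_old_config_dir()              # .../old/ holding the snapshot
with open(os.path.join(old_dir, 'config.yaml')) as f:
  assert f.read() == 'SETUP_COMPLETE: false'         # original contents preserved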
39  config_app/config_util/config/__init__.py  Normal file
@@ -0,0 +1,39 @@
import base64
import os

from config_app.config_util.config.fileprovider import FileConfigProvider
from config_app.config_util.config.testprovider import TestConfigProvider
from config_app.config_util.config.TransientDirectoryProvider import TransientDirectoryProvider
from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX


def get_config_provider(config_volume, yaml_filename, py_filename, testing=False):
  """ Loads and returns the config provider for the current environment. """
  if testing:
    return TestConfigProvider()

  return TransientDirectoryProvider(config_volume, yaml_filename, py_filename)


def get_config_as_kube_secret(config_path):
  data = {}

  # Kubernetes secrets don't have sub-directories, so for the extra_ca_certs dir
  # we have to put the extra certs in with a prefix, and then one of our init scripts
  # (02_get_kube_certs.sh) will expand the prefixed certs into the equivalent directory
  # so that they'll be installed correctly on startup by the certs_install script.
  certs_dir = os.path.join(config_path, EXTRA_CA_DIRECTORY)
  if os.path.exists(certs_dir):
    for extra_cert in os.listdir(certs_dir):
      with open(os.path.join(certs_dir, extra_cert)) as f:
        data[EXTRA_CA_DIRECTORY_PREFIX + extra_cert] = base64.b64encode(f.read())

  for name in os.listdir(config_path):
    file_path = os.path.join(config_path, name)
    if not os.path.isdir(file_path):
      with open(file_path) as f:
        data[name] = base64.b64encode(f.read())

  return data
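As a sketch of the payload shape (hypothetical directory contents; the test cases in test_helpers.py below exercise the same mapping):

# A config dir like:
#   /conf/stack/config.yaml             (contents 'test:true')
#   /conf/stack/extra_ca_certs/ca.crt   (a PEM cert)
# becomes a flat dict of base64 values, with extra CA certs key-prefixed:
#   {'config.yaml': 'dGVzdDp0cnVl', 'extra_ca_certs_ca.crt': '...'}
secret_data = get_config_as_kube_secret('/conf/stack')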
72  config_app/config_util/config/basefileprovider.py  Normal file
@@ -0,0 +1,72 @@
import os
import logging

from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml,
                                                         CannotWriteConfigException)

logger = logging.getLogger(__name__)


class BaseFileProvider(BaseProvider):
  """ Base implementation of the config provider that reads the data from the file system. """

  def __init__(self, config_volume, yaml_filename, py_filename):
    self.config_volume = config_volume
    self.yaml_filename = yaml_filename
    self.py_filename = py_filename

    self.yaml_path = os.path.join(config_volume, yaml_filename)
    self.py_path = os.path.join(config_volume, py_filename)

  def update_app_config(self, app_config):
    if os.path.exists(self.py_path):
      logger.debug('Applying config file: %s', self.py_path)
      app_config.from_pyfile(self.py_path)

    if os.path.exists(self.yaml_path):
      logger.debug('Applying config file: %s', self.yaml_path)
      import_yaml(app_config, self.yaml_path)

  def get_config(self):
    if not self.config_exists():
      return None

    config_obj = {}
    import_yaml(config_obj, self.yaml_path)
    return config_obj

  def config_exists(self):
    return self.volume_file_exists(self.yaml_filename)

  def volume_exists(self):
    return os.path.exists(self.config_volume)

  def volume_file_exists(self, filename):
    return os.path.exists(os.path.join(self.config_volume, filename))

  def get_volume_file(self, filename, mode='r'):
    return open(os.path.join(self.config_volume, filename), mode=mode)

  def get_volume_path(self, directory, filename):
    return os.path.join(directory, filename)

  def list_volume_directory(self, path):
    dirpath = os.path.join(self.config_volume, path)
    if not os.path.exists(dirpath):
      return None

    if not os.path.isdir(dirpath):
      return None

    return os.listdir(dirpath)

  def requires_restart(self, app_config):
    file_config = self.get_config()
    if not file_config:
      return False

    for key in file_config:
      if app_config.get(key) != file_config[key]:
        return True

    return False
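A short sketch of the requires_restart contract (hypothetical volume path and keys): it returns True only when a key in the on-disk YAML differs from the in-memory app config:

provider = BaseFileProvider('/conf/stack', 'config.yaml', 'config.py')
app_config = {'FEATURE_X': False}
# True if config.yaml on disk contains e.g. 'FEATURE_X: true'; False when the
# values match or when no config file exists in the volume.
needs_restart = provider.requires_restart(app_config)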
128  config_app/config_util/config/baseprovider.py  Normal file
@@ -0,0 +1,128 @@
import logging
import yaml

from abc import ABCMeta, abstractmethod
from six import add_metaclass

from jsonschema import validate, ValidationError

from util.config.schema import CONFIG_SCHEMA

logger = logging.getLogger(__name__)


class CannotWriteConfigException(Exception):
  """ Exception raised when the config cannot be written. """
  pass


class SetupIncompleteException(Exception):
  """ Exception raised when attempting to verify config that has not yet been setup. """
  pass


def import_yaml(config_obj, config_file):
  with open(config_file) as f:
    c = yaml.safe_load(f)
    if not c:
      logger.debug('Empty YAML config file')
      return

    if isinstance(c, str):
      raise Exception('Invalid YAML config file: ' + str(c))

    for key in c.iterkeys():
      if key.isupper():
        config_obj[key] = c[key]

  if config_obj.get('SETUP_COMPLETE', False):
    try:
      validate(config_obj, CONFIG_SCHEMA)
    except ValidationError:
      # TODO: Change this into a real error
      logger.exception('Could not validate config schema')
  else:
    logger.debug('Skipping config schema validation because setup is not complete')

  return config_obj


def get_yaml(config_obj):
  return yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True)


def export_yaml(config_obj, config_file):
  try:
    with open(config_file, 'w') as f:
      f.write(get_yaml(config_obj))
  except IOError as ioe:
    raise CannotWriteConfigException(str(ioe))


@add_metaclass(ABCMeta)
class BaseProvider(object):
  """ A configuration provider helps to load, save, and handle config override in the application.
  """

  @property
  def provider_id(self):
    raise NotImplementedError

  @abstractmethod
  def update_app_config(self, app_config):
    """ Updates the given application config object with the loaded override config. """

  @abstractmethod
  def get_config(self):
    """ Returns the contents of the config override file, or None if none. """

  @abstractmethod
  def save_config(self, config_object):
    """ Updates the contents of the config override file to those given. """

  @abstractmethod
  def config_exists(self):
    """ Returns true if a config override file exists in the config volume. """

  @abstractmethod
  def volume_exists(self):
    """ Returns whether the config override volume exists. """

  @abstractmethod
  def volume_file_exists(self, filename):
    """ Returns whether the file with the given name exists under the config override volume. """

  @abstractmethod
  def get_volume_file(self, filename, mode='r'):
    """ Returns a Python file referring to the given name under the config override volume. """

  @abstractmethod
  def write_volume_file(self, filename, contents):
    """ Writes the given contents to the config override volume, with the given filename. """

  @abstractmethod
  def remove_volume_file(self, filename):
    """ Removes the config override volume file with the given filename. """

  @abstractmethod
  def list_volume_directory(self, path):
    """ Returns a list of strings representing the names of the files found in the config override
        directory under the given path. If the path doesn't exist, returns None.
    """

  @abstractmethod
  def save_volume_file(self, filename, flask_file):
    """ Saves the given flask file to the config override volume, with the given filename. """

  @abstractmethod
  def requires_restart(self, app_config):
    """ If true, the configuration loaded into memory for the app does not match that on disk,
        indicating that this container requires a restart.
    """

  @abstractmethod
  def get_volume_path(self, directory, filename):
    """ Helper for constructing file paths, which may differ between providers. For example,
        Kubernetes can't have subfolders in configmaps. """
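A minimal round-trip sketch for the YAML helpers (hypothetical path; note that import_yaml copies only uppercase keys, and skips schema validation until SETUP_COMPLETE is set):

config = {'REGISTRY_TITLE': 'Quay', 'lowercase_key': 'dropped on import'}
export_yaml(config, '/tmp/config.yaml')
loaded = import_yaml({}, '/tmp/config.yaml')
assert 'REGISTRY_TITLE' in loaded and 'lowercase_key' not in loaded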
60  config_app/config_util/config/fileprovider.py  Normal file
@@ -0,0 +1,60 @@
import os
import logging

from config_app.config_util.config.baseprovider import export_yaml, CannotWriteConfigException
from config_app.config_util.config.basefileprovider import BaseFileProvider

logger = logging.getLogger(__name__)


def _ensure_parent_dir(filepath):
  """ Ensures that the parent directory of the given file path exists. """
  try:
    parentpath = os.path.abspath(os.path.join(filepath, os.pardir))
    if not os.path.isdir(parentpath):
      os.makedirs(parentpath)
  except IOError as ioe:
    raise CannotWriteConfigException(str(ioe))


class FileConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes the data
      from/to the file system. """

  def __init__(self, config_volume, yaml_filename, py_filename):
    super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)

  @property
  def provider_id(self):
    return 'file'

  def save_config(self, config_obj):
    export_yaml(config_obj, self.yaml_path)

  def write_volume_file(self, filename, contents):
    filepath = os.path.join(self.config_volume, filename)
    _ensure_parent_dir(filepath)

    try:
      with open(filepath, mode='w') as f:
        f.write(contents)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

    return filepath

  def remove_volume_file(self, filename):
    filepath = os.path.join(self.config_volume, filename)
    os.remove(filepath)

  def save_volume_file(self, filename, flask_file):
    filepath = os.path.join(self.config_volume, filename)
    _ensure_parent_dir(filepath)

    # Write the file.
    try:
      flask_file.save(filepath)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

    return filepath
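A short usage sketch (hypothetical names), showing that _ensure_parent_dir lets nested filenames be written in a single call:

provider = FileConfigProvider('/conf/stack', 'config.yaml', 'config.py')
# Creates /conf/stack/extra_ca_certs/ on demand and returns the full file path:
path = provider.write_volume_file('extra_ca_certs/ca.crt', '...PEM contents...')
provider.remove_volume_file('extra_ca_certs/ca.crt')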
75  config_app/config_util/config/test/test_helpers.py  Normal file
@@ -0,0 +1,75 @@
import pytest
import os
import base64

from backports.tempfile import TemporaryDirectory

from config_app.config_util.config import get_config_as_kube_secret
from util.config.validator import EXTRA_CA_DIRECTORY


def _create_temp_file_structure(file_structure):
  temp_dir = TemporaryDirectory()

  for filename, data in file_structure.iteritems():
    if filename == EXTRA_CA_DIRECTORY:
      extra_ca_dir_path = os.path.join(temp_dir.name, EXTRA_CA_DIRECTORY)
      os.mkdir(extra_ca_dir_path)

      for name, cert_value in data:
        with open(os.path.join(extra_ca_dir_path, name), 'w') as f:
          f.write(cert_value)
    else:
      with open(os.path.join(temp_dir.name, filename), 'w') as f:
        f.write(data)

  return temp_dir


@pytest.mark.parametrize('file_structure, expected_secret', [
  pytest.param({
    'config.yaml': 'test:true',
  },
  {
    'config.yaml': 'dGVzdDp0cnVl',
  }, id='just a config value'),
  pytest.param({
    'config.yaml': 'test:true',
    'otherfile.ext': 'im a file'
  },
  {
    'config.yaml': 'dGVzdDp0cnVl',
    'otherfile.ext': base64.b64encode('im a file')
  }, id='config and another file'),
  pytest.param({
    'config.yaml': 'test:true',
    'extra_ca_certs': [
      ('cert.crt', 'im a cert!'),
    ]
  },
  {
    'config.yaml': 'dGVzdDp0cnVl',
    'extra_ca_certs_cert.crt': base64.b64encode('im a cert!'),
  }, id='config and an extra cert'),
  pytest.param({
    'config.yaml': 'test:true',
    'otherfile.ext': 'im a file',
    'extra_ca_certs': [
      ('cert.crt', 'im a cert!'),
      ('another.crt', 'im a different cert!'),
    ]
  },
  {
    'config.yaml': 'dGVzdDp0cnVl',
    'otherfile.ext': base64.b64encode('im a file'),
    'extra_ca_certs_cert.crt': base64.b64encode('im a cert!'),
    'extra_ca_certs_another.crt': base64.b64encode('im a different cert!'),
  }, id='config, files, and extra certs!'),
])
def test_get_config_as_kube_secret(file_structure, expected_secret):
  temp_dir = _create_temp_file_structure(file_structure)

  secret = get_config_as_kube_secret(temp_dir.name)
  assert secret == expected_secret

  temp_dir.cleanup()
@@ -0,0 +1,68 @@
import pytest
import os

from config_app.config_util.config.TransientDirectoryProvider import TransientDirectoryProvider


@pytest.mark.parametrize('files_to_write, operations, expected_new_dir', [
  pytest.param({
    'config.yaml': 'a config',
  }, ([], [], []), {
    'config.yaml': 'a config',
  }, id='just a config'),
  pytest.param({
    'config.yaml': 'a config',
    'oldfile': 'hmmm'
  }, ([], [], ['oldfile']), {
    'config.yaml': 'a config',
  }, id='delete a file'),
  pytest.param({
    'config.yaml': 'a config',
    'oldfile': 'hmmm'
  }, ([('newfile', 'asdf')], [], ['oldfile']), {
    'config.yaml': 'a config',
    'newfile': 'asdf'
  }, id='delete and add a file'),
  pytest.param({
    'config.yaml': 'a config',
    'somefile': 'before'
  }, ([('newfile', 'asdf')], [('somefile', 'after')], []), {
    'config.yaml': 'a config',
    'newfile': 'asdf',
    'somefile': 'after',
  }, id='add new files and change files'),
])
def test_transient_dir_copy_config_dir(files_to_write, operations, expected_new_dir):
  config_provider = TransientDirectoryProvider('', '', '')

  for name, data in files_to_write.iteritems():
    config_provider.write_volume_file(name, data)

  config_provider.create_copy_of_config_dir()

  for create in operations[0]:
    (name, data) = create
    config_provider.write_volume_file(name, data)

  for update in operations[1]:
    (name, data) = update
    config_provider.write_volume_file(name, data)

  for delete in operations[2]:
    config_provider.remove_volume_file(delete)

  # Check that the new directory matches the expected state
  for filename, data in expected_new_dir.iteritems():
    with open(os.path.join(config_provider.get_config_dir_path(), filename)) as f:
      new_data = f.read()
      assert new_data == data

  # Now check that the old dir matches the original state
  saved = config_provider.get_old_config_dir()

  for filename, data in files_to_write.iteritems():
    with open(os.path.join(saved, filename)) as f:
      new_data = f.read()
      assert new_data == data

  config_provider.temp_dir.cleanup()
83  config_app/config_util/config/testprovider.py  Normal file
@@ -0,0 +1,83 @@
import json
import io
import os

from config_app.config_util.config.baseprovider import BaseProvider

REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem']


class TestConfigProvider(BaseProvider):
  """ Implementation of the config provider for testing. Everything is kept in memory instead of
      on the real file system. """

  def __init__(self):
    self.clear()

  def clear(self):
    self.files = {}
    self._config = {}

  @property
  def provider_id(self):
    return 'test'

  def update_app_config(self, app_config):
    self._config = app_config

  def get_config(self):
    if 'config.yaml' not in self.files:
      return None

    return json.loads(self.files.get('config.yaml', '{}'))

  def save_config(self, config_obj):
    self.files['config.yaml'] = json.dumps(config_obj)

  def config_exists(self):
    return 'config.yaml' in self.files

  def volume_exists(self):
    return True

  def volume_file_exists(self, filename):
    if filename in REAL_FILES:
      return True

    return filename in self.files

  def save_volume_file(self, filename, flask_file):
    self.files[filename] = flask_file.read()

  def write_volume_file(self, filename, contents):
    self.files[filename] = contents

  def get_volume_file(self, filename, mode='r'):
    if filename in REAL_FILES:
      return open(filename, mode=mode)

    return io.BytesIO(self.files[filename])

  def remove_volume_file(self, filename):
    self.files.pop(filename, None)

  def list_volume_directory(self, path):
    paths = []
    for filename in self.files:
      if filename.startswith(path):
        paths.append(filename[len(path) + 1:])

    return paths

  def requires_restart(self, app_config):
    return False

  def reset_for_test(self):
    self._config['SUPER_USERS'] = ['devtable']
    self.files = {}

  def get_volume_path(self, directory, filename):
    return os.path.join(directory, filename)

  def get_config_dir_path(self):
    return ''
306  config_app/config_util/k8saccessor.py  Normal file
@@ -0,0 +1,306 @@
import logging
import json
import base64
import datetime
import os

from requests import Request, Session
from collections import namedtuple
from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX

from config_app.config_util.k8sconfig import KubernetesConfig

logger = logging.getLogger(__name__)

QE_DEPLOYMENT_LABEL = 'quay-enterprise-component'
QE_CONTAINER_NAME = 'quay-enterprise-app'


# Tuple containing the response of the deployment rollout status method.
# status is one of: 'failed' | 'progressing' | 'available'
# message is any string describing the state.
DeploymentRolloutStatus = namedtuple('DeploymentRolloutStatus', ['status', 'message'])


class K8sApiException(Exception):
  pass


def _deployment_rollout_status_message(deployment, deployment_name):
  """
  Gets the friendly, human-readable message for the current state of the deployment rollout
  :param deployment: python dict matching: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#deployment-v1-apps
  :param deployment_name: string
  :return: DeploymentRolloutStatus
  """
  # Logic for rollout status pulled from the `kubectl rollout status` command:
  # https://github.com/kubernetes/kubernetes/blob/d9ba19c751709c8608e09a0537eea98973f3a796/pkg/kubectl/rollout_status.go#L62
  if deployment['metadata']['generation'] <= deployment['status']['observedGeneration']:
    for cond in deployment['status']['conditions']:
      if cond['type'] == 'Progressing' and cond['reason'] == 'ProgressDeadlineExceeded':
        return DeploymentRolloutStatus(
          status='failed',
          message="Deployment %s's rollout failed. Please try again later." % deployment_name
        )

    desired_replicas = deployment['spec']['replicas']
    current_replicas = deployment['status'].get('replicas', 0)
    if current_replicas == 0:
      return DeploymentRolloutStatus(
        status='available',
        message='Deployment %s updated (no replicas, so nothing to roll out)' % deployment_name
      )

    # Some fields are optional in the spec, so if they're omitted, replace them with defaults
    # that won't indicate a wrong status.
    available_replicas = deployment['status'].get('availableReplicas', 0)
    updated_replicas = deployment['status'].get('updatedReplicas', 0)

    if updated_replicas < desired_replicas:
      return DeploymentRolloutStatus(
        status='progressing',
        message='Waiting for rollout to finish: %d out of %d new replicas have been updated...' % (
          updated_replicas, desired_replicas)
      )

    if current_replicas > updated_replicas:
      return DeploymentRolloutStatus(
        status='progressing',
        message='Waiting for rollout to finish: %d old replicas are pending termination...' % (
          current_replicas - updated_replicas)
      )

    if available_replicas < updated_replicas:
      return DeploymentRolloutStatus(
        status='progressing',
        message='Waiting for rollout to finish: %d of %d updated replicas are available...' % (
          available_replicas, updated_replicas)
      )

    return DeploymentRolloutStatus(
      status='available',
      message='Deployment %s successfully rolled out.' % deployment_name
    )

  return DeploymentRolloutStatus(
    status='progressing',
    message='Waiting for deployment spec to be updated...'
  )


class KubernetesAccessorSingleton(object):
  """ Singleton allowing access to Kubernetes operations """
  _instance = None

  def __init__(self, kube_config=None):
    self.kube_config = kube_config
    if kube_config is None:
      self.kube_config = KubernetesConfig.from_env()

    KubernetesAccessorSingleton._instance = self

  @classmethod
  def get_instance(cls, kube_config=None):
    """
    Singleton getter implementation. Returns the instance if one exists; otherwise creates the
    instance and ties it to the class.
    :return: KubernetesAccessorSingleton
    """
    if cls._instance is None:
      return cls(kube_config)

    return cls._instance

  def save_secret_to_directory(self, dir_path):
    """
    Saves all files in the kubernetes secret to a local directory.
    Assumes the directory is empty.
    """
    secret = self._lookup_secret()

    secret_data = secret.get('data', {})

    # Make the `extra_ca_certs` dir to ensure we can populate extra certs
    extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY)
    os.mkdir(extra_ca_dir_path)

    for secret_filename, data in secret_data.iteritems():
      write_path = os.path.join(dir_path, secret_filename)

      if EXTRA_CA_DIRECTORY_PREFIX in secret_filename:
        write_path = os.path.join(extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, ''))

      with open(write_path, 'w') as f:
        f.write(base64.b64decode(data))

    return 200

  def save_file_as_secret(self, name, file_pointer):
    value = file_pointer.read()
    self._update_secret_file(name, value)

  def replace_qe_secret(self, new_secret_data):
    """
    Removes the old config and replaces it with the new_secret_data as one action
    """
    # Check first that the namespace for Red Hat Quay exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace
      raise Exception(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
    secret = self._lookup_secret()
    if secret is None:
      # Build the empty secret locally as well, so the update below has an object to mutate.
      secret = {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": self.kube_config.qe_config_secret
        },
        "data": {}
      }
      self._assert_success(self._execute_k8s_api('POST', secret_url, secret))

    # Update the secret to reflect the file change.
    secret['data'] = new_secret_data

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

  def get_deployment_rollout_status(self, deployment_name):
    """
    Returns the status of a rollout of a given deployment
    :return: DeploymentRolloutStatus
    """
    deployment_selector_url = 'namespaces/%s/deployments/%s' % (
      self.kube_config.qe_namespace, deployment_name
    )

    response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/apps/v1')
    if response.status_code != 200:
      return DeploymentRolloutStatus('failed', 'Could not get deployment. Please check that the deployment exists')

    deployment = json.loads(response.text)

    return _deployment_rollout_status_message(deployment, deployment_name)

  def get_qe_deployments(self):
    """
    Returns all deployments matching the label selector provided in the KubeConfig
    """
    deployment_selector_url = 'namespaces/%s/deployments?labelSelector=%s%%3D%s' % (
      self.kube_config.qe_namespace, QE_DEPLOYMENT_LABEL, self.kube_config.qe_deployment_selector
    )

    response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/extensions/v1beta1')
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def cycle_qe_deployments(self, deployment_names):
    """
    Triggers a rollout of all desired deployments in the qe namespace
    """
    for name in deployment_names:
      logger.debug('Cycling deployment %s', name)
      deployment_url = 'namespaces/%s/deployments/%s' % (self.kube_config.qe_namespace, name)

      # There is currently no command to simply do a rolling restart of all the pods:
      # https://github.com/kubernetes/kubernetes/issues/13488
      # Instead, we modify the template of the deployment with a dummy env variable to trigger a cycle of the pods
      # (based off this comment: https://github.com/kubernetes/kubernetes/issues/13488#issuecomment-240393845)
      self._assert_success(self._execute_k8s_api('PATCH', deployment_url, {
        'spec': {
          'template': {
            'spec': {
              'containers': [{
                # Note: this name MUST match the deployment template's pod template
                # (e.g. <template>.spec.template.spec.containers[0] == 'quay-enterprise-app')
                'name': QE_CONTAINER_NAME,
                'env': [{
                  'name': 'RESTART_TIME',
                  'value': str(datetime.datetime.now())
                }],
              }]
            }
          }
        }
      }, api_prefix='apis/extensions/v1beta1', content_type='application/strategic-merge-patch+json'))

  def rollback_deployment(self, deployment_name):
    deployment_rollback_url = 'namespaces/%s/deployments/%s/rollback' % (
      self.kube_config.qe_namespace, deployment_name
    )

    self._assert_success(self._execute_k8s_api('POST', deployment_rollback_url, {
      'name': deployment_name,
      'rollbackTo': {
        # revision=0 makes the deployment roll out to the previous revision
        'revision': 0
      }
    }, api_prefix='apis/extensions/v1beta1'), 201)

  def _assert_success(self, response, expected_code=200):
    if response.status_code != expected_code:
      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
                   response.text)
      raise K8sApiException('Kubernetes API call failed: %s' % response.text)

  def _update_secret_file(self, relative_file_path, value=None):
    if '/' in relative_file_path:
      raise Exception('Expected path from get_volume_path, but found slashes')

    # Check first that the namespace for Red Hat Quay exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace
      raise Exception(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
    secret = self._lookup_secret()
    if secret is None:
      # Build the empty secret locally as well, so the update below has an object to mutate.
      secret = {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": self.kube_config.qe_config_secret
        },
        "data": {}
      }
      self._assert_success(self._execute_k8s_api('POST', secret_url, secret))

    # Update the secret to reflect the file change.
    secret['data'] = secret.get('data', {})

    if value is not None:
      secret['data'][relative_file_path] = base64.b64encode(value)
    else:
      secret['data'].pop(relative_file_path)

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

  def _lookup_secret(self):
    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
    response = self._execute_k8s_api('GET', secret_url)
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def _execute_k8s_api(self, method, relative_url, data=None, api_prefix='api/v1', content_type='application/json'):
    headers = {
      'Authorization': 'Bearer ' + self.kube_config.service_account_token
    }

    if data:
      headers['Content-Type'] = content_type

    data = json.dumps(data) if data else None
    session = Session()
    url = 'https://%s/%s/%s' % (self.kube_config.api_host, api_prefix, relative_url)

    request = Request(method, url, data=data, headers=headers)
    return session.send(request.prepare(), verify=False, timeout=2)
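For context, a sketch of driving the accessor end to end (hypothetical; the 'items' list shape is the standard Kubernetes DeploymentList response, and the tests in test_k8saccessor.py below mock the same endpoints):

kube_config = KubernetesConfig.from_env()            # requires an in-cluster token
accessor = KubernetesAccessorSingleton.get_instance(kube_config)

deployments = accessor.get_qe_deployments()          # None on a non-200 response
if deployments is not None:
  names = [d['metadata']['name'] for d in deployments.get('items', [])]
  accessor.cycle_qe_deployments(names)               # PATCHes RESTART_TIME to roll the pods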
46  config_app/config_util/k8sconfig.py  Normal file
@@ -0,0 +1,46 @@
import os

SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

DEFAULT_QE_NAMESPACE = 'quay-enterprise'
DEFAULT_QE_CONFIG_SECRET = 'quay-enterprise-config-secret'

# The label value of the Quay Enterprise deployment (not the config app) that is used to query and roll out
DEFAULT_QE_DEPLOYMENT_SELECTOR = 'app'


def get_k8s_namespace():
  return os.environ.get('QE_K8S_NAMESPACE', DEFAULT_QE_NAMESPACE)


class KubernetesConfig(object):
  def __init__(self, api_host='', service_account_token=SERVICE_ACCOUNT_TOKEN_PATH,
               qe_namespace=DEFAULT_QE_NAMESPACE,
               qe_config_secret=DEFAULT_QE_CONFIG_SECRET,
               qe_deployment_selector=DEFAULT_QE_DEPLOYMENT_SELECTOR):
    self.api_host = api_host
    self.qe_namespace = qe_namespace
    self.qe_config_secret = qe_config_secret
    self.qe_deployment_selector = qe_deployment_selector
    self.service_account_token = service_account_token

  @classmethod
  def from_env(cls):
    # Load the service account token from the local store.
    if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
      raise Exception('Cannot load Kubernetes service account token')

    with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
      service_token = f.read()

    api_host = os.environ.get('KUBERNETES_SERVICE_HOST', '')
    port = os.environ.get('KUBERNETES_SERVICE_PORT')
    if port:
      api_host += ':' + port

    qe_namespace = get_k8s_namespace()
    qe_config_secret = os.environ.get('QE_K8S_CONFIG_SECRET', DEFAULT_QE_CONFIG_SECRET)
    qe_deployment_selector = os.environ.get('QE_DEPLOYMENT_SELECTOR', DEFAULT_QE_DEPLOYMENT_SELECTOR)

    return cls(api_host=api_host, service_account_token=service_token, qe_namespace=qe_namespace,
               qe_config_secret=qe_config_secret, qe_deployment_selector=qe_deployment_selector)
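A sketch of the environment contract that from_env() reads (values hypothetical):

# In-cluster, from_env() combines:
#   KUBERNETES_SERVICE_HOST=10.0.0.1, KUBERNETES_SERVICE_PORT=443 -> api_host '10.0.0.1:443'
#   QE_K8S_NAMESPACE, QE_K8S_CONFIG_SECRET, QE_DEPLOYMENT_SELECTOR (falling back to the defaults above)
# Outside a cluster (no service account token file), construct one explicitly instead:
config = KubernetesConfig(api_host='10.0.0.1:443', service_account_token='some-token')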
47  config_app/config_util/log.py  Normal file
@@ -0,0 +1,47 @@
import os
from config_app._init_config import CONF_DIR


def logfile_path(jsonfmt=False, debug=False):
  """
  Returns a logging config path following these rules:
  - conf/logging_debug_json.conf  # jsonfmt=true,  debug=true
  - conf/logging_json.conf        # jsonfmt=true,  debug=false
  - conf/logging_debug.conf       # jsonfmt=false, debug=true
  - conf/logging.conf             # jsonfmt=false, debug=false
  Can be parametrized via envvars: JSONLOG=true, DEBUGLOG=true
  """
  _json = ""
  _debug = ""

  if jsonfmt or os.getenv('JSONLOG', 'false').lower() == 'true':
    _json = "_json"

  if debug or os.getenv('DEBUGLOG', 'false').lower() == 'true':
    _debug = "_debug"

  return os.path.join(CONF_DIR, "logging%s%s.conf" % (_debug, _json))


def filter_logs(values, filtered_fields):
  """
  Takes a dict and a list of keys to filter; the dict is mutated in place.
  eg:
  with filtered_fields:
  [{'key': ['k1', 'k2'], 'fn': lambda x: 'filtered'}]
  and values:
  {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'}
  the dict becomes:
  {'k1': {'k2': 'filtered'}, 'k3': 'some-value'}
  """
  for field in filtered_fields:
    cdict = values

    for key in field['key'][:-1]:
      if key in cdict:
        cdict = cdict[key]

    last_key = field['key'][-1]

    if last_key in cdict and cdict[last_key]:
      cdict[last_key] = field['fn'](cdict[last_key])
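A runnable sketch of filter_logs using the docstring's own example (the dict is mutated in place):

values = {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'}
filter_logs(values, [{'key': ['k1', 'k2'], 'fn': lambda x: 'filtered'}])
assert values == {'k1': {'k2': 'filtered'}, 'k3': 'some-value'}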
85  config_app/config_util/ssl.py  Normal file
@@ -0,0 +1,85 @@
from fnmatch import fnmatch

import OpenSSL


class CertInvalidException(Exception):
  """ Exception raised when a certificate could not be parsed/loaded. """
  pass


class KeyInvalidException(Exception):
  """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. """
  pass


def load_certificate(cert_contents):
  """ Loads the certificate from the given contents and returns it, or raises a
      CertInvalidException on failure.
  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents)
    return SSLCertificate(cert)
  except OpenSSL.crypto.Error as ex:
    raise CertInvalidException(ex.message[0][2])


_SUBJECT_ALT_NAME = 'subjectAltName'


class SSLCertificate(object):
  """ Helper class for easier working with SSL certificates. """

  def __init__(self, openssl_cert):
    self.openssl_cert = openssl_cert

  def validate_private_key(self, private_key_path):
    """ Validates that the private key found at the given file path applies to this certificate.
        Raises a KeyInvalidException on failure.
    """
    context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
    context.use_certificate(self.openssl_cert)

    try:
      context.use_privatekey_file(private_key_path)
      context.check_privatekey()
    except OpenSSL.SSL.Error as ex:
      raise KeyInvalidException(ex.message[0][2])

  def matches_name(self, check_name):
    """ Returns true if this SSL certificate matches the given DNS hostname. """
    for dns_name in self.names:
      if fnmatch(check_name, dns_name):
        return True

    return False

  @property
  def expired(self):
    """ Returns whether the SSL certificate has expired. """
    return self.openssl_cert.has_expired()

  @property
  def common_name(self):
    """ Returns the defined common name for the certificate, if any. """
    return self.openssl_cert.get_subject().commonName

  @property
  def names(self):
    """ Returns all the DNS names to which the certificate applies. May be empty. """
    dns_names = set()
    common_name = self.common_name
    if common_name is not None:
      dns_names.add(common_name)

    # Find the DNS extension, if any.
    for i in range(0, self.openssl_cert.get_extension_count()):
      ext = self.openssl_cert.get_extension(i)
      if ext.get_short_name() == _SUBJECT_ALT_NAME:
        value = str(ext)
        for san_name in value.split(','):
          san_name_trimmed = san_name.strip()
          if san_name_trimmed.startswith('DNS:'):
            dns_names.add(san_name_trimmed[4:])

    return dns_names
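A short sketch of the certificate helpers used together (hypothetical file names and hostname):

with open('ssl.cert') as f:
  cert = load_certificate(f.read())       # raises CertInvalidException on bad PEM

if not cert.expired and cert.matches_name('quay.example.com'):  # wildcard-aware via fnmatch
  cert.validate_private_key('ssl.key')     # raises KeyInvalidException on mismatch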
22  config_app/config_util/tar.py  Normal file
@@ -0,0 +1,22 @@
from util.config.validator import EXTRA_CA_DIRECTORY


def strip_absolute_path_and_add_trailing_dir(path):
  """
  Removes the leading / from an absolute path and adds a trailing /, so the result
  can be used as a tar name prefix (e.g. '/conf/stack' -> 'conf/stack/')
  """
  return path[1:] + '/'


def tarinfo_filter_partial(prefix):
  def tarinfo_filter(tarinfo):
    # remove leading directory info
    tarinfo.name = tarinfo.name.replace(prefix, '')

    # ignore any directory that isn't the specified extra ca one:
    if tarinfo.isdir() and not tarinfo.name == EXTRA_CA_DIRECTORY:
      return None

    return tarinfo

  return tarinfo_filter
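A sketch of how a caller might apply the filter while tarring a config directory (hypothetical paths; the tests below check the same filtering behavior):

import os
import tarfile

config_dir = '/conf/stack'
prefix = strip_absolute_path_and_add_trailing_dir(config_dir)   # 'conf/stack/'
with tarfile.open('config.tar.gz', 'w|gz') as tar:
  for name in os.listdir(config_dir):
    # Entries land at the archive root; only the extra_ca_certs dir is kept
    tar.add(os.path.join(config_dir, name), filter=tarinfo_filter_partial(prefix))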
116  config_app/config_util/test/test_k8saccessor.py  Normal file
@@ -0,0 +1,116 @@
import pytest

from httmock import urlmatch, HTTMock, response

from config_app.config_util.k8saccessor import KubernetesAccessorSingleton, _deployment_rollout_status_message
from config_app.config_util.k8sconfig import KubernetesConfig


@pytest.mark.parametrize('deployment_object, expected_status, expected_message', [
  ({'metadata': {'generation': 1},
    'status': {'observedGeneration': 0, 'conditions': []},
    'spec': {'replicas': 0}},
   'progressing',
   'Waiting for deployment spec to be updated...'),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': [{'type': 'Progressing', 'reason': 'ProgressDeadlineExceeded'}]},
    'spec': {'replicas': 0}},
   'failed',
   "Deployment my-deployment's rollout failed. Please try again later."),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': []},
    'spec': {'replicas': 0}},
   'available',
   'Deployment my-deployment updated (no replicas, so nothing to roll out)'),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': [], 'replicas': 1},
    'spec': {'replicas': 2}},
   'progressing',
   'Waiting for rollout to finish: 0 out of 2 new replicas have been updated...'),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': [], 'replicas': 1, 'updatedReplicas': 1},
    'spec': {'replicas': 2}},
   'progressing',
   'Waiting for rollout to finish: 1 out of 2 new replicas have been updated...'),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': [], 'replicas': 2, 'updatedReplicas': 1},
    'spec': {'replicas': 1}},
   'progressing',
   'Waiting for rollout to finish: 1 old replicas are pending termination...'),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': [], 'replicas': 1, 'updatedReplicas': 2, 'availableReplicas': 0},
    'spec': {'replicas': 0}},
   'progressing',
   'Waiting for rollout to finish: 0 of 2 updated replicas are available...'),
  ({'metadata': {'generation': 0},
    'status': {'observedGeneration': 0, 'conditions': [], 'replicas': 1, 'updatedReplicas': 2, 'availableReplicas': 2},
    'spec': {'replicas': 0}},
   'available',
   'Deployment my-deployment successfully rolled out.'),
])
def test_deployment_rollout_status_message(deployment_object, expected_status, expected_message):
  deployment_status = _deployment_rollout_status_message(deployment_object, 'my-deployment')
  assert deployment_status.status == expected_status
  assert deployment_status.message == expected_message


@pytest.mark.parametrize('kube_config, expected_api, expected_query', [
  ({'api_host': 'www.customhost.com'},
   '/apis/extensions/v1beta1/namespaces/quay-enterprise/deployments', 'labelSelector=quay-enterprise-component%3Dapp'),

  ({'api_host': 'www.customhost.com', 'qe_deployment_selector': 'custom-selector'},
   '/apis/extensions/v1beta1/namespaces/quay-enterprise/deployments',
   'labelSelector=quay-enterprise-component%3Dcustom-selector'),

  ({'api_host': 'www.customhost.com', 'qe_namespace': 'custom-namespace'},
   '/apis/extensions/v1beta1/namespaces/custom-namespace/deployments', 'labelSelector=quay-enterprise-component%3Dapp'),

  ({'api_host': 'www.customhost.com', 'qe_namespace': 'custom-namespace', 'qe_deployment_selector': 'custom-selector'},
   '/apis/extensions/v1beta1/namespaces/custom-namespace/deployments',
   'labelSelector=quay-enterprise-component%3Dcustom-selector'),
])
def test_get_qe_deployments(kube_config, expected_api, expected_query):
  config = KubernetesConfig(**kube_config)
  url_hit = [False]

  @urlmatch(netloc=r'www.customhost.com')
  def handler(request, _):
    assert request.path == expected_api
    assert request.query == expected_query
    url_hit[0] = True
    return response(200, '{}')

  with HTTMock(handler):
    KubernetesAccessorSingleton._instance = None
    assert KubernetesAccessorSingleton.get_instance(config).get_qe_deployments() is not None

  assert url_hit[0]


@pytest.mark.parametrize('kube_config, deployment_names, expected_api_hits', [
  ({'api_host': 'www.customhost.com'}, [], []),
  ({'api_host': 'www.customhost.com'}, ['myDeployment'],
   ['/apis/extensions/v1beta1/namespaces/quay-enterprise/deployments/myDeployment']),
  ({'api_host': 'www.customhost.com', 'qe_namespace': 'custom-namespace'},
   ['myDeployment', 'otherDeployment'],
   ['/apis/extensions/v1beta1/namespaces/custom-namespace/deployments/myDeployment',
    '/apis/extensions/v1beta1/namespaces/custom-namespace/deployments/otherDeployment']),
])
def test_cycle_qe_deployments(kube_config, deployment_names, expected_api_hits):
  KubernetesAccessorSingleton._instance = None

  config = KubernetesConfig(**kube_config)
  url_hit = [False] * len(expected_api_hits)
  i = [0]

  @urlmatch(netloc=r'www.customhost.com', method='PATCH')
  def handler(request, _):
    assert request.path == expected_api_hits[i[0]]
    url_hit[i[0]] = True
    i[0] += 1
    return response(200, '{}')

  with HTTMock(handler):
    KubernetesAccessorSingleton.get_instance(config).cycle_qe_deployments(deployment_names)

  assert all(url_hit)
32  config_app/config_util/test/test_tar.py  Normal file
@@ -0,0 +1,32 @@
import pytest

from config_app.config_util.tar import tarinfo_filter_partial

from util.config.validator import EXTRA_CA_DIRECTORY

from test.fixtures import *


class MockTarInfo:
  def __init__(self, name, isdir):
    self.name = name
    self.isdir = lambda: isdir

  def __eq__(self, other):
    return other is not None and self.name == other.name


@pytest.mark.parametrize('prefix,tarinfo,expected', [
  # It should handle simple files
  ('Users/sam/', MockTarInfo('Users/sam/config.yaml', False), MockTarInfo('config.yaml', False)),
  # It should allow the extra CA dir
  ('Users/sam/', MockTarInfo('Users/sam/%s' % EXTRA_CA_DIRECTORY, True), MockTarInfo('%s' % EXTRA_CA_DIRECTORY, True)),
  # It should allow a file in that extra dir
  ('Users/sam/', MockTarInfo('Users/sam/%s/cert.crt' % EXTRA_CA_DIRECTORY, False),
   MockTarInfo('%s/cert.crt' % EXTRA_CA_DIRECTORY, False)),
  # It should not allow a directory that isn't the CA dir
  ('Users/sam/', MockTarInfo('Users/sam/dirignore', True), None),
])
def test_tarinfo_filter(prefix, tarinfo, expected):
  partial = tarinfo_filter_partial(prefix)
  assert partial(tarinfo) == expected