Remove kube and scshutdown endpoint

Sam Chow 2018-06-08 15:33:54 -04:00
parent e9d24dc5ff
commit 79a05909d5
5 changed files with 2 additions and 218 deletions

@@ -18,9 +18,6 @@ OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack')
is_testing = 'TEST' in os.environ

# TODO(config kubernetes): reinstate when enabling kubernetes in config app
# is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ

config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                      testing=is_testing)
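For reference, a hedged reconstruction of how this call site was presumably wired before the commit, combining the commented-out detection above with the removed `kubernetes` parameter shown in a later hunk (inferred, not verbatim from the old tree):

# Inferred pre-commit wiring, not a quote of the old source.
is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                      testing=is_testing,
                                      kubernetes=is_kubernetes)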

@@ -199,43 +199,6 @@ class SuperUserSetupDatabase(ApiResource):
    abort(403)


# From: https://stackoverflow.com/a/44712205
def get_process_id(name):
  """ Return process ids found by (partial) name or regex.

  >>> get_process_id('kthreadd')
  [2]
  >>> get_process_id('watchdog')
  [10, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61] # ymmv
  >>> get_process_id('non-existent process')
  []
  """
  child = subprocess.Popen(['pgrep', name], stdout=subprocess.PIPE, shell=False)
  response = child.communicate()[0]
  return [int(pid) for pid in response.split()]


@resource('/v1/superuser/shutdown')
class SuperUserShutdown(ApiResource):
  """ Resource for sending a shutdown signal to the container. """

  @nickname('scShutdownContainer')
  def post(self):
    """ Sends a signal to the phusion init system to shut down the container. """
    # Note: This method is called to set the database configuration before super users exists,
    # so we also allow it to be called if there is no valid registry configuration setup.
    if app.config['TESTING'] or not database_has_users():
      # Note: We skip if debugging locally.
      if app.config.get('DEBUGGING') == True:
        return {}

      os.kill(get_process_id('my_init')[0], signal.SIGINT)
      return {}

    abort(403)


@resource('/v1/superuser/config/createsuperuser')
class SuperUserCreateInitialSuperUser(ApiResource):
  """ Resource for creating the initial super user. """

@@ -1,16 +1,12 @@
from config_app.config_util.config.fileprovider import FileConfigProvider
from config_app.config_util.config.testprovider import TestConfigProvider
from config_app.config_util.config.k8sprovider import KubernetesConfigProvider


def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False):
def get_config_provider(config_volume, yaml_filename, py_filename, testing=False):
  """ Loads and returns the config provider for the current environment. """
  if testing:
    return TestConfigProvider()

  if kubernetes:
    return KubernetesConfigProvider(config_volume, yaml_filename, py_filename)

  return FileConfigProvider(config_volume, yaml_filename, py_filename)
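With the `kubernetes` flag removed, provider selection reduces to a two-way switch on `testing`. A usage sketch against this module (the volume path is borrowed from the call site in the first hunk; this only runs inside the config app):

# testing=True  -> TestConfigProvider (in-memory, for tests)
# testing=False -> FileConfigProvider backed by the config volume
provider = get_config_provider('config_app/conf/stack', 'config.yaml', 'config.py',
                               testing=False)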

@@ -1,170 +0,0 @@
import os
import logging
import json
import base64
import time

from requests import Request, Session

from config_app.config_util.config.baseprovider import CannotWriteConfigException, get_yaml
from config_app.config_util.config.basefileprovider import BaseFileProvider

logger = logging.getLogger(__name__)

KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
port = os.environ.get('KUBERNETES_SERVICE_PORT')
if port:
  KUBERNETES_API_HOST += ':' + port

SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')


class KubernetesConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes configuration
      data from a Kubernetes Secret. """

  def __init__(self, config_volume, yaml_filename, py_filename):
    super(KubernetesConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)

    # Load the service account token from the local store.
    if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
      raise Exception('Cannot load Kubernetes service account token')

    with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
      self._service_token = f.read()

  @property
  def provider_id(self):
    return 'k8s'

  def get_volume_path(self, directory, filename):
    # NOTE: Overridden to ensure we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    return "_".join([directory.rstrip('/'), filename])

  def volume_file_exists(self, filename):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()
    if not secret or not secret.get('data'):
      return False
    return filename in secret['data']

  def list_volume_directory(self, path):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()
    if not secret:
      return []

    paths = []
    for filename in secret.get('data', {}):
      if filename.startswith(path):
        paths.append(filename[len(path) + 1:])
    return paths

  def save_config(self, config_obj):
    self._update_secret_file(self.yaml_filename, get_yaml(config_obj))

  def write_volume_file(self, filename, contents):
    try:
      self._update_secret_file(filename, contents)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def remove_volume_file(self, filename):
    try:
      self._update_secret_file(filename, None)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def save_volume_file(self, filename, flask_file):
    filepath = super(KubernetesConfigProvider, self).save_volume_file(filename, flask_file)
    with open(filepath, 'r') as f:
      self.write_volume_file(filename, f.read())

  def _assert_success(self, response):
    if response.status_code != 200:
      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
                   response.text)
      raise CannotWriteConfigException('Kubernetes API call failed: %s' % response.text)

  def _update_secret_file(self, filename, value=None):
    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (QE_NAMESPACE)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % QE_NAMESPACE
      raise CannotWriteConfigException(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    secret = self._lookup_secret()
    if secret is None:
      self._assert_success(self._execute_k8s_api('POST', secret_url, {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": QE_CONFIG_SECRET
        },
        "data": {}
      }))

    # Update the secret to reflect the file change.
    secret['data'] = secret.get('data', {})
    if value is not None:
      secret['data'][filename] = base64.b64encode(value)
    else:
      secret['data'].pop(filename)

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

    # Wait until the local mounted copy of the secret has been updated, as
    # this is an eventual consistency operation, but the caller expects immediate
    # consistency.
    while True:
      matching_files = set()
      for secret_filename, encoded_value in secret['data'].iteritems():
        expected_value = base64.b64decode(encoded_value)
        try:
          with self.get_volume_file(secret_filename) as f:
            contents = f.read()

          if contents == expected_value:
            matching_files.add(secret_filename)
        except IOError:
          continue

      if matching_files == set(secret['data'].keys()):
        break

      # Sleep for a second and then try again.
      time.sleep(1)

  def _lookup_secret(self):
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    response = self._execute_k8s_api('GET', secret_url)
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def _execute_k8s_api(self, method, relative_url, data=None):
    headers = {
      'Authorization': 'Bearer ' + self._service_token
    }

    if data:
      headers['Content-Type'] = 'application/json'

    data = json.dumps(data) if data else None
    session = Session()
    url = 'https://%s/api/v1/%s' % (KUBERNETES_API_HOST, relative_url)

    request = Request(method, url, data=data, headers=headers)
    return session.send(request.prepare(), verify=False, timeout=2)
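As background on the deleted provider: Kubernetes Secrets carry their values base64-encoded under `data`, which is why `_update_secret_file` encoded file contents before the PUT and decoded them again in the consistency-wait loop. A standalone Python 3 sketch of the payload it constructed (the file contents are a placeholder; the names and URL shape come from the removed code):

import base64
import json

QE_NAMESPACE = 'quay-enterprise'                    # default from the removed code
QE_CONFIG_SECRET = 'quay-enterprise-config-secret'  # default from the removed code

# Placeholder config file body; the real contents came from the config volume.
contents = b'SERVER_HOSTNAME: quay.example.com\n'

secret = {
  'kind': 'Secret',
  'apiVersion': 'v1',
  'metadata': {'name': QE_CONFIG_SECRET},
  'data': {'config.yaml': base64.b64encode(contents).decode('ascii')},
}

# The provider PUT this JSON to:
#   https://<KUBERNETES_SERVICE_HOST>/api/v1/namespaces/quay-enterprise/secrets/quay-enterprise-config-secret
print(json.dumps(secret, indent=2))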

@@ -6,9 +6,7 @@ angular.module('quay-config')
  function(ApiService, $timeout, Restangular) {
    var containerService = {};

    containerService.restartContainer = function(callback) {
      ApiService.scShutdownContainer(null, null).then(function(resp) {
        $timeout(callback, 2000);
      }, ApiService.errorDisplay('Cannot restart container. Please report this to support.'))
      ApiService.errorDisplay('Removed Endpoint. This error should never be seen.')
    };

    containerService.scheduleStatusCheck = function(callback, opt_config) {