diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py
index 9ea3cac56..253d85ac9 100644
--- a/buildman/jobutil/buildjob.py
+++ b/buildman/jobutil/buildjob.py
@@ -5,6 +5,8 @@ from app import app
 from cachetools import lru_cache
 from notifications import spawn_notification
 from data import model
+from data.registry_model import registry_model
+from data.registry_model.datatypes import RepositoryReference
 from data.database import UseThenDisconnect
 from util.imagetree import ImageTree
 from util.morecollections import AttrDict
@@ -27,7 +29,7 @@ class BuildJob(object):
       self.build_notifier = BuildJobNotifier(self.build_uuid)
     except ValueError:
       raise BuildJobLoadException(
-          'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
+        'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
       )

   @property
@@ -95,70 +97,24 @@ class BuildJob(object):
   def determine_cached_tag(self, base_image_id=None, cache_comments=None):
     """ Returns the tag to pull to prime the cache or None if none. """
-    cached_tag = None
-    if base_image_id and cache_comments:
-      cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments)
-
-    if not cached_tag:
-      cached_tag = self._determine_cached_tag_by_tag()
-
+    cached_tag = self._determine_cached_tag_by_tag()
     logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments)
-
     return cached_tag

-  def _determine_cached_tag_by_comments(self, base_image_id, cache_commands):
-    """ Determines the tag to use for priming the cache for this build job, by matching commands
-        starting at the given base_image_id. This mimics the Docker cache checking, so it should,
-        in theory, provide "perfect" caching.
-    """
-    with UseThenDisconnect(app.config):
-      # Lookup the base image in the repository. If it doesn't exist, nothing more to do.
-      repo_build = self.repo_build
-      repo_namespace = repo_build.repository.namespace_user.username
-      repo_name = repo_build.repository.name
-
-      base_image = model.image.get_image(repo_build.repository, base_image_id)
-      if base_image is None:
-        return None
-
-      # Build an in-memory tree of the full heirarchy of images in the repository.
-      all_images = model.image.get_repository_images_without_placements(repo_build.repository,
-                                                                        with_ancestor=base_image)
-
-      all_tags = model.tag.list_repository_tags(repo_namespace, repo_name)
-      tree = ImageTree(all_images, all_tags, base_filter=base_image.id)
-
-      # Find a path in the tree, starting at the base image, that matches the cache comments
-      # or some subset thereof.
-      def checker(step, image):
-        if step >= len(cache_commands):
-          return False
-
-        full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step]
-        logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command)
-
-        return image.command == full_command
-
-      path = tree.find_longest_path(base_image.id, checker)
-      if not path:
-        return None
-
-      # Find any tag associated with the last image in the path.
-      return tree.tag_containing_image(path[-1])
-
-
   def _determine_cached_tag_by_tag(self):
     """ Determines the cached tag by looking for one of the tags being built, and seeing if it
         exists in the repository. This is a fallback for when no comment information is available.
     """
     with UseThenDisconnect(app.config):
       tags = self.build_config.get('docker_tags', ['latest'])
-      repository = self.repo_build.repository
-      existing_tags = model.tag.list_repository_tags(repository.namespace_user.username,
-                                                     repository.name)
-      cached_tags = set(tags) & set([tag.name for tag in existing_tags])
-      if cached_tags:
-        return list(cached_tags)[0]
+      repository = RepositoryReference.for_repo_obj(self.repo_build.repository)
+      matching_tag = registry_model.find_matching_tag(repository, tags)
+      if matching_tag is not None:
+        return matching_tag.name
+
+      most_recent_tag = registry_model.get_most_recent_tag(repository)
+      if most_recent_tag is not None:
+        return most_recent_tag.name

       return None
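For orientation, the replacement above reduces cache priming to two registry_model lookups. A minimal sketch of the equivalent logic (the standalone helper below is hypothetical; in the diff this runs inside BuildJob._determine_cached_tag_by_tag under UseThenDisconnect):

def cached_tag_for_build(repository_ref, docker_tags=('latest',)):
  # Prefer a tag that is being rebuilt and already exists in the repository.
  matching_tag = registry_model.find_matching_tag(repository_ref, list(docker_tags))
  if matching_tag is not None:
    return matching_tag.name

  # Otherwise fall back to the most recently pushed tag as the next-best cache source.
  most_recent_tag = registry_model.get_most_recent_tag(repository_ref)
  return most_recent_tag.name if most_recent_tag is not None else None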
""" with UseThenDisconnect(app.config): tags = self.build_config.get('docker_tags', ['latest']) - repository = self.repo_build.repository - existing_tags = model.tag.list_repository_tags(repository.namespace_user.username, - repository.name) - cached_tags = set(tags) & set([tag.name for tag in existing_tags]) - if cached_tags: - return list(cached_tags)[0] + repository = RepositoryReference.for_repo_obj(self.repo_build.repository) + matching_tag = registry_model.find_matching_tag(repository, tags) + if matching_tag is not None: + return matching_tag.name + + most_recent_tag = registry_model.get_most_recent_tag(repository) + if most_recent_tag is not None: + return most_recent_tag.name return None diff --git a/conf/init/02_get_kube_certs.py b/conf/init/02_get_kube_certs.py new file mode 100644 index 000000000..3f88a09ac --- /dev/null +++ b/conf/init/02_get_kube_certs.py @@ -0,0 +1,71 @@ +import json +import os +import base64 + +from requests import Request, Session + +QUAYPATH = os.environ.get('QUAYPATH', '.') +KUBE_EXTRA_CA_CERTDIR = os.environ.get('KUBE_EXTRA_CA_CERTDIR', '%s/conf/kube_extra_certs' % QUAYPATH) + +KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '') +port = os.environ.get('KUBERNETES_SERVICE_PORT') +if port: + KUBERNETES_API_HOST += ':' + port + +SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token' + +QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise') +QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret') +EXTRA_CA_DIRECTORY_PREFIX = 'extra_ca_certs_' + + +def _lookup_secret(service_token): + secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET) + response = _execute_k8s_api(service_token, 'GET', secret_url) + if response.status_code != 200: + raise Exception('Cannot get the config secret') + return json.loads(response.text) + +def _execute_k8s_api(service_account_token, method, relative_url, data=None, api_prefix='api/v1', content_type='application/json'): + headers = { + 'Authorization': 'Bearer ' + service_account_token + } + + if data: + headers['Content-Type'] = content_type + + data = json.dumps(data) if data else None + session = Session() + url = 'https://%s/%s/%s' % (KUBERNETES_API_HOST, api_prefix, relative_url) + + request = Request(method, url, data=data, headers=headers) + return session.send(request.prepare(), verify=False, timeout=2) + +def is_extra_cert(key): + return key.find(EXTRA_CA_DIRECTORY_PREFIX) == 0 + +def main(): + # Load the service account token from the local store. 
diff --git a/conf/init/02_get_kube_certs.sh b/conf/init/02_get_kube_certs.sh
new file mode 100755
index 000000000..997847d17
--- /dev/null
+++ b/conf/init/02_get_kube_certs.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+QUAYDIR=${QUAYDIR:-"/"}
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd $QUAYDIR
+
+if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
+    echo "Running on kubernetes, attempting to retrieve extra certs from secret"
+    venv/bin/python $QUAYCONF/init/02_get_kube_certs.py
+fi
\ No newline at end of file
diff --git a/conf/init/certs_install.sh b/conf/init/certs_install.sh
index 29be4e8e5..e58d282bb 100755
--- a/conf/init/certs_install.sh
+++ b/conf/init/certs_install.sh
@@ -3,6 +3,12 @@ set -e
 QUAYPATH=${QUAYPATH:-"."}
 QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf/stack"}
 QUAYCONFIG=${QUAYCONFIG:-"$QUAYCONF/stack"}
+CERTDIR=${CERTDIR:-"$QUAYCONFIG/extra_ca_certs"}
+
+# If we're running under kube, the previous script (02_get_kube_certs.sh) will put the certs in a different location
+if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
+    CERTDIR=${KUBE_EXTRA_CA_CERTDIR:-"$QUAYPATH/conf/kube_extra_certs"}
+fi

 cd ${QUAYDIR:-"/quay-registry"}

@@ -13,25 +19,25 @@ then
 fi

 # Add extra trusted certificates (as a directory)
-if [ -d $QUAYCONFIG/extra_ca_certs ]; then
-  if test "$(ls -A "$QUAYCONFIG/extra_ca_certs")"; then
-    echo "Installing extra certificates found in $QUAYCONFIG/extra_ca_certs directory"
-    cp $QUAYCONFIG/extra_ca_certs/* /usr/local/share/ca-certificates/
-    cat $QUAYCONFIG/extra_ca_certs/* >> venv/lib/python2.7/site-packages/requests/cacert.pem
-    cat $QUAYCONFIG/extra_ca_certs/* >> venv/lib/python2.7/site-packages/certifi/cacert.pem
+if [ -d $CERTDIR ]; then
+  if test "$(ls -A "$CERTDIR")"; then
+    echo "Installing extra certificates found in $CERTDIR directory"
+    cp $CERTDIR/* /usr/local/share/ca-certificates/
+    cat $CERTDIR/* >> venv/lib/python2.7/site-packages/requests/cacert.pem
+    cat $CERTDIR/* >> venv/lib/python2.7/site-packages/certifi/cacert.pem
   fi
 fi

 # Add extra trusted certificates (as a file)
-if [ -f $QUAYCONFIG/extra_ca_certs ]; then
-  echo "Installing extra certificates found in $QUAYCONFIG/extra_ca_certs file"
-  csplit -z -f /usr/local/share/ca-certificates/extra-ca- $QUAYCONFIG/extra_ca_certs '/-----BEGIN CERTIFICATE-----/' '{*}'
-  cat $QUAYCONFIG/extra_ca_certs >> venv/lib/python2.7/site-packages/requests/cacert.pem
-  cat $QUAYCONFIG/extra_ca_certs >> venv/lib/python2.7/site-packages/certifi/cacert.pem
+if [ -f $CERTDIR ]; then
+  echo "Installing extra certificates found in $CERTDIR file"
+  csplit -z -f /usr/local/share/ca-certificates/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}'
+  cat $CERTDIR >> venv/lib/python2.7/site-packages/requests/cacert.pem
+  cat $CERTDIR >> venv/lib/python2.7/site-packages/certifi/cacert.pem
 fi

 # Add extra trusted certificates (prefixed)
-for f in $(find $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*")
+for f in $(find $CERTDIR/ -maxdepth 1 -type f -name "extra_ca*")
 do
   echo "Installing extra cert $f"
   cp "$f" /usr/local/share/ca-certificates/
diff --git a/config_app/conf/server-base.conf b/config_app/conf/server-base.conf
index b1732ed20..bb7af7bf9 100644
--- a/config_app/conf/server-base.conf
+++ b/config_app/conf/server-base.conf
@@ -10,6 +10,9 @@ proxy_redirect off;

 proxy_set_header Transfer-Encoding $http_transfer_encoding;

+# The DB migrations sometimes take a while, so increase the timeout so we don't report an error
+proxy_read_timeout 300s;
+
 location / {
   proxy_pass http://web_app_server;
 }
diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py
index 56467a392..4f5b4c2f8 100644
--- a/config_app/config_endpoints/api/__init__.py
+++ b/config_app/config_endpoints/api/__init__.py
@@ -150,10 +150,11 @@ def kubernetes_only(f):

 nickname = partial(add_method_metadata, 'nickname')

+
 import config_app.config_endpoints.api
 import config_app.config_endpoints.api.discovery
+import config_app.config_endpoints.api.kubeconfig
 import config_app.config_endpoints.api.suconfig
 import config_app.config_endpoints.api.superuser
-import config_app.config_endpoints.api.user
 import config_app.config_endpoints.api.tar_config_loader
-
+import config_app.config_endpoints.api.user
diff --git a/config_app/config_endpoints/api/kubeconfig.py b/config_app/config_endpoints/api/kubeconfig.py
new file mode 100644
index 000000000..a2f9219c8
--- /dev/null
+++ b/config_app/config_endpoints/api/kubeconfig.py
@@ -0,0 +1,69 @@
+from flask import request
+
+from data.database import configure
+
+from config_app.c_app import app, config_provider
+from config_app.config_endpoints.api import resource, ApiResource, nickname, kubernetes_only
+from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
+
+
+@resource('/v1/kubernetes/deployments/')
+class SuperUserKubernetesDeployment(ApiResource):
+  """ Resource for the getting the status of Quay Enterprise deployments and cycling them """
+  schemas = {
+    'ValidateDeploymentNames': {
+      'type': 'object',
+      'description': 'Validates deployment names for cycling',
+      'required': [
+        'deploymentNames'
+      ],
+      'properties': {
+        'deploymentNames': {
+          'type': 'array',
+          'description': 'The names of the deployments to cycle'
+        },
+      },
+    }
+  }
+
+  @kubernetes_only
+  @nickname('scGetNumDeployments')
+  def get(self):
+    return KubernetesAccessorSingleton.get_instance().get_qe_deployments()
+
+  @kubernetes_only
+  @nickname('scCycleQEDeployments')
+  def put(self):
+    deployment_names = request.get_json()['deploymentNames']
+    return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments(deployment_names)
+
+
+@resource('/v1/superuser/config/kubernetes')
+class SuperUserKubernetesConfiguration(ApiResource):
+  """ Resource for saving the config files to kubernetes secrets. """
+
+  @kubernetes_only
+  @nickname('scDeployConfiguration')
+  def post(self):
+    return config_provider.save_configuration_to_kubernetes()
+
+
+@resource('/v1/kubernetes/config/populate')
+class KubernetesConfigurationPopulator(ApiResource):
+  """ Resource for populating the local configuration from the cluster's kubernetes secrets. """
+
+  @kubernetes_only
+  @nickname('scKubePopulateConfig')
+  def post(self):
+    # Get a clean transient directory to write the config into
+    config_provider.new_config_dir()
+    KubernetesAccessorSingleton.get_instance().save_secret_to_directory(config_provider.get_config_dir_path())
+
+    # We update the db configuration to connect to their specified one
+    # (Note, even if this DB isn't valid, it won't affect much in the config app, since we'll report an error,
+    # and all of the options create a new clean dir, so we'll never pollute configs)
+    combined = dict(**app.config)
+    combined.update(config_provider.get_config())
+    configure(combined)
+
+    return 200
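As a rough usage sketch, the populate endpoint registered above can be driven with a plain HTTP client once the config app is running; the host, port, path prefix, and absence of authentication below are assumptions that depend on how the config app is deployed:

import requests

# Assumes a locally running config app with the API mounted under /api.
resp = requests.post('http://localhost:8080/api/v1/kubernetes/config/populate')
print resp.status_code  # expect 200 once the secret contents have been copied into the transient config dir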
""" + + @kubernetes_only + @nickname('scKubePopulateConfig') + def post(self): + # Get a clean transient directory to write the config into + config_provider.new_config_dir() + KubernetesAccessorSingleton.get_instance().save_secret_to_directory(config_provider.get_config_dir_path()) + + # We update the db configuration to connect to their specified one + # (Note, even if this DB isn't valid, it won't affect much in the config app, since we'll report an error, + # and all of the options create a new clean dir, so we'll never pollute configs) + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) + + return 200 diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index 562c48010..fdf58ee3b 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -3,11 +3,9 @@ import logging from flask import abort, request from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model -from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request, \ - kubernetes_only +from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request from config_app.c_app import (app, config_provider, superusers, ip_resolver, instance_keys, INIT_SCRIPTS_LOCATION) -from config_app.config_util.k8saccessor import KubernetesAccessorSingleton from data.database import configure from data.runmigration import run_alembic_migration @@ -270,47 +268,6 @@ class SuperUserConfigValidate(ApiResource): return validate_service_for_config(service, validator_context) -@resource('/v1/kubernetes/deployments/') -class SuperUserKubernetesDeployment(ApiResource): - """ Resource for the getting the status of Quay Enterprise deployments and cycling them """ - schemas = { - 'ValidateDeploymentNames': { - 'type': 'object', - 'description': 'Validates deployment names for cycling', - 'required': [ - 'deploymentNames' - ], - 'properties': { - 'deploymentNames': { - 'type': 'array', - 'description': 'The names of the deployments to cycle' - }, - }, - } - } - - @kubernetes_only - @nickname('scGetNumDeployments') - def get(self): - return KubernetesAccessorSingleton.get_instance().get_qe_deployments() - - @kubernetes_only - @nickname('scCycleQEDeployments') - def put(self): - deployment_names = request.get_json()['deploymentNames'] - return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments(deployment_names) - - -@resource('/v1/superuser/config/kubernetes') -class SuperUserKubernetesConfiguration(ApiResource): - """ Resource for saving the config files to kubernetes secrets. """ - - @kubernetes_only - @nickname('scDeployConfiguration') - def post(self): - return config_provider.save_configuration_to_kubernetes() - - @resource('/v1/superuser/config/file/') class SuperUserConfigFile(ApiResource): """ Resource for fetching the status of config files and overriding them. """ diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py index a40cf4483..8874b10d3 100644 --- a/config_app/config_endpoints/api/superuser.py +++ b/config_app/config_endpoints/api/superuser.py @@ -55,8 +55,8 @@ class SuperUserCustomCertificate(ApiResource): # Call the update script with config dir location to install the certificate immediately. 
diff --git a/config_app/config_util/config/TransientDirectoryProvider.py b/config_app/config_util/config/TransientDirectoryProvider.py
index 2525baaea..7dab7dcf3 100644
--- a/config_app/config_util/config/TransientDirectoryProvider.py
+++ b/config_app/config_util/config/TransientDirectoryProvider.py
@@ -1,8 +1,12 @@
 import os
+import base64
+
 from backports.tempfile import TemporaryDirectory

 from config_app.config_util.config.fileprovider import FileConfigProvider
 from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
+from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX
+


 class TransientDirectoryProvider(FileConfigProvider):
@@ -38,10 +42,25 @@ class TransientDirectoryProvider(FileConfigProvider):
     return self.config_volume

   def save_configuration_to_kubernetes(self):
-    config_path = self.get_config_dir_path()
+    data = {}

-    for name in os.listdir(config_path):
+    # Kubernetes secrets don't have sub-directories, so for the extra_ca_certs dir
+    # we have to put the extra certs in with a prefix, and then one of our init scripts
+    # (02_get_kube_certs.sh) will expand the prefixed certs into the equivalent directory
+    # so that they'll be installed correctly on startup by the certs_install script
+    certs_dir = os.path.join(self.config_volume, EXTRA_CA_DIRECTORY)
+    if os.path.exists(certs_dir):
+      for extra_cert in os.listdir(certs_dir):
+        with open(os.path.join(certs_dir, extra_cert)) as f:
+          data[EXTRA_CA_DIRECTORY_PREFIX + extra_cert] = base64.b64encode(f.read())
+
+
+    for name in os.listdir(self.config_volume):
       file_path = os.path.join(self.config_volume, name)
-      KubernetesAccessorSingleton.get_instance().save_file_as_secret(name, file_path)
+      if not os.path.isdir(file_path):
+        with open(file_path) as f:
+          data[name] = base64.b64encode(f.read())
+
+    KubernetesAccessorSingleton.get_instance().replace_qe_secret(data)

     return 200
diff --git a/config_app/config_util/k8saccessor.py b/config_app/config_util/k8saccessor.py
index 61efb04fc..dd7f596e2 100644
--- a/config_app/config_util/k8saccessor.py
+++ b/config_app/config_util/k8saccessor.py
@@ -2,8 +2,10 @@ import logging
 import json
 import base64
 import datetime
+import os

 from requests import Request, Session
+from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX

 from config_app.config_util.k8sconfig import KubernetesConfig

@@ -35,10 +37,64 @@ class KubernetesAccessorSingleton(object):

     return cls._instance

-  def save_file_as_secret(self, name, file_path):
-    with open(file_path) as f:
-      value = f.read()
-      self._update_secret_file(name, value)
+  def save_secret_to_directory(self, dir_path):
+    """
+    Saves all files in the kubernetes secret to a local directory.
+    Assumes the directory is empty.
+    """
+    secret = self._lookup_secret()
+
+    secret_data = secret.get('data', {})
+
+    # Make the `extra_ca_certs` dir to ensure we can populate extra certs
+    extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY)
+    os.mkdir(extra_ca_dir_path)
+
+    for secret_filename, data in secret_data.iteritems():
+      write_path = os.path.join(dir_path, secret_filename)
+
+      if EXTRA_CA_DIRECTORY_PREFIX in secret_filename:
+        write_path = os.path.join(extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, ''))
+
+      with open(write_path, 'w') as f:
+        f.write(base64.b64decode(data))
+
+    return 200
+
+  def save_file_as_secret(self, name, file_pointer):
+    value = file_pointer.read()
+    self._update_secret_file(name, value)
+
+  def replace_qe_secret(self, new_secret_data):
+    """
+    Removes the old config and replaces it with the new_secret_data as one action
+    """
+    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
+    # as an error, as it seems to be a common issue.
+    namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace)
+    response = self._execute_k8s_api('GET', namespace_url)
+    if response.status_code // 100 != 2:
+      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace
+      raise Exception(msg)
+
+    # Check if the secret exists. If not, then we create an empty secret and then update the file
+    # inside.
+    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
+    secret = self._lookup_secret()
+    if secret is None:
+      self._assert_success(self._execute_k8s_api('POST', secret_url, {
+        "kind": "Secret",
+        "apiVersion": "v1",
+        "metadata": {
+          "name": self.kube_config.qe_config_secret
+        },
+        "data": {}
+      }))
+
+    # Update the secret to reflect the file change.
+    secret['data'] = new_secret_data
+
+    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

   def get_qe_deployments(self):
     """"
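For reference, replace_qe_secret expects the complete data map of the config secret, with every value already base64-encoded; a minimal sketch of assembling such a payload (file names and contents are hypothetical):

import base64

files = {
  'config.yaml': 'SETUP_COMPLETE: true\n',                          # hypothetical config contents
  'extra_ca_certs_internal.crt': '-----BEGIN CERTIFICATE-----\n',   # hypothetical prefixed cert
}

new_secret_data = {name: base64.b64encode(contents) for name, contents in files.items()}
KubernetesAccessorSingleton.get_instance().replace_qe_secret(new_secret_data)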
+ """ + secret = self._lookup_secret() + + secret_data = secret.get('data', {}) + + # Make the `extra_ca_certs` dir to ensure we can populate extra certs + extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY) + os.mkdir(extra_ca_dir_path) + + for secret_filename, data in secret_data.iteritems(): + write_path = os.path.join(dir_path, secret_filename) + + if EXTRA_CA_DIRECTORY_PREFIX in secret_filename: + write_path = os.path.join(extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, '')) + + with open(write_path, 'w') as f: + f.write(base64.b64decode(data)) + + return 200 + + def save_file_as_secret(self, name, file_pointer): + value = file_pointer.read() + self._update_secret_file(name, value) + + def replace_qe_secret(self, new_secret_data): + """ + Removes the old config and replaces it with the new_secret_data as one action + """ + # Check first that the namespace for Quay Enterprise exists. If it does not, report that + # as an error, as it seems to be a common issue. + namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace) + response = self._execute_k8s_api('GET', namespace_url) + if response.status_code // 100 != 2: + msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace + raise Exception(msg) + + # Check if the secret exists. If not, then we create an empty secret and then update the file + # inside. + secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret) + secret = self._lookup_secret() + if secret is None: + self._assert_success(self._execute_k8s_api('POST', secret_url, { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": self.kube_config.qe_config_secret + }, + "data": {} + })) + + # Update the secret to reflect the file change. + secret['data'] = new_secret_data + + self._assert_success(self._execute_k8s_api('PUT', secret_url, secret)) def get_qe_deployments(self): """" diff --git a/config_app/js/components/config-setup-app/config-setup-app.component.ts b/config_app/js/components/config-setup-app/config-setup-app.component.ts index 16a796ed4..3b05c304c 100644 --- a/config_app/js/components/config-setup-app/config-setup-app.component.ts +++ b/config_app/js/components/config-setup-app/config-setup-app.component.ts @@ -15,7 +15,6 @@ export class ConfigSetupAppComponent { : 'choice' | 'setup' | 'load' - | 'populate' | 'download' | 'deploy'; @@ -45,7 +44,15 @@ export class ConfigSetupAppComponent { } private choosePopulate(): void { - this.state = 'populate'; + this.apiService.scKubePopulateConfig() + .then(() => { + this.state = 'setup'; + }) + .catch(err => { + this.apiService.errorDisplay( + `Could not populate the configuration from your cluster. 
diff --git a/config_app/js/components/kube-deploy-modal/kube-deploy-modal.component.ts b/config_app/js/components/kube-deploy-modal/kube-deploy-modal.component.ts
index 47eec013b..d66b442da 100644
--- a/config_app/js/components/kube-deploy-modal/kube-deploy-modal.component.ts
+++ b/config_app/js/components/kube-deploy-modal/kube-deploy-modal.component.ts
@@ -24,7 +24,7 @@ export class KubeDeployModalComponent {
     this.state = 'loadingDeployments';

     ApiService.scGetNumDeployments().then(resp => {
-      this.deploymentsStatus = resp.items.map(dep => ({ name: dep.metadata.name, numPods: dep.status.replicas }));
+      this.deploymentsStatus = resp.items.map(dep => ({ name: dep.metadata.name, numPods: dep.spec.replicas }));
       this.state = 'readyToDeploy';
     }).catch(err => {
       this.state = 'error';
@@ -37,7 +37,7 @@

   deployConfiguration(): void {
     this.ApiService.scDeployConfiguration().then(() => {
-      const deploymentNames: string[]= this.deploymentsStatus.map(dep => dep.name);
+      const deploymentNames: string[] = this.deploymentsStatus.map(dep => dep.name);

       this.ApiService.scCycleQEDeployments({ deploymentNames }).then(() => {
         this.state = 'deployed'
@@ -46,7 +46,6 @@
         this.errorMessage = `Could cycle the deployments with the new configuration. Error: ${err.toString()}`;
       })
     }).catch(err => {
-      console.log(err)
       this.state = 'error';
       this.errorMessage = `Could not deploy the configuration. Error: ${err.toString()}`;
     })
diff --git a/config_app/js/setup/setup.html b/config_app/js/setup/setup.html
index 04f959770..5548e52f5 100644
--- a/config_app/js/setup/setup.html
+++ b/config_app/js/setup/setup.html
@@ -209,19 +209,19 @@