Small code cleanup before whitelist addition
parent 1e3351f3f4
commit 7471d0e35f

3 changed files with 63 additions and 63 deletions
@@ -59,6 +59,9 @@ class EphemeralBuilderManager(BaseManager):
     self._etcd_realm_prefix = None
     self._etcd_builder_prefix = None
 
+    self._etcd_lock_prefix = None
+    self._ephemeral_api_timeout = DEFAULT_EPHEMERAL_API_TIMEOUT
+
     self._component_to_job = {}
     self._job_uuid_to_component = {}
     self._component_to_builder = {}
@@ -95,7 +98,6 @@ class EphemeralBuilderManager(BaseManager):
       # at the index we retrieved. We therefore start a new watch at HEAD and
       # (if specified) call the restarter method which should conduct a read and
       # reset the state of the manager.
-      # TODO: Remove this hack once Etcd is fixed.
       logger.exception('Etcd moved forward too quickly. Restarting watch cycle.')
       new_index = None
       if restarter is not None:
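The comments kept by this hunk describe the watch-recovery behavior: when etcd reports that the watched index has been cleared, the manager restarts the watch at HEAD and lets an optional restarter callback re-read and reset state. A minimal sketch of that pattern with python-etcd; apart from `etcd.EtcdEventIndexCleared`, the names here are illustrative, not taken from the diff:

```python
import etcd  # python-etcd, the client used by this manager


def watch_with_recovery(client, key, handler, restarter=None):
    index = None
    while True:
        try:
            event = client.watch(key, index=index, recursive=True)
            handler(event)
            # Resume the next watch just past the event we handled.
            index = event.modifiedIndex + 1
        except etcd.EtcdEventIndexCleared:
            # Etcd moved past the index we held: restart the watch at HEAD
            # and let the caller re-read and reset its state.
            index = None
            if restarter is not None:
                restarter()
```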
@@ -327,6 +329,7 @@ class EphemeralBuilderManager(BaseManager):
       'had_heartbeat': False,
       'job_queue_item': build_job.job_item,
     }
+
     lock_payload = json.dumps(payload)
 
     try:
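For context, the payload above is serialized and written as the value of an etcd key guarding the job. A hedged sketch of the create-if-absent pattern this sets up; the key name, TTL, and function name are illustrative assumptions, only the payload shape and `json.dumps` come from the diff:

```python
import json

import etcd


def try_acquire_job_lock(client, lock_key, build_job, ttl=30):
    payload = {
        'had_heartbeat': False,
        'job_queue_item': build_job.job_item,
    }
    lock_payload = json.dumps(payload)
    try:
        # prevExist=False makes the write fail if another manager already
        # holds the key, so only one manager wins the job.
        client.write(lock_key, lock_payload, prevExist=False, ttl=ttl)
        return True
    except etcd.EtcdAlreadyExist:
        return False
```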
@@ -388,6 +391,7 @@ class EphemeralBuilderManager(BaseManager):
     except etcd.EtcdException:
       logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
       raise Return(False, setup_time)
+
     self._job_to_executor[builder_id] = executor
 
     raise Return(True, None)
@@ -6,7 +6,6 @@ import boto.ec2
 import requests
 import cachetools
 import trollius
-import json
 import datetime
 import release
 import socket
@@ -62,9 +61,6 @@ class BuilderExecutor(object):
     """
     raise NotImplementedError
 
-  def get_manager_websocket_url(self):
-    return 'ws://{0}:'
-
   def generate_cloud_config(self, realm, token, coreos_channel, manager_hostname,
                             quay_username=None, quay_password=None):
     if quay_username is None:
@@ -229,7 +225,7 @@ class PopenExecutor(BuilderExecutor):
     builder_env = {
       'TOKEN': token,
       'REALM': realm,
-      'ENDPOINT': 'ws://%s:%s' % (ws_host,ws_port),
+      'ENDPOINT': 'ws://%s:%s' % (ws_host, ws_port),
       'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''),
       'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''),
       'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''),
@@ -258,7 +254,8 @@ class PopenExecutor(BuilderExecutor):
 
 
 class KubernetesExecutor(BuilderExecutor):
-  """ Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual machine in a pod """
+  """ Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual
+  machine in a pod """
   def __init__(self, *args, **kwargs):
     self._loop = get_event_loop()
     super(KubernetesExecutor, self).__init__(*args, **kwargs)
@@ -286,7 +283,8 @@ class KubernetesExecutor(BuilderExecutor):
 
     server = self.executor_config.get('K8S_API_SERVER', 'localhost:8080')
     url = '%s://%s%s' % (scheme, server, path)
-    logger.debug('EXEC CFG: %s',self.executor_config)
+
+    logger.debug('Executor config: %s', self.executor_config)
     logger.debug('Kubernetes request: %s %s: %s', method, url, request_options)
     res = requests.request(method, url, **request_options)
     logger.debug('Kubernetes response: %s: %s', res.status_code, res.text)
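The renamed debug line sits inside the executor's Kubernetes request helper. A rough, synchronous sketch of that helper's shape; only `K8S_API_SERVER`, its default, and the log lines are taken from the diff, while the hard-coded scheme and the function name are assumptions:

```python
import logging

import requests

logger = logging.getLogger(__name__)


def k8s_request(executor_config, method, path, **request_options):
    server = executor_config.get('K8S_API_SERVER', 'localhost:8080')
    url = '%s://%s%s' % ('http', server, path)  # scheme assumed for the sketch
    logger.debug('Executor config: %s', executor_config)
    logger.debug('Kubernetes request: %s %s: %s', method, url, request_options)
    res = requests.request(method, url, **request_options)
    logger.debug('Kubernetes response: %s: %s', res.status_code, res.text)
    return res
```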
@@ -343,16 +341,16 @@ class KubernetesExecutor(BuilderExecutor):
             'name': 'builder',
             'image': '%s:%s' % (self.image, coreos_channel),
             'imagePullPolicy': 'Always',
-            'securityContext': { 'privileged': True },
+            'securityContext': {'privileged': True},
             'env': [
-              { 'name': 'USERDATA', 'value': user_data },
-              { 'name': 'VM_MEMORY', 'value': vm_memory_limit },
+              {'name': 'USERDATA', 'value': user_data},
+              {'name': 'VM_MEMORY', 'value': vm_memory_limit},
             ],
             'limits' : container_limits,
             'requests' : container_requests,
           },
         ],
-        'imagePullSecrets': [{ 'name': 'builder' }],
+        'imagePullSecrets': [{'name': 'builder'}],
         'restartPolicy': 'Never',
       },
     },
@@ -370,8 +368,8 @@ class KubernetesExecutor(BuilderExecutor):
     # schedule
     create_job = yield From(self._request('POST', self._jobs_path(), json=resource))
     if int(create_job.status_code / 100) != 2:
-      raise ExecutorException('Failed to create job: %s: %s: %s' % (
-        build_uuid, create_job.status_code, create_job.text))
+      raise ExecutorException('Failed to create job: %s: %s: %s' %
+                              (build_uuid, create_job.status_code, create_job.text))
 
     job = create_job.json()
     raise Return(job['metadata']['name'])
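Aside from the reflowed exception text, note how the guard above accepts any 2xx response: integer-dividing the status code by 100 buckets it by class. A quick check:

```python
for code in (200, 201, 204, 409):
    print(code, int(code / 100) != 2)
# 200 False, 201 False, 204 False, 409 True  (only non-2xx codes raise)
```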
@@ -380,21 +378,19 @@ class KubernetesExecutor(BuilderExecutor):
   def stop_builder(self, builder_id):
     pods_path = '/api/v1/namespaces/%s/pods' % self.namespace
 
+    # Delete the pod(s) for the job.
     selectorString = "job-name=%s" % builder_id
     try:
-      delete_pod = yield From(self._request('DELETE', pods_path, params=dict(labelSelector=selectorString)))
+      yield From(self._request('DELETE', pods_path, params=dict(labelSelector=selectorString)))
     except:
-      # if the pod does not exist, we will not get an error here. this covers lack of api connectivity, etc
-      logger.exception("Failed to delete pod for job %s", builder_id)
-      raise
+      logger.exception("Failed to send delete pod call for job %s", builder_id)
 
-    logger.debug("Got successful delete pod response: %s", delete_pod.text)
-
+    # Delete the job itself.
     try:
-      delete_job = yield From(self._request('DELETE', self._job_path(builder_id)))
+      yield From(self._request('DELETE', self._job_path(builder_id)))
     except:
-      logger.exception('Exception when trying to terminate job %s', builder_id)
-      raise
+      logger.exception('Failed to send delete job call for job %s', builder_id)
 
 
 class LogPipe(threading.Thread):
   """ Adapted from http://codereview.stackexchange.com/a/17959
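After this hunk, stop_builder's teardown order is: delete the job's pods by their job-name label, then delete the job object itself, logging but no longer re-raising on failure. A self-contained sketch of those two calls using plain requests; the base URL and the jobs path are assumptions (the diff only shows the pods path and builds the job path via _job_path):

```python
import logging

import requests

logger = logging.getLogger(__name__)


def stop_builder(api_base, namespace, builder_id):
    # Delete the pod(s) for the job, selected by the job-name label.
    pods_path = '/api/v1/namespaces/%s/pods' % namespace
    try:
        requests.delete(api_base + pods_path,
                        params={'labelSelector': 'job-name=%s' % builder_id})
    except requests.RequestException:
        logger.exception('Failed to send delete pod call for job %s', builder_id)

    # Delete the job itself (path assumed for this sketch).
    job_path = '/apis/batch/v1/namespaces/%s/jobs/%s' % (namespace, builder_id)
    try:
        requests.delete(api_base + job_path)
    except requests.RequestException:
        logger.exception('Failed to send delete job call for job %s', builder_id)
```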
@@ -19,7 +19,7 @@ write_files:
       content: |
         REALM={{ realm }}
         TOKEN={{ token }}
-        SERVER={{websocket_scheme}}://{{ manager_hostname }}
+        SERVER={{ websocket_scheme }}://{{ manager_hostname }}
        {% if logentries_token -%}
         LOGENTRIES_TOKEN={{ logentries_token }}
        {%- endif %}
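The SERVER line change is purely cosmetic: Jinja2 ignores whitespace inside {{ }} delimiters, so both spellings render identically. A quick check:

```python
from jinja2 import Template

print(Template('SERVER={{websocket_scheme}}').render(websocket_scheme='wss'))
print(Template('SERVER={{ websocket_scheme }}').render(websocket_scheme='wss'))
# Both print: SERVER=wss
```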