import logging
import etcd
import uuid
import calendar
import os.path
import json

from collections import namedtuple
from datetime import datetime, timedelta

from trollius import From, coroutine, Return, async
from concurrent.futures import ThreadPoolExecutor
from urllib3.exceptions import ReadTimeoutError, ProtocolError

from app import metric_queue
from buildman.manager.basemanager import BaseManager
from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor
from buildman.component.buildcomponent import BuildComponent
from buildman.jobutil.buildjob import BuildJob
from buildman.asyncutil import AsyncWrapper
from buildman.server import BuildJobResult
from util.morecollections import AttrDict


logger = logging.getLogger(__name__)


ETCD_MAX_WATCH_TIMEOUT = 30
RETRY_IMMEDIATELY_TIMEOUT = 0
NO_WORKER_AVAILABLE_TIMEOUT = 10
DEFAULT_EPHEMERAL_API_TIMEOUT = 20
DEFAULT_EPHEMERAL_SETUP_TIMEOUT = 300


class EtcdAction(object):
  GET = 'get'
  SET = 'set'
  EXPIRE = 'expire'
  UPDATE = 'update'
  DELETE = 'delete'
  CREATE = 'create'
  COMPARE_AND_SWAP = 'compareAndSwap'
  COMPARE_AND_DELETE = 'compareAndDelete'


BuildInfo = namedtuple('BuildInfo', ['component', 'build_job', 'execution_id', 'executor_name'])
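
# Illustrative example (a sketch; values are placeholders): _register_realm below constructs
# records such as:
#
#   BuildInfo(component=<BuildComponent instance>, build_job=<BuildJob instance>,
#             execution_id='i-0abc123', executor_name='EC2Executor')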


def _create_async_etcd_client(worker_threads=1, **kwargs):
  client = etcd.Client(**kwargs)
  async_executor = ThreadPoolExecutor(worker_threads)
  return AsyncWrapper(client, executor=async_executor), async_executor
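
# Illustrative usage (a sketch; the kwargs mirror those passed by
# EphemeralBuilderManager.initialize below):
#
#   client, executor = _create_async_etcd_client(5, host='127.0.0.1', port=2379,
#                                                protocol='http', read_timeout=5)
#
# `client` wraps the python-etcd client so its blocking calls run as futures on `executor`,
# the backing ThreadPoolExecutor that shutdown() later tears down.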


class EphemeralBuilderManager(BaseManager):
  """ Build manager implementation for the Enterprise Registry. """

  EXECUTORS = {
    'popen': PopenExecutor,
    'ec2': EC2Executor,
    'kubernetes': KubernetesExecutor,
  }
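
  # Illustrative note: the keys above are the values accepted for the EXECUTOR field in the
  # manager configuration; e.g. a config block of {'EXECUTOR': 'ec2', ...} (a sketch; the
  # remaining keys are executor-specific) selects EC2Executor. See initialize() below for how
  # ordered EXECUTORS blocks are loaded with fallback.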

  def __init__(self, *args, **kwargs):
    self._etcd_client_creator = kwargs.pop('etcd_creator', _create_async_etcd_client)

    super(EphemeralBuilderManager, self).__init__(*args, **kwargs)

    self._shutting_down = False

    self._manager_config = None
    self._async_thread_executor = None
    self._etcd_client = None

    self._etcd_realm_prefix = None
    self._etcd_job_prefix = None

    self._ephemeral_api_timeout = DEFAULT_EPHEMERAL_API_TIMEOUT
    self._ephemeral_setup_timeout = DEFAULT_EPHEMERAL_SETUP_TIMEOUT

    # The registered executors available for running jobs, in order.
    self._ordered_executors = []

    # The registered executors, mapped by their unique name.
    self._executor_name_to_executor = {}

    # Map of etcd keys being watched to the tasks watching them.
    self._watch_tasks = {}

    # Map from builder component to its associated job.
    self._component_to_job = {}

    # Map from build UUID to a BuildInfo tuple with information about the build.
    self._build_uuid_to_info = {}

  def _watch_etcd(self, etcd_key, change_callback, start_index=None, recursive=True,
                  restarter=None):
    watch_task_key = (etcd_key, recursive)
    def callback_wrapper(changed_key_future):
      new_index = start_index
      etcd_result = None

      if not changed_key_future.cancelled():
        try:
          etcd_result = changed_key_future.result()
          existing_index = getattr(etcd_result, 'etcd_index', None)
          new_index = etcd_result.modifiedIndex + 1

          logger.debug('Got watch of key: %s%s at #%s with result: %s', etcd_key,
                       '*' if recursive else '', existing_index, etcd_result)

        except ReadTimeoutError:
          logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)

        except etcd.EtcdEventIndexCleared:
          # This happens if etcd2 has moved forward too fast for us to start watching at the
          # index we retrieved. We therefore start a new watch at HEAD and (if specified) call
          # the restarter method, which should conduct a read and reset the state of the manager.
          logger.exception('Etcd moved forward too quickly. Restarting watch cycle.')
          new_index = None
          if restarter is not None:
            async(restarter())

        except (KeyError, etcd.EtcdKeyError):
          logger.debug('Etcd key already cleared: %s', etcd_key)
          return

        except etcd.EtcdException as eex:
          # TODO(jschorr): This is a quick and dirty hack and should be replaced
          # with a proper exception check.
          if 'Read timed out' in str(eex.message):
            logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
          else:
            logger.exception('Exception on etcd watch: %s', etcd_key)

        except ProtocolError:
          logger.exception('Exception on etcd watch: %s', etcd_key)

      if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
        self._watch_etcd(etcd_key, change_callback, start_index=new_index, recursive=recursive,
                         restarter=restarter)

      if etcd_result:
        change_callback(etcd_result)

    if not self._shutting_down:
      logger.debug('Scheduling watch of key: %s%s at start index %s', etcd_key,
                   '*' if recursive else '', start_index)
      watch_future = self._etcd_client.watch(etcd_key, recursive=recursive, index=start_index,
                                             timeout=ETCD_MAX_WATCH_TIMEOUT)
      watch_future.add_done_callback(callback_wrapper)

      self._watch_tasks[watch_task_key] = async(watch_future)
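
  # Illustrative lifecycle sketch (assumed usage, mirroring initialize() below): a single call
  # installs a watch that re-arms itself from callback_wrapper, so change events keep flowing
  # into the callback until shutdown() cancels the task:
  #
  #   self._watch_etcd('building/', self._handle_job_expiration_or_delete)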

  @coroutine
  def _handle_job_expiration_or_delete(self, etcd_result):
    """ Handler invoked whenever a job expires or is deleted in etcd. """
    if etcd_result is None:
      return

    # Handle the expiration/deletion.
    job_metadata = json.loads(etcd_result._prev_node.value)
    build_job = BuildJob(AttrDict(job_metadata['job_queue_item']))
    logger.debug('Job %s %s', etcd_result.action, build_job.build_uuid)

    # Pop the build info.
    build_info = self._build_uuid_to_info.get(build_job.build_uuid, None)
    if build_info is None:
      logger.debug('No build info for %s job %s (%s); was probably already deleted by this manager',
                   etcd_result.action, build_job.build_uuid, job_metadata)
      return

    # If the etcd action was not an expiration, then the job was already deleted and the
    # execution shut down.
    if etcd_result.action != EtcdAction.EXPIRE:
      # Build information will no longer be needed; pop it off.
      self._build_uuid_to_info.pop(build_job.build_uuid, None)
      return

    execution_id = build_info.execution_id

    # If we have not yet received a heartbeat, then the node failed to boot in some way. We mark
    # the job as incomplete here.
    if not job_metadata.get('had_heartbeat', True):
      logger.warning('Build executor failed to successfully boot with execution id %s',
                     execution_id)
      self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE)

    # Finally, we terminate the build execution for the job.
    logger.info('Terminating expired build executor for job %s with execution id %s',
                build_job.build_uuid, execution_id)
    yield From(self.kill_builder_executor(build_job.build_uuid))

  def _handle_realm_change(self, etcd_result):
    if etcd_result is None:
      return

    if etcd_result.action == EtcdAction.CREATE:
      # We must listen on the realm created by ourselves or another worker.
      realm_spec = json.loads(etcd_result.value)
      self._register_realm(realm_spec)

    elif etcd_result.action == EtcdAction.DELETE or etcd_result.action == EtcdAction.EXPIRE:
      # We must stop listening for new connections on the specified realm, if we did not get
      # the connection.
      realm_spec = json.loads(etcd_result._prev_node.value)
      build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
      build_uuid = build_job.build_uuid

      logger.debug('Realm key %s for build %s', etcd_result.action, build_uuid)
      build_info = self._build_uuid_to_info.get(build_uuid, None)
      if build_info is not None:
        # Pop the job off for the component. If we find one, then the build has not connected
        # to this manager, so we can safely unregister its component.
        job = self._component_to_job.pop(build_info.component, None)
        if job is not None:
          # We were not the manager to which the worker connected, so remove the bookkeeping
          # for the unused component.
          logger.debug('Unregistering unused component for build %s', build_uuid)
          self.unregister_component(build_info.component)

    else:
      logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)

  def _register_realm(self, realm_spec):
    logger.debug('Registering realm with manager: %s', realm_spec['realm'])
    component = self.register_component(realm_spec['realm'], BuildComponent,
                                        token=realm_spec['token'])

    if component in self._component_to_job:
      logger.debug('Realm already registered with manager: %s', realm_spec['realm'])
      return component

    # Create the build information block for the registered realm.
    build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))

    # TODO(jschorr): Remove the back-compat lookups once we've finished the rollout.
    execution_id = realm_spec.get('execution_id', realm_spec.get('builder_id', None))
    executor_name = realm_spec.get('executor_name', 'EC2Executor')

    build_info = BuildInfo(component=component, build_job=build_job, execution_id=execution_id,
                           executor_name=executor_name)

    self._component_to_job[component] = build_job
    self._build_uuid_to_info[build_job.build_uuid] = build_info
    return component

  @property
  def registered_executors(self):
    return self._ordered_executors

  @coroutine
  def _register_existing_realms(self):
    try:
      all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))

      # Register all existing realms found.
      encountered = set()
      for realm in all_realms.children:
        if not realm.dir:
          component = self._register_realm(json.loads(realm.value))
          encountered.add(component)

      # Remove any components not encountered so we can clean up.
      for component, job in list(self._component_to_job.items()):
        if component not in encountered:
          self._component_to_job.pop(component, None)
          self._build_uuid_to_info.pop(job.build_uuid, None)

    except (KeyError, etcd.EtcdKeyError):
      # No realms have been registered yet.
      pass

  def _load_executor(self, executor_kind_name, executor_config):
    executor_klass = EphemeralBuilderManager.EXECUTORS.get(executor_kind_name)
    if executor_klass is None:
      logger.error('Unknown executor %s; skipping install', executor_kind_name)
      return

    executor = executor_klass(executor_config, self.manager_hostname)
    if executor.name in self._executor_name_to_executor:
      raise Exception('Executor with name %s already registered' % executor.name)

    self._ordered_executors.append(executor)
    self._executor_name_to_executor[executor.name] = executor

  def initialize(self, manager_config):
    logger.debug('Calling initialize')
    self._manager_config = manager_config

    # Note: Executor config can be defined either as a single block of EXECUTOR_CONFIG (old style)
    # or as a new set of executor configurations, with the order determining how we fall back. We
    # check for both here to ensure backwards compatibility.
    if manager_config.get('EXECUTORS'):
      for executor_config in manager_config['EXECUTORS']:
        self._load_executor(executor_config.get('EXECUTOR'), executor_config)
    else:
      self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG'))
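
    # Illustrative configuration sketch (assumed shape; only keys read by this module are
    # shown, and all values are placeholders):
    #
    #   manager_config = {
    #     'EXECUTORS': [{'EXECUTOR': 'ec2'}, {'EXECUTOR': 'kubernetes'}],
    #     'ETCD_HOST': 'etcd.internal', 'ETCD_PORT': 2379,
    #     'ETCD_BUILDER_PREFIX': 'building/', 'ETCD_REALM_PREFIX': 'realm/',
    #     'ALLOWED_WORKER_COUNT': 2, 'MACHINE_MAX_TIME': 7200, 'MACHINE_SETUP_TIME': 300,
    #   }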

    etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
    etcd_port = self._manager_config.get('ETCD_PORT', 2379)
    etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)

    etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
    if etcd_auth is not None:
      etcd_auth = tuple(etcd_auth)  # Convert the YAML list to a tuple.

    etcd_protocol = 'http' if etcd_auth is None else 'https'
    logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)

    worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
    (self._etcd_client, self._async_thread_executor) = self._etcd_client_creator(
      worker_threads,
      host=etcd_host,
      port=etcd_port,
      cert=etcd_auth,
      ca_cert=etcd_ca_cert,
      protocol=etcd_protocol,
      read_timeout=5,
    )

    self._etcd_job_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
    self._watch_etcd(self._etcd_job_prefix, self._handle_job_expiration_or_delete)

    self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
    self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change,
                     restarter=self._register_existing_realms)

    self._ephemeral_api_timeout = self._manager_config.get('API_TIMEOUT',
                                                           DEFAULT_EPHEMERAL_API_TIMEOUT)

    self._ephemeral_setup_timeout = self._manager_config.get('SETUP_TIMEOUT',
                                                             DEFAULT_EPHEMERAL_SETUP_TIMEOUT)

    # Load components for all realms currently known to the cluster.
    async(self._register_existing_realms())

  def setup_time(self):
    return self._manager_config.get('MACHINE_SETUP_TIME', 300)

  def shutdown(self):
    logger.debug('Shutting down worker.')
    self._shutting_down = True

    for (etcd_key, _), task in self._watch_tasks.items():
      if not task.done():
        logger.debug('Canceling watch task for %s', etcd_key)
        task.cancel()

    if self._async_thread_executor is not None:
      logger.debug('Shutting down thread pool executor.')
      self._async_thread_executor.shutdown()

  @coroutine
  def schedule(self, build_job):
    build_uuid = build_job.job_details['build_uuid']
    logger.debug('Calling schedule with job: %s', build_uuid)

    # Check if there are worker slots available by checking the number of jobs in etcd.
    allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
    try:
      active_jobs = yield From(self._etcd_client.read(self._etcd_job_prefix, recursive=True))
      workers_alive = sum(1 for child in active_jobs.children if not child.dir)
    except (KeyError, etcd.EtcdKeyError):
      workers_alive = 0
    except etcd.EtcdException:
      logger.exception('Exception when reading job count from etcd for job: %s', build_uuid)
      raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)

    logger.debug('Total jobs (scheduling job %s): %s', build_uuid, workers_alive)

    if workers_alive >= allowed_worker_count:
      logger.info('Too many workers alive, unable to start new worker for build job: %s. %s >= %s',
                  build_uuid, workers_alive, allowed_worker_count)
      raise Return(False, NO_WORKER_AVAILABLE_TIMEOUT)

    job_key = self._etcd_job_key(build_job)

    # First try to take a lock for this job, meaning we will be responsible for its lifeline.
    realm = str(uuid.uuid4())
    token = str(uuid.uuid4())
    nonce = str(uuid.uuid4())
    setup_time = self.setup_time()
    expiration = datetime.utcnow() + timedelta(seconds=setup_time)

    machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200)
    max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration)

    payload = {
      # TODO: remove expiration (but not max_expiration) after migration; it is not used.
      'expiration': calendar.timegm(expiration.timetuple()),
      'max_expiration': calendar.timegm(max_expiration.timetuple()),
      'nonce': nonce,
      'had_heartbeat': False,
      'job_queue_item': build_job.job_item,
    }
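
    # Illustrative sketch of the serialized lock payload (timestamps and the queue item are
    # placeholders):
    #
    #   {"expiration": 1469000300, "max_expiration": 1469007200, "nonce": "<uuid4>",
    #    "had_heartbeat": false, "job_queue_item": {...}}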

    lock_payload = json.dumps(payload)

    try:
      yield From(self._etcd_client.write(job_key, lock_payload, prevExist=False,
                                         ttl=self._ephemeral_setup_timeout))
    except (KeyError, etcd.EtcdKeyError):
      # The job was already taken by someone else; we are probably a retry.
      logger.error('Job: %s already exists in etcd, timeout may be misconfigured', build_uuid)
      raise Return(False, self._ephemeral_api_timeout)
    except etcd.EtcdException:
      logger.exception('Exception when writing job %s to etcd', build_uuid)
      raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)

    started_with_executor = None
    execution_id = None

    logger.debug('Registered executors are: %s', [ex.name for ex in self._ordered_executors])

    for executor in self._ordered_executors:
      # Check if we can use this executor based on its whitelist, by namespace.
      namespace = build_job.namespace
      if not executor.allowed_for_namespace(namespace):
        logger.debug('Job %s (namespace: %s) cannot use executor %s', build_uuid, namespace,
                     executor.name)
        continue

      # Check if we can use this executor based on the retries remaining.
      if executor.minimum_retry_threshold > build_job.retries_remaining:
        logger.debug('Job %s cannot use executor %s as it is below retry threshold %s (retry #%s)',
                     build_uuid, executor.name, executor.minimum_retry_threshold,
                     build_job.retries_remaining)
        continue

      logger.debug('Starting builder for job %s with selected executor: %s', build_uuid,
                   executor.name)

      try:
        execution_id = yield From(executor.start_builder(realm, token, build_uuid))
      except Exception:
        logger.exception('Exception when starting builder for job: %s', build_uuid)
        continue

      try:
        metric_queue.put_deprecated('EphemeralBuilderStarted', 1, unit='Count')
        metric_queue.ephemeral_build_workers.Inc(labelvalues=[execution_id, build_uuid])
      except Exception:
        logger.exception('Exception when writing start metrics for execution %s for job %s',
                         execution_id, build_uuid)

      started_with_executor = executor

      # Break out of the loop now that we've started a builder successfully.
      break

    if started_with_executor is None:
      logger.error('Could not start ephemeral worker for build %s', build_uuid)
      raise Return(False, self._ephemeral_api_timeout)

    logger.debug('Started execution with ID %s for job: %s with executor: %s',
                 execution_id, build_uuid, started_with_executor.name)

    # Store the realm spec, which will allow any manager to accept this builder when it connects.
    realm_spec = json.dumps({
      'realm': realm,
      'token': token,
      'execution_id': execution_id,
      'executor_name': started_with_executor.name,
      'job_queue_item': build_job.job_item,

      # TODO: remove this back-compat field once we finish the rollout.
      'builder_id': execution_id,
    })
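
    # Illustrative sketch of a serialized realm spec (values are placeholders):
    #
    #   {"realm": "<uuid4>", "token": "<uuid4>", "execution_id": "i-0abc123",
    #    "executor_name": "EC2Executor", "builder_id": "i-0abc123", "job_queue_item": {...}}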

    try:
      yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
                                         ttl=setup_time))
    except (KeyError, etcd.EtcdKeyError):
      logger.error('Realm %s already exists in etcd for job %s; '
                   'UUID collision or something is very very wrong.', realm, build_uuid)
      raise Return(False, setup_time)
    except etcd.EtcdException:
      logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
      raise Return(False, setup_time)

    logger.debug('Builder spawn complete for job %s using executor %s with ID %s', build_uuid,
                 started_with_executor.name, execution_id)
    raise Return(True, None)

  @coroutine
  def build_component_ready(self, build_component):
    try:
      # Pop off the job for the component. We do so before we issue the etcd delete below,
      # as the resulting watch event will also remove this mapping.
      job = self._component_to_job.pop(build_component)
      if job is None:
        logger.error('Could not find job for the build component on realm %s',
                     build_component.builder_realm)
        return

      # Clean up the bookkeeping for allowing any manager to take the job.
      yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))

      # Start the build job.
      logger.debug('Sending build %s to newly ready component on realm %s',
                   job.build_uuid, build_component.builder_realm)
      yield From(build_component.start_build(job))
    except (KeyError, etcd.EtcdKeyError):
      logger.warning('Builder is asking for more work, but work already completed')

  def build_component_disposed(self, build_component, timed_out):
    logger.debug('Calling build_component_disposed.')
    self.unregister_component(build_component)

  @coroutine
  def job_completed(self, build_job, job_status, build_component):
    logger.debug('Calling job_completed for job %s with status: %s',
                 build_job.build_uuid, job_status)

    # Mark the job as completed.
    self.job_complete_callback(build_job, job_status)

    # Kill the ephemeral builder.
    yield From(self.kill_builder_executor(build_job.build_uuid))

    # Delete the build job from etcd.
    job_key = self._etcd_job_key(build_job)
    try:
      yield From(self._etcd_client.delete(job_key))
    except (KeyError, etcd.EtcdKeyError):
      logger.debug('Builder is asking for job to be removed, but work already completed')

    logger.debug('job_completed for job %s with status: %s', build_job.build_uuid, job_status)

  @coroutine
  def kill_builder_executor(self, build_uuid):
    logger.info('Starting termination of executor for job %s', build_uuid)
    build_info = self._build_uuid_to_info.pop(build_uuid, None)
    if build_info is None:
      logger.debug('Build information not found for build %s; skipping termination', build_uuid)
      return

    # Remove the build's component.
    self._component_to_job.pop(build_info.component, None)

    # Stop the build node/executor itself.
    executor = self._executor_name_to_executor.get(build_info.executor_name)
    if executor is None:
      logger.error('Could not find registered executor %s for build %s',
                   build_info.executor_name, build_uuid)
      return

    # Terminate the executor's execution.
    logger.info('Terminating executor for job %s with execution id %s',
                build_uuid, build_info.execution_id)
    yield From(executor.stop_builder(build_info.execution_id))

  @coroutine
  def job_heartbeat(self, build_job):
    # Extend the deadline in etcd.
    job_key = self._etcd_job_key(build_job)

    try:
      build_job_metadata_response = yield From(self._etcd_client.read(job_key))
    except (KeyError, etcd.EtcdKeyError):
      logger.info('Job %s no longer exists in etcd', build_job.build_uuid)
      return

    build_job_metadata = json.loads(build_job_metadata_response.value)

    max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
    max_expiration_remaining = max_expiration - datetime.utcnow()
    max_expiration_sec = max(0, int(max_expiration_remaining.total_seconds()))

    ttl = min(self.heartbeat_period_sec * 2, max_expiration_sec)
    new_expiration = datetime.utcnow() + timedelta(seconds=ttl)
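
    # Worked example (illustrative numbers): with heartbeat_period_sec = 30, the candidate TTL
    # is 60 seconds; but if only 45 seconds remain until max_expiration, the TTL is clamped to
    # 45 so that the job key can never outlive the machine's maximum allotted time.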

    payload = {
      # TODO: remove expiration (but not max_expiration) after migration; it is not used.
      'expiration': calendar.timegm(new_expiration.timetuple()),
      'job_queue_item': build_job.job_item,
      'max_expiration': build_job_metadata['max_expiration'],
      'had_heartbeat': True,
    }

    # Note: A TTL of < 0 in etcd results in the key *never* being expired. We use a max here
    # to ensure that if the TTL is < 0, the key will expire immediately.
    etcd_ttl = max(ttl, 0)
    yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=etcd_ttl))
    self.job_heartbeat_callback(build_job)

  def _etcd_job_key(self, build_job):
    """ Create a key which is used to track a job in etcd. """
    return os.path.join(self._etcd_job_prefix, build_job.job_details['build_uuid'])

  def _etcd_realm_key(self, realm):
    """ Create a key which is used to track an incoming connection on a realm. """
    return os.path.join(self._etcd_realm_prefix, realm)
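
  # Illustrative key layout under the default prefixes (UUIDs are placeholders):
  #
  #   building/<build_uuid>  -> job lock/metadata payload written by schedule()
  #   realm/<realm_uuid>     -> realm spec for a booting builder, also written by schedule()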

  def num_workers(self):
    """ Return the number of workers we're managing locally. """
    return len(self._component_to_job)