Merge remote-tracking branch 'origin/master' into pullfail
commit 5388633f9a

100 changed files with 2125 additions and 1008 deletions
@@ -44,9 +44,9 @@ def matches_system_error(status_str):
   KNOWN_MATCHES = ['lxc-start: invalid', 'lxc-start: failed to', 'lxc-start: Permission denied']
 
   for match in KNOWN_MATCHES:
-    # 4 because we might have a Unix control code at the start.
-    found = status_str.find(match[0:len(match) + 4])
-    if found >= 0 and found <= 4:
+    # 10 because we might have a Unix control code at the start.
+    found = status_str.find(match[0:len(match) + 10])
+    if found >= 0 and found <= 10:
       return True
 
   return False
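For readability, here is matches_system_error reassembled from the hunk above as it reads after this change, followed by a couple of hypothetical status strings. Note that match[0:len(match) + 10] always evaluates to the full match string, since slicing past the end of a Python string is harmless; the widened allowance for a leading Unix control code comes entirely from the found <= 10 bound.

def matches_system_error(status_str):
  KNOWN_MATCHES = ['lxc-start: invalid', 'lxc-start: failed to', 'lxc-start: Permission denied']

  for match in KNOWN_MATCHES:
    # 10 because we might have a Unix control code at the start.
    found = status_str.find(match[0:len(match) + 10])
    if found >= 0 and found <= 10:
      return True

  return False

# Hypothetical inputs, for illustration only:
print(matches_system_error('\x1b[31mlxc-start: Permission denied'))  # True: the match starts at offset 5
print(matches_system_error('Step 1 : FROM ubuntu'))                  # False: no known lxc-start error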
@@ -489,6 +489,7 @@ class DockerfileBuildWorker(Worker):
                          container['Id'], container['Command'])
           docker_cl.kill(container['Id'])
+          self._timeout.set()
 
     except ConnectionError as exc:
       raise WorkerUnhealthyException(exc.message)
 
@@ -506,7 +507,7 @@ class DockerfileBuildWorker(Worker):
 
     job_config = json.loads(repository_build.job_config)
 
-    resource_url = user_files.get_file_url(repository_build.resource_key)
+    resource_url = user_files.get_file_url(repository_build.resource_key, requires_cors=False)
     tag_names = job_config['docker_tags']
     build_subdir = job_config['build_subdir']
     repo = job_config['repository']
@@ -625,6 +626,7 @@ class DockerfileBuildWorker(Worker):
 
     except WorkerUnhealthyException as exc:
       # Spawn a notification that the build has failed.
+      log_appender('Worker has become unhealthy. Will retry shortly.', build_logs.ERROR)
       spawn_failure(exc.message, event_data)
 
       # Raise the exception to the queue.
@@ -1,41 +0,0 @@
-import logging
-import argparse
-import requests
-import json
-
-from app import webhook_queue
-from workers.worker import Worker
-
-
-root_logger = logging.getLogger('')
-root_logger.setLevel(logging.DEBUG)
-
-FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
-formatter = logging.Formatter(FORMAT)
-
-logger = logging.getLogger(__name__)
-
-
-class WebhookWorker(Worker):
-  def process_queue_item(self, job_details):
-    url = job_details['url']
-    payload = job_details['payload']
-    headers = {'Content-type': 'application/json'}
-
-    try:
-      resp = requests.post(url, data=json.dumps(payload), headers=headers)
-      if resp.status_code/100 != 2:
-        logger.error('%s response for webhook to url: %s' % (resp.status_code,
-                                                             url))
-        return False
-    except requests.exceptions.RequestException as ex:
-      logger.exception('Webhook was unable to be sent: %s' % ex.message)
-      return False
-
-    return True
-
-logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
-
-worker = WebhookWorker(webhook_queue, poll_period_seconds=15,
-                       reservation_seconds=3600)
-worker.start()
@@ -102,8 +102,8 @@ class Worker(object):
     logger.debug('Running watchdog.')
     try:
       self.watchdog()
-    except WorkerUnhealthyException:
-      logger.error('The worker has encountered an error and will not take new jobs.')
+    except WorkerUnhealthyException as exc:
+      logger.error('The worker has encountered an error via watchdog and will not take new jobs: %s' % exc.message)
       self.mark_current_incomplete(restore_retry=True)
       self._stop.set()
 
@@ -133,10 +133,10 @@ class Worker(object):
         logger.warning('An error occurred processing request: %s', current_queue_item.body)
         self.mark_current_incomplete(restore_retry=False)
 
-      except WorkerUnhealthyException:
-        logger.error('The worker has encountered an error and will not take new jobs. Job is being requeued.')
-        self._stop.set()
+      except WorkerUnhealthyException as exc:
+        logger.error('The worker has encountered an error via the job and will not take new jobs: %s' % exc.message)
+        self.mark_current_incomplete(restore_retry=True)
         self._stop.set()
 
       finally:
         # Close the db handle periodically
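Taken together, the two hunks against class Worker converge on a single recovery pattern for an unhealthy worker: log the underlying message, return the reserved job to the queue with its retry budget restored, and stop polling so the process can be restarted. A minimal, self-contained sketch of that handler follows; WorkerSketch, JobException, and the no-op queue methods are hypothetical stand-ins for surrounding code that this diff does not show.

import logging
from threading import Event

logger = logging.getLogger(__name__)


class JobException(Exception):
  """Hypothetical stand-in for a per-job failure that should not restore the retry."""


class WorkerUnhealthyException(Exception):
  """Raised when the worker itself, not just the job, can no longer do useful work."""


class WorkerSketch(object):
  def __init__(self):
    self._stop = Event()

  def process_queue_item(self, job_details):
    raise NotImplementedError

  def mark_current_complete(self):
    pass  # the real Worker acknowledges the reserved queue item here

  def mark_current_incomplete(self, restore_retry=False):
    pass  # the real Worker puts the reserved item back on the queue here

  def poll_once(self, current_queue_item, job_details):
    try:
      self.process_queue_item(job_details)
      self.mark_current_complete()

    except JobException:
      logger.warning('An error occurred processing request: %s', current_queue_item.body)
      self.mark_current_incomplete(restore_retry=False)

    except WorkerUnhealthyException as exc:
      # Requeue the job with its retry restored, then refuse further work so the
      # worker can be restarted or replaced.
      logger.error('The worker has encountered an error via the job and will not take new jobs: %s', exc)
      self.mark_current_incomplete(restore_retry=True)
      self._stop.set()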