import logging
import daemon
import argparse
import os
import requests
import re
import json
import shutil
import tarfile

from docker import Client, APIError
from tempfile import TemporaryFile, mkdtemp
from zipfile import ZipFile
from functools import partial
from datetime import datetime, timedelta
from threading import Event
from uuid import uuid4

from data.queue import dockerfile_build_queue
from data import model
from workers.worker import Worker
from app import app, userfiles as user_files
from util.safetar import safe_extractall
from util.dockerfileparse import parse_dockerfile, ParsedDockerfile, serialize_dockerfile

root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)

FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
formatter = logging.Formatter(FORMAT)

logger = logging.getLogger(__name__)

build_logs = app.config['BUILDLOGS']

TIMEOUT_PERIOD_MINUTES = 20
CACHE_EXPIRATION_PERIOD_HOURS = 24


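# Small context manager around the build status dict; every time the dict is
# released (including once at construction) it is persisted via
# build_logs.set_status.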
class StatusWrapper(object):
  def __init__(self, build_uuid):
    self._uuid = build_uuid
    self._status = {
      'total_commands': None,
      'current_command': None,
      'push_completion': 0.0,
      'image_completion': {},
    }

    self.__exit__(None, None, None)

  def __enter__(self):
    return self._status

  def __exit__(self, exc_type, value, traceback):
    build_logs.set_status(self._uuid, self._status)


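# The Docker remote API streams build and push progress as JSON documents
# concatenated on a chunked HTTP response. The helpers below decode that
# stream incrementally, carrying any trailing partial document over to the
# next chunk.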
class _IncompleteJsonError(Exception):
  def __init__(self, start_from):
    self.start_from = start_from


class _StreamingJSONDecoder(json.JSONDecoder):
  FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
  WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)

  def decode(self, s, _w=WHITESPACE.match):
    """Generate the Python representation of each complete JSON document in
    ``s`` (a ``str`` or ``unicode`` instance), raising ``_IncompleteJsonError``
    with the resume offset when only a partial document remains.
    """
    start_from = 0
    while start_from < len(s):
      try:
        obj, end = self.raw_decode(s[start_from:], idx=_w(s[start_from:], 0).end())
      except ValueError:
        raise _IncompleteJsonError(start_from)
      end = _w(s[start_from:], end).end()
      start_from += end
      yield obj


class StreamingDockerClient(Client):
  def _stream_helper(self, response):
    """Generator for data coming from a chunked-encoded HTTP response."""
    content_buf = ''
    for content in response.iter_content(chunk_size=256):
      content_buf += content
      try:
        for val in json.loads(content_buf, cls=_StreamingJSONDecoder):
          yield val
        content_buf = ''
      except _IncompleteJsonError as exc:
        content_buf = content_buf[exc.start_from:]


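# Wraps a single Dockerfile build: prepares the build context, drives the
# docker build and push, and on entry/exit cleans up containers and renames or
# evicts cached images so private tags do not persist between builds.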
class DockerfileBuildContext(object):
  image_id_to_cache_time = {}
  private_repo_tags = set()

  def __init__(self, build_context_dir, dockerfile_subdir, repo, tag_names,
               push_token, build_uuid, pull_credentials=None):
    self._build_dir = build_context_dir
    self._dockerfile_subdir = dockerfile_subdir
    self._repo = repo
    self._tag_names = tag_names
    self._push_token = push_token
    self._status = StatusWrapper(build_uuid)
    self._build_logger = partial(build_logs.append_log_message, build_uuid)
    self._pull_credentials = pull_credentials
    self._public_repos = set()

    # Note: We have two different clients here because we (potentially) login
    # with both, but with different credentials that we do not want shared between
    # the build and push operations.
    self._push_cl = StreamingDockerClient(timeout=1200)
    self._build_cl = StreamingDockerClient(timeout=1200)

    dockerfile_path = os.path.join(self._build_dir, dockerfile_subdir,
                                   'Dockerfile')

    # Compute the number of steps
    with open(dockerfile_path, 'r') as dockerfileobj:
      self._parsed_dockerfile = parse_dockerfile(dockerfileobj.read())

    self.__inject_quay_repo_env(self._parsed_dockerfile, repo)
    self._num_steps = len(self._parsed_dockerfile.commands)

    with open(dockerfile_path, 'w') as dockerfileobj:
      dockerfileobj.write(serialize_dockerfile(self._parsed_dockerfile))

    logger.debug('Will build and push to repo %s with tags named: %s' %
                 (self._repo, self._tag_names))

  def __enter__(self):
    self.__cleanup_containers()
    self.__evict_expired_images()
    self.__cleanup()
    return self

  def __exit__(self, exc_type, value, traceback):
    self.__cleanup_containers()
    self.__cleanup()

    shutil.rmtree(self._build_dir)

  @staticmethod
  def __inject_quay_repo_env(parsed_dockerfile, quay_reponame):
    env_command = {
      'command': 'ENV',
      'parameters': 'QUAY_REPOSITORY %s' % quay_reponame
    }
    for index, command in reversed(list(enumerate(parsed_dockerfile.commands))):
      if command['command'] == 'FROM':
        new_command_index = index + 1
        logger.debug('Injecting env command at dockerfile index: %s', new_command_index)
        parsed_dockerfile.commands.insert(new_command_index, env_command)
        break

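  # Push completion is estimated as (sent bytes / total bytes) across the
  # images reporting progress, scaled by the fraction of images reporting at all.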
  @staticmethod
  def __total_completion(statuses, total_images):
    percentage_with_sizes = float(len(statuses.values()))/total_images
    sent_bytes = sum([status[u'current'] for status in statuses.values()])
    total_bytes = sum([status[u'total'] for status in statuses.values()])
    return float(sent_bytes)/total_bytes*percentage_with_sizes

  def build(self):
    # Login with the specified credentials (if any).
    if self._pull_credentials:
      logger.debug('Logging in with pull credentials: %s@%s',
                   self._pull_credentials['username'], self._pull_credentials['registry'])
      self._build_cl.login(self._pull_credentials['username'], self._pull_credentials['password'],
                           registry=self._pull_credentials['registry'], reauth=True)

    # Pull the base image, in case it was updated since the last build
    base_image = self._parsed_dockerfile.get_base_image()
    self._build_logger('Pulling base image: %s' % base_image)
    self._build_cl.pull(base_image)

    # Start the build itself.
    logger.debug('Starting build.')

    with self._status as status:
      status['total_commands'] = self._num_steps

    logger.debug('Building to tags named: %s' % self._tag_names)
    context_path = os.path.join(self._build_dir, self._dockerfile_subdir)

    logger.debug('Final context path: %s exists: %s' %
                 (context_path, os.path.exists(context_path)))

    build_status = self._build_cl.build(path=context_path, stream=True)

    current_step = 0
    built_image = None
    for status in build_status:
      fully_unwrapped = ""
      if isinstance(status, dict):
        keys_to_extract = ['error', 'status', 'stream']
        for key in keys_to_extract:
          if key in status:
            fully_unwrapped = status[key]
            break

        if not fully_unwrapped:
          logger.debug('Status dict did not have any extractable keys and was: %s', status)
      elif isinstance(status, basestring):
        fully_unwrapped = status

      status_str = str(fully_unwrapped.encode('utf-8'))
      logger.debug('Status: %s', status_str)
      step_increment = re.search(r'Step ([0-9]+) :', status_str)
      if step_increment:
        self._build_logger(status_str, build_logs.COMMAND)
        current_step = int(step_increment.group(1))
        logger.debug('Step now: %s/%s' % (current_step, self._num_steps))
        with self._status as status_update:
          status_update['current_command'] = current_step
        continue
      else:
        self._build_logger(status_str)

      complete = re.match(r'Successfully built ([a-z0-9]+)$', status_str)
      if complete:
        built_image = complete.group(1)
        logger.debug('Final image ID is: %s' % built_image)
        continue

    # Fail if no final image id was ever reported.
    if not built_image:
      return

    return built_image

  def push(self, built_image):
    # Login to the registry
    host = re.match(r'([a-z0-9.:]+)/.+/.+$', self._repo)
    if not host:
      raise RuntimeError('Invalid repo name: %s' % self._repo)

    for protocol in ['https', 'http']:
      registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
      logger.debug('Attempting login to registry: %s' % registry_endpoint)

      try:
        self._push_cl.login('$token', self._push_token, registry=registry_endpoint)
        break
      except APIError:
        pass  # Probably the wrong protocol

    for tag in self._tag_names:
      logger.debug('Tagging image %s as %s:%s' %
                   (built_image, self._repo, tag))
      self._push_cl.tag(built_image, self._repo, tag)

    history = json.loads(self._push_cl.history(built_image))
    num_images = len(history)
    with self._status as status:
      status['total_images'] = num_images

    logger.debug('Pushing to repo %s' % self._repo)
    resp = self._push_cl.push(self._repo, stream=True)

    for status in resp:
      logger.debug('Status: %s', status)
      if u'status' in status:
        status_msg = status[u'status']

        if status_msg == 'Pushing':
          if u'progressDetail' in status and u'id' in status:
            image_id = status[u'id']
            detail = status[u'progressDetail']

            if u'current' in detail and u'total' in detail:
              # Use a distinct name so we don't shadow the loop's status dict.
              with self._status as status_update:
                images = status_update['image_completion']

                images[image_id] = detail
                status_update['push_completion'] = \
                  DockerfileBuildContext.__total_completion(images, num_images)

      elif u'errorDetail' in status:
        message = 'Error pushing image.'
        if u'message' in status[u'errorDetail']:
          message = str(status[u'errorDetail'][u'message'])

        raise RuntimeError(message)

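  # A repository is considered public when its registry answers the
  # /v1/repositories/<name>/images endpoint with a 2xx without credentials;
  # positive results are memoized in self._public_repos.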
  def __is_repo_public(self, repo_name):
    if repo_name in self._public_repos:
      return True

    repo_portions = repo_name.split('/')
    registry_hostname = 'index.docker.io'
    local_repo_name = repo_name
    if len(repo_portions) > 2:
      registry_hostname = repo_portions[0]
      local_repo_name = '/'.join(repo_portions[1:])

    repo_url_template = '%s://%s/v1/repositories/%s/images'
    protocols = ['https', 'http']
    secure_repo_url, repo_url = [repo_url_template % (protocol, registry_hostname, local_repo_name)
                                 for protocol in protocols]

    try:
      try:
        repo_info = requests.get(secure_repo_url)
      except requests.exceptions.SSLError:
        repo_info = requests.get(repo_url)
    except requests.exceptions.ConnectionError:
      return False

    if repo_info.status_code / 100 == 2:
      self._public_repos.add(repo_name)
      return True
    else:
      return False

  def __cleanup_containers(self):
    # First clean up any containers that might be holding the images
    for running in self._build_cl.containers(quiet=True):
      logger.debug('Killing container: %s' % running['Id'])
      self._build_cl.kill(running['Id'])

    # Next, remove all of the containers (which should all now be killed)
    for container in self._build_cl.containers(all=True, quiet=True):
      logger.debug('Removing container: %s' % container['Id'])
      self._build_cl.remove_container(container['Id'])

  def __evict_expired_images(self):
    logger.debug('Cleaning images older than %s hours.', CACHE_EXPIRATION_PERIOD_HOURS)
    now = datetime.now()
    verify_removed = set()

    for image in self._build_cl.images():
      image_id = image[u'Id']
      created = datetime.fromtimestamp(image[u'Created'])

      # If we don't have a cache time, use the created time (e.g. worker reboot)
      cache_time = self.image_id_to_cache_time.get(image_id, created)
      expiration = cache_time + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS)

      if expiration < now:
        logger.debug('Removing expired image: %s' % image_id)

        for tag in image['RepoTags']:
          # We can forget about this particular tag if it was indeed one of our renamed tags
          self.private_repo_tags.discard(tag)

          try:
            self._build_cl.remove_image(tag)
          except APIError:
            # Sometimes an upstream image removed this one
            pass

        try:
          self._build_cl.remove_image(image_id)
        except APIError:
          # Sometimes an upstream image removed this one
          pass
        verify_removed.add(image_id)

    # Verify that our images were actually removed
    for image in self._build_cl.images():
      if image['Id'] in verify_removed:
        logger.warning('Image was not removed: %s' % image['Id'])

  def __cleanup(self):
    # Iterate all of the images and rename the ones that aren't public. This should preserve
    # base images and also allow the cache to function.
    now = datetime.now()
    for image in self._build_cl.images():
      image_id = image[u'Id']

      if image_id not in self.image_id_to_cache_time:
        logger.debug('Setting image %s cache time to %s', image_id, now)
        self.image_id_to_cache_time[image_id] = now

      for tag in image['RepoTags']:
        tag_repo = ParsedDockerfile.base_image_from_repo_identifier(tag)
        if tag_repo != '<none>':
          if tag_repo in self.private_repo_tags:
            logger.debug('Repo is private and has already been renamed: %s' % tag_repo)
          elif self.__is_repo_public(tag_repo):
            logger.debug('Repo was deemed public: %s', tag_repo)
          else:
            new_name = str(uuid4())
            logger.debug('Private repo tag being renamed %s -> %s', tag, new_name)
            self._build_cl.tag(image_id, new_name)
            self._build_cl.remove_image(tag)
            self.private_repo_tags.add(new_name)


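# Queue worker that downloads the uploaded build resource, unpacks it based on
# its MIME type, and runs the build and push inside a DockerfileBuildContext.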
class DockerfileBuildWorker(Worker):
  def __init__(self, *vargs, **kwargs):
    super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)

    self._mime_processors = {
      'application/zip': DockerfileBuildWorker.__prepare_zip,
      'application/x-zip-compressed': DockerfileBuildWorker.__prepare_zip,
      'text/plain': DockerfileBuildWorker.__prepare_dockerfile,
      'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
      'application/x-tar': DockerfileBuildWorker.__prepare_tarball,
      'application/gzip': DockerfileBuildWorker.__prepare_tarball,
      'application/x-gzip': DockerfileBuildWorker.__prepare_tarball,
    }

    self._timeout = Event()

  @staticmethod
  def __prepare_zip(request_file):
    build_dir = mkdtemp(prefix='docker-build-')

    # Save the zip file to temp somewhere
    with TemporaryFile() as zip_file:
      zip_file.write(request_file.content)
      to_extract = ZipFile(zip_file)
      to_extract.extractall(build_dir)

    return build_dir

  @staticmethod
  def __prepare_dockerfile(request_file):
    build_dir = mkdtemp(prefix='docker-build-')
    dockerfile_path = os.path.join(build_dir, "Dockerfile")
    with open(dockerfile_path, 'w') as dockerfile:
      dockerfile.write(request_file.content)

    return build_dir

  @staticmethod
  def __prepare_tarball(request_file):
    build_dir = mkdtemp(prefix='docker-build-')

    # Stream the tarball and extract it safely into the build directory
    with tarfile.open(mode='r|*', fileobj=request_file.raw) as tar_stream:
      safe_extractall(tar_stream, build_dir)

    return build_dir

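  # Watchdog hook: kill any container that has been running longer than the
  # timeout and set the timeout flag so the failure can be reported as such.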
  def watchdog(self):
    logger.debug('Running build watchdog code.')

    docker_cl = Client()

    # Iterate the running containers and kill ones that have been running more than 20 minutes
    for container in docker_cl.containers():
      start_time = datetime.fromtimestamp(container[u'Created'])
      running_time = datetime.now() - start_time
      if running_time > timedelta(minutes=TIMEOUT_PERIOD_MINUTES):
        logger.warning('Container has been running too long: %s with command: %s',
                       container[u'Id'], container[u'Command'])
        docker_cl.kill(container[u'Id'])
        self._timeout.set()

  def process_queue_item(self, job_details):
    self._timeout.clear()

    repository_build = model.get_repository_build(job_details['namespace'],
                                                  job_details['repository'],
                                                  job_details['build_uuid'])

    pull_credentials = job_details.get('pull_credentials', None)

    job_config = json.loads(repository_build.job_config)

    resource_url = user_files.get_file_url(repository_build.resource_key)
    tag_names = job_config['docker_tags']
    build_subdir = job_config['build_subdir']
    repo = job_config['repository']

    access_token = repository_build.access_token.code

    log_appender = partial(build_logs.append_log_message,
                           repository_build.uuid)

    log_appender('initializing', build_logs.PHASE)

    start_msg = ('Starting job with resource url: %s repo: %s' % (resource_url,
                                                                  repo))
    logger.debug(start_msg)

    docker_resource = requests.get(resource_url, stream=True)
    c_type = docker_resource.headers['content-type']

    if ';' in c_type:
      c_type = c_type.split(';')[0]

    filetype_msg = ('Request to build type: %s with repo: %s and tags: %s' %
                    (c_type, repo, tag_names))
    logger.info(filetype_msg)
    log_appender(filetype_msg)

    if c_type not in self._mime_processors:
      log_appender('error', build_logs.PHASE)
      repository_build.phase = 'error'
      repository_build.save()
      log_appender('Unknown mime-type: %s' % c_type, build_logs.ERROR)
      return True

    build_dir = self._mime_processors[c_type](docker_resource)
    log_appender('building', build_logs.PHASE)
    repository_build.phase = 'building'
    repository_build.save()

    with DockerfileBuildContext(build_dir, build_subdir, repo, tag_names,
                                access_token,
                                repository_build.uuid, pull_credentials) as build_ctxt:
      try:
        built_image = build_ctxt.build()

        if not built_image:
          log_appender('error', build_logs.PHASE)
          repository_build.phase = 'error'
          repository_build.save()
          if self._timeout.is_set():
            log_appender('Build step was terminated after %s minutes.' % TIMEOUT_PERIOD_MINUTES,
                         build_logs.ERROR)
          else:
            log_appender('Unable to build dockerfile.', build_logs.ERROR)
          return True

        log_appender('pushing', build_logs.PHASE)
        repository_build.phase = 'pushing'
        repository_build.save()

        build_ctxt.push(built_image)

        log_appender('complete', build_logs.PHASE)
        repository_build.phase = 'complete'
        repository_build.save()

      except Exception as exc:
        log_appender('error', build_logs.PHASE)
        logger.exception('Exception when processing request.')
        repository_build.phase = 'error'
        repository_build.save()
        log_appender(str(exc), build_logs.ERROR)
        return True

    return True


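# Command-line entry point: parse the flags, then run the worker either
# daemonized (logging to a file) or in the foreground (logging to stderr).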
desc = 'Worker daemon to monitor dockerfile build'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', action='store_true', default=False,
                    help='Run the worker in daemon mode.')
parser.add_argument('--log', default='dockerfilebuild.log',
                    help='Specify the log file for the worker as a daemon.')
args = parser.parse_args()

worker = DockerfileBuildWorker(dockerfile_build_queue, reservation_seconds=60*60)  # 1 hour

if args.D:
  handler = logging.FileHandler(args.log)
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  with daemon.DaemonContext(files_preserve=[handler.stream]):
    worker.start()

else:
  handler = logging.StreamHandler()
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  worker.start()