Separate out the dockerfile build stuff to a separate class. Add a dependency on docker-py master.
parent 72c380d9a1
commit 67e0736fc6
4 changed files with 184 additions and 176 deletions
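The change below replaces the worker's static __build_image/__push_image helpers with a DockerfileBuildContext context manager that owns the docker-py Client, tracks build and push status, and tears down containers, images, and the build directory on exit. As an illustration only (not part of the diff), here is a minimal sketch of the resulting call pattern, mirroring process_queue_item further down; the module name and all argument values are hypothetical placeholders:

# Illustrative sketch, not part of the commit. The import path and every
# argument value below are hypothetical placeholders.
from dockerfilebuild import DockerfileBuildContext  # hypothetical module name

build_dir = '/tmp/docker-build-example'   # placeholder: directory containing a Dockerfile
tag_name = 'localhost:5000/ns/repo'       # placeholder: registry host/namespace/repo
access_token = 'push-token'               # placeholder
build_uuid = 'build-uuid'                 # placeholder

with DockerfileBuildContext(build_dir, tag_name, access_token, build_uuid) as build_ctxt:
  built_image = build_ctxt.build()        # streams docker build output, returns the image id or None
  if built_image:
    build_ctxt.push(built_image)          # logs in to the registry and streams the push
# on exit the context runs its cleanup and removes build_dir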
@@ -1186,8 +1186,8 @@ def get_repo_build_logs(namespace, repository, build_uuid):
  if permission.can():
    build = model.get_repository_build(namespace, repository, build_uuid)

    start = request.args.get('start', -1000)
    end = request.args.get('end', -1)
    start = int(request.args.get('start', -1000))
    end = int(request.args.get('end', -1))
    count, logs = build_logs.get_log_entries(build.uuid, start, end)

    if start < 0:
@@ -20,4 +20,5 @@ python-digitalocean
xhtml2pdf
logstash_formatter
redis
hiredis
hiredis
git+https://github.com/dotcloud/docker-py.git
@@ -13,6 +13,7 @@ beautifulsoup4==4.3.2
blinker==1.3
boto==2.24.0
distribute==0.6.34
git+https://github.com/dotcloud/docker-py.git
ecdsa==0.10
gevent==1.0
greenlet==0.4.2
@@ -24,6 +25,7 @@ lockfile==0.9.1
logstash-formatter==0.5.8
marisa-trie==0.5.1
mixpanel-py==3.1.1
mock==1.0.1
paramiko==1.12.1
peewee==2.2.0
py-bcrypt==0.4
@@ -37,5 +39,6 @@ reportlab==2.7
requests==2.2.1
six==1.5.2
stripe==1.12.0
websocket-client==0.11.0
wsgiref==0.1.2
xhtml2pdf==0.0.5
@@ -48,15 +48,27 @@ class StatusWrapper(object):
    build_logs.set_status(self._uuid, self._status)


class DockerfileBuildWorker(Worker):
  def __init__(self, *vargs, **kwargs):
    super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)
class DockerfileBuildContext(object):
  def __init__(self, build_context_dir, tag_name, push_token, build_uuid):
    self._build_dir = build_context_dir
    self._tag_name = tag_name
    self._push_token = push_token
    self._build_uuid = build_uuid
    self._cl = Client(timeout=1200)
    self._status = StatusWrapper(self._build_uuid)

    self._mime_processors = {
      'application/zip': DockerfileBuildWorker.__prepare_zip,
      'text/plain': DockerfileBuildWorker.__prepare_dockerfile,
      'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
    }
    dockerfile_path = os.path.join(self._build_dir, "Dockerfile")
    self._num_steps = DockerfileBuildContext.__count_steps(dockerfile_path)

    logger.debug('Will build and push to tag named: %s' % self._tag_name)

  def __enter__(self):
    return self

  def __exit__(self, exc_type, value, traceback):
    self.__cleanup()

    shutil.rmtree(self._build_dir)

  @staticmethod
  def __count_steps(dockerfile_path):
@@ -68,6 +80,150 @@ class DockerfileBuildWorker(Worker):
          steps += 1
    return steps

  @staticmethod
  def __total_completion(statuses, total_images):
    percentage_with_sizes = float(len(statuses.values()))/total_images
    sent_bytes = sum([status[u'current'] for status in statuses.values()])
    total_bytes = sum([status[u'total'] for status in statuses.values()])
    return float(sent_bytes)/total_bytes*percentage_with_sizes

  def build(self):
    logger.debug('Starting build.')

    with self._status as status:
      status['total_commands'] = self._num_steps

    logger.debug('Building to tag names: %s' % self._tag_name)
    build_status = self._cl.build(path=self._build_dir, tag=self._tag_name,
                                  stream=True)

    current_step = 0
    built_image = None
    for status in build_status:
      logger.debug('Status: %s', str(status))
      build_logs.append_log_message(self._build_uuid, str(status))
      step_increment = re.search(r'Step ([0-9]+) :', status)
      if step_increment:
        current_step = int(step_increment.group(1))
        logger.debug('Step now: %s/%s' % (current_step, self._num_steps))
        with self._status as status:
          status['current_command'] = current_step
        continue

      complete = re.match(r'Successfully built ([a-z0-9]+)$', status)
      if complete:
        built_image = complete.group(1)
        logger.debug('Final image ID is: %s' % built_image)
        continue

    # Get the image count
    if not built_image:
      return

    return built_image

  def push(self, built_image):
    # Login to the registry
    host = re.match(r'([a-z0-9.:]+)/.+/.+$', self._tag_name)
    if not host:
      raise RuntimeError('Invalid tag name: %s' % self._tag_name)

    for protocol in ['https', 'http']:
      registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
      logger.debug('Attempting login to registry: %s' % registry_endpoint)

      try:
        self._cl.login('$token', self._push_token, registry=registry_endpoint)
        break
      except APIError:
        pass # Probably the wrong protocol

    history = json.loads(self._cl.history(built_image))
    num_images = len(history)
    with self._status as status:
      status['total_images'] = num_images

    logger.debug('Pushing to tag name: %s' % self._tag_name)
    resp = self._cl.push(self._tag_name, stream=True)

    for status_str in resp:
      status = json.loads(status_str)
      logger.debug('Status: %s', status_str)
      if u'status' in status:
        status_msg = status[u'status']

        if status_msg == 'Pushing':
          if u'progressDetail' in status and u'id' in status:
            image_id = status[u'id']
            detail = status[u'progressDetail']

            if u'current' in detail and 'total' in detail:
              with self._status as status:
                images = status['image_completion']

                images[image_id] = detail
                status['push_completion'] = \
                  DockerfileBuildContext.__total_completion(images, num_images)

      elif u'errorDetail' in status:
        message = 'Error pushing image.'
        if u'message' in status[u'errorDetail']:
          message = str(status[u'errorDetail'][u'message'])

        raise RuntimeError(message)

  def __cleanup(self):
    # First clean up any containers that might be holding the images
    for running in self._cl.containers(quiet=True):
      logger.debug('Killing container: %s' % running['Id'])
      self._cl.kill(running['Id'])

    # Next, remove all of the containers (which should all now be killed)
    for container in self._cl.containers(all=True, quiet=True):
      logger.debug('Removing container: %s' % container['Id'])
      self._cl.remove_container(container['Id'])

    # Iterate all of the images and remove the ones that the public registry
    # doesn't know about, this should preserve base images.
    images_to_remove = set()
    repos = set()
    for image in self._cl.images():
      images_to_remove.add(image['Id'])
      repos.add(image['Repository'])

    for repo in repos:
      repo_url = 'https://index.docker.io/v1/repositories/%s/images' % repo
      repo_info = requests.get(repo_url)
      if repo_info.status_code / 100 == 2:
        for repo_image in repo_info.json():
          if repo_image['id'] in images_to_remove:
            logger.debug('Image was deemed public: %s' % repo_image['id'])
            images_to_remove.remove(repo_image['id'])

    for to_remove in images_to_remove:
      logger.debug('Removing private image: %s' % to_remove)
      try:
        self._cl.remove_image(to_remove)
      except APIError:
        # Sometimes an upstream image removed this one
        pass

    # Verify that our images were actually removed
    for image in self._cl.images():
      if image['Id'] in images_to_remove:
        raise RuntimeError('Image was not removed: %s' % image['Id'])


class DockerfileBuildWorker(Worker):
  def __init__(self, *vargs, **kwargs):
    super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)

    self._mime_processors = {
      'application/zip': DockerfileBuildWorker.__prepare_zip,
      'text/plain': DockerfileBuildWorker.__prepare_dockerfile,
      'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
    }

  @staticmethod
  def __prepare_zip(request_file):
    build_dir = mkdtemp(prefix='docker-build-')
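For reference, __total_completion above weights the fraction of bytes pushed so far by the fraction of image layers that have reported a size, so layers without progress data yet do not inflate the percentage. A standalone rendering of the same arithmetic with made-up numbers (illustration only, not part of the diff):

# Standalone copy of the __total_completion arithmetic; all byte counts are made up.
def total_completion(statuses, total_images):
  percentage_with_sizes = float(len(statuses.values())) / total_images
  sent_bytes = sum(status['current'] for status in statuses.values())
  total_bytes = sum(status['total'] for status in statuses.values())
  return float(sent_bytes) / total_bytes * percentage_with_sizes

statuses = {
  'layer-a': {'current': 50, 'total': 100},   # half of this layer pushed
  'layer-b': {'current': 100, 'total': 100},  # fully pushed
}
print(total_completion(statuses, total_images=4))  # 0.5 * (150/200) = 0.375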
@@ -89,145 +245,6 @@ class DockerfileBuildWorker(Worker):

    return build_dir

  @staticmethod
  def __total_completion(statuses, total_images):
    percentage_with_sizes = float(len(statuses.values()))/total_images
    sent_bytes = sum([status[u'current'] for status in statuses.values()])
    total_bytes = sum([status[u'total'] for status in statuses.values()])
    return float(sent_bytes)/total_bytes*percentage_with_sizes

  @staticmethod
  def __build_image(build_dir, tag_name, num_steps, result_object, uuid):
    logger.debug('Starting build.')
    docker_cl = Client(timeout=1200)

    with result_object as status:
      status['total_commands'] = num_steps

    build_status = docker_cl.build(path=build_dir, tag=tag_name, stream=True)

    current_step = 0
    built_image = None
    for status in build_status:
      logger.debug('Status: %s', str(status))
      build_logs.append_log_message(uuid, str(status))
      step_increment = re.search(r'Step ([0-9]+) :', status)
      if step_increment:
        current_step = int(step_increment.group(1))
        logger.debug('Step now: %s/%s' % (current_step, num_steps))
        with result_object as status:
          status['current_command'] = current_step
        continue

      complete = re.match(r'Successfully built ([a-z0-9]+)$', status)
      if complete:
        built_image = complete.group(1)
        logger.debug('Final image ID is: %s' % built_image)
        continue

    shutil.rmtree(build_dir)

    # Get the image count
    if not built_image:
      return

    return built_image

  @staticmethod
  def __push_image(built_image, token, tag_name, result_object):
    # Login to the registry
    host = re.match(r'([a-z0-9.:]+)/.+/.+$', tag_name)
    if not host:
      raise RuntimeError('Invalid tag name: %s' % tag_name)

    docker_cl = Client(timeout=1200)

    for protocol in ['https', 'http']:
      registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
      logger.debug('Attempting login to registry: %s' % registry_endpoint)

      try:
        docker_cl.login('$token', token, registry=registry_endpoint)
        break
      except APIError:
        pass # Probably the wrong protocol

    history = json.loads(docker_cl.history(built_image))
    num_images = len(history)
    with result_object as status:
      status['total_images'] = num_images

    logger.debug('Pushing to tag name: %s' % tag_name)
    resp = docker_cl.push(tag_name, stream=True)

    for status_str in resp:
      status = json.loads(status_str)
      logger.debug('Status: %s', status_str)
      if u'status' in status:
        status_msg = status[u'status']

        if status_msg == 'Pushing':
          if u'progressDetail' in status and u'id' in status:
            image_id = status[u'id']
            detail = status[u'progressDetail']

            if u'current' in detail and 'total' in detail:
              with result_object as status:
                images = status['image_completion']

                images[image_id] = detail
                status['push_completion'] = \
                  DockerfileBuildWorker.__total_completion(images, num_images)

      elif u'errorDetail' in status:
        message = 'Error pushing image.'
        if u'message' in status[u'errorDetail']:
          message = str(status[u'errorDetail'][u'message'])

        raise RuntimeError(message)

  @staticmethod
  def __cleanup():
    docker_cl = Client(timeout=1200)

    # First clean up any containers that might be holding the images
    for running in docker_cl.containers(quiet=True):
      docker_cl.kill(running['Id'])

    # Next, remove all of the containers (which should all now be killed)
    for container in docker_cl.containers(all=True, quiet=True):
      docker_cl.remove_container(container['Id'])

    # Iterate all of the images and remove the ones that the public registry
    # doesn't know about, this should preserve base images.
    images_to_remove = set()
    repos = set()
    for image in docker_cl.images():
      images_to_remove.add(image['Id'])
      repos.add(image['Repository'])

    for repo in repos:
      repo_url = 'https://index.docker.io/v1/repositories/%s/images' % repo
      repo_info = requests.get(repo_url)
      if repo_info.status_code / 100 == 2:
        for repo_image in repo_info.json():
          if repo_image['id'] in images_to_remove:
            logger.debug('Image was deemed public: %s' % repo_image['id'])
            images_to_remove.remove(repo_image['id'])

    for to_remove in images_to_remove:
      logger.debug('Removing private image: %s' % to_remove)
      try:
        docker_cl.remove_image(to_remove)
      except APIError:
        # Sometimes an upstream image removed this one
        pass

    # Verify that our images were actually removed
    for image in docker_cl.images():
      if image['Id'] in images_to_remove:
        raise RuntimeError('Image was not removed: %s' % image['Id'])

  def process_queue_item(self, job_details):
    repository_build = model.get_repository_build(job_details['namespace'],
                                                  job_details['repository'],
@@ -237,8 +254,6 @@ class DockerfileBuildWorker(Worker):
    tag_name = repository_build.tag
    access_token = repository_build.access_token.code

    result_object = StatusWrapper(repository_build.uuid)

    start_msg = ('Starting job with resource url: %s tag: %s and token: %s' %
                 (resource_url, tag_name, access_token))
    logger.debug(start_msg)
@@ -256,40 +271,29 @@ class DockerfileBuildWorker(Worker):
      raise RuntimeError('Invalid dockerfile content type: %s' % c_type)

    build_dir = self._mime_processors[c_type](docker_resource)

    dockerfile_path = os.path.join(build_dir, "Dockerfile")
    num_steps = DockerfileBuildWorker.__count_steps(dockerfile_path)

    steps_msg = 'Dockerfile had %s steps' % num_steps
    logger.debug(steps_msg)
    build_logs.append_log_message(repository_build.uuid, steps_msg)

    uuid = repository_build.uuid
    repository_build.phase = 'building'
    repository_build.save()

    try:
      built_image = DockerfileBuildWorker.__build_image(build_dir, tag_name,
                                                        num_steps,
                                                        result_object, uuid)
      with DockerfileBuildContext(build_dir, tag_name, access_token,
                                  repository_build.uuid) as build_ctxt:
        built_image = build_ctxt.build()

        if not built_image:
          repository_build.phase = 'error'
        if not built_image:
          repository_build.phase = 'error'
          repository_build.save()
          build_logs.append_log_message(uuid, 'Unable to build dockerfile.')
          return False

        repository_build.phase = 'pushing'
        repository_build.save()
        build_logs.append_log_message(uuid, 'Unable to build dockerfile.')
        return False

        repository_build.phase = 'pushing'
        repository_build.save()
        build_ctxt.push(built_image)

      DockerfileBuildWorker.__push_image(built_image, access_token, tag_name,
                                         result_object)
      repository_build.phase = 'complete'
      repository_build.save()

      repository_build.phase = 'complete'
      repository_build.save()

      # TODO turn cleanup on before pushing to prod
      # DockerfileBuildWorker.__cleanup()
    except Exception as exc:
      logger.exception('Exception when processing request.')
      repository_build.phase = 'error'