Merge remote-tracking branch 'origin/master' into ncc1701

Conflicts:
	endpoints/web.py
	static/directives/signup-form.html
	static/js/app.js
	static/js/controllers.js
	static/partials/landing.html
	static/partials/view-repo.html
	test/data/test.db
jakedt 2014-04-14 19:37:22 -04:00
commit 0827e0fbac
45 changed files with 1149 additions and 306 deletions


@@ -14,12 +14,14 @@ from zipfile import ZipFile
from functools import partial
from datetime import datetime, timedelta
from threading import Event
from uuid import uuid4
from data.queue import dockerfile_build_queue
from data import model
from workers.worker import Worker
from app import app, userfiles as user_files
from util.safetar import safe_extractall
from util.dockerfileparse import parse_dockerfile, ParsedDockerfile
root_logger = logging.getLogger('')
@@ -33,6 +35,7 @@ logger = logging.getLogger(__name__)
build_logs = app.config['BUILDLOGS']
TIMEOUT_PERIOD_MINUTES = 20
CACHE_EXPIRATION_PERIOD_HOURS = 24
class StatusWrapper(object):
@@ -94,6 +97,9 @@ class StreamingDockerClient(Client):
class DockerfileBuildContext(object):
image_id_to_cache_time = {}
private_repo_tags = set()
def __init__(self, build_context_dir, dockerfile_subdir, repo, tag_names,
push_token, build_uuid, pull_credentials=None):
self._build_dir = build_context_dir
@@ -104,6 +110,7 @@ class DockerfileBuildContext(object):
self._status = StatusWrapper(build_uuid)
self._build_logger = partial(build_logs.append_log_message, build_uuid)
self._pull_credentials = pull_credentials
self._public_repos = set()
# Note: We have two different clients here because we (potentially) login
# with both, but with different credentials that we do not want shared between
@@ -113,29 +120,27 @@ class DockerfileBuildContext(object):
dockerfile_path = os.path.join(self._build_dir, dockerfile_subdir,
'Dockerfile')
self._num_steps = DockerfileBuildContext.__count_steps(dockerfile_path)
# Compute the number of steps
with open(dockerfile_path, 'r') as dockerfileobj:
self._parsed_dockerfile = parse_dockerfile(dockerfileobj.read())
self._num_steps = len(self._parsed_dockerfile.commands)
logger.debug('Will build and push to repo %s with tags named: %s' %
(self._repo, self._tag_names))
def __enter__(self):
self.__cleanup_containers()
self.__evict_expired_images()
self.__cleanup()
return self
def __exit__(self, exc_type, value, traceback):
self.__cleanup_containers()
self.__cleanup()
shutil.rmtree(self._build_dir)
@staticmethod
def __count_steps(dockerfile_path):
with open(dockerfile_path, 'r') as dockerfileobj:
steps = 0
for line in dockerfileobj.readlines():
stripped = line.strip()
if stripped and stripped[0] is not '#':
steps += 1
return steps
@staticmethod
def __total_completion(statuses, total_images):
percentage_with_sizes = float(len(statuses.values()))/total_images
@@ -151,6 +156,11 @@ class DockerfileBuildContext(object):
self._build_cl.login(self._pull_credentials['username'], self._pull_credentials['password'],
registry=self._pull_credentials['registry'], reauth=True)
# Pull the image, in case it was updated since the last build
base_image = self._parsed_dockerfile.get_base_image()
self._build_logger('Pulling base image: %s' % base_image)
self._build_cl.pull(base_image)
# Start the build itself.
logger.debug('Starting build.')
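
The step count now comes from parsing the Dockerfile (len(self._parsed_dockerfile.commands)) instead of counting non-comment lines, and the parser's base image is pulled before the build starts so a stale local copy is never reused. parse_dockerfile is this repo's own util.dockerfileparse helper; the sketch below is only a rough standalone approximation of the two values used here, assuming nothing beyond the standard library, and the function name is illustrative.

import re

def count_steps_and_base_image(dockerfile_text):
    # Count instructions (folding backslash continuations into their first line)
    # and remember the argument of the first FROM as the base image.
    steps = 0
    base_image = None
    continuation = False
    for line in dockerfile_text.splitlines():
        stripped = line.strip()
        if not stripped or stripped.startswith('#'):
            continue
        if not continuation:
            steps += 1
            match = re.match(r'FROM\s+(\S+)', stripped, re.IGNORECASE)
            if match and base_image is None:
                base_image = match.group(1)
        continuation = stripped.endswith('\\')
    return steps, base_image

For a Dockerfile consisting of FROM ubuntu:12.04 followed by two RUN instructions this returns (3, 'ubuntu:12.04').
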
@@ -260,7 +270,39 @@ class DockerfileBuildContext(object):
raise RuntimeError(message)
def __cleanup(self):
def __is_repo_public(self, repo_name):
if repo_name in self._public_repos:
return True
repo_portions = repo_name.split('/')
registry_hostname = 'index.docker.io'
local_repo_name = repo_name
if len(repo_portions) > 2:
registry_hostname = repo_portions[0]
local_repo_name = '/'.join(repo_portions[1:])
repo_url_template = '%s://%s/v1/repositories/%s/images'
protocols = ['https', 'http']
secure_repo_url, repo_url = [repo_url_template % (protocol, registry_hostname, local_repo_name)
for protocol in protocols]
try:
try:
repo_info = requests.get(secure_repo_url)
except requests.exceptions.SSLError:
repo_info = requests.get(repo_url)
except requests.exceptions.ConnectionError:
return False
if repo_info.status_code / 100 == 2:
self._public_repos.add(repo_name)
return True
else:
return False
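
The public-versus-private test above comes down to probing the v1 registry API: a 2xx answer to GET /v1/repositories/<name>/images means the repository is readable anonymously, and an SSL failure falls back to plain HTTP. A condensed standalone sketch of the same probe, with an illustrative function name and the caching into self._public_repos omitted:

import requests

def is_repo_public(repo_name):
    # 'registry.example.com/team/app' -> probe that registry; otherwise use the Docker index.
    portions = repo_name.split('/')
    hostname, local_name = 'index.docker.io', repo_name
    if len(portions) > 2:
        hostname, local_name = portions[0], '/'.join(portions[1:])
    for protocol in ('https', 'http'):
        url = '%s://%s/v1/repositories/%s/images' % (protocol, hostname, local_name)
        try:
            return requests.get(url).status_code // 100 == 2
        except requests.exceptions.SSLError:
            continue  # fall back to plain HTTP, as the worker does
        except requests.exceptions.ConnectionError:
            return False
    return False
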
def __cleanup_containers(self):
# First clean up any containers that might be holding the images
for running in self._build_cl.containers(quiet=True):
logger.debug('Killing container: %s' % running['Id'])
@@ -271,40 +313,62 @@ class DockerfileBuildContext(object):
logger.debug('Removing container: %s' % container['Id'])
self._build_cl.remove_container(container['Id'])
# Iterate all of the images and remove the ones that the public registry
# doesn't know about, this should preserve base images.
images_to_remove = set()
repos = set()
def __evict_expired_images(self):
logger.debug('Cleaning images older than %s hours.', CACHE_EXPIRATION_PERIOD_HOURS)
now = datetime.now()
verify_removed = set()
for image in self._build_cl.images():
images_to_remove.add(image['Id'])
image_id = image[u'Id']
created = datetime.fromtimestamp(image[u'Created'])
for tag in image['RepoTags']:
tag_repo = tag.split(':')[0]
if tag_repo != '<none>':
repos.add(tag_repo)
# If we don't have a cache time, use the created time (e.g. worker reboot)
cache_time = self.image_id_to_cache_time.get(image_id, created)
expiration = cache_time + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS)
for repo in repos:
repo_url = 'https://index.docker.io/v1/repositories/%s/images' % repo
repo_info = requests.get(repo_url)
if repo_info.status_code / 100 == 2:
for repo_image in repo_info.json():
if repo_image['id'] in images_to_remove:
logger.debug('Image was deemed public: %s' % repo_image['id'])
images_to_remove.remove(repo_image['id'])
if expiration < now:
logger.debug('Removing expired image: %s' % image_id)
for to_remove in images_to_remove:
logger.debug('Removing private image: %s' % to_remove)
try:
self._build_cl.remove_image(to_remove)
except APIError:
# Sometimes an upstream image removed this one
pass
for tag in image['RepoTags']:
# We can forget about this particular tag if it was indeed one of our renamed tags
self.private_repo_tags.discard(tag)
verify_removed.add(image_id)
try:
self._build_cl.remove_image(image_id)
except APIError:
# Sometimes an upstream image removed this one
pass
# Verify that our images were actually removed
for image in self._build_cl.images():
if image['Id'] in images_to_remove:
if image['Id'] in verify_removed:
raise RuntimeError('Image was not removed: %s' % image['Id'])
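
Eviction here is purely age-based: the image's recorded cache time, or its Docker 'Created' timestamp when the worker has restarted and lost the in-memory map, plus CACHE_EXPIRATION_PERIOD_HOURS is compared with the current time. The decision in isolation, using the 24-hour constant defined near the top of the file (the helper name is illustrative):

from datetime import datetime, timedelta

CACHE_EXPIRATION_PERIOD_HOURS = 24  # same value as the constant added at the top of the file

def is_image_expired(image_created_ts, cache_time=None, now=None):
    # cache_time is when the worker last saw or renamed the image; fall back to the
    # image's own creation timestamp (seconds since the epoch) after a worker restart.
    now = now or datetime.now()
    effective = cache_time or datetime.fromtimestamp(image_created_ts)
    return effective + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS) < now

Calling is_image_expired(image[u'Created'], cache_time=image_id_to_cache_time.get(image_id)) reproduces the check made inside the loop above.
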
def __cleanup(self):
# Iterate all of the images and rename the ones that aren't public. This should preserve
# base images and also allow the cache to function.
now = datetime.now()
for image in self._build_cl.images():
image_id = image[u'Id']
if image_id not in self.image_id_to_cache_time:
logger.debug('Setting image %s cache time to %s', image_id, now)
self.image_id_to_cache_time[image_id] = now
for tag in image['RepoTags']:
tag_repo = ParsedDockerfile.base_image_from_repo_identifier(tag)
if tag_repo != '<none>':
if tag_repo in self.private_repo_tags:
logger.debug('Repo is private and has already been renamed: %s' % tag_repo)
elif self.__is_repo_public(tag_repo):
logger.debug('Repo was deemed public: %s', tag_repo)
else:
new_name = str(uuid4())
logger.debug('Private repo tag being renamed %s -> %s', tag, new_name)
self._build_cl.tag(image_id, new_name)
self._build_cl.remove_image(tag)
self.private_repo_tags.add(new_name)
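
Renaming keeps private layers usable as build cache without leaving a recognizable repository name behind: the image is re-tagged under a random UUID, the original tag is removed, and the UUID is recorded in private_repo_tags so later cleanup passes skip it. The core of that sequence against a docker-py client, written as an illustrative standalone helper:

from uuid import uuid4

def rename_private_tag(docker_client, image_id, tag):
    # Give the image an anonymous name, drop the recognizable private tag,
    # and return the new name so the caller can track it.
    new_name = str(uuid4())
    docker_client.tag(image_id, new_name)
    docker_client.remove_image(tag)
    return new_name

The caller would then add the returned name to private_repo_tags, as the loop above does.
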
class DockerfileBuildWorker(Worker):
def __init__(self, *vargs, **kwargs):
@@ -317,6 +381,7 @@ class DockerfileBuildWorker(Worker):
'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
'application/x-tar': DockerfileBuildWorker.__prepare_tarball,
'application/gzip': DockerfileBuildWorker.__prepare_tarball,
'application/x-gzip': DockerfileBuildWorker.__prepare_tarball,
}
self._timeout = Event()
@@ -327,7 +392,7 @@ class DockerfileBuildWorker(Worker):
# Save the zip file to temp somewhere
with TemporaryFile() as zip_file:
zip_file.write(request_file.raw)
zip_file.write(request_file.content)
to_extract = ZipFile(zip_file)
to_extract.extractall(build_dir)
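
The final hunk replaces request_file.raw with request_file.content: with requests, .raw is the unread urllib3 stream while .content is the fully buffered body, so writing .content guarantees the whole archive is in the temp file before ZipFile opens it. A minimal sketch of the same pattern, with a placeholder URL parameter:

import requests
from tempfile import TemporaryFile
from zipfile import ZipFile

def download_and_extract_zip(url, build_dir):
    # Buffer the whole response body, then let ZipFile seek around in the temp file.
    response = requests.get(url)
    with TemporaryFile() as zip_file:
        zip_file.write(response.content)
        zip_file.seek(0)
        ZipFile(zip_file).extractall(build_dir)
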