diff --git a/application.py b/application.py index 5cfb29108..bc1f0b653 100644 --- a/application.py +++ b/application.py @@ -1,5 +1,4 @@ import logging -import os from app import app as application from data.model import db as model_db @@ -20,10 +19,6 @@ from endpoints.webhooks import webhooks logger = logging.getLogger(__name__) -if application.config.get('INCLUDE_TEST_ENDPOINTS', False): - logger.debug('Loading test endpoints.') - import endpoints.test - application.register_blueprint(web) application.register_blueprint(index, url_prefix='/v1') application.register_blueprint(tags, url_prefix='/v1') diff --git a/binary_dependencies/builder/linux-headers-3.11.0-17-generic_3.11.0-17.28_amd64.deb b/binary_dependencies/builder/linux-headers-3.11.0-17-generic_3.11.0-17.28_amd64.deb new file mode 100644 index 000000000..b69f98a44 Binary files /dev/null and b/binary_dependencies/builder/linux-headers-3.11.0-17-generic_3.11.0-17.28_amd64.deb differ diff --git a/binary_dependencies/builder/linux-headers-3.11.0-17_3.11.0-17.28_all.deb b/binary_dependencies/builder/linux-headers-3.11.0-17_3.11.0-17.28_all.deb new file mode 100644 index 000000000..c9e530479 Binary files /dev/null and b/binary_dependencies/builder/linux-headers-3.11.0-17_3.11.0-17.28_all.deb differ diff --git a/binary_dependencies/builder/linux-image-3.11.0-17-generic_3.11.0-17.28_amd64.deb b/binary_dependencies/builder/linux-image-3.11.0-17-generic_3.11.0-17.28_amd64.deb new file mode 100644 index 000000000..cf0dce064 Binary files /dev/null and b/binary_dependencies/builder/linux-image-3.11.0-17-generic_3.11.0-17.28_amd64.deb differ diff --git a/binary_dependencies/builder/linux-image-extra-3.11.0-17-generic_3.11.0-17.28_amd64.deb b/binary_dependencies/builder/linux-image-extra-3.11.0-17-generic_3.11.0-17.28_amd64.deb new file mode 100644 index 000000000..bdee3f6af Binary files /dev/null and b/binary_dependencies/builder/linux-image-extra-3.11.0-17-generic_3.11.0-17.28_amd64.deb differ diff --git a/binary_dependencies/builder/lxc-docker-0.8.0-tutum_0.8.0-tutum-20140212002736-afad5c0-dirty_amd64.deb b/binary_dependencies/builder/lxc-docker-0.8.0-tutum_0.8.0-tutum-20140212002736-afad5c0-dirty_amd64.deb new file mode 100644 index 000000000..e8db59fd5 Binary files /dev/null and b/binary_dependencies/builder/lxc-docker-0.8.0-tutum_0.8.0-tutum-20140212002736-afad5c0-dirty_amd64.deb differ diff --git a/binary_dependencies/builder/nsexec_1.22ubuntu1trusty1_amd64.deb b/binary_dependencies/builder/nsexec_1.22ubuntu1trusty1_amd64.deb new file mode 100644 index 000000000..e78b16986 Binary files /dev/null and b/binary_dependencies/builder/nsexec_1.22ubuntu1trusty1_amd64.deb differ diff --git a/buildserver/Dockerfile b/buildserver/Dockerfile deleted file mode 100644 index 967e3e105..000000000 --- a/buildserver/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM lopter/raring-base -MAINTAINER jake@devtable.com - -RUN echo deb http://archive.ubuntu.com/ubuntu precise universe > /etc/apt/sources.list.d/universe.list -RUN apt-get update -qq -RUN apt-get install -qqy iptables ca-certificates lxc python-virtualenv git python-dev xz-utils aufs-tools - -# This will use the latest public release. To use your own, comment it out... -ADD https://get.docker.io/builds/Linux/x86_64/docker-latest /usr/local/bin/docker -# ...then uncomment the following line, and copy your docker binary to current dir. 
-#ADD ./docker /usr/local/bin/docker - -# Install the files -ADD ./startserver /usr/local/bin/startserver -ADD ./buildserver.py ./buildserver.py -ADD ./requirements.txt ./requirements.txt - -RUN chmod +x /usr/local/bin/docker /usr/local/bin/startserver - -RUN virtualenv --distribute venv -RUN venv/bin/pip install -r requirements.txt - -VOLUME /var/lib/docker - -EXPOSE 5002 -CMD startserver \ No newline at end of file diff --git a/buildserver/Readme.md b/buildserver/Readme.md deleted file mode 100644 index 8dc0dff56..000000000 --- a/buildserver/Readme.md +++ /dev/null @@ -1,13 +0,0 @@ -To build: - -``` -sudo docker build -t quay.io/quay/buildserver . -sudo docker push quay.io/quay/buildserver -``` - -To run: - -``` -sudo docker pull quay.io/quay/buildserver -sudo docker run -d -privileged -lxc-conf="lxc.aa_profile=unconfined" quay.io/quay/buildserver -``` \ No newline at end of file diff --git a/buildserver/buildserver.py b/buildserver/buildserver.py deleted file mode 100644 index c661c079f..000000000 --- a/buildserver/buildserver.py +++ /dev/null @@ -1,214 +0,0 @@ -import docker -import logging -import shutil -import os -import re -import requests -import json - -from flask import Flask, jsonify, abort, make_response -from zipfile import ZipFile -from tempfile import TemporaryFile, mkdtemp -from multiprocessing.pool import ThreadPool -from base64 import b64encode - - -BUFFER_SIZE = 8 * 1024 -LOG_FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - ' + \ - '%(funcName)s - %(message)s' - -app = Flask(__name__) -logger = logging.getLogger(__name__) - - -def count_steps(dockerfile_path): - with open(dockerfile_path, 'r') as dockerfileobj: - steps = 0 - for line in dockerfileobj.readlines(): - stripped = line.strip() - if stripped and stripped[0] is not '#': - steps += 1 - return steps - - -def prepare_zip(request_file): - build_dir = mkdtemp(prefix='docker-build-') - - # Save the zip file to temp somewhere - with TemporaryFile() as zip_file: - zip_file.write(request_file.content) - to_extract = ZipFile(zip_file) - to_extract.extractall(build_dir) - - return build_dir - - -def prepare_dockerfile(request_file): - build_dir = mkdtemp(prefix='docker-build-') - dockerfile_path = os.path.join(build_dir, "Dockerfile") - with open(dockerfile_path, 'w') as dockerfile: - dockerfile.write(request_file.content) - - return build_dir - - -def total_completion(statuses, total_images): - percentage_with_sizes = float(len(statuses.values()))/total_images - sent_bytes = sum([status[u'current'] for status in statuses.values()]) - total_bytes = sum([status[u'total'] for status in statuses.values()]) - return float(sent_bytes)/total_bytes*percentage_with_sizes - - -def build_image(build_dir, tag_name, num_steps, result_object): - try: - logger.debug('Starting build.') - docker_cl = docker.Client(timeout=1200) - result_object['status'] = 'building' - build_status = docker_cl.build(path=build_dir, tag=tag_name, stream=True) - - current_step = 0 - built_image = None - for status in build_status: - # logger.debug('Status: %s', str(status)) - step_increment = re.search(r'Step ([0-9]+) :', status) - if step_increment: - current_step = int(step_increment.group(1)) - logger.debug('Step now: %s/%s' % (current_step, num_steps)) - result_object['current_command'] = current_step - continue - - complete = re.match(r'Successfully built ([a-z0-9]+)$', status) - if complete: - built_image = complete.group(1) - logger.debug('Final image ID is: %s' % built_image) - continue - - shutil.rmtree(build_dir) - - # Get the image 
count - if not built_image: - result_object['status'] = 'error' - result_object['message'] = 'Unable to build dockerfile.' - return - - history = json.loads(docker_cl.history(built_image)) - num_images = len(history) - result_object['total_images'] = num_images - - result_object['status'] = 'pushing' - logger.debug('Pushing to tag name: %s' % tag_name) - resp = docker_cl.push(tag_name, stream=True) - - for status_str in resp: - status = json.loads(status_str) - logger.debug('Status: %s', status_str) - if u'status' in status: - status_msg = status[u'status'] - - if status_msg == 'Pushing': - if u'progressDetail' in status and u'id' in status: - image_id = status[u'id'] - detail = status[u'progressDetail'] - - if u'current' in detail and 'total' in detail: - images = result_object['image_completion'] - - images[image_id] = detail - result_object['push_completion'] = total_completion(images, - num_images) - - elif u'errorDetail' in status: - result_object['status'] = 'error' - if u'message' in status[u'errorDetail']: - result_object['message'] = str(status[u'errorDetail'][u'message']) - return - - result_object['status'] = 'complete' - except Exception as e: - logger.exception('Exception when processing request.') - result_object['status'] = 'error' - result_object['message'] = str(e.message) - - -MIME_PROCESSORS = { - 'application/zip': prepare_zip, - 'text/plain': prepare_dockerfile, - 'application/octet-stream': prepare_dockerfile, -} - -# If this format it should also be changed in the api method get_repo_builds -build = { - 'total_commands': None, - 'current_command': None, - 'push_completion': 0.0, - 'status': 'waiting', - 'message': None, - 'image_completion': {}, -} -pool = ThreadPool(1) - - -@app.before_first_request -def start_build(): - resource_url = os.environ['RESOURCE_URL'] - tag_name = os.environ['TAG'] - acccess_token = os.environ['TOKEN'] - - logger.debug('Starting job with resource url: %s tag: %s and token: %s' % - (resource_url, tag_name, acccess_token)) - - # Save the token - host = re.match(r'([a-z0-9.:]+)/.+/.+$', tag_name) - if host: - docker_endpoint = 'http://%s/v1/' % host.group(1) - dockercfg_path = os.path.join(os.environ.get('HOME', '.'), '.dockercfg') - token = b64encode('$token:%s' % acccess_token) - with open(dockercfg_path, 'w') as dockercfg: - payload = { - docker_endpoint: { - 'auth': token, - 'email': '', - } - } - dockercfg.write(json.dumps(payload)) - - else: - raise Exception('Invalid tag name: %s' % tag_name) - - docker_resource = requests.get(resource_url) - c_type = docker_resource.headers['content-type'] - - logger.info('Request to build file of type: %s with tag: %s' % - (c_type, tag_name)) - - if c_type not in MIME_PROCESSORS: - raise Exception('Invalid dockerfile content type: %s' % c_type) - - build_dir = MIME_PROCESSORS[c_type](docker_resource) - - dockerfile_path = os.path.join(build_dir, "Dockerfile") - num_steps = count_steps(dockerfile_path) - logger.debug('Dockerfile had %s steps' % num_steps) - - logger.info('Sending job to builder pool.') - build['total_commands'] = num_steps - - pool.apply_async(build_image, [build_dir, tag_name, num_steps, - build]) - - -@app.route('/build/', methods=['GET']) -def get_status(): - if build: - return jsonify(build) - abort(404) - - -@app.route('/status/', methods=['GET']) -def health_check(): - return make_response('Running') - - -if __name__ == '__main__': - logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) - app.run(host='0.0.0.0', port=5002, threaded=True) diff --git 
a/buildserver/requirements.txt b/buildserver/requirements.txt deleted file mode 100644 index 3093eeb14..000000000 --- a/buildserver/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -mock==1.0.1 -requests==1.2.3 -six==1.3.0 -flask==0.10.1 --e git+git://github.com/DevTable/docker-py.git#egg=docker-py \ No newline at end of file diff --git a/buildserver/startserver b/buildserver/startserver deleted file mode 100644 index 9eb2e1114..000000000 --- a/buildserver/startserver +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# First, make sure that cgroups are mounted correctly. -CGROUP=/sys/fs/cgroup - -[ -d $CGROUP ] || - mkdir $CGROUP - -mountpoint -q $CGROUP || - mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { - echo "Could not make a tmpfs mount. Did you use -privileged?" - exit 1 - } - -# Mount the cgroup hierarchies exactly as they are in the parent system. -for SUBSYS in $(cut -d: -f2 /proc/1/cgroup) -do - [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS - mountpoint -q $CGROUP/$SUBSYS || - mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS -done - -# Note: as I write those lines, the LXC userland tools cannot setup -# a "sub-container" properly if the "devices" cgroup is not in its -# own hierarchy. Let's detect this and issue a warning. -grep -q :devices: /proc/1/cgroup || - echo "WARNING: the 'devices' cgroup should be in its own hierarchy." -grep -qw devices /proc/1/cgroup || - echo "WARNING: it looks like the 'devices' cgroup is not mounted." - -# Now, close extraneous file descriptors. -pushd /proc/self/fd -for FD in * -do - case "$FD" in - # Keep stdin/stdout/stderr - [012]) - ;; - # Nuke everything else - *) - eval exec "$FD>&-" - ;; - esac -done -popd - -docker -d & -exec venv/bin/python buildserver.py \ No newline at end of file diff --git a/config.py b/config.py index bf5afb96d..c078e0312 100644 --- a/config.py +++ b/config.py @@ -1,15 +1,16 @@ import logging -import os import logstash_formatter from peewee import MySQLDatabase, SqliteDatabase from storage.s3 import S3Storage from storage.local import LocalStorage from data.userfiles import UserRequestFiles +from data.buildlogs import BuildLogs from util import analytics from test.teststorage import FakeStorage, FakeUserfiles from test import analytics as fake_analytics +from test.testlogs import TestBuildLogs class FlaskConfig(object): @@ -86,6 +87,15 @@ class S3Userfiles(AWSCredentials): AWSCredentials.REGISTRY_S3_BUCKET) +class RedisBuildLogs(object): + BUILDLOGS = BuildLogs('logs.quay.io') + + +class TestBuildLogs(object): + BUILDLOGS = TestBuildLogs('logs.quay.io', 'devtable', 'building', + 'deadbeef-dead-beef-dead-beefdeadbeef') + + class StripeTestConfig(object): STRIPE_SECRET_KEY = 'sk_test_PEbmJCYrLXPW0VRLSnWUiZ7Y' STRIPE_PUBLISHABLE_KEY = 'pk_test_uEDHANKm9CHCvVa2DLcipGRh' @@ -135,13 +145,13 @@ class BuildNodeConfig(object): BUILD_NODE_PULL_TOKEN = 'F02O2E86CQLKZUQ0O81J8XDHQ6F0N1V36L9JTOEEK6GKKMT1GI8PTJQT4OU88Y6G' -def logs_init_builder(level=logging.DEBUG): +def logs_init_builder(level=logging.DEBUG, + formatter=logstash_formatter.LogstashFormatter()): @staticmethod def init_logs(): handler = logging.StreamHandler() root_logger = logging.getLogger('') root_logger.setLevel(level) - formatter = logstash_formatter.LogstashFormatter() handler.setFormatter(formatter) root_logger.addHandler(handler) @@ -149,26 +159,25 @@ def logs_init_builder(level=logging.DEBUG): class TestConfig(FlaskConfig, FakeStorage, EphemeralDB, FakeUserfiles, - FakeAnalytics, StripeTestConfig): + FakeAnalytics, StripeTestConfig, 
RedisBuildLogs): LOGGING_CONFIG = logs_init_builder(logging.WARN) POPULATE_DB_TEST_DATA = True TESTING = True - INCLUDE_TEST_ENDPOINTS = True class DebugConfig(FlaskConfig, MailConfig, LocalStorage, SQLiteDB, StripeTestConfig, MixpanelTestConfig, GitHubTestConfig, - DigitalOceanConfig, BuildNodeConfig, S3Userfiles): - LOGGING_CONFIG = logs_init_builder() + DigitalOceanConfig, BuildNodeConfig, S3Userfiles, + TestBuildLogs): + LOGGING_CONFIG = logs_init_builder(formatter=logging.Formatter()) SEND_FILE_MAX_AGE_DEFAULT = 0 POPULATE_DB_TEST_DATA = True - INCLUDE_TEST_ENDPOINTS = True class LocalHostedConfig(FlaskConfig, MailConfig, S3Storage, RDSMySQL, StripeLiveConfig, MixpanelTestConfig, GitHubProdConfig, DigitalOceanConfig, - BuildNodeConfig, S3Userfiles): + BuildNodeConfig, S3Userfiles, RedisBuildLogs): LOGGING_CONFIG = logs_init_builder() SEND_FILE_MAX_AGE_DEFAULT = 0 @@ -176,7 +185,7 @@ class LocalHostedConfig(FlaskConfig, MailConfig, S3Storage, RDSMySQL, class ProductionConfig(FlaskProdConfig, MailConfig, S3Storage, RDSMySQL, StripeLiveConfig, MixpanelProdConfig, GitHubProdConfig, DigitalOceanConfig, BuildNodeConfig, - S3Userfiles): + S3Userfiles, RedisBuildLogs): LOGGING_CONFIG = logs_init_builder() SEND_FILE_MAX_AGE_DEFAULT = 0 diff --git a/data/buildlogs.py b/data/buildlogs.py new file mode 100644 index 000000000..bb96ac7dc --- /dev/null +++ b/data/buildlogs.py @@ -0,0 +1,63 @@ +import redis +import json + + +class BuildLogs(object): + ERROR = 'error' + COMMAND = 'command' + PHASE = 'phase' + + def __init__(self, redis_host): + self._redis = redis.StrictRedis(host=redis_host) + + @staticmethod + def _logs_key(build_id): + return 'builds/%s/logs' % build_id + + def append_log_entry(self, build_id, log_obj): + """ + Appends the serialized form of log_obj to the end of the log entry list + and returns the new length of the list. + """ + return self._redis.rpush(self._logs_key(build_id), json.dumps(log_obj)) + + def append_log_message(self, build_id, log_message, log_type=None): + """ + Wraps the message in an envelope and push it to the end of the log entry + list and returns the index at which it was inserted. + """ + log_obj = { + 'message': log_message + } + + if log_type: + log_obj['type'] = log_type + + return self._redis.rpush(self._logs_key(build_id), json.dumps(log_obj)) - 1 + + def get_log_entries(self, build_id, start_index): + """ + Returns a tuple of the current length of the list and an iterable of the + requested log entries. + """ + llen = self._redis.llen(self._logs_key(build_id)) + log_entries = self._redis.lrange(self._logs_key(build_id), start_index, -1) + return (llen, (json.loads(entry) for entry in log_entries)) + + @staticmethod + def _status_key(build_id): + return 'builds/%s/status' % build_id + + def set_status(self, build_id, status_obj): + """ + Sets the status key for this build to json serialized form of the supplied + obj. + """ + self._redis.set(self._status_key(build_id), json.dumps(status_obj)) + + def get_status(self, build_id): + """ + Loads the status information for the specified build id. 
+ """ + fetched = self._redis.get(self._status_key(build_id)) + return json.loads(fetched) if fetched else None diff --git a/data/database.py b/data/database.py index d2319773b..86a797bed 100644 --- a/data/database.py +++ b/data/database.py @@ -1,5 +1,6 @@ import string import logging +import uuid from random import SystemRandom from datetime import datetime @@ -20,6 +21,10 @@ def random_string_generator(length=16): return random_string +def uuid_generator(): + return str(uuid.uuid4()) + + class BaseModel(Model): class Meta: database = db @@ -125,7 +130,7 @@ class RepositoryPermission(BaseModel): class PermissionPrototype(BaseModel): org = ForeignKeyField(User, index=True, related_name='orgpermissionproto') - uuid = CharField() + uuid = CharField(default=uuid_generator) activating_user = ForeignKeyField(User, index=True, null=True, related_name='userpermissionproto') delegate_user = ForeignKeyField(User, related_name='receivingpermission', @@ -204,13 +209,14 @@ class RepositoryTag(BaseModel): class RepositoryBuild(BaseModel): - repository = ForeignKeyField(Repository) + uuid = CharField(default=uuid_generator, index=True) + repository = ForeignKeyField(Repository, index=True) access_token = ForeignKeyField(AccessToken) resource_key = CharField() tag = CharField() - build_node_id = IntegerField(null=True) phase = CharField(default='waiting') - status_url = CharField(null=True) + started = DateTimeField(default=datetime.now) + display_name = CharField() class QueueItem(BaseModel): diff --git a/data/model.py b/data/model.py index c44568dc2..f6dd75965 100644 --- a/data/model.py +++ b/data/model.py @@ -4,9 +4,7 @@ import datetime import dateutil.parser import operator import json -import uuid -from datetime import timedelta from database import * from util.validation import * @@ -731,8 +729,7 @@ def update_prototype_permission(org, uid, role_name): def add_prototype_permission(org, role_name, activating_user, delegate_user=None, delegate_team=None): new_role = Role.get(Role.name == role_name) - uid = str(uuid.uuid4()) - return PermissionPrototype.create(org=org, uuid=uid, role=new_role, + return PermissionPrototype.create(org=org, role=new_role, activating_user=activating_user, delegate_user=delegate_user, delegate_team=delegate_team) @@ -1287,13 +1284,18 @@ def load_token_data(code): raise InvalidTokenException('Invalid delegate token code: %s' % code) -def get_repository_build(request_dbid): - try: - return RepositoryBuild.get(RepositoryBuild.id == request_dbid) - except RepositoryBuild.DoesNotExist: - msg = 'Unable to locate a build by id: %s' % request_dbid +def get_repository_build(namespace_name, repository_name, build_uuid): + joined = RepositoryBuild.select().join(Repository) + fetched = list(joined.where(Repository.name == repository_name, + Repository.namespace == namespace_name, + RepositoryBuild.uuid == build_uuid)) + + if not fetched: + msg = 'Unable to locate a build by id: %s' % build_uuid raise InvalidRepositoryBuildException(msg) + return fetched[0] + def list_repository_builds(namespace_name, repository_name, include_inactive=True): @@ -1307,9 +1309,11 @@ def list_repository_builds(namespace_name, repository_name, return fetched -def create_repository_build(repo, access_token, resource_key, tag): +def create_repository_build(repo, access_token, resource_key, tag, + display_name): return RepositoryBuild.create(repository=repo, access_token=access_token, - resource_key=resource_key, tag=tag) + resource_key=resource_key, tag=tag, + display_name=display_name) def 
create_webhook(repo, params_obj): diff --git a/data/queue.py b/data/queue.py index ef0026e52..cf0acd898 100644 --- a/data/queue.py +++ b/data/queue.py @@ -64,5 +64,5 @@ class WorkQueue(object): image_diff_queue = WorkQueue('imagediff') -dockerfile_build_queue = WorkQueue('dockerfilebuild') +dockerfile_build_queue = WorkQueue('dockerfilebuild2') webhook_queue = WorkQueue('webhook') diff --git a/data/userfiles.py b/data/userfiles.py index 86ddb62a2..c2a8bc63c 100644 --- a/data/userfiles.py +++ b/data/userfiles.py @@ -59,3 +59,9 @@ class UserRequestFiles(object): full_key = os.path.join(self._prefix, file_id) k = Key(self._bucket, full_key) return k.generate_url(expires_in) + + def get_file_checksum(self, file_id): + self._initialize_s3() + full_key = os.path.join(self._prefix, file_id) + k = self._bucket.lookup(full_key) + return k.etag[1:-1][:7] diff --git a/endpoints/api.py b/endpoints/api.py index e1c8710fa..92f0f5472 100644 --- a/endpoints/api.py +++ b/endpoints/api.py @@ -31,6 +31,7 @@ from datetime import datetime, timedelta store = app.config['STORAGE'] user_files = app.config['USERFILES'] +build_logs = app.config['BUILDLOGS'] logger = logging.getLogger(__name__) route_data = None @@ -69,7 +70,7 @@ def get_route_data(): routes = [] for rule in app.url_map.iter_rules(): if rule.endpoint.startswith('api.'): - endpoint_method = globals()[rule.endpoint[4:]] # Remove api. + endpoint_method = app.view_functions[rule.endpoint] is_internal = '__internal_call' in dir(endpoint_method) is_org_api = '__user_call' in dir(endpoint_method) methods = list(rule.methods.difference(['HEAD', 'OPTIONS'])) @@ -1148,40 +1149,69 @@ def get_repo(namespace, repository): abort(403) # Permission denied +def build_status_view(build_obj): + status = build_logs.get_status(build_obj.uuid) + return { + 'id': build_obj.uuid, + 'phase': build_obj.phase, + 'started': build_obj.started, + 'display_name': build_obj.display_name, + 'status': status, + } + + @api.route('/repository//build/', methods=['GET']) @parse_repository_name def get_repo_builds(namespace, repository): permission = ReadRepositoryPermission(namespace, repository) is_public = model.repository_is_public(namespace, repository) if permission.can() or is_public: - def build_view(build_obj): - # TODO(jake): Filter these logs if the current user can only *read* the repo. - if build_obj.status_url: - # Delegate the status to the build node - node_status = requests.get(build_obj.status_url).json() - node_status['id'] = build_obj.id - return node_status - - # If there was no status url, do the best we can - # The format of this block should mirror that of the buildserver. 
- return { - 'id': build_obj.id, - 'total_commands': None, - 'current_command': None, - 'push_completion': 0.0, - 'status': build_obj.phase, - 'message': None, - 'image_completion': {}, - } - builds = model.list_repository_builds(namespace, repository) return jsonify({ - 'builds': [build_view(build) for build in builds] + 'builds': [build_status_view(build) for build in builds] }) abort(403) # Permission denied +@api.route('/repository//build//status', + methods=['GET']) +@parse_repository_name +def get_repo_build_status(namespace, repository, build_uuid): + permission = ReadRepositoryPermission(namespace, repository) + is_public = model.repository_is_public(namespace, repository) + if permission.can() or is_public: + build = model.get_repository_build(namespace, repository, build_uuid) + return jsonify(build_status_view(build)) + + abort(403) # Permission denied + + +@api.route('/repository//build//logs', + methods=['GET']) +@parse_repository_name +def get_repo_build_logs(namespace, repository, build_uuid): + permission = ModifyRepositoryPermission(namespace, repository) + if permission.can(): + response_obj = {} + + build = model.get_repository_build(namespace, repository, build_uuid) + + start = int(request.args.get('start', 0)) + + count, logs = build_logs.get_log_entries(build.uuid, start) + + response_obj.update({ + 'start': start, + 'total': count, + 'logs': [log for log in logs], + }) + + return jsonify(response_obj) + + abort(403) # Permission denied + + @api.route('/repository//build/', methods=['POST']) @api_login_required @parse_repository_name @@ -1193,20 +1223,28 @@ def request_repo_build(namespace, repository): repo = model.get_repository(namespace, repository) token = model.create_access_token(repo, 'write') + display_name = user_files.get_file_checksum(dockerfile_id) + logger.debug('**********Md5: %s' % display_name) host = urlparse.urlparse(request.url).netloc tag = '%s/%s/%s' % (host, repo.namespace, repo.name) build_request = model.create_repository_build(repo, token, dockerfile_id, - tag) - dockerfile_build_queue.put(json.dumps({'build_id': build_request.id})) + tag, display_name) + dockerfile_build_queue.put(json.dumps({ + 'build_uuid': build_request.uuid, + 'namespace': namespace, + 'repository': repository, + })) log_action('build_dockerfile', namespace, {'repo': repository, 'namespace': namespace, 'fileid': dockerfile_id}, repo=repo) - resp = jsonify({ - 'started': True - }) + resp = jsonify(build_status_view(build_request)) + repo_string = '%s/%s' % (namespace, repository) + resp.headers['Location'] = url_for('api.get_repo_build_status', + repository=repo_string, + build_uuid=build_request.uuid) resp.status_code = 201 return resp diff --git a/endpoints/test.py b/endpoints/test.py deleted file mode 100644 index bbd9e286a..000000000 --- a/endpoints/test.py +++ /dev/null @@ -1,61 +0,0 @@ -import math - -from random import SystemRandom -from flask import jsonify -from app import app - - -def generate_image_completion(rand_func): - images = {} - for image_id in range(rand_func.randint(1, 11)): - total = int(math.pow(abs(rand_func.gauss(0, 1000)), 2)) - current = rand_func.randint(0, total) - image_id = 'image_id_%s' % image_id - images[image_id] = { - 'total': total, - 'current': current, - } - return images - - -@app.route('/test/build/status', methods=['GET']) -def generate_random_build_status(): - response = { - 'id': 1, - 'total_commands': None, - 'current_command': None, - 'push_completion': 0.0, - 'status': None, - 'message': None, - 'image_completion': {}, - } - 
- random = SystemRandom() - phases = { - 'waiting': {}, - 'starting': { - 'total_commands': 7, - 'current_command': 0, - }, - 'initializing': {}, - 'error': { - 'message': 'Oops!' - }, - 'complete': {}, - 'building': { - 'total_commands': 7, - 'current_command': random.randint(1, 7), - }, - 'pushing': { - 'total_commands': 7, - 'current_command': 7, - 'push_completion': random.random(), - 'image_completion': generate_image_completion(random), - }, - } - - phase = random.choice(phases.keys()) - response['status'] = phase - response.update(phases[phase]) - - return jsonify(response) diff --git a/initdb.py b/initdb.py index 0cdb5f91a..cb29d5246 100644 --- a/initdb.py +++ b/initdb.py @@ -275,6 +275,13 @@ def populate_database(): 'Empty repository which is building.', False, [], (0, [], None)) + token = model.create_access_token(building, 'write') + tag = 'ci.devtable.com:5000/%s/%s' % (building.namespace, building.name) + build = model.create_repository_build(building, token, '123-45-6789', tag, + 'build-name') + build.uuid = 'deadbeef-dead-beef-dead-beefdeadbeef' + build.save() + org = model.create_organization('buynlarge', 'quay@devtable.com', new_user_1) org.stripe_id = TEST_STRIPE_ID @@ -298,19 +305,11 @@ def populate_database(): model.add_user_to_team(new_user_2, reader_team) model.add_user_to_team(reader, reader_team) - token = model.create_access_token(building, 'write') - tag = 'ci.devtable.com:5000/%s/%s' % (building.namespace, building.name) - build = model.create_repository_build(building, token, '123-45-6789', tag) - - build.build_node_id = 1 - build.phase = 'building' - build.status_url = 'http://localhost:5000/test/build/status' - build.save() - __generate_repository(new_user_1, 'superwide', None, False, [], [(10, [], 'latest2'), (2, [], 'latest3'), - (2, [(1, [], 'latest11'), (2, [], 'latest12')], 'latest4'), + (2, [(1, [], 'latest11'), (2, [], 'latest12')], + 'latest4'), (2, [], 'latest5'), (2, [], 'latest6'), (2, [], 'latest7'), diff --git a/requirements-nover.txt b/requirements-nover.txt index c430edf5a..5b1ef8841 100644 --- a/requirements-nover.txt +++ b/requirements-nover.txt @@ -19,3 +19,7 @@ paramiko python-digitalocean xhtml2pdf logstash_formatter +redis +hiredis +git+https://github.com/dotcloud/docker-py.git +loremipsum \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 7438e6dce..8d9670b67 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,16 +13,20 @@ beautifulsoup4==4.3.2 blinker==1.3 boto==2.24.0 distribute==0.6.34 +git+https://github.com/dotcloud/docker-py.git ecdsa==0.10 gevent==1.0 greenlet==0.4.2 gunicorn==18.0 +hiredis==0.1.2 html5lib==1.0b3 itsdangerous==0.23 lockfile==0.9.1 logstash-formatter==0.5.8 +loremipsum==1.0.2 marisa-trie==0.5.1 mixpanel-py==3.1.1 +mock==1.0.1 paramiko==1.12.1 peewee==2.2.0 py-bcrypt==0.4 @@ -31,9 +35,11 @@ pycrypto==2.6.1 python-daemon==1.6 python-dateutil==2.2 python-digitalocean==0.6 +redis==2.9.1 reportlab==2.7 requests==2.2.1 six==1.5.2 stripe==1.12.0 +websocket-client==0.11.0 wsgiref==0.1.2 xhtml2pdf==0.0.5 diff --git a/static/css/quay.css b/static/css/quay.css index eac3a8c64..6c32680e1 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -507,35 +507,103 @@ i.toggle-icon:hover { color: #428bca; } -.status-boxes .popover { - margin-right: 20px; +.status-box a { + padding: 6px; + color: black; } -.status-boxes .popover-content { - width: 260px; +.status-box a b { + margin-right: 10px; } -.build-statuses { +.build-info { + margin: 4px; + padding: 4px; + margin-left: 6px; + 
margin-right: 6px; + border-bottom: 1px solid #eee; +} + +.build-info.clickable:hover { + background: rgba(66, 139, 202, 0.2); + cursor: pointer; + border-radius: 4px; +} + + +.build-info:last-child { + border-bottom: 0px; +} + +.phase-icon { + border-radius: 50%; + display: inline-block; + width: 12px; + height: 12px; + margin-right: 6px; +} + +.active .build-tab-link .phase-icon { + box-shadow: 0px 0px 10px #FFFFFF, 0px 0px 10px #FFFFFF; +} + +.build-status .phase-icon { + margin-top: 4px; + float: left; +} + +.phase-icon.error { + background-color: red; +} + +.phase-icon.waiting, .phase-icon.starting, .phase-icon.initializing { + background-color: #ddd; +} + +.phase-icon.building { + background-color: #f0ad4e; +} + +.phase-icon.pushing { + background-color: #5cb85c; +} + +.phase-icon.complete { + background-color: #428bca; +} + +.build-status { + display: inline-block; } .build-status-container { padding: 4px; margin-bottom: 10px; border-bottom: 1px solid #eee; - width: 230px; + width: 350px; } .build-status-container .build-message { display: block; white-space: nowrap; - font-size: 12px; + font-size: 14px; + margin-bottom: 10px; + padding-bottom: 10px; + border-bottom: 1px solid #eee; + margin-left: 20px; } .build-status-container .progress { - height: 12px; + height: 10px; margin: 0px; margin-top: 10px; - width: 230px; + margin-left: 20px; + width: 310px; +} + +.build-status-container .timing { + margin-left: 20px; + margin-top: 6px; } .build-status-container:last-child { @@ -1633,6 +1701,185 @@ p.editable:hover i { padding-left: 44px; } + +.repo-build .build-id:before { + content: "Build ID: " +} + +.repo-build .build-id { + float: right; + font-size: 12px; + color: #aaa; + padding: 10px; +} + +.repo-build .build-pane .timing { + float: right; +} + +.repo-build .build-tab-link { + white-space: nowrap; +} + +.repo-build .build-pane .build-header { + padding-top: 10px; + border-bottom: 1px solid #eee; + padding-bottom: 10px; +} + +.repo-build .build-pane .build-progress { + margin-top: 16px; + margin-bottom: 10px; +} + +.repo-build .build-pane .build-progress .progress { + height: 14px; + margin-bottom: 0px; +} + +.repo-build .build-pane .quay-spinner { + margin-top: 4px; + display: inline-block; +} + +.repo-build .build-pane .build-logs { + background: #222; + color: white; + padding: 10px; + overflow: auto; +} + +.repo-build .build-pane .build-logs .container-header { + padding: 2px; +} + +.repo-build .build-pane .build-logs .container-logs { + margin: 4px; + padding-bottom: 4px; +} + +.repo-build .build-pane .build-logs .command-title, +.repo-build .build-pane .build-logs .log-entry .message { + font-family: Consolas, "Lucida Console", Monaco, monospace; + font-size: 13px; +} + +.repo-build .build-pane .build-logs .container-header { + cursor: pointer; + position: relative; +} + +.repo-build .build-pane .build-logs .container-header i.fa.chevron { + color: #666; + margin-right: 4px; + width: 14px; + text-align: center; + + position: absolute; + top: 6px; + left: 0px; +} + +.repo-build .build-pane .build-logs .log-container.command { + margin-left: 42px; +} + +.repo-build .build-pane .build-logs .container-header.building { + margin-bottom: 10px; +} + +.repo-build .build-pane .build-logs .container-header.pushing { + margin-top: 10px; +} + +.repo-build .build-log-error-element { + position: relative; + display: inline-block; + margin: 10px; + padding: 10px; + background: rgba(255, 0, 0, 0.17); + border-radius: 10px; + margin-left: 22px; +} + +.repo-build .build-log-error-element 
i.fa { + color: red; + position: absolute; + top: 13px; + left: 11px; +} + +.repo-build .build-log-error-element .error-message { + display: inline-block; + margin-left: 25px; +} + +.repo-build .build-pane .build-logs .container-header .label { + padding-top: 4px; + text-align: right; + margin-right: 4px; + width: 86px; + display: inline-block; + + border-right: 4px solid #aaa; + background-color: #444; + + position: absolute; + top: 4px; + left: 24px; +} + +.repo-build .build-pane .build-logs .container-header .container-content { + display: block; + padding-left: 20px; +} + +.repo-build .build-pane .build-logs .container-header .container-content.build-log-command { + padding-left: 120px; +} + +.label.FROM { + border-color: #5bc0de !important; +} + +.label.CMD, .label.EXPOSE, .label.ENTRYPOINT { + border-color: #428bca !important; +} + +.label.RUN, .label.ADD { + border-color: #5cb85c !important; +} + +.label.ENV, .label.VOLUME, .label.USER, .label.WORKDIR { + border-color: #f0ad4e !important; +} + +.label.MAINTAINER { + border-color: #aaa !important; +} + +.repo-build .build-pane .build-logs .log-entry { + position: relative; +} + +.repo-build .build-pane .build-logs .log-entry .message { + display: inline-block; + margin-left: 46px; +} + +.repo-build .build-pane .build-logs .log-entry .id { + color: #aaa; + padding-right: 6px; + margin-right: 6px; + text-align: right; + font-size: 12px; + width: 40px; + + position: absolute; + top: 4px; + left: 4px; +} + .repo-admin .right-info { font-size: 11px; margin-top: 10px; @@ -1676,16 +1923,6 @@ p.editable:hover i { cursor: pointer; } -.repo .build-info { - padding: 10px; - margin: 0px; -} - -.repo .build-info .progress { - margin: 0px; - margin-top: 10px; -} - .repo .section { display: block; margin-bottom: 20px; diff --git a/static/directives/build-log-command.html b/static/directives/build-log-command.html new file mode 100644 index 000000000..211667ee4 --- /dev/null +++ b/static/directives/build-log-command.html @@ -0,0 +1,6 @@ + + + + + diff --git a/static/directives/build-log-error.html b/static/directives/build-log-error.html new file mode 100644 index 000000000..095f8edd0 --- /dev/null +++ b/static/directives/build-log-error.html @@ -0,0 +1,4 @@ + + + + diff --git a/static/directives/build-log-phase.html b/static/directives/build-log-phase.html new file mode 100644 index 000000000..503593923 --- /dev/null +++ b/static/directives/build-log-phase.html @@ -0,0 +1,4 @@ + + + + diff --git a/static/directives/build-message.html b/static/directives/build-message.html new file mode 100644 index 000000000..17895dd28 --- /dev/null +++ b/static/directives/build-message.html @@ -0,0 +1 @@ +{{ getBuildMessage(phase) }} diff --git a/static/directives/build-progress.html b/static/directives/build-progress.html new file mode 100644 index 000000000..ac719d449 --- /dev/null +++ b/static/directives/build-progress.html @@ -0,0 +1,6 @@ +
[the six added lines of build-progress.html markup were lost in extraction]
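The fraction rendered here during a push comes from the status object's `push_completion` field. Its arithmetic — `total_completion` in the deleted buildserver.py above — is easy to misread, so here is a standalone sketch with invented numbers (whether the replacement worker computes it identically is not visible in this diff):

```python
def total_completion(statuses, total_images):
    """Overall push completion in [0, 1].

    `statuses` maps image id -> {'current': bytes_sent, 'total': total_bytes}.
    Images that have not reported sizes yet have no entry, so the byte ratio
    is scaled down by the share of images reporting at all.
    """
    percentage_with_sizes = float(len(statuses)) / total_images
    sent_bytes = sum(status['current'] for status in statuses.values())
    total_bytes = sum(status['total'] for status in statuses.values())
    return float(sent_bytes) / total_bytes * percentage_with_sizes


# Two of four images reporting, 150 of their 200 bytes sent:
# 0.75 * 0.5 = 0.375 of the way through the push.
print(total_completion({
    'abc123': {'current': 100, 'total': 100},
    'def456': {'current': 50, 'total': 100},
}, 4))
```

The scaling means an image that has not reported its size yet still drags the percentage down, which keeps the bar from jumping toward 100% early in the push.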
diff --git a/static/directives/build-status.html b/static/directives/build-status.html
index 8c27dba53..cf5ded997 100644
--- a/static/directives/build-status.html
+++ b/static/directives/build-status.html
@@ -1,8 +1,11 @@
- {{ getBuildMessage(build) }}
+ Started:
[the remaining build-status.html markup was lost in extraction]
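The old template computed its message inline from the whole build object; the new markup delegates to the buildMessage and buildProgress directives added in app.js below, keyed purely off phase strings. For reference, the phase vocabulary this diff relies on (collected from the .phase-icon CSS, the directives, and RepositoryBuild.phase's default):

```python
# Phase strings used across this diff: RepositoryBuild.phase defaults to
# 'waiting', the CSS styles one icon per phase, and the buildMessage
# directive maps each to a human-readable message.
BUILD_PHASES = ('waiting', 'starting', 'initializing', 'building',
                'pushing', 'complete', 'error')

# checkPollTimer() in controllers.js keeps polling until one of these:
TERMINAL_PHASES = frozenset(('complete', 'error'))
```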
diff --git a/static/js/app.js b/static/js/app.js index eb075afc1..bd74f5a69 100644 --- a/static/js/app.js +++ b/static/js/app.js @@ -103,7 +103,7 @@ function getMarkedDown(string) { } // Start the application code itself. -quayApp = angular.module('quay', ['ngRoute', 'chieffancypants.loadingBar', 'restangular', 'angularMoment', 'angulartics', /*'angulartics.google.analytics',*/ 'angulartics.mixpanel', '$strap.directives', 'ngCookies', 'ngSanitize', 'angular-md5'], function($provide, cfpLoadingBarProvider) { +quayApp = angular.module('quay', ['ngRoute', 'chieffancypants.loadingBar', 'restangular', 'angularMoment', 'angulartics', /*'angulartics.google.analytics',*/ 'angulartics.mixpanel', '$strap.directives', 'ngCookies', 'ngSanitize', 'angular-md5', 'pasvaz.bindonce'], function($provide, cfpLoadingBarProvider) { cfpLoadingBarProvider.includeSpinner = false; $provide.factory('UtilService', ['$sanitize', function($sanitize) { @@ -151,7 +151,7 @@ quayApp = angular.module('quay', ['ngRoute', 'chieffancypants.loadingBar', 'rest $provide.factory('ApiService', ['Restangular', function(Restangular) { var apiService = {}; - var getResource = function(path) { + var getResource = function(path, opt_background) { var resource = {}; resource.url = path; resource.withOptions = function(options) { @@ -169,6 +169,12 @@ quayApp = angular.module('quay', ['ngRoute', 'chieffancypants.loadingBar', 'rest 'hasError': false }; + if (opt_background) { + performer.withHttpConfig({ + 'ignoreLoadingBar': true + }); + } + performer.get(options).then(function(resp) { result.value = processor(resp); result.loading = false; @@ -240,27 +246,33 @@ quayApp = angular.module('quay', ['ngRoute', 'chieffancypants.loadingBar', 'rest var buildMethodsForEndpoint = function(endpoint) { var method = endpoint.methods[0].toLowerCase(); var methodName = formatMethodName(endpoint['name']); - apiService[methodName] = function(opt_options, opt_parameters) { - return Restangular.one(buildUrl(endpoint['path'], opt_parameters))['custom' + method.toUpperCase()](opt_options); + apiService[methodName] = function(opt_options, opt_parameters, opt_background) { + var one = Restangular.one(buildUrl(endpoint['path'], opt_parameters)); + if (opt_background) { + one.withHttpConfig({ + 'ignoreLoadingBar': true + }); + } + return one['custom' + method.toUpperCase()](opt_options); }; if (method == 'get') { - apiService[methodName + 'AsResource'] = function(opt_parameters) { - return getResource(buildUrl(endpoint['path'], opt_parameters)); + apiService[methodName + 'AsResource'] = function(opt_parameters, opt_background) { + return getResource(buildUrl(endpoint['path'], opt_parameters), opt_background); }; } if (endpoint['user_method']) { - apiService[getGenericMethodName(endpoint['user_method'])] = function(orgname, opt_options, opt_parameters) { + apiService[getGenericMethodName(endpoint['user_method'])] = function(orgname, opt_options, opt_parameters, opt_background) { if (orgname) { if (orgname.name) { orgname = orgname.name; } - var params = jQuery.extend({'orgname' : orgname}, opt_parameters || {}); + var params = jQuery.extend({'orgname' : orgname}, opt_parameters || {}, opt_background); return apiService[methodName](opt_options, params); } else { - return apiService[formatMethodName(endpoint['user_method'])](opt_options, opt_parameters); + return apiService[formatMethodName(endpoint['user_method'])](opt_options, opt_parameters, opt_background); } }; } @@ -779,6 +791,7 @@ quayApp = angular.module('quay', ['ngRoute', 'chieffancypants.loadingBar', 
'rest fixFooter: false}). when('/repository/:namespace/:name/image/:image', {templateUrl: '/static/partials/image-view.html', controller: ImageViewCtrl, reloadOnSearch: false}). when('/repository/:namespace/:name/admin', {templateUrl: '/static/partials/repo-admin.html', controller:RepoAdminCtrl, reloadOnSearch: false}). + when('/repository/:namespace/:name/build', {templateUrl: '/static/partials/repo-build.html', controller:RepoBuildCtrl, reloadOnSearch: false}). when('/repository/', {title: 'Repositories', description: 'Public and private docker repositories list', templateUrl: '/static/partials/repo-list.html', controller: RepoListCtrl}). when('/user/', {title: 'Account Settings', description:'Account settings for Quay.io', templateUrl: '/static/partials/user-admin.html', @@ -2467,6 +2480,119 @@ quayApp.directive('namespaceSelector', function () { }); +quayApp.directive('buildLogPhase', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/build-log-phase.html', + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'phase': '=phase' + }, + controller: function($scope, $element) { + } + }; + return directiveDefinitionObject; +}); + + +quayApp.directive('buildLogError', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/build-log-error.html', + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'error': '=error' + }, + controller: function($scope, $element) { + } + }; + return directiveDefinitionObject; +}); + + +quayApp.directive('buildLogCommand', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/build-log-command.html', + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'command': '=command' + }, + controller: function($scope, $element, $sanitize) { + var registryHandlers = { + 'quay.io': function(pieces) { + var rnamespace = pieces[pieces.length - 2]; + var rname = pieces[pieces.length - 1]; + return '/repository/' + rnamespace + '/' + rname + '/'; + }, + + '': function(pieces) { + var rnamespace = pieces.length == 1 ? '_' : pieces[0]; + var rname = pieces[pieces.length - 1]; + return 'https://index.docker.io/u/' + rnamespace + '/' + rname + '/'; + } + }; + + var kindHandlers = { + 'FROM': function(title) { + var pieces = title.split('/'); + var registry = pieces.length < 3 ? 
'' : pieces[0]; + if (!registryHandlers[registry]) { + return title; + } + + return ' ' + title + ''; + } + }; + + $scope.getCommandKind = function(fullTitle) { + var colon = fullTitle.indexOf(':'); + var title = getTitleWithoutStep(fullTitle); + if (!title) { + return null; + } + + var space = title.indexOf(' '); + return title.substring(0, space); + }; + + $scope.getCommandTitleHtml = function(fullTitle) { + var title = getTitleWithoutStep(fullTitle) || fullTitle; + var space = title.indexOf(' '); + if (space <= 0) { + return $sanitize(title); + } + + var kind = $scope.getCommandKind(fullTitle); + var sanitized = $sanitize(title.substring(space + 1)); + + var handler = kindHandlers[kind || '']; + if (handler) { + return handler(sanitized); + } else { + return sanitized; + } + }; + + var getTitleWithoutStep = function(fullTitle) { + var colon = fullTitle.indexOf(':'); + if (colon <= 0) { + return null; + } + + return $.trim(fullTitle.substring(colon + 1)); + }; + } + }; + return directiveDefinitionObject; +}); + quayApp.directive('buildStatus', function () { var directiveDefinitionObject = { priority: 0, @@ -2478,55 +2604,85 @@ quayApp.directive('buildStatus', function () { 'build': '=build' }, controller: function($scope, $element) { - $scope.getBuildProgress = function(buildInfo) { - switch (buildInfo.status) { - case 'building': - return (buildInfo.current_command / buildInfo.total_commands) * 100; - break; - - case 'pushing': - return buildInfo.push_completion * 100; - break; + } + }; + return directiveDefinitionObject; +}); - case 'complete': - return 100; - break; - case 'initializing': - case 'starting': - case 'waiting': - return 0; - break; - } +quayApp.directive('buildMessage', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/build-message.html', + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'phase': '=phase' + }, + controller: function($scope, $element) { + $scope.getBuildMessage = function (phase) { + switch (phase) { + case 'starting': + case 'initializing': + return 'Starting Dockerfile build'; + + case 'waiting': + return 'Waiting for available build worker'; + + case 'building': + return 'Building image from Dockerfile'; + + case 'pushing': + return 'Pushing image built from Dockerfile'; - return -1; - }; + case 'complete': + return 'Dockerfile build completed and pushed'; + + case 'error': + return 'Dockerfile build failed'; + } + }; + } + }; + return directiveDefinitionObject; +}); - $scope.getBuildMessage = function(buildInfo) { - switch (buildInfo.status) { - case 'initializing': - return 'Starting Dockerfile build'; - break; - case 'starting': - case 'waiting': - case 'building': - return 'Building image from Dockerfile'; - break; +quayApp.directive('buildProgress', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/build-progress.html', + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'build': '=build' + }, + controller: function($scope, $element) { + $scope.getPercentage = function(buildInfo) { + switch (buildInfo.phase) { + case 'building': + return (buildInfo.status.current_command / buildInfo.status.total_commands) * 100; + break; + + case 'pushing': + return buildInfo.status.push_completion * 100; + break; - case 'pushing': - return 'Pushing image built from Dockerfile'; - break; + case 'complete': + return 100; + break; - case 'complete': - return 'Dockerfile build completed and pushed'; - break; - - case 'error': - 
return 'Dockerfile build failed: ' + buildInfo.message; - break; - } - }; + case 'initializing': + case 'starting': + case 'waiting': + return 0; + break; + } + + return -1; + }; } }; return directiveDefinitionObject; @@ -2541,6 +2697,14 @@ quayApp.directive('ngBlur', function() { }; }); +quayApp.directive('ngVisible', function () { + return function (scope, element, attr) { + scope.$watch(attr.ngVisible, function (visible) { + element.css('visibility', visible ? 'visible' : 'hidden'); + }); + }; +}); + quayApp.run(['$location', '$rootScope', 'Restangular', 'UserService', 'PlanService', '$http', '$timeout', function($location, $rootScope, Restangular, UserService, PlanService, $http, $timeout) { diff --git a/static/js/controllers.js b/static/js/controllers.js index e468faba8..fe2e3681d 100644 --- a/static/js/controllers.js +++ b/static/js/controllers.js @@ -185,6 +185,11 @@ function RepoCtrl($scope, $sanitize, Restangular, ImageMetadataService, ApiServi $scope.getFormattedCommand = ImageMetadataService.getFormattedCommand; + $scope.showBuild = function(buildInfo) { + $location.path('/repository/' + namespace + '/' + name + '/build'); + $location.search('current', buildInfo.id); + }; + $scope.getTooltipCommand = function(image) { var sanitized = ImageMetadataService.getEscapedFormattedCommand(image); return '' + sanitized + ''; @@ -512,13 +517,11 @@ function RepoCtrl($scope, $sanitize, Restangular, ImageMetadataService, ApiServi }; var getBuildInfo = function(repo) { - // Note: We use restangular manually here because we need to turn off the loading bar. - var buildInfo = Restangular.one('repository/' + repo.namespace + '/' + repo.name + '/build/'); - buildInfo.withHttpConfig({ - 'ignoreLoadingBar': true - }); + var params = { + 'repository': repo.namespace + '/' + repo.name + }; - buildInfo.get().then(function(resp) { + ApiService.getRepoBuilds(null, params, true).then(function(resp) { var runningBuilds = []; for (var i = 0; i < resp.builds.length; ++i) { var build = resp.builds[i]; @@ -604,6 +607,197 @@ function RepoCtrl($scope, $sanitize, Restangular, ImageMetadataService, ApiServi loadViewInfo(); } +function RepoBuildCtrl($scope, Restangular, ApiService, $routeParams, $rootScope, $location, $interval, $sanitize) { + var namespace = $routeParams.namespace; + var name = $routeParams.name; + var pollTimerHandle = null; + + $scope.$on('$destroy', function() { + stopPollTimer(); + }); + + // Watch for changes to the current parameter. + $scope.$on('$routeUpdate', function(){ + if ($location.search().current) { + $scope.setCurrentBuild($location.search().current, false); + } + }); + + $scope.builds = []; + $scope.polling = false; + + $scope.adjustLogHeight = function() { + $('.build-logs').height($(window).height() - 365); + }; + + $scope.hasLogs = function(container) { + return ((container.logs && container.logs.length) || (container._logs && container._logs.length)); + }; + + $scope.toggleLogs = function(container) { + if (container._logs) { + container.logs = container._logs; + container._logs = null; + } else { + container._logs = container.logs; + container.logs = null; + } + }; + + $scope.setCurrentBuild = function(buildId, opt_updateURL) { + // Find the build. 
+ for (var i = 0; i < $scope.builds.length; ++i) { + if ($scope.builds[i].id == buildId) { + $scope.setCurrentBuildInternal($scope.builds[i], opt_updateURL); + return; + } + } + }; + + $scope.setCurrentBuildInternal = function(build, opt_updateURL) { + if (build == $scope.currentBuild) { return; } + + stopPollTimer(); + + $scope.logEntries = null; + $scope.logStartIndex = null; + $scope.currentParentEntry = null; + + $scope.currentBuild = build; + + if (opt_updateURL) { + if (build) { + $location.search('current', build.id); + } else { + $location.search('current', null); + } + } + + // Timeout needed to ensure the log element has been created + // before its height is adjusted. + setTimeout(function() { + $scope.adjustLogHeight(); + }, 1); + + // Load the first set of logs. + getBuildStatusAndLogs(); + + // If the build is currently processing, start the build timer. + checkPollTimer(); + }; + + var checkPollTimer = function() { + var build = $scope.currentBuild; + if (!build) { + stopPollTimer(); + return; + } + + if (build['phase'] != 'complete' && build['phase'] != 'error') { + startPollTimer(); + return true; + } else { + stopPollTimer(); + return false; + } + }; + + var stopPollTimer = function() { + $interval.cancel(pollTimerHandle); + }; + + var startPollTimer = function() { + stopPollTimer(); + pollTimerHandle = $interval(getBuildStatusAndLogs, 2000); + }; + + var processLogs = function(logs, startIndex) { + if (!$scope.logEntries) { $scope.logEntries = []; } + + for (var i = 0; i < logs.length; ++i) { + var entry = logs[i]; + var type = entry['type'] || 'entry'; + if (type == 'command' || type == 'phase' || type == 'error') { + entry['_logs'] = []; + entry['index'] = startIndex + i; + + $scope.logEntries.push(entry); + $scope.currentParentEntry = entry; + } else if ($scope.currentParentEntry) { + if ($scope.currentParentEntry['logs']) { + $scope.currentParentEntry['logs'].push(entry); + } else { + $scope.currentParentEntry['_logs'].push(entry); + } + } + } + }; + + var getBuildStatusAndLogs = function() { + if (!$scope.currentBuild || $scope.polling) { return; } + + $scope.polling = true; + + var params = { + 'repository': namespace + '/' + name, + 'build_uuid': $scope.currentBuild.id + }; + + ApiService.getRepoBuildStatus(null, params, true).then(function(resp) { + // Note: We use extend here rather than replacing as Angular is depending on the + // root build object to remain the same object. + $.extend(true, $scope.currentBuild, resp); + checkPollTimer(); + + // Load the updated logs for the build. 
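+      // The server replies with {'start', 'total', 'logs'}: 'total' is the
+      // current length of the Redis log list, so feeding it back as the next
+      // request's 'start' fetches only entries appended since this poll
+      // (see get_log_entries in data/buildlogs.py).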
+ var options = { + 'start': $scope.logStartIndex + }; + + ApiService.getRepoBuildLogsAsResource(params, true).withOptions(options).get(function(resp) { + processLogs(resp['logs'], resp['start']); + $scope.logStartIndex = resp['total']; + $scope.polling = false; + }); + }); + }; + + var fetchRepository = function() { + var params = {'repository': namespace + '/' + name}; + $rootScope.title = 'Loading Repository...'; + $scope.repository = ApiService.getRepoAsResource(params).get(function(repo) { + if (!repo.can_write) { + $rootScope.title = 'Unknown builds'; + $scope.accessDenied = true; + return; + } + + $rootScope.title = 'Repository Builds'; + $scope.repo = repo; + + getBuildInfo(); + }); + }; + + var getBuildInfo = function(repo) { + var params = { + 'repository': namespace + '/' + name + }; + + ApiService.getRepoBuilds(null, params).then(function(resp) { + $scope.builds = resp.builds; + + if ($location.search().current) { + $scope.setCurrentBuild($location.search().current, false); + } else if ($scope.builds.length > 0) { + $scope.setCurrentBuild($scope.builds[0].id, true); + } + }); + }; + + fetchRepository(); +} + function RepoAdminCtrl($scope, Restangular, ApiService, $routeParams, $rootScope) { var namespace = $routeParams.namespace; var name = $routeParams.name; @@ -854,8 +1048,13 @@ function RepoAdminCtrl($scope, Restangular, ApiService, $routeParams, $rootScope }; $scope.repository = ApiService.getRepoAsResource(params).get(function(repo) { - $scope.repo = repo; + if (!repo.can_admin) { + $rootScope.title = 'Forbidden'; + $scope.accessDenied = true; + return; + } + $scope.repo = repo; $rootScope.title = 'Settings - ' + namespace + '/' + name; $rootScope.description = 'Administrator settings for ' + namespace + '/' + name + ': Permissions, webhooks and other settings'; diff --git a/static/lib/bindonce.min.js b/static/lib/bindonce.min.js new file mode 100644 index 000000000..2c26c0cf0 --- /dev/null +++ b/static/lib/bindonce.min.js @@ -0,0 +1 @@ +(function(){"use strict";var bindonceModule=angular.module("pasvaz.bindonce",[]);bindonceModule.directive("bindonce",function(){var toBoolean=function(value){if(value&&value.length!==0){var v=angular.lowercase(""+value);value=!(v==="f"||v==="0"||v==="false"||v==="no"||v==="n"||v==="[]")}else{value=false}return value};var msie=parseInt((/msie (\d+)/.exec(angular.lowercase(navigator.userAgent))||[])[1],10);if(isNaN(msie)){msie=parseInt((/trident\/.*; rv:(\d+)/.exec(angular.lowercase(navigator.userAgent))||[])[1],10)}var bindonceDirective={restrict:"AM",controller:["$scope","$element","$attrs","$interpolate",function($scope,$element,$attrs,$interpolate){var showHideBinder=function(elm,attr,value){var show=attr==="show"?"":"none";var hide=attr==="hide"?"":"none";elm.css("display",toBoolean(value)?show:hide)};var classBinder=function(elm,value){if(angular.isObject(value)&&!angular.isArray(value)){var results=[];angular.forEach(value,function(value,index){if(value)results.push(index)});value=results}if(value){elm.addClass(angular.isArray(value)?value.join(" "):value)}};var ctrl={watcherRemover:undefined,binders:[],group:$attrs.boName,element:$element,ran:false,addBinder:function(binder){this.binders.push(binder);if(this.ran){this.runBinders()}},setupWatcher:function(bindonceValue){var 
that=this;this.watcherRemover=$scope.$watch(bindonceValue,function(newValue){if(newValue===undefined)return;that.removeWatcher();that.runBinders()},true)},removeWatcher:function(){if(this.watcherRemover!==undefined){this.watcherRemover();this.watcherRemover=undefined}},runBinders:function(){while(this.binders.length>0){var binder=this.binders.shift();if(this.group&&this.group!=binder.group)continue;var value=binder.scope.$eval(binder.interpolate?$interpolate(binder.value):binder.value);switch(binder.attr){case"boIf":if(toBoolean(value)){binder.transclude(binder.scope.$new(),function(clone){var parent=binder.element.parent();var afterNode=binder.element&&binder.element[binder.element.length-1];var parentNode=parent&&parent[0]||afterNode&&afterNode.parentNode;var afterNextSibling=afterNode&&afterNode.nextSibling||null;angular.forEach(clone,function(node){parentNode.insertBefore(node,afterNextSibling)})})}break;case"boSwitch":var selectedTranscludes,switchCtrl=binder.controller[0];if(selectedTranscludes=switchCtrl.cases["!"+value]||switchCtrl.cases["?"]){binder.scope.$eval(binder.attrs.change);angular.forEach(selectedTranscludes,function(selectedTransclude){selectedTransclude.transclude(binder.scope.$new(),function(clone){var parent=selectedTransclude.element.parent();var afterNode=selectedTransclude.element&&selectedTransclude.element[selectedTransclude.element.length-1];var parentNode=parent&&parent[0]||afterNode&&afterNode.parentNode;var afterNextSibling=afterNode&&afterNode.nextSibling||null;angular.forEach(clone,function(node){parentNode.insertBefore(node,afterNextSibling)})})})}break;case"boSwitchWhen":var ctrl=binder.controller[0];ctrl.cases["!"+binder.attrs.boSwitchWhen]=ctrl.cases["!"+binder.attrs.boSwitchWhen]||[];ctrl.cases["!"+binder.attrs.boSwitchWhen].push({transclude:binder.transclude,element:binder.element});break;case"boSwitchDefault":var ctrl=binder.controller[0];ctrl.cases["?"]=ctrl.cases["?"]||[];ctrl.cases["?"].push({transclude:binder.transclude,element:binder.element});break;case"hide":case"show":showHideBinder(binder.element,binder.attr,value);break;case"class":classBinder(binder.element,value);break;case"text":binder.element.text(value);break;case"html":binder.element.html(value);break;case"style":binder.element.css(value);break;case"src":binder.element.attr(binder.attr,value);if(msie)binder.element.prop("src",value);break;case"attr":angular.forEach(binder.attrs,function(attrValue,attrKey){var newAttr,newValue;if(attrKey.match(/^boAttr./)&&binder.attrs[attrKey]){newAttr=attrKey.replace(/^boAttr/,"").replace(/([a-z])([A-Z])/g,"$1-$2").toLowerCase();newValue=binder.scope.$eval(binder.attrs[attrKey]);binder.element.attr(newAttr,newValue)}});break;case"href":case"alt":case"title":case"id":case"value":binder.element.attr(binder.attr,value);break}}this.ran=true}};return ctrl}],link:function(scope,elm,attrs,bindonceController){var value=attrs.bindonce?scope.$eval(attrs.bindonce):true;if(value!==undefined){bindonceController.runBinders()}else{bindonceController.setupWatcher(attrs.bindonce);elm.bind("$destroy",bindonceController.removeWatcher)}}};return 
bindonceDirective});angular.forEach([{directiveName:"boShow",attribute:"show"},{directiveName:"boHide",attribute:"hide"},{directiveName:"boClass",attribute:"class"},{directiveName:"boText",attribute:"text"},{directiveName:"boHtml",attribute:"html"},{directiveName:"boSrcI",attribute:"src",interpolate:true},{directiveName:"boSrc",attribute:"src"},{directiveName:"boHrefI",attribute:"href",interpolate:true},{directiveName:"boHref",attribute:"href"},{directiveName:"boAlt",attribute:"alt"},{directiveName:"boTitle",attribute:"title"},{directiveName:"boId",attribute:"id"},{directiveName:"boStyle",attribute:"style"},{directiveName:"boValue",attribute:"value"},{directiveName:"boAttr",attribute:"attr"},{directiveName:"boIf",transclude:"element",terminal:true,priority:1e3},{directiveName:"boSwitch",require:"boSwitch",controller:function(){this.cases={}}},{directiveName:"boSwitchWhen",transclude:"element",priority:800,require:"^boSwitch"},{directiveName:"boSwitchDefault",transclude:"element",priority:800,require:"^boSwitch"}],function(boDirective){var childPriority=200;return bindonceModule.directive(boDirective.directiveName,function(){var bindonceDirective={priority:boDirective.priority||childPriority,transclude:boDirective.transclude||false,terminal:boDirective.terminal||false,require:["^bindonce"].concat(boDirective.require||[]),controller:boDirective.controller,compile:function(tElement,tAttrs,transclude){return function(scope,elm,attrs,controllers){var bindonceController=controllers[0];var name=attrs.boParent;if(name&&bindonceController.group!==name){var element=bindonceController.element.parent();bindonceController=undefined;var parentValue;while(element[0].nodeType!==9&&element.length){if((parentValue=element.data("$bindonceController"))&&parentValue.group===name){bindonceController=parentValue;break}element=element.parent()}if(!bindonceController){throw new Error("No bindonce controller: "+name)}}bindonceController.addBinder({element:elm,attr:boDirective.attribute||boDirective.directiveName,attrs:attrs,value:attrs[boDirective.directiveName],interpolate:boDirective.interpolate,group:name,transclude:transclude,controller:controllers.slice(1),scope:scope})}}};return bindonceDirective})})})(); \ No newline at end of file diff --git a/static/partials/repo-admin.html b/static/partials/repo-admin.html index 5c1b90393..e241360cf 100644 --- a/static/partials/repo-admin.html +++ b/static/partials/repo-admin.html @@ -1,5 +1,8 @@
+
+ You do not have permission to view this page +
diff --git a/static/partials/repo-build.html b/static/partials/repo-build.html new file mode 100644 index 000000000..defa1b636 --- /dev/null +++ b/static/partials/repo-build.html @@ -0,0 +1,83 @@ +
+
+ You do not have permission to view this page +
+
+
+ +

+ + +

+
+ +
+ There are no builds for this repository +
+ +
+ + + + +
+
+
+
+
+ + Started: +
+ + +
+
+ +
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+ +
+
+ + +
+
+ + +
+
+
+
+
+ + {{ build.id }} +
+
+
+
+
+
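The build page above is driven by the controller earlier in this diff: each poll sends the index of the first log entry the client has not yet seen, and the response's `total` becomes the next start index. Below is a minimal Python sketch of that contract, matching the `(total, entries)` tuple that `BuildLogs.get_log_entries` returns in `test/testlogs.py` later in this change; the `follow_build_logs` helper and its `print` output are illustrative only, not part of the diff.

```python
import time

def follow_build_logs(build_logs, build_id, poll_interval=2.0):
    # Ask only for entries past start_index; the store answers with
    # (total, entries[start_index:]), so the returned total is the
    # next start index -- the same bookkeeping the Angular controller
    # does with resp['total'] and $scope.logStartIndex.
    start_index = 0
    while True:
        total, entries = build_logs.get_log_entries(build_id, start_index)
        for entry in entries:
            print(entry['message'])
        start_index = total
        time.sleep(poll_interval)
```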
diff --git a/static/partials/view-repo.html b/static/partials/view-repo.html index 969b59275..3b888c141 100644 --- a/static/partials/view-repo.html +++ b/static/partials/view-repo.html @@ -38,13 +38,18 @@
-
- - - Building Images - - {{ buildsInfo ? buildsInfo.length : '-' }} +
+
diff --git a/templates/base.html b/templates/base.html index a880da5e2..795a44988 100644 --- a/templates/base.html +++ b/templates/base.html @@ -51,6 +51,7 @@ + diff --git a/test/data/test.db b/test/data/test.db index ef37436e4..7fd9750eb 100644 Binary files a/test/data/test.db and b/test/data/test.db differ diff --git a/test/testlogs.py b/test/testlogs.py new file mode 100644 index 000000000..76a68b1bf --- /dev/null +++ b/test/testlogs.py @@ -0,0 +1,189 @@ +import logging + +from random import SystemRandom +from loremipsum import get_sentence +from functools import wraps +from copy import deepcopy + +from data.buildlogs import BuildLogs + + +logger = logging.getLogger(__name__) +random = SystemRandom() + + +def maybe_advance_script(is_get_status=False): + def inner_advance(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + advance_units = random.randint(1, 500) + logger.debug('Advancing script %s units', advance_units) + while advance_units > 0 and self.remaining_script: + units = self.remaining_script[0][0] + + if advance_units > units: + advance_units -= units + self.advance_script(is_get_status) + else: + break + + return func(self, *args, **kwargs) + return wrapper + return inner_advance + + +class TestBuildLogs(BuildLogs): + COMMAND_TYPES = ['FROM', 'MAINTAINER', 'RUN', 'CMD', 'EXPOSE', 'ENV', 'ADD', + 'ENTRYPOINT', 'VOLUME', 'USER', 'WORKDIR'] + STATUS_TEMPLATE = { + 'total_commands': None, + 'current_command': None, + 'push_completion': 0.0, + 'image_completion': {}, + } + + def __init__(self, redis_host, namespace, repository, test_build_id): + super(TestBuildLogs, self).__init__(redis_host) + self.namespace = namespace + self.repository = repository + self.test_build_id = test_build_id + self.remaining_script = self._generate_script() + logger.debug('Total script size: %s', len(self.remaining_script)) + self._logs = [] + + self._status = {} + self._last_status = {} + + def advance_script(self, is_get_status): + (_, log, status_wrapper) = self.remaining_script.pop(0) + if log is not None: + self._logs.append(log) + + if status_wrapper is not None: + (phase, status) = status_wrapper + + from data import model + build_obj = model.get_repository_build(self.namespace, self.repository, + self.test_build_id) + build_obj.phase = phase + build_obj.save() + + self._status = status + if not is_get_status: + self._last_status = status + + def _generate_script(self): + script = [] + + # generate the init phase + script.append(self._generate_phase(400, 'initializing')) + script.extend(self._generate_logs(random.randint(1, 3))) + + # move to the building phase + script.append(self._generate_phase(400, 'building')) + total_commands = random.randint(5, 20) + for command_num in range(1, total_commands + 1): + command_weight = random.randint(50, 100) + script.append(self._generate_command(command_num, total_commands, + command_weight)) + + # we want 0 logs some percent of the time + num_logs = max(0, random.randint(-50, 400)) + script.extend(self._generate_logs(num_logs)) + + # move to the pushing phase + script.append(self._generate_phase(400, 'pushing')) + script.extend(self._generate_push_statuses(total_commands)) + + # move to the error or complete phase + if random.randint(0, 1) == 0: + script.append(self._generate_phase(400, 'complete')) + else: + script.append(self._generate_phase(400, 'error')) + script.append((1, {'message': 'Something bad happened! 
Oh noes!', + 'type': self.ERROR}, None)) + + return script + + def _generate_phase(self, start_weight, phase_name): + return (start_weight, {'message': phase_name, 'type': self.PHASE}, + (phase_name, deepcopy(self.STATUS_TEMPLATE))) + + def _generate_command(self, command_num, total_commands, command_weight): + sentence = get_sentence() + command = random.choice(self.COMMAND_TYPES) + if command == 'FROM': + sentence = random.choice(['ubuntu', 'lopter/raring-base', + 'quay.io/devtable/simple', + 'quay.io/buynlarge/orgrepo', + 'stackbrew/ubuntu:precise']) + + msg = { + 'message': 'Step %s: %s %s' % (command_num, command, sentence), + 'type': self.COMMAND, + } + status = deepcopy(self.STATUS_TEMPLATE) + status['total_commands'] = total_commands + status['current_command'] = command_num + return (command_weight, msg, ('building', status)) + + @staticmethod + def _generate_logs(count): + return [(1, {'message': get_sentence()}, None) for _ in range(count)] + + @staticmethod + def _compute_total_completion(statuses, total_images): + percentage_with_sizes = float(len(statuses.values()))/total_images + sent_bytes = sum([status[u'current'] for status in statuses.values()]) + total_bytes = sum([status[u'total'] for status in statuses.values()]) + return float(sent_bytes)/total_bytes*percentage_with_sizes + + @staticmethod + def _generate_push_statuses(total_commands): + push_status_template = deepcopy(TestBuildLogs.STATUS_TEMPLATE) + push_status_template['current_command'] = total_commands + push_status_template['total_commands'] = total_commands + + push_statuses = [] + + one_mb = 1 * 1024 * 1024 + + num_images = random.randint(2, 7) + sizes = [random.randint(one_mb, one_mb * 5) for _ in range(num_images)] + + image_completion = {} + for image_num, image_size in enumerate(sizes): + image_id = 'image_id_%s' % image_num + + image_completion[image_id] = { + 'current': 0, + 'total': image_size, + } + + for i in range(one_mb, image_size, one_mb): + image_completion[image_id]['current'] = i + new_status = deepcopy(push_status_template) + new_status['image_completion'] = deepcopy(image_completion) + + completion = TestBuildLogs._compute_total_completion(image_completion, + num_images) + new_status['push_completion'] = completion + push_statuses.append((250, None, ('pushing', new_status))) + + return push_statuses + + @maybe_advance_script() + def get_log_entries(self, build_id, start_index): + if build_id == self.test_build_id: + return (len(self._logs), self._logs[start_index:]) + else: + return super(TestBuildLogs, self).get_log_entries(build_id, start_index) + + @maybe_advance_script(True) + def get_status(self, build_id): + if build_id == self.test_build_id: + returnable_status = self._last_status + self._last_status = self._status + return returnable_status + else: + return super(TestBuildLogs, self).get_status(build_id)
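The `_compute_total_completion` helper scales byte-level progress by the fraction of images that have reported a size at all, which keeps the overall number monotone as more images join the push. The same arithmetic, restated with hypothetical numbers as a quick check:

```python
one_mb = 1024 * 1024

# Two of four images have reported progress: 3 MB sent of 8 MB known.
statuses = {
    'image_id_0': {u'current': 2 * one_mb, u'total': 5 * one_mb},
    'image_id_1': {u'current': 1 * one_mb, u'total': 3 * one_mb},
}
total_images = 4

percentage_with_sizes = float(len(statuses)) / total_images  # 2/4 = 0.5
sent_bytes = sum(s[u'current'] for s in statuses.values())   # 3 MB
total_bytes = sum(s[u'total'] for s in statuses.values())    # 8 MB

completion = float(sent_bytes) / total_bytes * percentage_with_sizes
assert abs(completion - 0.1875) < 1e-9  # (3/8) * (2/4)
```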
diff --git a/workers/README.md b/workers/README.md new file mode 100644 index 000000000..8e356181f --- /dev/null +++ b/workers/README.md @@ -0,0 +1,39 @@ +To prepare a new build node host: + +``` +sudo apt-get update +sudo apt-get install -y git python-virtualenv python-dev phantomjs libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev libevent-dev gdebi-core +``` + +Check out the code, install the kernel, the custom docker and nsexec, then reboot: + +``` +git clone https://bitbucket.org/yackob03/quay.git +cd quay +sudo gdebi --n binary_dependencies/builder/linux-headers-3.11.0-17_3.11.0-17.28_all.deb +sudo gdebi --n binary_dependencies/builder/linux-headers-3.11.0-17-generic_3.11.0-17.28_amd64.deb +sudo gdebi --n binary_dependencies/builder/linux-image-3.11.0-17-generic_3.11.0-17.28_amd64.deb +sudo gdebi --n binary_dependencies/builder/linux-image-extra-3.11.0-17-generic_3.11.0-17.28_amd64.deb +sudo gdebi --n binary_dependencies/builder/nsexec_1.22ubuntu1trusty1_amd64.deb +sudo gdebi --n binary_dependencies/builder/lxc-docker-0.8.0-tutum_0.8.0-tutum-20140212002736-afad5c0-dirty_amd64.deb +sudo chown -R 100000:100000 /var/lib/docker +sudo shutdown -r now +``` + +Pull some base images if you want (optional): +``` +sudo docker pull ubuntu +sudo docker pull stackbrew/ubuntu +sudo docker pull busybox +sudo docker pull lopter/raring-base +``` + +Start the worker: + +``` +cd quay +virtualenv --distribute venv +source venv/bin/activate +pip install -r requirements.txt +sudo STACK=prod venv/bin/python -m workers.dockerfilebuild -D +``` diff --git a/workers/dockerfilebuild.py b/workers/dockerfilebuild.py index acea05c20..6d50601da 100644 --- a/workers/dockerfilebuild.py +++ b/workers/dockerfilebuild.py @@ -1,20 +1,20 @@ import logging -import json import daemon -import time import argparse -import digitalocean -import requests import os +import requests +import re +import json +import shutil -from apscheduler.scheduler import Scheduler -from multiprocessing.pool import ThreadPool -from base64 import b64encode -from requests.exceptions import ConnectionError +from docker import Client, APIError +from tempfile import TemporaryFile, mkdtemp +from zipfile import ZipFile +from functools import partial from data.queue import dockerfile_build_queue from data import model -from data.database import db as db_connection +from workers.worker import Worker from app import app @@ -26,234 +26,300 @@ formatter = logging.Formatter(FORMAT) logger = logging.getLogger(__name__) -BUILD_SERVER_CMD = ('docker run -d -p 5002:5002 ' + - '-lxc-conf="lxc.aa_profile=unconfined" ' + - '-privileged -e \'RESOURCE_URL=%s\' -e \'TAG=%s\' ' + - '-e \'TOKEN=%s\' quay.io/quay/buildserver') +user_files = app.config['USERFILES'] +build_logs = app.config['BUILDLOGS'] -def retry_command(to_call, args=[], kwargs={}, retries=5, period=5): - try: - return to_call(*args, **kwargs) - except Exception as ex: - if retries: - logger.debug('Retrying command after %ss' % period) - time.sleep(period) - return retry_command(to_call, args, kwargs, retries-1, period) - raise ex - - -def get_status(url): - return retry_command(requests.get, [url]).json()['status'] - - -def babysit_builder(request): - """ Spin up a build node and ask it to build our job. Retryable errors - should return False, while fatal errors should return True. 
- """ - try: - logger.debug('Starting work item: %s' % request) - repository_build = model.get_repository_build(request['build_id']) - logger.debug('Request details: %s' % repository_build) - - # Initialize digital ocean API - do_client_id = app.config['DO_CLIENT_ID'] - do_api_key = app.config['DO_CLIENT_SECRET'] - manager = digitalocean.Manager(client_id=do_client_id, api_key=do_api_key) - - # check if there is already a DO node for this build, if so clean it up - old_id = repository_build.build_node_id - if old_id: - logger.debug('Cleaning up old DO node: %s' % old_id) - old_droplet = digitalocean.Droplet(id=old_id, client_id=do_client_id, - api_key=do_api_key) - retry_command(old_droplet.destroy) - - # Pick the region for the new droplet - allowed_regions = app.config['DO_ALLOWED_REGIONS'] - regions = retry_command(manager.get_all_regions) - available_regions = {region.id for region in regions} - regions = available_regions.intersection(allowed_regions) - if not regions: - logger.error('No droplets in our allowed regtions, available: %s' % - available_regions) - return False - - # start the DO node - name = 'dockerfile-build-%s' % repository_build.id - logger.debug('Starting DO node: %s' % name) - droplet = digitalocean.Droplet(client_id=do_client_id, - api_key=do_api_key, - name=name, - region_id=regions.pop(), - image_id=app.config['DO_DOCKER_IMAGE'], - size_id=66, # 512MB, - backup_active=False) - retry_command(droplet.create, [], - {'ssh_key_ids': [app.config['DO_SSH_KEY_ID']]}) - repository_build.build_node_id = droplet.id - repository_build.phase = 'starting' - repository_build.save() - - logger.debug('Waiting for DO node to be available.') - - startup = retry_command(droplet.get_events)[0] - while not startup.percentage or int(startup.percentage) != 100: - logger.debug('Droplet startup percentage: %s' % startup.percentage) - time.sleep(5) - retry_command(startup.load) - - retry_command(droplet.load) - logger.debug('Droplet started at ip address: %s' % droplet.ip_address) - - # connect to it with ssh - repository_build.phase = 'initializing' - repository_build.save() - - # We wait until here to import paramiko because otherwise it doesn't work - # under the daemon context. 
- import paramiko - ssh_client = paramiko.SSHClient() - ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - logger.debug('Connecting to droplet through ssh at ip: %s' % - droplet.ip_address) - retry_command(ssh_client.connect, [droplet.ip_address, 22, 'root'], - {'look_for_keys': False, 'timeout': 10.0, - 'key_filename': app.config['DO_SSH_PRIVATE_KEY_FILENAME']}) - - # Load the node with the pull token - token = app.config['BUILD_NODE_PULL_TOKEN'] - basicauth = b64encode('%s:%s' % ('$token', token)) - auth_object = { - 'https://quay.io/v1/': { - 'auth': basicauth, - 'email': '', - }, +class StatusWrapper(object): + def __init__(self, build_uuid): + self._uuid = build_uuid + self._status = { + 'total_commands': None, + 'current_command': None, + 'push_completion': 0.0, + 'image_completion': {}, } - create_auth_cmd = 'echo \'%s\' > .dockercfg' % json.dumps(auth_object) - ssh_client.exec_command(create_auth_cmd) + self.__exit__(None, None, None) - # Pull and run the buildserver - pull_cmd = 'docker pull quay.io/quay/buildserver' - _, stdout, _ = ssh_client.exec_command(pull_cmd) - pull_status = stdout.channel.recv_exit_status() + def __enter__(self): + return self._status - if pull_status != 0: - logger.error('Pull command failed for host: %s' % droplet.ip_address) - return False - else: - logger.debug('Pull status was: %s' % pull_status) + def __exit__(self, exc_type, value, traceback): + build_logs.set_status(self._uuid, self._status) - # Remove the credentials we used to pull so crafty users cant steal them - remove_auth_cmd = 'rm .dockercfg' - ssh_client.exec_command(remove_auth_cmd) - # Prepare the signed resource url the build node can fetch the job from - user_files = app.config['USERFILES'] +class DockerfileBuildContext(object): + def __init__(self, build_context_dir, tag_name, push_token, build_uuid): + self._build_dir = build_context_dir + self._tag_name = tag_name + self._push_token = push_token + self._cl = Client(timeout=1200, version='1.7') + self._status = StatusWrapper(build_uuid) + self._build_logger = partial(build_logs.append_log_message, build_uuid) + + dockerfile_path = os.path.join(self._build_dir, "Dockerfile") + self._num_steps = DockerfileBuildContext.__count_steps(dockerfile_path) + + logger.debug('Will build and push to tag named: %s' % self._tag_name) + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + self.__cleanup() + + shutil.rmtree(self._build_dir) + + @staticmethod + def __count_steps(dockerfile_path): + with open(dockerfile_path, 'r') as dockerfileobj: + steps = 0 + for line in dockerfileobj.readlines(): + stripped = line.strip() + if stripped and stripped[0] != '#': + steps += 1 + return steps + + @staticmethod + def __total_completion(statuses, total_images): + percentage_with_sizes = float(len(statuses.values()))/total_images + sent_bytes = sum([status[u'current'] for status in statuses.values()]) + total_bytes = sum([status[u'total'] for status in statuses.values()]) + return float(sent_bytes)/total_bytes*percentage_with_sizes + + def build(self): + logger.debug('Starting build.') + + with self._status as status: + status['total_commands'] = self._num_steps + + logger.debug('Building to tag named: %s' % self._tag_name) + build_status = self._cl.build(path=self._build_dir, tag=self._tag_name, + stream=True) + + current_step = 0 + built_image = None + for status in build_status: + status_str = str(status.encode('utf-8')) + logger.debug('Status: %s', status_str) + step_increment = 
re.search(r'Step ([0-9]+) :', status) + if step_increment: + self._build_logger(status_str, build_logs.COMMAND) + current_step = int(step_increment.group(1)) + logger.debug('Step now: %s/%s' % (current_step, self._num_steps)) + with self._status as status: + status['current_command'] = current_step + continue + else: + self._build_logger(status_str) + + complete = re.match(r'Successfully built ([a-z0-9]+)$', status) + if complete: + built_image = complete.group(1) + logger.debug('Final image ID is: %s' % built_image) + continue + + # Get the image count + if not built_image: + return + + return built_image + + def push(self, built_image): + # Login to the registry + host = re.match(r'([a-z0-9.:]+)/.+/.+$', self._tag_name) + if not host: + raise RuntimeError('Invalid tag name: %s' % self._tag_name) + + for protocol in ['https', 'http']: + registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1)) + logger.debug('Attempting login to registry: %s' % registry_endpoint) + + try: + self._cl.login('$token', self._push_token, registry=registry_endpoint) + break + except APIError: + pass # Probably the wrong protocol + + history = json.loads(self._cl.history(built_image)) + num_images = len(history) + with self._status as status: + status['total_images'] = num_images + + logger.debug('Pushing to tag name: %s' % self._tag_name) + resp = self._cl.push(self._tag_name, stream=True) + + for status_str in resp: + status = json.loads(status_str) + logger.debug('Status: %s', status_str) + if u'status' in status: + status_msg = status[u'status'] + + if status_msg == 'Pushing': + if u'progressDetail' in status and u'id' in status: + image_id = status[u'id'] + detail = status[u'progressDetail'] + + if u'current' in detail and 'total' in detail: + with self._status as status: + images = status['image_completion'] + + images[image_id] = detail + status['push_completion'] = \ + DockerfileBuildContext.__total_completion(images, num_images) + + elif u'errorDetail' in status: + message = 'Error pushing image.' + if u'message' in status[u'errorDetail']: + message = str(status[u'errorDetail'][u'message']) + + raise RuntimeError(message) + + def __cleanup(self): + # First clean up any containers that might be holding the images + for running in self._cl.containers(quiet=True): + logger.debug('Killing container: %s' % running['Id']) + self._cl.kill(running['Id']) + + # Next, remove all of the containers (which should all now be killed) + for container in self._cl.containers(all=True, quiet=True): + logger.debug('Removing container: %s' % container['Id']) + self._cl.remove_container(container['Id']) + + # Iterate all of the images and remove the ones that the public registry + # doesn't know about, this should preserve base images. 
+ images_to_remove = set() + repos = set() + for image in self._cl.images(): + images_to_remove.add(image['Id']) + + for tag in image['RepoTags']: + tag_repo = tag.split(':')[0] + if tag_repo != '': + repos.add(tag_repo) + + for repo in repos: + repo_url = 'https://index.docker.io/v1/repositories/%s/images' % repo + repo_info = requests.get(repo_url) + if repo_info.status_code / 100 == 2: + for repo_image in repo_info.json(): + if repo_image['id'] in images_to_remove: + logger.debug('Image was deemed public: %s' % repo_image['id']) + images_to_remove.remove(repo_image['id']) + + for to_remove in images_to_remove: + logger.debug('Removing private image: %s' % to_remove) + try: + self._cl.remove_image(to_remove) + except APIError: + # Sometimes an upstream image removed this one + pass + + # Verify that our images were actually removed + for image in self._cl.images(): + if image['Id'] in images_to_remove: + raise RuntimeError('Image was not removed: %s' % image['Id']) + + +class DockerfileBuildWorker(Worker): + def __init__(self, *vargs, **kwargs): + super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs) + + self._mime_processors = { + 'application/zip': DockerfileBuildWorker.__prepare_zip, + 'text/plain': DockerfileBuildWorker.__prepare_dockerfile, + 'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile, + } + + @staticmethod + def __prepare_zip(request_file): + build_dir = mkdtemp(prefix='docker-build-') + + # Save the zip file to temp somewhere + with TemporaryFile() as zip_file: + zip_file.write(request_file.content) + to_extract = ZipFile(zip_file) + to_extract.extractall(build_dir) + + return build_dir + + @staticmethod + def __prepare_dockerfile(request_file): + build_dir = mkdtemp(prefix='docker-build-') + dockerfile_path = os.path.join(build_dir, "Dockerfile") + with open(dockerfile_path, 'w') as dockerfile: + dockerfile.write(request_file.content) + + return build_dir + + def process_queue_item(self, job_details): + repository_build = model.get_repository_build(job_details['namespace'], + job_details['repository'], + job_details['build_uuid']) + resource_url = user_files.get_file_url(repository_build.resource_key) + tag_name = repository_build.tag + access_token = repository_build.access_token.code - # Start the build server - start_cmd = BUILD_SERVER_CMD % (resource_url, repository_build.tag, - repository_build.access_token.code) - logger.debug('Sending build server request with command: %s' % start_cmd) - ssh_client.exec_command(start_cmd) + log_appender = partial(build_logs.append_log_message, + repository_build.uuid) - status_endpoint = 'http://%s:5002/build/' % droplet.ip_address - # wait for the server to be ready - logger.debug('Waiting for buildserver to be ready') - retry_command(requests.get, [status_endpoint]) + log_appender('initializing', build_logs.PHASE) - # wait for the job to be complete + start_msg = ('Starting job with resource url: %s tag: %s' % (resource_url, + tag_name)) + logger.debug(start_msg) + log_appender(start_msg) + + docker_resource = requests.get(resource_url) + c_type = docker_resource.headers['content-type'] + + filetype_msg = ('Request to build file of type: %s with tag: %s' % + (c_type, tag_name)) + logger.info(filetype_msg) + log_appender(filetype_msg) + + if c_type not in self._mime_processors: + raise RuntimeError('Invalid dockerfile content type: %s' % c_type) + + build_dir = self._mime_processors[c_type](docker_resource) + log_appender('building', build_logs.PHASE) repository_build.phase = 'building' - 
repository_build.status_url = status_endpoint repository_build.save() - logger.debug('Waiting for job to be complete') - status = get_status(status_endpoint) - while status != 'error' and status != 'complete': - logger.debug('Job status is: %s' % status) - time.sleep(5) - status = get_status(status_endpoint) + with DockerfileBuildContext(build_dir, tag_name, access_token, + repository_build.uuid) as build_ctxt: + try: + built_image = build_ctxt.build() - logger.debug('Job complete with status: %s' % status) - if status == 'error': - error_message = requests.get(status_endpoint).json()['message'] - logger.warning('Job error: %s' % error_message) - repository_build.phase = 'error' - else: - repository_build.phase = 'complete' + if not built_image: + log_appender('error', build_logs.PHASE) + repository_build.phase = 'error' + repository_build.save() + log_appender('Unable to build dockerfile.', build_logs.ERROR) + return False - # clean up the DO node - logger.debug('Cleaning up DO node.') - retry_command(droplet.destroy) + log_appender('pushing', build_logs.PHASE) + repository_build.phase = 'pushing' + repository_build.save() - repository_build.status_url = None - repository_build.build_node_id = None - repository_build.save() + build_ctxt.push(built_image) + + log_appender('complete', build_logs.PHASE) + repository_build.phase = 'complete' + repository_build.save() + + except Exception as exc: + log_appender('error', build_logs.PHASE) + logger.exception('Exception when processing request.') + repository_build.phase = 'error' + repository_build.save() + log_appender(str(exc), build_logs.ERROR) + return False return True - except Exception as outer_ex: - # We don't really know what these are, but they are probably retryable - logger.exception('Exception processing job: %s' % outer_ex.message) - return False - - finally: - if not db_connection.is_closed(): - logger.debug('Closing thread db connection.') - db_connection.close() - - -def process_work_items(pool): - logger.debug('Getting work item from queue.') - - item = dockerfile_build_queue.get(processing_time=60*60) # allow 1 hr - - while item: - logger.debug('Queue gave us some work: %s' % item.body) - - request = json.loads(item.body) - - def build_callback(item): - local_item = item - def complete_callback(completed): - if completed: - logger.debug('Queue item completed successfully, will be removed.') - dockerfile_build_queue.complete(local_item) - else: - # We have a retryable error, add the job back to the queue - logger.debug('Queue item incomplete, will be retryed.') - dockerfile_build_queue.incomplete(local_item) - - return complete_callback - - logger.debug('Sending work item to thread pool: %s' % pool) - pool.apply_async(babysit_builder, [request], - callback=build_callback(item)) - - item = dockerfile_build_queue.get() - - logger.debug('No more work.') - - if not db_connection.is_closed(): - logger.debug('Closing thread db connection.') - db_connection.close() - - -def start_worker(): - pool = ThreadPool(3) - logger.debug('Scheduling worker.') - - sched = Scheduler() - sched.start() - - sched.add_interval_job(process_work_items, args=[pool], seconds=30) - - while True: - time.sleep(60 * 60 * 24) # sleep one day, basically forever - desc = 'Worker daemon to monitor dockerfile build' parser = argparse.ArgumentParser(description=desc) @@ -264,16 +330,17 @@ parser.add_argument('--log', default='dockerfilebuild.log', args = parser.parse_args() +worker = DockerfileBuildWorker(dockerfile_build_queue) + if args.D: handler = 
logging.FileHandler(args.log) handler.setFormatter(formatter) root_logger.addHandler(handler) - with daemon.DaemonContext(files_preserve=[handler.stream], - working_directory=os.getcwd()): - start_worker() + with daemon.DaemonContext(files_preserve=[handler.stream]): + worker.start() else: handler = logging.StreamHandler() handler.setFormatter(formatter) root_logger.addHandler(handler) - start_worker() \ No newline at end of file + worker.start() \ No newline at end of file
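One design point in the worker above is worth spelling out: `StatusWrapper` persists the entire status dict every time a `with` block exits, so a concurrent `get_status` poll never observes a half-updated snapshot, and the constructor's immediate `__exit__` call publishes an initial empty status as soon as the build starts. A stripped-down sketch of the pattern; `StatusReporter` and its `store` argument are illustrative stand-ins for the Redis-backed `build_logs` object, not the exact class from the diff:

```python
class StatusReporter(object):
    """Context manager that writes a full status snapshot on every exit."""

    def __init__(self, store, build_uuid):
        self._store = store
        self._uuid = build_uuid
        self._status = {
            'total_commands': None,
            'current_command': None,
            'push_completion': 0.0,
            'image_completion': {},
        }
        self.__exit__(None, None, None)  # Publish the initial empty status.

    def __enter__(self):
        return self._status

    def __exit__(self, exc_type, value, traceback):
        # Whole-dict write: readers polling for status always see a
        # complete, internally consistent snapshot.
        self._store.set_status(self._uuid, self._status)


# Usage mirrors the worker: every mutation happens inside a `with` block,
# so every update is flushed when the block ends.
# with StatusReporter(build_logs, build_uuid) as status:
#     status['current_command'] = 3
```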