From 75d41ca3711018b3ded1a4f529b15d4f31f39d55 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Mon, 23 Feb 2015 12:36:14 -0500 Subject: [PATCH 01/52] Switch to an inner inner subquery to make mysql arbitrarily happy. --- data/model/legacy.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/data/model/legacy.py b/data/model/legacy.py index 331bf2720..5e5705a19 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -1579,13 +1579,19 @@ def list_repository_tags(namespace_name, repository_name, include_hidden=False): def _garbage_collect_tags(namespace_name, repository_name): + inner = (RepositoryTag + .select(RepositoryTag.id, + RepositoryTag.lifetime_end_ts, + RepositoryTag.repository) + .alias('rt')) to_delete = (RepositoryTag - .select(RepositoryTag.id) - .join(Repository) + .select(inner.c.id) + .from_(inner) + .join(Repository, on=(inner.c.repository_id == Repository.id)) .join(Namespace, on=(Repository.namespace_user == Namespace.id)) .where(Repository.name == repository_name, Namespace.username == namespace_name, - ~(RepositoryTag.lifetime_end_ts >> None), - (RepositoryTag.lifetime_end_ts + Namespace.removed_tag_expiration_s) <= + ~(inner.c.lifetime_end_ts >> None), + (inner.c.lifetime_end_ts + Namespace.removed_tag_expiration_s) <= int(time.time()))) (RepositoryTag From 5f605b7cc8c520daa98c1c6b52140d2aa3581e58 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 23 Feb 2015 13:38:01 -0500 Subject: [PATCH 02/52] Fix queue handling to remove the dependency from repobuild, and have a cancel method --- data/database.py | 2 +- ...1eda_change_build_queue_reference_from_.py | 34 +++++++++++++++++ data/model/legacy.py | 18 +++------ data/queue.py | 37 +++++++++++++++++-- endpoints/api/build.py | 7 ++-- endpoints/common.py | 8 ++-- test/test_api_usage.py | 13 +++++++ 7 files changed, 95 insertions(+), 24 deletions(-) create mode 100644 data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py diff --git a/data/database.py b/data/database.py index 162057530..a1d139ff6 100644 --- a/data/database.py +++ b/data/database.py @@ -512,7 +512,7 @@ class RepositoryBuild(BaseModel): trigger = ForeignKeyField(RepositoryBuildTrigger, null=True, index=True) pull_robot = QuayUserField(null=True, related_name='buildpullrobot') logs_archived = BooleanField(default=False) - queue_item = ForeignKeyField(QueueItem, null=True, index=True) + queue_id = CharField(null=True, index=True) class LogEntryKind(BaseModel): diff --git a/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py b/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py new file mode 100644 index 000000000..9b2110df7 --- /dev/null +++ b/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py @@ -0,0 +1,34 @@ +"""Change build queue reference from foreign key to an id. + +Revision ID: 707d5191eda +Revises: 4ef04c61fcf9 +Create Date: 2015-02-23 12:36:33.814528 + +""" + +# revision identifiers, used by Alembic. +revision = '707d5191eda' +down_revision = '4ef04c61fcf9' + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import mysql + +def upgrade(tables): + ### commands auto generated by Alembic - please adjust! 
### + op.add_column('repositorybuild', sa.Column('queue_id', sa.String(length=255), nullable=True)) + op.create_index('repositorybuild_queue_id', 'repositorybuild', ['queue_id'], unique=False) + op.drop_constraint(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', type_='foreignkey') + op.drop_index('repositorybuild_queue_item_id', table_name='repositorybuild') + op.drop_column('repositorybuild', 'queue_item_id') + ### end Alembic commands ### + + +def downgrade(tables): + ### commands auto generated by Alembic - please adjust! ### + op.add_column('repositorybuild', sa.Column('queue_item_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True)) + op.create_foreign_key(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', 'queueitem', ['queue_item_id'], ['id']) + op.create_index('repositorybuild_queue_item_id', 'repositorybuild', ['queue_item_id'], unique=False) + op.drop_index('repositorybuild_queue_id', table_name='repositorybuild') + op.drop_column('repositorybuild', 'queue_id') + ### end Alembic commands ### diff --git a/data/model/legacy.py b/data/model/legacy.py index 331bf2720..d9b693630 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -2496,7 +2496,7 @@ def confirm_team_invite(code, user): found.delete_instance() return (team, inviter) -def cancel_repository_build(build): +def cancel_repository_build(build, work_queue): with config.app_config['DB_TRANSACTION_FACTORY'](db): # Reload the build for update. try: @@ -2504,22 +2504,14 @@ def cancel_repository_build(build): except RepositoryBuild.DoesNotExist: return False - if build.phase != BUILD_PHASE.WAITING or not build.queue_item: + if build.phase != BUILD_PHASE.WAITING or not build.queue_id: return False - # Load the build queue item for update. - try: - queue_item = db_for_update(QueueItem.select() - .where(QueueItem.id == build.queue_item.id)).get() - except QueueItem.DoesNotExist: + # Try to cancel the queue item. + if not work_queue.cancel(build.queue_id): return False - # Check the queue item. - if not queue_item.available or queue_item.retries_remaining == 0: - return False - - # Delete the queue item and build. - queue_item.delete_instance(recursive=True) + # Delete the build row. build.delete_instance() return True diff --git a/data/queue.py b/data/queue.py index c1fb871ad..60632f5b1 100644 --- a/data/queue.py +++ b/data/queue.py @@ -82,10 +82,19 @@ class WorkQueue(object): self._reporter(self._currently_processing, running_count, running_count + available_not_running_count) + def has_retries_remaining(self, item_id): + """ Returns whether the queue item with the given id has any retries remaining. If the + queue item does not exist, returns False. """ + with self._transaction_factory(db): + try: + return QueueItem.get(id=item_id).retries_remaining > 0 + except QueueItem.DoesNotExist: + return False + def put(self, canonical_name_list, message, available_after=0, retries_remaining=5): """ Put an item, if it shouldn't be processed for some number of seconds, - specify that amount as available_after. + specify that amount as available_after. Returns the ID of the queue item added. 
""" params = { @@ -98,7 +107,7 @@ class WorkQueue(object): params['available_after'] = available_date with self._transaction_factory(db): - return QueueItem.create(**params) + return str(QueueItem.create(**params).id) def get(self, processing_time=300): """ @@ -141,10 +150,32 @@ class WorkQueue(object): # Return a view of the queue item rather than an active db object return item + def cancel(self, item_id): + """ Attempts to cancel the queue item with the given ID from the queue. Returns true on success + and false if the queue item could not be canceled. A queue item can only be canceled if + if is available and has retries remaining. + """ + + with self._transaction_factory(db): + # Load the build queue item for update. + try: + queue_item = db_for_update(QueueItem.select() + .where(QueueItem.id == item_id)).get() + except QueueItem.DoesNotExist: + return False + + # Check the queue item. + if not queue_item.available or queue_item.retries_remaining == 0: + return False + + # Delete the queue item. + queue_item.delete_instance(recursive=True) + return True + def complete(self, completed_item): with self._transaction_factory(db): completed_item_obj = self._item_by_id_for_update(completed_item.id) - completed_item_obj.delete_instance() + completed_item_obj.delete_instance(recursive=True) self._currently_processing = False def incomplete(self, incomplete_item, retry_after=300, restore_retry=False): diff --git a/endpoints/api/build.py b/endpoints/api/build.py index 476c9ef72..69e23efae 100644 --- a/endpoints/api/build.py +++ b/endpoints/api/build.py @@ -5,7 +5,7 @@ import datetime from flask import request, redirect -from app import app, userfiles as user_files, build_logs, log_archive +from app import app, userfiles as user_files, build_logs, log_archive, dockerfile_build_queue from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource, require_repo_read, require_repo_write, validate_json_request, ApiResource, internal_only, format_date, api, Unauthorized, NotFound, @@ -79,7 +79,8 @@ def build_status_view(build_obj, can_write=False): # If the phase is internal error, return 'error' instead of the number if retries # on the queue item is 0. 
if phase == database.BUILD_PHASE.INTERNAL_ERROR: - if build_obj.queue_item is None or build_obj.queue_item.retries_remaining == 0: + retry = build_obj.queue_id and dockerfile_build_queue.has_retries_remaining(build_obj.queue_id) + if not retry: phase = database.BUILD_PHASE.ERROR logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config) @@ -226,7 +227,7 @@ class RepositoryBuildResource(RepositoryParamResource): if build.repository.name != repository or build.repository.namespace_user.username != namespace: raise NotFound() - if model.cancel_repository_build(build): + if model.cancel_repository_build(build, dockerfile_build_queue): return 'Okay', 201 else: raise InvalidRequest('Build is currently running or has finished') diff --git a/endpoints/common.py b/endpoints/common.py index 50c6239c8..9bebbd0c2 100644 --- a/endpoints/common.py +++ b/endpoints/common.py @@ -237,11 +237,11 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual, 'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None }) - queue_item = dockerfile_build_queue.put([repository.namespace_user.username, repository.name], - json_data, - retries_remaining=3) + queue_id = dockerfile_build_queue.put([repository.namespace_user.username, repository.name], + json_data, + retries_remaining=3) - build_request.queue_item = queue_item + build_request.queue_id = queue_id build_request.save() # Add the build to the repo's log. diff --git a/test/test_api_usage.py b/test/test_api_usage.py index c0cdf767f..791c2139f 100644 --- a/test/test_api_usage.py +++ b/test/test_api_usage.py @@ -1331,6 +1331,13 @@ class TestRepositoryBuildResource(ApiTestCase): self.assertEquals(1, len(json['builds'])) self.assertEquals(uuid, json['builds'][0]['id']) + # Find the build's queue item. + build_ref = database.RepositoryBuild.get(uuid=uuid) + queue_item = database.QueueItem.get(id=build_ref.queue_id) + + self.assertTrue(queue_item.available) + self.assertTrue(queue_item.retries_remaining > 0) + # Cancel the build. self.deleteResponse(RepositoryBuildResource, params=dict(repository=ADMIN_ACCESS_USER + '/simple', build_uuid=uuid), @@ -1342,6 +1349,12 @@ class TestRepositoryBuildResource(ApiTestCase): self.assertEquals(0, len(json['builds'])) + # Check for the build's queue item. + try: + database.QueueItem.get(id=build_ref.queue_id) + self.fail('QueueItem still exists for build') + except database.QueueItem.DoesNotExist: + pass def test_attemptcancel_scheduledbuild(self): self.login(ADMIN_ACCESS_USER) From 10e2eabb1ce287d89a8e4bebde0ddd187067582a Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 23 Feb 2015 13:47:21 -0500 Subject: [PATCH 03/52] Fix test --- test/test_api_usage.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_api_usage.py b/test/test_api_usage.py index 791c2139f..071630bd9 100644 --- a/test/test_api_usage.py +++ b/test/test_api_usage.py @@ -1375,7 +1375,8 @@ class TestRepositoryBuildResource(ApiTestCase): self.assertEquals(uuid, json['builds'][0]['id']) # Set queue item to be picked up. 
- qi = database.QueueItem.get(id=1) + build_ref = database.RepositoryBuild.get(uuid=uuid) + qi = database.QueueItem.get(id=build_ref.queue_id) qi.available = False qi.save() From 4020cc11023a97e567430fdc2bbea6d2f0ade5f5 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 23 Feb 2015 13:56:42 -0500 Subject: [PATCH 04/52] Reset the number of invalid login attempts when the user changes their password --- data/model/legacy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/data/model/legacy.py b/data/model/legacy.py index 0ac383bed..311d566ad 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -905,6 +905,7 @@ def change_password(user, new_password): raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE) pw_hash = hash_password(new_password) + user.invalid_login_attempts = 0 user.password_hash = pw_hash user.save() From 34ceb274c3a1ac3c43460c9fc7b2fb88d72e8a35 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 23 Feb 2015 14:31:24 -0500 Subject: [PATCH 05/52] requirements: switch to vendored python-etcd --- requirements-nover.txt | 4 ++-- requirements.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements-nover.txt b/requirements-nover.txt index b81936ec7..5eb570581 100644 --- a/requirements-nover.txt +++ b/requirements-nover.txt @@ -41,10 +41,10 @@ git+https://github.com/DevTable/anunidecode.git git+https://github.com/DevTable/avatar-generator.git git+https://github.com/DevTable/pygithub.git git+https://github.com/DevTable/container-cloud-config.git -git+https://github.com/jplana/python-etcd.git +git+https://github.com/DevTable/python-etcd.git gipc pyOpenSSL pygpgme cachetools mock -psutil \ No newline at end of file +psutil diff --git a/requirements.txt b/requirements.txt index ee41fcc56..5b5f061a2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -66,5 +66,5 @@ git+https://github.com/DevTable/anunidecode.git git+https://github.com/DevTable/avatar-generator.git git+https://github.com/DevTable/pygithub.git git+https://github.com/DevTable/container-cloud-config.git +git+https://github.com/DevTable/python-etcd.git git+https://github.com/NateFerrero/oauth2lib.git -git+https://github.com/jplana/python-etcd.git From 246ff556b91b83d479c2fe40e4fc33a2d8a122b2 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Mon, 23 Feb 2015 15:06:24 -0500 Subject: [PATCH 06/52] Fix some other list reifications to use nested subqueries for performance and query size safety reasons.
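
For illustration, a minimal peewee-style sketch of the pattern this patch applies. The Widget model below is hypothetical and not part of Quay; it only mirrors the aliased-subquery rewrite made in _garbage_collect_storage. Instead of reifying a subquery into a Python list and inlining the ids into the DELETE, the subquery is aliased and re-selected, so the ids never leave the database and MySQL sees a derived table rather than the delete target itself (its error 1093 restriction, the same issue the earlier inner subquery patch worked around).

    # Hypothetical model, for illustration only.
    from peewee import SqliteDatabase, Model, CharField, BooleanField

    db = SqliteDatabase(':memory:')

    class Widget(Model):
      name = CharField()
      orphaned = BooleanField(default=False)

      class Meta:
        database = db

    db.connect()
    db.create_tables([Widget])

    # List reification: pulls every id into Python and builds an arbitrarily
    # large literal `WHERE id IN (1, 2, 3, ...)` clause.
    orphan_ids = [w.id for w in Widget.select(Widget.id).where(Widget.orphaned == True)]
    if orphan_ids:
      Widget.delete().where(Widget.id << orphan_ids).execute()

    # Nested subquery: alias the SELECT and re-select from it, so the database
    # evaluates a derived table instead of receiving a huge id list.
    orphans = Widget.select(Widget.id).where(Widget.orphaned == True).alias('orphans')
    inner = Widget.select(orphans.c.id).from_(orphans)
    Widget.delete().where(Widget.id << inner).execute()

The diff below applies the same rewrite to both the placement deletion and the orphaned ImageStorage deletion.
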
--- data/model/legacy.py | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/data/model/legacy.py b/data/model/legacy.py index 311d566ad..94f799992 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -1668,6 +1668,7 @@ def _garbage_collect_storage(storage_id_whitelist): logger.debug('Garbage collecting derived storage from candidates: %s', storage_id_whitelist) with config.app_config['DB_TRANSACTION_FACTORY'](db): # Find out which derived storages will be removed, and add them to the whitelist + # The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence orphaned_from_candidates = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id), storage_id_whitelist, (ImageStorage.id,))) @@ -1707,22 +1708,32 @@ def _garbage_collect_storage(storage_id_whitelist): paths_to_remove = placements_query_to_paths_set(placements_to_remove.clone()) # Remove the placements for orphaned storages - placements_subquery = list(placements_to_remove.clone().select(ImageStoragePlacement.id)) - if len(placements_subquery) > 0: - (ImageStoragePlacement - .delete() - .where(ImageStoragePlacement.id << list(placements_subquery)) - .execute()) + placements_subquery = (placements_to_remove + .clone() + .select(ImageStoragePlacement.id) + .alias('ps')) + inner = (ImageStoragePlacement + .select(placements_subquery.c.id) + .from_(placements_subquery)) + placements_removed = (ImageStoragePlacement + .delete() + .where(ImageStoragePlacement.id << inner) + .execute()) + logger.debug('Removed %s image storage placements', placements_removed) - # Remove the all orphaned storages - orphaned_storages = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id), - storage_id_whitelist, - (ImageStorage.id,))) - if len(orphaned_storages) > 0: - (ImageStorage - .delete() - .where(ImageStorage.id << orphaned_storages) - .execute()) + # Remove all orphaned storages + # The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence + orphaned_storages = orphaned_storage_query(ImageStorage.select(ImageStorage.id), + storage_id_whitelist, + (ImageStorage.id,)).alias('osq') + orphaned_storage_inner = (ImageStorage + .select(orphaned_storages.c.id) + .from_(orphaned_storages)) + storages_removed = (ImageStorage + .delete() + .where(ImageStorage.id << orphaned_storage_inner) + .execute()) + logger.debug('Removed %s image storage records', storages_removed) # We are going to make the conscious decision to not delete image storage blobs inside # transactions. From 450b112f2caebe2d6c3c8237b652e9f65f7a6f92 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Mon, 23 Feb 2015 15:07:02 -0500 Subject: [PATCH 07/52] Propagate the grant user context to the signed grant to fix image sharing. 
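
In rough terms, the round trip after this change, shown as a standalone sketch. It uses itsdangerous directly, whereas the real code signs through Flask's SecureCookieSessionInterface serializer as in the diff, and the grant contents, repository and usernames here are made up.

    from itsdangerous import URLSafeSerializer

    ser = URLSafeSerializer('some-secret-key')  # stand-in for the app's real secret

    def generate_signed_token(grants, user_context):
      # The initiating username (or robot name) now rides inside the signed payload.
      return ser.dumps({'grants': grants, 'user_context': user_context})

    def process_signed_grant(signature):
      data = ser.loads(signature)
      # Restored onto the request context, so put_image_json can fall back to it
      # when get_authenticated_user() is empty for a grant-authenticated push.
      return data['grants'], data['user_context']

    sig = generate_signed_token([{'repository': 'someorg/somerepo', 'role': 'write'}], 'someuser')
    grants, user_context = process_signed_grant(sig)
    assert user_context == 'someuser'

Before this, a push authorized only by the signed grant had no username to attribute new images to, which is what broke image linking across shared repositories.
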
--- auth/auth.py | 6 ++++-- auth/auth_context.py | 9 +++++++++ endpoints/index.py | 3 ++- endpoints/registry.py | 7 ++++--- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/auth/auth.py b/auth/auth.py index 61c6b9a2c..b8d9065e6 100644 --- a/auth/auth.py +++ b/auth/auth.py @@ -15,7 +15,7 @@ from data import model from data.model import oauth from app import app, authentication from permissions import QuayDeferredPermissionUser -from auth_context import (set_authenticated_user, set_validated_token, +from auth_context import (set_authenticated_user, set_validated_token, set_grant_user_context, set_authenticated_user_deferred, set_validated_oauth_token) from util.http import abort @@ -131,10 +131,11 @@ def _process_basic_auth(auth): logger.debug('Basic auth present but could not be validated.') -def generate_signed_token(grants): +def generate_signed_token(grants, user_context): ser = SecureCookieSessionInterface().get_signing_serializer(app) data_to_sign = { 'grants': grants, + 'user_context': user_context, } encrypted = ser.dumps(data_to_sign) @@ -164,6 +165,7 @@ def _process_signed_grant(auth): logger.debug('Successfully validated signed grant with data: %s', token_data) loaded_identity = Identity(None, 'signed_grant') + set_grant_user_context(token_data['user_context']) loaded_identity.provides.update(token_data['grants']) identity_changed.send(app, identity=loaded_identity) diff --git a/auth/auth_context.py b/auth/auth_context.py index cfc6c7b5d..d4ae381be 100644 --- a/auth/auth_context.py +++ b/auth/auth_context.py @@ -30,6 +30,15 @@ def set_authenticated_user(user_or_robot): ctx.authenticated_user = user_or_robot +def get_grant_user_context(): + return getattr(_request_ctx_stack.top, 'grant_user_context', None) + + +def set_grant_user_context(username_or_robotname): + ctx = _request_ctx_stack.top + ctx.grant_user_context = username_or_robotname + + def set_authenticated_user_deferred(user_or_robot_db_uuid): logger.debug('Deferring loading of authenticated user object with uuid: %s', user_or_robot_db_uuid) ctx = _request_ctx_stack.top diff --git a/endpoints/index.py b/endpoints/index.py index a20c492d6..ca4b73362 100644 --- a/endpoints/index.py +++ b/endpoints/index.py @@ -60,7 +60,8 @@ def generate_headers(scope=GrantType.READ_REPOSITORY): if permission.can(): # Generate a signed grant which expires here - signature = generate_signed_token(grants) + user_context = get_authenticated_user() and get_authenticated_user().username + signature = generate_signed_token(grants, user_context) response.headers['WWW-Authenticate'] = signature response.headers['X-Docker-Token'] = signature else: diff --git a/endpoints/registry.py b/endpoints/registry.py index 07a33c4d9..73610910e 100644 --- a/endpoints/registry.py +++ b/endpoints/registry.py @@ -9,7 +9,7 @@ from time import time from app import storage as store, image_diff_queue, app from auth.auth import process_auth, extract_namespace_repo_from_session -from auth.auth_context import get_authenticated_user +from auth.auth_context import get_authenticated_user, get_grant_user_context from util import checksums, changes from util.http import abort, exact_abort from auth.permissions import (ReadRepositoryPermission, @@ -463,8 +463,9 @@ def put_image_json(namespace, repository, image_id): repo_image = model.get_repo_image_extended(namespace, repository, image_id) if not repo_image: - logger.debug('Image not found, creating image') - username = get_authenticated_user() and get_authenticated_user().username + username = 
(get_authenticated_user() and get_authenticated_user().username or + get_grant_user_context()) + logger.debug('Image not found, creating image with initiating user context: %s', username) repo_image = model.find_create_or_link_image(image_id, repo, username, {}, store.preferred_locations[0]) From a0833b79786bfb690ddce067472cbd41b7c110e9 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Mon, 23 Feb 2015 16:02:22 -0500 Subject: [PATCH 08/52] Fix the worker timeout for synchronous verbs workers. --- conf/gunicorn_verbs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/conf/gunicorn_verbs.py b/conf/gunicorn_verbs.py index f329a8cbe..cbb5e6d6a 100644 --- a/conf/gunicorn_verbs.py +++ b/conf/gunicorn_verbs.py @@ -3,3 +3,4 @@ workers = 4 logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True +timeout = 2000 # Because sync workers From 7554c47a30c7678a8b0fad9b02a6d55440fd6a9e Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 23 Feb 2015 20:53:21 -0500 Subject: [PATCH 09/52] nginx: burst=5 for API calls This means that requests are delayed until the client reaches the burst rate and then they will receive the 429. --- conf/server-base.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/server-base.conf b/conf/server-base.conf index bdb6b1a33..9c6fdcc32 100644 --- a/conf/server-base.conf +++ b/conf/server-base.conf @@ -37,7 +37,7 @@ location /v1/repositories/ { proxy_read_timeout 2000; proxy_temp_path /var/log/nginx/proxy_temp 1 2; - limit_req zone=repositories; + limit_req zone=repositories burst=5; } location /v1/ { @@ -59,7 +59,7 @@ location /c1/ { proxy_pass http://verbs_app_server; proxy_temp_path /var/log/nginx/proxy_temp 1 2; - limit_req zone=api; + limit_req zone=api burst=5; } location /static/ { From c58c19db8ab6e61053124c83394df6d1531d6e03 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 23 Feb 2015 22:02:38 -0500 Subject: [PATCH 10/52] Add support for the deprecated token method. We need this as a live migration strategy and we can remove it about an hour after we deploy the new version to prod. --- auth/auth.py | 51 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 3 deletions(-) diff --git a/auth/auth.py b/auth/auth.py index b8d9065e6..d80915f90 100644 --- a/auth/auth.py +++ b/auth/auth.py @@ -22,9 +22,8 @@ from util.http import abort logger = logging.getLogger(__name__) - -SIGNATURE_PREFIX = 'signature=' - +DEPRECATED_SIGNATURE_PREFIX = 'signature' +SIGNATURE_PREFIX = 'sigv2=' def _load_user_from_cookie(): if not current_user.is_anonymous(): @@ -196,6 +195,9 @@ def process_auth(func): if auth: logger.debug('Validating auth header: %s' % auth) _process_signed_grant(auth) + + # TODO(jschorr): Remove this once the new version is in prod for a day. + _process_token_deprecated(auth) _process_basic_auth(auth) else: logger.debug('No auth header.') @@ -223,3 +225,46 @@ def extract_namespace_repo_from_session(func): return func(session['namespace'], session['repository'], *args, **kwargs) return wrapper + + +def _process_token_deprecated(auth): + normalized = [part.strip() for part in auth.split(' ') if part] + if normalized[0].lower() != 'token' or len(normalized) != 2: + logger.debug('Not an auth token: %s' % auth) + return + + # Skip new tokens. 
+ if SIGNATURE_PREFIX in normalized[1]: + return + + token_details = normalized[1].split(',') + + if len(token_details) != 1: + logger.warning('Invalid token format: %s' % auth) + abort(401, message='Invalid token format: %(auth)s', issue='invalid-auth-token', auth=auth) + + def safe_get(lst, index, default_value): + try: + return lst[index] + except IndexError: + return default_value + + token_vals = {val[0]: safe_get(val, 1, '') for val in + (detail.split('=') for detail in token_details)} + + if DEPRECATED_SIGNATURE_PREFIX not in token_vals: + logger.warning('Token does not contain signature: %s' % auth) + abort(401, message='Token does not contain a valid signature: %(auth)s', + issue='invalid-auth-token', auth=auth) + + try: + token_data = model.load_token_data(token_vals[DEPRECATED_SIGNATURE_PREFIX]) + except model.InvalidTokenException: + logger.warning('Token could not be validated: %s', token_vals[DEPRECATED_SIGNATURE_PREFIX]) + abort(401, message='Token could not be validated: %(auth)s', issue='invalid-auth-token', + auth=auth) + + logger.debug('Successfully validated token: %s', token_data.code) + set_validated_token(token_data) + + identity_changed.send(app, identity=Identity(token_data.code, 'token')) \ No newline at end of file From 6601e98770f780649daaf882879723d808b7f31d Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Tue, 24 Feb 2015 12:09:14 -0500 Subject: [PATCH 11/52] Temporarily switch back to the old type of access tokens. --- endpoints/index.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/endpoints/index.py b/endpoints/index.py index ca4b73362..cf9169064 100644 --- a/endpoints/index.py +++ b/endpoints/index.py @@ -54,16 +54,30 @@ def generate_headers(scope=GrantType.READ_REPOSITORY): if scope == GrantType.READ_REPOSITORY: permission = ReadRepositoryPermission(namespace, repository) grants.append(repository_read_grant(namespace, repository)) + + # TODO remove me when we we switch to signed grants + role = 'read' elif scope == GrantType.WRITE_REPOSITORY: permission = ModifyRepositoryPermission(namespace, repository) grants.append(repository_write_grant(namespace, repository)) + # TODO remove me when we we switch to signed grants + role = 'write' + if permission.can(): # Generate a signed grant which expires here - user_context = get_authenticated_user() and get_authenticated_user().username - signature = generate_signed_token(grants, user_context) - response.headers['WWW-Authenticate'] = signature - response.headers['X-Docker-Token'] = signature + # user_context = get_authenticated_user() and get_authenticated_user().username + # signature = generate_signed_token(grants, user_context) + # response.headers['WWW-Authenticate'] = signature + # response.headers['X-Docker-Token'] = signature + + # TODO remove me when we switch to signed grants + repo = model.get_repository(namespace, repository) + if repo: + token = model.create_access_token(repo, role, 'pushpull-token') + token_str = 'signature=%s' % token.code + response.headers['WWW-Authenticate'] = token_str + response.headers['X-Docker-Token'] = token_str else: logger.warning('Registry request with invalid credentials on repository: %s/%s', namespace, repository) From 204f58d95bd6435a30be613d544cc195dc2d820d Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Tue, 24 Feb 2015 13:22:19 -0500 Subject: [PATCH 12/52] Switch temporary token to grants now that the production stack supports both. 
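
For context, a rough sketch of how the two header formats can be told apart while both are in circulation. The prefixes come from the surrounding patches; the parsing below is illustrative and not the server's actual code path.

    def classify_docker_token(header_value):
      # 'Token signature=<access token code>'  -> legacy access token
      # 'Token sigv2=<signed grant blob>'      -> signed grant
      parts = [part.strip() for part in header_value.split(' ') if part]
      if len(parts) != 2 or parts[0].lower() != 'token':
        return 'not-a-token'
      return 'signed-grant' if parts[1].startswith('sigv2=') else 'legacy-access-token'

    assert classify_docker_token('Token sigv2=abc.def.ghi') == 'signed-grant'
    assert classify_docker_token('Token signature=sometokencode') == 'legacy-access-token'
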
--- endpoints/index.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/endpoints/index.py b/endpoints/index.py index cf9169064..ca4b73362 100644 --- a/endpoints/index.py +++ b/endpoints/index.py @@ -54,30 +54,16 @@ def generate_headers(scope=GrantType.READ_REPOSITORY): if scope == GrantType.READ_REPOSITORY: permission = ReadRepositoryPermission(namespace, repository) grants.append(repository_read_grant(namespace, repository)) - - # TODO remove me when we we switch to signed grants - role = 'read' elif scope == GrantType.WRITE_REPOSITORY: permission = ModifyRepositoryPermission(namespace, repository) grants.append(repository_write_grant(namespace, repository)) - # TODO remove me when we we switch to signed grants - role = 'write' - if permission.can(): # Generate a signed grant which expires here - # user_context = get_authenticated_user() and get_authenticated_user().username - # signature = generate_signed_token(grants, user_context) - # response.headers['WWW-Authenticate'] = signature - # response.headers['X-Docker-Token'] = signature - - # TODO remove me when we switch to signed grants - repo = model.get_repository(namespace, repository) - if repo: - token = model.create_access_token(repo, role, 'pushpull-token') - token_str = 'signature=%s' % token.code - response.headers['WWW-Authenticate'] = token_str - response.headers['X-Docker-Token'] = token_str + user_context = get_authenticated_user() and get_authenticated_user().username + signature = generate_signed_token(grants, user_context) + response.headers['WWW-Authenticate'] = signature + response.headers['X-Docker-Token'] = signature else: logger.warning('Registry request with invalid credentials on repository: %s/%s', namespace, repository) From 13e362a1dfe53128a2d95f6653f89e24151a9a97 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 24 Feb 2015 13:37:02 -0500 Subject: [PATCH 13/52] JS NPE fix --- static/js/graphing.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/static/js/graphing.js b/static/js/graphing.js index 18bb09496..60e7d1b33 100644 --- a/static/js/graphing.js +++ b/static/js/graphing.js @@ -521,6 +521,11 @@ ImageHistoryTree.prototype.pruneUnreferenced_ = function(node) { } node.children = surviving_children; } + + if (!node.tags) { + return node.children.length == 0; + } + return (node.children.length == 0 && node.tags.length == 0); }; From 68e1495e5439c9b25a375e471d79f4f63d420673 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Tue, 24 Feb 2015 14:31:19 -0500 Subject: [PATCH 14/52] Remove support for the old style push temporary tokens. --- auth/auth.py | 47 ----------------------------------------------- 1 file changed, 47 deletions(-) diff --git a/auth/auth.py b/auth/auth.py index d80915f90..79e07e3be 100644 --- a/auth/auth.py +++ b/auth/auth.py @@ -22,7 +22,6 @@ from util.http import abort logger = logging.getLogger(__name__) -DEPRECATED_SIGNATURE_PREFIX = 'signature' SIGNATURE_PREFIX = 'sigv2=' def _load_user_from_cookie(): @@ -195,9 +194,6 @@ def process_auth(func): if auth: logger.debug('Validating auth header: %s' % auth) _process_signed_grant(auth) - - # TODO(jschorr): Remove this once the new version is in prod for a day. 
- _process_token_deprecated(auth) _process_basic_auth(auth) else: logger.debug('No auth header.') @@ -225,46 +221,3 @@ def extract_namespace_repo_from_session(func): return func(session['namespace'], session['repository'], *args, **kwargs) return wrapper - - -def _process_token_deprecated(auth): - normalized = [part.strip() for part in auth.split(' ') if part] - if normalized[0].lower() != 'token' or len(normalized) != 2: - logger.debug('Not an auth token: %s' % auth) - return - - # Skip new tokens. - if SIGNATURE_PREFIX in normalized[1]: - return - - token_details = normalized[1].split(',') - - if len(token_details) != 1: - logger.warning('Invalid token format: %s' % auth) - abort(401, message='Invalid token format: %(auth)s', issue='invalid-auth-token', auth=auth) - - def safe_get(lst, index, default_value): - try: - return lst[index] - except IndexError: - return default_value - - token_vals = {val[0]: safe_get(val, 1, '') for val in - (detail.split('=') for detail in token_details)} - - if DEPRECATED_SIGNATURE_PREFIX not in token_vals: - logger.warning('Token does not contain signature: %s' % auth) - abort(401, message='Token does not contain a valid signature: %(auth)s', - issue='invalid-auth-token', auth=auth) - - try: - token_data = model.load_token_data(token_vals[DEPRECATED_SIGNATURE_PREFIX]) - except model.InvalidTokenException: - logger.warning('Token could not be validated: %s', token_vals[DEPRECATED_SIGNATURE_PREFIX]) - abort(401, message='Token could not be validated: %(auth)s', issue='invalid-auth-token', - auth=auth) - - logger.debug('Successfully validated token: %s', token_data.code) - set_validated_token(token_data) - - identity_changed.send(app, identity=Identity(token_data.code, 'token')) \ No newline at end of file From a7ddf46c2a1d3051096a9f2d10c17de62c99afce Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 24 Feb 2015 15:00:40 -0500 Subject: [PATCH 15/52] Fix default test DB --- test/data/test.db | Bin 729088 -> 729088 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/test/data/test.db b/test/data/test.db index 4da80c978b4df80b01a42e5de32d324dd0ae295b..8582e59130a84b68493c2507c2833e47109d678a 100644 GIT binary patch delta 8819 zcmeHMX?PS>*6t_!$W9U1(6&`OiUsvPmsc3z$J=dBu3FlpI`8k zoXYq}iB~vM75xgUa1yU_Tww1F$mYB!-jU@#94NbSH71ta^+o9=rPYBKZrqO9F5SPA zScAm|I&SL67HpWs)xEdP6__0GV#a-^Uofe@%D~z{DK__$Z(gbT;Ns1Jmjei!U3|r_ zGPZtnFwk;y7dHR>8C>0j23O#do3~>rt*IYN{j1&$+;U4lHvKOT{GDM^TLW`%^FE;aEe~%D05_ zliz)}ea1am>4ArS-G$k|`?hh?vZucZP`7W#?A1Gzn69;-1YWzn9}_%FwUWq$8G&_M zy_oaS$1lrjv`h^g-&%?Z3$LgziF>Rtz->d(QOC@;O!ipJYeNWw$G|2n4GF;l$^(vo zxj#Q9Igp!8H9Yh0zGQRgaBClFHV2mU-(;2cZY*ECDe!fFKgP|i8!Alr+ui&6x6hvt zSoRyoWqYsMYaG~Xa=CgnS(GJFR!CK&D3a3!iIi0zOG-4atDLS0vglVkdzw}%Z4LVD zCVh2tTSHs3+T2l7;o`l8SwcM{)YNlTVs?FDex1x0@i}=Ov7)H5xTw0af-UeCd5grV zB3Frvt0<&gbT%Uw6;x)`^@*)zmO0B}E0*S#>TqTbC_YR+Ul(d`V3upHq+vO_d|c&JiZklq{mY zZ%h*%0w;)^z)Gaw=i^9DV|Y^X3-FCo6^WJ@iKRL8=r<;=cbscHPs_uuIdE_-sn3#% z8!-4}nMK9tmo!Qu8QrInoD9^GuIk{Vtg0N%@VZDhcXqTlYC1&U!6*)zag=xGbQKl4 zYtt%M)Hmg`YBQNwdsncx1E*c zq1Ma~wn!;{xj=8q|&3QLq6e$8rk zrH5kqy5`!ZW_~zakw4BP?-T@1rqL&$c!i|XyukZel4CVkJu1XVPoDAjvfD;l!(toj_erlp~{r&$y7JKZg{xn6fu zo{wLdtLHh(R-vX-rb)$PWw4NiA{C6#ObbLv9y3Id@^6e;TzL`cz8Qe_#Lq=Vt1 zbVXo95gh|UuRyu8i={fIB)7h{f~}KEDr>2V0-@AZ$Jf;6)n_wB?s9LgmzK&oPf1}B z!@@Est}TSs%okCm1)lTQ3h!hEmgi`+;It{G(I>IGs)7 zRQ;U4O>bX5CO(BN4OwNhQbwt3Dri@2tFyXxCDRFWQ&`^Mbav!c5aCEuG8$7C_*6nTMWhO;=)1am}=nIqB1aiB;eIe}Lob!Z>V5hRhI=rSb;f~KjQ zrgwEH>PmgK)-k;2(iOQ}L1A5)rNF`bjm7qa!0o*cJ}cjb7`pRS-r22`TVGp3m3f{#{ZzowD8CPCt83M3B0v!pI7 
zf~5NypI>K3MV*aQZ9{!W_KI${D$QL>=~a!QzqWIEr_#NuELY_jpQ})o%i34gw=~c~ zNn4ArovRTF1oY$?lXJ{$aZZloWmXQbbP#J2)ZHoD$LdOOKkcypXx#`b zgVER*9vN!>??cr){s;v&(^)o)`Iklv$c$&nOy>U(uGDdB;3wnteY2T>6no6xYEO-L zXJ2IE%MrZ=7I55X{-uc)a+c@6jbHD%C5ZVf(Ju|1DL?2K{-2^Y8nmS0o#UsXHgJde zyRsBMH_t=Wm1m1JMSN~CQzY{>j8u_VoLwVil~cS}?5g#YxGLP};7RigXRSX_DY5pwDt@3`TqGXn&7{5ev z4AexDDo~_fS43K+DNPl*Hoc`eUC~xGHg)Lj9UaY==uKm{H71=C(sTZC_drbK*u*(=-%p1eNG~r zIQhB?6)eq(1PGWLL^HNRe(38^>lh5x>4;Q_oz1NL#g62n>oV>(PtlSr3 z#0?%J-e<|e6rbu>C0!x?li_HEnp1@8M}W;*^!sEEDpFqabLig9mb~Eh`XiAj%c3xF zgXLHnJq{l>V47ym!Y^olWY~~ppS|>F#Dz1>GJEhJCH`lH120*+?Y#`6L7l7dQ1kj2 z_#?mp?PFPz)n&;K)szI)ui2Dl!VE?a4x1C9F+m4;hGYc_3T~0-Nfkm(>LNU3iUKbP zyk;_|8HWcL4Msxe1)bt$Ag5S3YFV(Ls47Fs8qYH}n)9)mL=Ap_+|s|*A&ELAGw?_O z1&j!hrimg$!lN*_KyxxFi+V7+FuNMZ@*2aUtm|wSU5KH4vn>W~xXwmhsPURjw%O?8 z>uj?w)HpiQf-i4!;N8^Y_U-cHM-1y@M|1*2&&p-b41GL^#Y2@ z3&0<4vQ^P3xkFEvJ83mMmbUVP0z-8^oGrq3Ha@V4zP z`@+2XDzB$Z@NiXBRzacYqKfExs#316%&TQ+sw}Uzth|EBEAr zixK?iDM^sg)cdhF#^W4pIyAx`z}^VH9XOjy(4XTdZt%+ou+X7_4S2`|YLan{0ecc> z@WX_c_%O6R%ouiA_=^##5m!f?v@f>Jux_PtY%8axJlx)!&=L4=E_su-JuCnu&Pw|45C`e5QZ{B0B3(~Zl< z+fY`kF$SIL#;XTzz#j|i{pj_@lV)OuX+~p+0b2=0NqYw^_Jmdk@fqjf!s-#+aSqOR zj^NYJ!MXMkJoy})Z5_drFhl(~!&LKdLrfxOm@-C?ylMna7!OOAjKJ~ZVX-j?FG4qN z!=op|mzb&J+j)JY{opn{2HamXY4SPpXl1xVOx*Z!+_Dk)qVX`hWCV^K4>OB~;iyTN zA!gW>mbPZCk_ak`j=>CZBkuJM2YdvMiXPjw3?J?~@rSO%J7>xlj?&PlN&SlAC3>!h3`Sm-Dg z^fPW4izddK9zkR9(Ll9#D7_7hO$`@~Fkqt$U5t~OG=kYiu^{UNGpO{&SjUVH4z~}( zQI^rJ!QtT}SjZ?A9FCcS!=ue(MN#b?BXA@XlQBjd)}tS#otGnJaEp- z=kNCX{=v)8dpvORZ_b)q+^`}6xdh-$>ZttI`D*ynXqy0>5p(8kn)uqUmZ1z0IE4ov z`KaV3Jr+GK0;lod<naDb$-rs(qIxw}eQ!Hzlz}t4E+lH| zrP90TfDD{59S`q{$B*noi_?MAwx;eF(H-*%dLtb;t;W+gV~uIY&}DOhGvm*1C?4v} zCnzNYIMe4JS>nDn?w@F525`nDd{aGr&f!jUJOel zPR#?(iym3FAsJmAhjz{fPX1uk9mNYGcB8@tz?pJ*o?njie2MN`0Gui2Kb)2<-dhGT z3G1@?iw*WY_Tq?L;opaEfYo*zud&o4n zCQUW{&Xfel{VBuZ8?)lj!aG13E=C;+dvD*H=-WGj((YJrw4~?r_fhwqAT932wNo8Q z(X&v(4v>Zu#kpbkn7&5O>;P%81y6sKi1yT@Svx@*Bg~WYi%*uLwL5_`K`|w$A1Hyrh=NU%+&5L zka7=j&RH6H(fs%auST|efz$d?>Ja{*EfY1~3!JkzKYB9bRAU)Ba4&FPJhS_$xu$y~ z(Bj_$=Yr<7Y~8VfHRz4s65A)uZ!UG!F5j;8P0u}hb2&SBv6-~XfgZk(Scy$I;&>r; zgP{_o>;@~whf4~{L=0AM&COWtIqlf2FkF0*eoiTE;YA~Cz zK;zvG^u`0kG;12>%X)p|z+U3>378mrnk-GaWf%H(2rSI(y)%Y?_npg8_fudYelYF3 z*@mvADB%!TAhtbuFyhTuThTLzKy_5~P0z+v816^24uk5b`zf`_DYC6h%1~_A{?E5V7lc{d>-ZMeY{zXHR zCw!2JT+ae$(t&H8PcfiT)x_oQ?`;TU#$A1T$DSGV3 z()CxriNhCc7`PJ5b8=z)9H(T5{%T zX0-8n;GDI;>ZEvQb{RVUJaEd-@0nilVck)*{srI+IdL~_{qrNM(Ww`JlRn}x=j~e^ zf_A8p-`edF8aKWbJZICbg9glvt-*F$ z-^QM|y^5t^U)rv~M22k-p%a^IIFeo>5>vukY-hr^gdYn3 z&^j0q9WgJ$AFaq_QH3V2Th(Lb#Q?FG++Xm8Fu(o~ndI1n;eT3lhk_u(T3D zu)7h%QsM!WK!a$jsI*IByCB%2t>AzUBid*SI-@o^iaOdZjWTZ~L=&KW?D5NdnEH?> z&;OkBJMa6R?LFtNJUDaZ!I>MC$oEYqola-5zXgAFTmR6$cSGh)w%2W>^$}|T7sG({+as1Xb;(Eb1u&}2>xKC%&6vZpC;*1Ym*uxis1EP7UQ(!{AR%?lpev=hU` zolo0VFUtz{ZXUoQp59HyW|#B^Cj}cZ!(d0h@u(OKt_YT66JFcyP5SB$d+I}n!qA8p&n zHw=4%ukGlKkA3m_rC0ACj9issv<=5$6%iH|ykTH7HfbBRJ`I21`QXDy>a#aTlVV*_Z!^qK9S@kN~1_3Kyn(v`elX?S&1PiKSK$EpOq<% zS9+GUE|xnNXc?{AlD5tTooz~6cR@v+$32HHlYCsG&z0x()>XKwgnYr>NH^q@qF7&3 zz*KlC_Z(MgbwR$*L;6@otn^fR+)P~=L$fqVaRgaT6O`znsVs)h;&`XTG8D~ijg##U_~K7u&Q4oBuxy@$k#JmUVed( z8?o#X!)QU|MYQT0Lu`sFXd)euB|;7bij@N#A(N^^D5~g(^`NmD&!Z>4F|d8(Tq7l# z8+I)P2iK;oGNt1>41Re=k`*O|B1J+JNtIw}0lcEYwE(FED8Hm>ewJ$M>F#P$HHdx+ zEvHa)N_S^rK3%eiuU}m4n$syQspPvFDtap23p%?pE8DA!sNRNnzmIFtmgY5OlCoIR z=qhNHTia;Vao!L+#x&MRQY=pip{P4BFfC!MX%tV>f~*mw#EJy?>nB9g&l3tM21LrQ zs1!+eYO<B6f9WwlvcPy=|36g``+XHU$c6wYuhxGS`AgT5elj zEwn9e@KpA6dXVEEh6!VAVw^lHND>{gDaCRzTrd-c!zD2kOZr8TAQ=fZ9gY$SDKvR$ zhNU%14hR&Z^|bbColSuzO}%KBv$d3ReexJ~^)F&5o7iTtCb}f_hmgaX>%51Bf 
zjbFTId9}Z>th=Rfd5x&BttBn3ZQO9UqJJDo#z~7J$D>cc;$}so=m1TVFadayfGARg zsLC8624tA4G%NEG)hV~CZ7n@rKZMA=aB+7{TYHVav96=Q+a)b=x+~kA#WlT(s^(R? z109`l|w&_EYf`!25Hv1UesA(9-7Q7B8%D7v%9yPhE3G2YTi^!<3k@@!@z8jv@3Zm=T~kM&>k8AykeB zGk6MCt02;ZCNrc)N~Fw-WP6XYxXFKUy0mzD%9^yLEw!Zu#kC6-x_FpA1wCG&M=HrH zuB&b=t10X4ke8QbDtMcE*(C*a1zl=>LsgN6p1ELfju|b=DROY$h(q69FxVVeBon#J zKE=L!D9T|>i|qTQM@@KP*s2Kq|N2<4Ts#-h0jF_c;V)e=rpd zK5h;CWW2s_Gg@Jay<<waf=YK8&6hA`ZoVv^^?0gCvBu@9WWA(Y z@HCcr+$?(boN*d*pEFjb3Jrw~uKE(soQ8rLww9?RU9KWeZ9|#Y=Ml-O@=CF`%ALm- z4jnjW{4BGY89p{eIJg3#{AlBTEUO{M*cNGTjcFkAq#F_~fr*qk97 zQ)N*S6;)PMbh_U}_0?xa(CN&Rqoc3vmD8n*QSS) z-qG4O{osV?p!ks@C&R*F%B>31<2s)nUuDX}G*RPthNB26Q42sGOfxd%p}a!SlosGf zR^?>KgYVm5DhzF}V?Y8!F@qaT$1 z|J=gC8+{^dAlEa;59$w>M$b+INvxzl{8hh z$UMo3JU#UNY16=6DUj2Xq9RBH1vv?v$bv}7e1IiLk@b@-2l)y~qk}h^b5b;t)kIaH z2sqsRV1~>SvO+S1U*kv_(g{}aE68z^nMhG6ndTu8BxK0FApen7LXjzj;NawkgOyew zO++0xnYk2>h9nkp6qxWdFeyJvh%#gb3?ox;#bIT?Uq&zAWX?)qcnFdtlaPt?AttX9 zq9AgxTu4d@NF=2xew4PtoSZ_lDl7U`=#y4M9;uL?`Wc##`~e=$c}Zjf0o1y}oHJL@ zBvuySIwL8XLa+geCq#;s2$g32oT75HNCo&Xiw>Wn*ZCH}T~KcA?vYy!4CKE8q;jxA zX&6C2&BF+Wo&r=>fdp7&REbCCK67r0#&W6i# z`@m{NWdu6Fz;H<#gcz=kgamn7D8H9!Riqiny3zhV^UM_RGyqlMIuhVn@Eu-VZ5#K!9tR zOp|b@@T0u-mWfv)RBo`uqm}C|7>bKNJ)0fo-rt1pwo(&f1a(U?E z4VHZsp;Y+d4m4_7_%+5n{pCB&XtC|5i>*Ihj9*gRcS+~iCFPeCTgDY%7FF}O;>*Hm zy1ck4^r$iFTG+Vq%i?SN>EiJ_^kuCF&sgrZUzazBYpAH^y>*QxRrSTCg;I%BMOT+K z_!=eBQ(96|S;|WlMQ%@CX@yko<_o>~)o{TpcDcM_btzfhfL%T?yi*k6mPQX1A27>vTUXa2kucVIT=o^H{ZET(SLY4beu5&T!yRO?CG&EcuxXTl$e$cp%LWP^RL z?lawQ;Y~6XF}WD0Pe#A)#}^LG%EQcIXn!ddh1#Mm%|j0rVr>@NEt6ImEmvh^7;bGD zN-M`+)*HLE7KFR7NhtE1X%?!i#P%aQZ;YDO(WCWf^P8Y}ev_J_TxTN2P@;dbKXx|3hJM_$N@B$-Td{$Y}ymj~_4>$&qTF>jZvD~Wp_|s@XAFbwcv#d>*hc*E@V@s<)(O)w-DJH!OouIoY^JN55|-J# zA${5#-Y_Ch94|BT zM&zr;%kSA%xH69-gR9Cm$x1gzY!Nd^)HcIHFo#ccO%`&Qm zs8L4f$sBJU(>~PPJ}k$WMyrOJM~rA;qgtprW(YNpGmbTi?CKtoqah28*W<7TLRCYZ zn`0r*Oc<6%mc_6*xC{SNWk~}+$K@-}W2>b~nwJ{M482*W=Lp=;+~r@`*4ddm`ikQ8 zx3v_NEAu z*{`z?M1CDv9dROp4L=?JFMZ;H&9oIg;;{bzH=F+h-)zu8rnM29y)>hK&YNv#?jWt7 zW7F0lob5l+g}x=N@O{cePD{w@-Hw)0)}7e2&u_&!$K2T{iM9@4Q@5}6QEzx%=r9cs z{hGGrhNNQ!D3!66W7y*T8MY(WHR220H*y9AFPW1Ch7z9qyWt94-!AD{p+-ckSqdodeY`4-2A{j$RYtVe)jv%On&yQ z-Ka?dW|A>~K}Pf6H=+X)FinH}9Lt4?@1X0mfEnI1@Je*Yl8?}vS-{M?Z{w62#~rt$ z)!D$Dv3LFA?8i32Jtzm5;dgaE8GY}`0kkd$nEc17o6FwYaTJ}-0p_gNKAT?ub@iQS z#kIgRZSpi&jx^nf&R+}6obJ|L-aYyvv}YDDrQ^$9srvN(wWwq^FcanmZ<)I4P9J(; zHZX~Il7F3>P1X(OTFo~gGij|y^LJUdVAD_KmGJ7Jqv-rDpj(Vv;_z3GKaQ6F4(PGx zK704-vujVHDZ7Er@5M{YR^GE6y|^3b5tZSGV}AGcgD8Cu(9`>lJerkIbpu+l2Q0>- zGF-NjlQYodyMa05DYZNM#Pn=*=iR_$pFVcDz_j-p_Qk(&d>xu~FEG;*&i-CZDZU0heJ?PbGtT`c zclYXBk^lF=Ozb~@Xqt0NB*O0lX3XOUx)Z6pbI_vufSEHty-u>byIHM}NH^nA5){Fn&|#EVSkUVA65hP8LmieIA;z515nJZh0Wx zJK#g>_W^U_ZC`CjzM*jodS@Rn69@Yfr)l4&pp_2-Gx6ZDN2YcAzCqtV81nt2uXLqX zP2Yz0J_O8g`&0Ksy?bIc@;q#9jGg%QsdtlK-FmV%XXKNyE-VZ^@vt=?OP}!AeCd|V zCkFRhKaaw~`KLBUCoDdXzC8%|t&S3flmy?+>(R@cmbM9#AvM(!7Y88a*9Si-tQ zAKLK(FsDX_-Al#pnv8N@1m;zb&1{;I*)bnI`64ir&$wSDPi#v;@}GejbNtl##QLq@ zBFjs_obX=WCyt2c9z{(r0h6EA-|qR0IF1gy1kA~kiw$Yd-EkMX{$*e~U%G8V-pb82 z=*^dbIpM-vM;(m*L$vw`Fmu-FD!hl7T`27+FsI%d6GokV--Om31!mZ@9>Kh~sT`d? 
z3e4172j&RZN4W6QBU<{|8;`Tja_%(Pj zmWH?CTP-+#2>;L;Yt6B&v1-EKl2}cWd4=Ru z#lL)L=%jT(kDfYZO&{{TVGS6D{_~V|k|}uIz3F`+gJ+x0>abX`W;JB4b;x?gMw Date: Tue, 24 Feb 2015 15:13:51 -0500 Subject: [PATCH 16/52] Add trigger metadata (which includes the SHA) and the built image_id to the event data --- buildman/component/buildcomponent.py | 14 ++++++++++++-- buildman/jobutil/buildjob.py | 8 ++++++-- endpoints/notificationevent.py | 29 ++++++++++++++++++++++++---- 3 files changed, 43 insertions(+), 8 deletions(-) diff --git a/buildman/component/buildcomponent.py b/buildman/component/buildcomponent.py index 647161190..00ec892a7 100644 --- a/buildman/component/buildcomponent.py +++ b/buildman/component/buildcomponent.py @@ -247,12 +247,22 @@ class BuildComponent(BaseComponent): """ Wraps up a completed build. Handles any errors and calls self._build_finished. """ try: # Retrieve the result. This will raise an ApplicationError on any error that occurred. - result.result() + result_value = result.result() + kwargs = {} + + # Note: If we are hitting an older builder that didn't return ANY map data, then the result + # value will be a bool instead of a proper CallResult object (because autobahn sucks). + # Therefore: we have a try-except guard here to ensure we don't hit this pitfall. + try: + kwargs = result_value.kwresults + except: + pass + self._build_status.set_phase(BUILD_PHASE.COMPLETE) trollius.async(self._build_finished(BuildJobResult.COMPLETE)) # Send the notification that the build has completed successfully. - self._current_job.send_notification('build_success') + self._current_job.send_notification('build_success', image_id=kwargs.get('image_id')) except ApplicationError as aex: worker_error = WorkerError(aex.error, aex.kwargs.get('base_error')) diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index a6361e83a..3c00a3bc3 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -28,16 +28,20 @@ class BuildJob(object): def has_retries_remaining(self): return self.job_item.retries_remaining > 0 - def send_notification(self, kind, error_message=None): + def send_notification(self, kind, error_message=None, image_id=None): tags = self.build_config.get('docker_tags', ['latest']) event_data = { 'build_id': self.repo_build.uuid, 'build_name': self.repo_build.display_name, 'docker_tags': tags, 'trigger_id': self.repo_build.trigger.uuid, - 'trigger_kind': self.repo_build.trigger.service.name + 'trigger_kind': self.repo_build.trigger.service.name, + 'trigger_metadata': self.build_config.get('trigger_metadata', {}) } + if image_id is not None: + event_data['image_id'] = image_id + if error_message is not None: event_data['error_message'] = error_message diff --git a/endpoints/notificationevent.py b/endpoints/notificationevent.py index 3f27623f5..8c38969cf 100644 --- a/endpoints/notificationevent.py +++ b/endpoints/notificationevent.py @@ -92,7 +92,12 @@ class BuildQueueEvent(NotificationEvent): 'build_id': build_uuid, 'build_name': 'some-fake-build', 'docker_tags': ['latest', 'foo', 'bar'], - 'trigger_kind': 'GitHub' + 'trigger_kind': 'GitHub', + 'trigger_metadata': { + "default_branch": "master", + "ref": "refs/heads/somebranch", + "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d" + } }, subpage='/build?current=%s' % build_uuid) def get_summary(self, event_data, notification_data): @@ -114,7 +119,12 @@ class BuildStartEvent(NotificationEvent): 'build_id': build_uuid, 'build_name': 'some-fake-build', 'docker_tags': ['latest', 'foo', 'bar'], - 'trigger_kind': 
'GitHub' + 'trigger_kind': 'GitHub', + 'trigger_metadata': { + "default_branch": "master", + "ref": "refs/heads/somebranch", + "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d" + } }, subpage='/build?current=%s' % build_uuid) def get_summary(self, event_data, notification_data): @@ -136,7 +146,13 @@ class BuildSuccessEvent(NotificationEvent): 'build_id': build_uuid, 'build_name': 'some-fake-build', 'docker_tags': ['latest', 'foo', 'bar'], - 'trigger_kind': 'GitHub' + 'trigger_kind': 'GitHub', + 'trigger_metadata': { + "default_branch": "master", + "ref": "refs/heads/somebranch", + "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d" + }, + 'image_id': '1245657346' }, subpage='/build?current=%s' % build_uuid) def get_summary(self, event_data, notification_data): @@ -159,7 +175,12 @@ class BuildFailureEvent(NotificationEvent): 'build_name': 'some-fake-build', 'docker_tags': ['latest', 'foo', 'bar'], 'trigger_kind': 'GitHub', - 'error_message': 'This is a fake error message' + 'error_message': 'This is a fake error message', + 'trigger_metadata': { + "default_branch": "master", + "ref": "refs/heads/somebranch", + "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d" + } }, subpage='/build?current=%s' % build_uuid) def get_summary(self, event_data, notification_data): From 45bb05894b2d6adcf31a99218d402816cb5fb7dc Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 24 Feb 2015 15:42:31 -0500 Subject: [PATCH 17/52] Fix JS NPE --- static/js/services/plan-service.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/static/js/services/plan-service.js b/static/js/services/plan-service.js index ee06992e5..b940d2d3f 100644 --- a/static/js/services/plan-service.js +++ b/static/js/services/plan-service.js @@ -120,7 +120,7 @@ function(KeyService, UserService, CookieService, ApiService, Features, Config) { }; planService.getPlans = function(callback, opt_includePersonal) { - planService.verifyLoaded(function() { + planService.verifyLoaded(function(plans) { var filtered = []; for (var i = 0; i < plans.length; ++i) { var plan = plans[i]; From afe6be0dafb2083b0cfaf79430f9551aa3c18489 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 24 Feb 2015 16:51:03 -0500 Subject: [PATCH 18/52] Put a message if the download link won't appear --- static/css/quay.css | 5 +++++ static/directives/docker-auth-dialog.html | 3 +++ 2 files changed, 8 insertions(+) diff --git a/static/css/quay.css b/static/css/quay.css index 4fc72cc49..5f413f292 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -496,6 +496,11 @@ i.toggle-icon:hover { width: 100%; } +.docker-auth-dialog .download-cfg.not-supported { + font-size: 14px; + color: #ccc; +} + .docker-auth-dialog .download-cfg { float: left; padding-top: 6px; diff --git a/static/directives/docker-auth-dialog.html b/static/directives/docker-auth-dialog.html index 70ca8ae8c..270f8ac66 100644 --- a/static/directives/docker-auth-dialog.html +++ b/static/directives/docker-auth-dialog.html @@ -32,6 +32,9 @@ Download .dockercfg file + + .dockercfg download not supported in this browser + From d8a34427ecd6900d66ed178f22ea7f6f6dbd8019 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Tue, 24 Feb 2015 17:17:55 -0500 Subject: [PATCH 19/52] Try to get around the tag deadlock by using a select for update. 
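
The idea, as a standalone sketch. The Tag model and connection settings below are hypothetical; the real change simply wraps the existing tag query in the codebase's db_for_update helper, presumably issuing SELECT ... FOR UPDATE on backends that support it.

    from peewee import MySQLDatabase, Model, CharField, IntegerField

    db = MySQLDatabase('quay', user='quay', password='quay')  # assumed connection details

    class Tag(Model):
      name = CharField()
      lifetime_end_ts = IntegerField(null=True)

      class Meta:
        database = db

    def end_tag_lifetime(name, now_ts):
      with db.atomic():
        # A plain .get() reads without a row lock, so two concurrent writers can
        # interleave between the SELECT and the UPDATE and race or deadlock.
        # for_update() takes the row lock up front and serializes them.
        tag = (Tag.select()
               .where(Tag.name == name, Tag.lifetime_end_ts >> None)
               .for_update()
               .get())
        tag.lifetime_end_ts = now_ts
        tag.save()

The lock is only held for the duration of the enclosing transaction, so the locking read has to run inside one.
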
--- data/model/legacy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/model/legacy.py b/data/model/legacy.py index 94f799992..85b533053 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -1803,7 +1803,7 @@ def create_or_update_tag(namespace_name, repository_name, tag_name, query = _tag_alive(RepositoryTag .select() .where(RepositoryTag.repository == repo, RepositoryTag.name == tag_name)) - tag = query.get() + tag = db_for_update(query).get() tag.lifetime_end_ts = now_ts tag.save() except RepositoryTag.DoesNotExist: From ace6da5514d5f336e7acf5929a8dc19c563bd54a Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 24 Feb 2015 17:41:30 -0500 Subject: [PATCH 20/52] Add a service status indicator to the footer and add a notification bar for any incidents --- .../css/directives/ui/quay-service-status.css | 29 ++++++++ static/css/quay.css | 73 +++++++++++-------- .../directives/quay-service-status-bar.html | 7 ++ static/directives/quay-service-status.html | 6 ++ .../directives/ui/quay-service-status-bar.js | 22 ++++++ .../js/directives/ui/quay-service-status.js | 24 ++++++ static/js/services/status-service.js | 40 ++++++++++ static/partials/landing-normal.html | 11 --- templates/base.html | 5 +- 9 files changed, 173 insertions(+), 44 deletions(-) create mode 100644 static/css/directives/ui/quay-service-status.css create mode 100644 static/directives/quay-service-status-bar.html create mode 100644 static/directives/quay-service-status.html create mode 100644 static/js/directives/ui/quay-service-status-bar.js create mode 100644 static/js/directives/ui/quay-service-status.js create mode 100644 static/js/services/status-service.js diff --git a/static/css/directives/ui/quay-service-status.css b/static/css/directives/ui/quay-service-status.css new file mode 100644 index 000000000..367a7dede --- /dev/null +++ b/static/css/directives/ui/quay-service-status.css @@ -0,0 +1,29 @@ +.quay-service-status-indicator { + display: inline-block; + border-radius: 50%; + width: 12px; + height: 12px; + margin-right: 6px; + background: #eee; + vertical-align: middle +} + +.quay-service-status-description { + vertical-align: middle; +} + +.quay-service-status-indicator.none { + background: #2fcc66; +} + +.quay-service-status-indicator.minor { + background: #f1c40f; +} + +.quay-service-status-indicator.major { + background: #e67e22; +} + +.quay-service-status-indicator.critical { + background: #e74c3c; +} \ No newline at end of file diff --git a/static/css/quay.css b/static/css/quay.css index 5f413f292..bc6e172e9 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -23,6 +23,47 @@ } } +.announcement a { + color: lightblue; +} + +.announcement { + position: absolute; + z-index: 9; + top: 0px; + left: 0px; + right: 0px; + + display: block; + background: rgba(8, 61, 95, 0.6); + min-height: 45px; + text-align: center; + font-size: 14px; + line-height: 45px; + color: white; +} + +.announcement.inline { + position: relative; +} + +.announcement .spacer { + display: inline-block; + width: 45px; +} + +.announcement img { + height: 45px; + padding-top: 6px; + padding-bottom: 6px; +} + +.announcement .plus { + display: inline-block; + margin-left: 10px; + margin-right: 10px; +} + .scrollable-menu { max-height: 400px; overflow: auto; @@ -1516,38 +1557,6 @@ i.toggle-icon:hover { margin-top: 20px; } -.landing .announcement { - position: absolute; - z-index: 9; - top: 0px; - left: 0px; - right: 0px; - - display: block; - background: rgba(8, 61, 95, 0.6); - min-height: 45px; - 
text-align: center; - font-size: 14px; - line-height: 45px; -} - -.landing .announcement .spacer { - display: inline-block; - width: 45px; -} - -.landing .announcement img { - height: 45px; - padding-top: 6px; - padding-bottom: 6px; -} - -.landing .announcement .plus { - display: inline-block; - margin-left: 10px; - margin-right: 10px; -} - .landing { color: white; diff --git a/static/directives/quay-service-status-bar.html b/static/directives/quay-service-status-bar.html new file mode 100644 index 000000000..ab5cda67b --- /dev/null +++ b/static/directives/quay-service-status-bar.html @@ -0,0 +1,7 @@ + \ No newline at end of file diff --git a/static/directives/quay-service-status.html b/static/directives/quay-service-status.html new file mode 100644 index 000000000..b7e77ef96 --- /dev/null +++ b/static/directives/quay-service-status.html @@ -0,0 +1,6 @@ + + + + {{ description }} + \ No newline at end of file diff --git a/static/js/directives/ui/quay-service-status-bar.js b/static/js/directives/ui/quay-service-status-bar.js new file mode 100644 index 000000000..1de75ddd7 --- /dev/null +++ b/static/js/directives/ui/quay-service-status-bar.js @@ -0,0 +1,22 @@ +/** + * An element which displays the current status of the service as an announcement bar. + */ +angular.module('quay').directive('quayServiceStatusBar', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/quay-service-status-bar.html', + replace: false, + transclude: false, + restrict: 'C', + scope: {}, + controller: function($scope, $element, StatusService) { + $scope.indicator = 'loading'; + + StatusService.getStatus(function(data) { + $scope.indicator = data['status']['indicator']; + $scope.incidents = data['incidents']; + }); + } + }; + return directiveDefinitionObject; +}); \ No newline at end of file diff --git a/static/js/directives/ui/quay-service-status.js b/static/js/directives/ui/quay-service-status.js new file mode 100644 index 000000000..e661190c1 --- /dev/null +++ b/static/js/directives/ui/quay-service-status.js @@ -0,0 +1,24 @@ +/** + * An element which displays the current status of the service. + */ +angular.module('quay').directive('quayServiceStatus', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/quay-service-status.html', + replace: false, + transclude: false, + restrict: 'C', + scope: {}, + controller: function($scope, $element, StatusService) { + $scope.indicator = 'loading'; + $scope.description = ''; + + StatusService.getStatus(function(data) { + $scope.indicator = data['status']['indicator']; + $scope.incidents = data['incidents']; + $scope.description = data['status']['description']; + }); + } + }; + return directiveDefinitionObject; +}); \ No newline at end of file diff --git a/static/js/services/status-service.js b/static/js/services/status-service.js new file mode 100644 index 000000000..8a9bba051 --- /dev/null +++ b/static/js/services/status-service.js @@ -0,0 +1,40 @@ +/** + * Helper service for retrieving the statuspage status of the quay service. 
+ */ +angular.module('quay').factory('StatusService', ['Features', function(Features) { + if (!Features.BILLING) { + return; + } + + var STATUSPAGE_PAGE_ID = '8szqd6w4s277'; + var STATUSPAGE_SRC = 'https://statuspage-production.s3.amazonaws.com/se-v2.js'; + var statusPageHandler = null; + var statusPageData = null; + var callbacks = []; + + var handleGotData = function(data) { + if (!data) { return; } + statusPageData = data; + + for (var i = 0; i < callbacks.length; ++i) { + callbacks[i](data); + } + + callbacks = []; + }; + + $.getScript(STATUSPAGE_SRC, function(){ + statusPageHandler = new StatusPage.page({ page: STATUSPAGE_PAGE_ID }); + statusPageHandler.summary({ + success : handleGotData + }); + }); + + var statusService = {}; + statusService.getStatus = function(callback) { + callbacks.push(callback); + handleGotData(statusPageData); + }; + + return statusService; +}]); \ No newline at end of file diff --git a/static/partials/landing-normal.html b/static/partials/landing-normal.html index bc52552f3..f8b969cd8 100644 --- a/static/partials/landing-normal.html +++ b/static/partials/landing-normal.html @@ -1,15 +1,4 @@
-
- - - + - - - - - Quay.io is now part of CoreOS! Read the blog post. -
-
diff --git a/templates/base.html b/templates/base.html index 9c87c5a0d..91dee7842 100644 --- a/templates/base.html +++ b/templates/base.html @@ -96,6 +96,7 @@ mixpanel.init("{{ mixpanel_key }}", { track_pageview : false, debug: {{ is_debug