Merge branch 'master' into pagesnew

Joseph Schorr 2015-03-05 14:22:10 -05:00
commit 86447c0a99
52 changed files with 553 additions and 211 deletions

View file

@@ -55,9 +55,10 @@ ADD conf/init/buildmanager /etc/service/buildmanager
 # Download any external libs.
 RUN mkdir static/fonts static/ldn
 RUN venv/bin/python -m external_libraries
+RUN mkdir /usr/local/nginx/logs/

 # Run the tests
-RUN TEST=true venv/bin/python -m unittest discover
+RUN TEST=true venv/bin/python -m unittest discover -f

 VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]

View file

@@ -15,16 +15,14 @@ from data import model
 from data.model import oauth
 from app import app, authentication
 from permissions import QuayDeferredPermissionUser
-from auth_context import (set_authenticated_user, set_validated_token,
+from auth_context import (set_authenticated_user, set_validated_token, set_grant_user_context,
                           set_authenticated_user_deferred, set_validated_oauth_token)
 from util.http import abort

 logger = logging.getLogger(__name__)

-SIGNATURE_PREFIX = 'signature='
+SIGNATURE_PREFIX = 'sigv2='

 def _load_user_from_cookie():
   if not current_user.is_anonymous():

@@ -131,10 +129,11 @@ def _process_basic_auth(auth):
     logger.debug('Basic auth present but could not be validated.')

-def generate_signed_token(grants):
+def generate_signed_token(grants, user_context):
   ser = SecureCookieSessionInterface().get_signing_serializer(app)
   data_to_sign = {
     'grants': grants,
+    'user_context': user_context,
   }

   encrypted = ser.dumps(data_to_sign)

@@ -164,6 +163,7 @@ def _process_signed_grant(auth):
       logger.debug('Successfully validated signed grant with data: %s', token_data)

       loaded_identity = Identity(None, 'signed_grant')
+      set_grant_user_context(token_data['user_context'])
       loaded_identity.provides.update(token_data['grants'])
       identity_changed.send(app, identity=loaded_identity)
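For context on the new token payload: a minimal sketch of signing and verifying a grant with Flask's session serializer, as generate_signed_token does above. The app, secret, and grant string here are placeholders, not Quay's real configuration:

from flask import Flask
from flask.sessions import SecureCookieSessionInterface

app = Flask(__name__)
app.secret_key = 'dev-only-secret'  # placeholder; Quay uses its configured secret key

ser = SecureCookieSessionInterface().get_signing_serializer(app)

# Sign the grants plus the new user_context field, matching the shape above.
token = ser.dumps({'grants': ['some-grant'], 'user_context': 'devtable'})

# loads() round-trips the payload and raises BadSignature on tampering.
assert ser.loads(token)['user_context'] == 'devtable'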

View file

@@ -30,6 +30,15 @@ def set_authenticated_user(user_or_robot):
   ctx.authenticated_user = user_or_robot

+def get_grant_user_context():
+  return getattr(_request_ctx_stack.top, 'grant_user_context', None)
+
+def set_grant_user_context(username_or_robotname):
+  ctx = _request_ctx_stack.top
+  ctx.grant_user_context = username_or_robotname
+
 def set_authenticated_user_deferred(user_or_robot_db_uuid):
   logger.debug('Deferring loading of authenticated user object with uuid: %s', user_or_robot_db_uuid)
   ctx = _request_ctx_stack.top
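The new helpers are plain attributes on Flask's per-request context, so they can be exercised in isolation. A small sketch, assuming the helpers above are importable from auth.auth_context:

from flask import Flask
from auth.auth_context import set_grant_user_context, get_grant_user_context

app = Flask(__name__)
with app.test_request_context('/'):
  set_grant_user_context('devtable+builder')  # hypothetical robot username
  assert get_grant_user_context() == 'devtable+builder'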

View file

@@ -247,12 +247,22 @@ class BuildComponent(BaseComponent):
     """ Wraps up a completed build. Handles any errors and calls self._build_finished. """
     try:
       # Retrieve the result. This will raise an ApplicationError on any error that occurred.
-      result.result()
+      result_value = result.result()
+      kwargs = {}
+
+      # Note: If we are hitting an older builder that didn't return ANY map data, then the result
+      # value will be a bool instead of a proper CallResult object (because autobahn sucks).
+      # Therefore: we have a try-except guard here to ensure we don't hit this pitfall.
+      try:
+        kwargs = result_value.kwresults
+      except:
+        pass
+
       self._build_status.set_phase(BUILD_PHASE.COMPLETE)
       trollius.async(self._build_finished(BuildJobResult.COMPLETE))

       # Send the notification that the build has completed successfully.
-      self._current_job.send_notification('build_success')
+      self._current_job.send_notification('build_success', image_id=kwargs.get('image_id'))
     except ApplicationError as aex:
       worker_error = WorkerError(aex.error, aex.kwargs.get('base_error'))
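The bare except above also swallows unrelated failures. Since the actual failure mode is an older builder returning a bool (which has no kwresults attribute), the same guard can be narrowed; a sketch:

def extract_kwresults(result_value):
  # Older builders return a bare bool instead of a CallResult object,
  # so the attribute lookup is what fails here.
  try:
    return result_value.kwresults
  except AttributeError:
    return {}

assert extract_kwresults(True) == {}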

View file

@@ -10,3 +10,4 @@ class BuildServerStatus(object):
   STARTING = 'starting'
   RUNNING = 'running'
   SHUTDOWN = 'shutting_down'
+  EXCEPTION = 'exception'

View file

@@ -28,16 +28,20 @@ class BuildJob(object):
   def has_retries_remaining(self):
     return self.job_item.retries_remaining > 0

-  def send_notification(self, kind, error_message=None):
+  def send_notification(self, kind, error_message=None, image_id=None):
     tags = self.build_config.get('docker_tags', ['latest'])
     event_data = {
       'build_id': self.repo_build.uuid,
       'build_name': self.repo_build.display_name,
       'docker_tags': tags,
       'trigger_id': self.repo_build.trigger.uuid,
-      'trigger_kind': self.repo_build.trigger.service.name
+      'trigger_kind': self.repo_build.trigger.service.name,
+      'trigger_metadata': self.build_config.get('trigger_metadata', {})
     }

+    if image_id is not None:
+      event_data['image_id'] = image_id
+
     if error_message is not None:
       event_data['error_message'] = error_message

View file

@@ -1,5 +1,3 @@
-from trollius import From
-
 from buildman.enums import BuildJobResult
 from util.cloudwatch import get_queue

View file

@@ -4,6 +4,7 @@ import uuid
 import calendar
 import os.path
 import json
+import boto

 from datetime import datetime, timedelta
 from trollius import From, coroutine, Return, async

@@ -77,7 +78,7 @@ class EphemeralBuilderManager(BaseManager):
     try:
       etcd_result = changed_key_future.result()
-    except (ReadTimeoutError, ProtocolError):
+    except (ReadTimeoutError, ProtocolError, etcd.EtcdException):
       return

     change_callback(etcd_result)

@@ -90,6 +91,9 @@ class EphemeralBuilderManager(BaseManager):
       self._watch_tasks[watch_task_key] = async(watch_future)

   def _handle_builder_expiration(self, etcd_result):
+    if etcd_result is None:
+      return
+
     if etcd_result.action == EtcdAction.EXPIRE:
       # Handle the expiration
       logger.debug('Builder expired, clean up the old build node')

@@ -100,6 +104,9 @@ class EphemeralBuilderManager(BaseManager):
       async(self._executor.stop_builder(job_metadata['builder_id']))

   def _handle_realm_change(self, etcd_result):
+    if etcd_result is None:
+      return
+
     if etcd_result.action == EtcdAction.CREATE:
       # We must listen on the realm created by ourselves or another worker
       realm_spec = json.loads(etcd_result.value)

@@ -137,7 +144,7 @@ class EphemeralBuilderManager(BaseManager):
       for realm in all_realms.children:
         if not realm.dir:
           self._register_realm(json.loads(realm.value))
-    except KeyError:
+    except (KeyError, etcd.EtcdKeyError):
       # no realms have been registered yet
       pass

@@ -160,7 +167,8 @@ class EphemeralBuilderManager(BaseManager):
     self._async_thread_executor = ThreadPoolExecutor(worker_threads)
     self._etcd_client = AsyncWrapper(self._etcd_client_klass(host=etcd_host, port=etcd_port,
                                                              cert=etcd_auth, ca_cert=etcd_ca_cert,
-                                                             protocol=etcd_protocol),
+                                                             protocol=etcd_protocol,
+                                                             read_timeout=5),
                                      executor=self._async_thread_executor)

     self._etcd_builder_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')

@@ -199,8 +207,11 @@ class EphemeralBuilderManager(BaseManager):
     try:
       building = yield From(self._etcd_client.read(self._etcd_builder_prefix, recursive=True))
       workers_alive = sum(1 for child in building.children if not child.dir)
-    except KeyError:
+    except (KeyError, etcd.EtcdKeyError):
       workers_alive = 0
+    except etcd.EtcdException:
+      logger.exception('Exception when reading job count from etcd')
+      raise Return(False)

     logger.debug('Total jobs: %s', workers_alive)

@@ -227,17 +238,29 @@ class EphemeralBuilderManager(BaseManager):
     try:
       yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=False, ttl=ttl))
-    except KeyError:
+    except (KeyError, etcd.EtcdKeyError):
       # The job was already taken by someone else, we are probably a retry
       logger.error('Job already exists in etcd, are timeouts misconfigured or is the queue broken?')
       raise Return(False)
+    except etcd.EtcdException:
+      logger.exception('Exception when writing job %s to etcd', build_uuid)
+      raise Return(False)

     logger.debug('Starting builder with executor: %s', self._executor)
-    builder_id = yield From(self._executor.start_builder(realm, token, build_uuid))
+
+    try:
+      builder_id = yield From(self._executor.start_builder(realm, token, build_uuid))
+    except:
+      logger.exception('Exception when starting builder for job: %s', build_uuid)
+      raise Return(False)

     # Store the builder in etcd associated with the job id
-    payload['builder_id'] = builder_id
-    yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=True, ttl=ttl))
+    try:
+      payload['builder_id'] = builder_id
+      yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=True, ttl=ttl))
+    except etcd.EtcdException:
+      logger.exception('Exception when writing job %s to etcd', build_uuid)
+      raise Return(False)

     # Store the realm spec which will allow any manager to accept this builder when it connects
     realm_spec = json.dumps({

@@ -246,12 +269,16 @@ class EphemeralBuilderManager(BaseManager):
       'builder_id': builder_id,
       'job_queue_item': build_job.job_item,
     })
+
     try:
       yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
                                          ttl=ttl))
-    except KeyError:
+    except (KeyError, etcd.EtcdKeyError):
       logger.error('Realm already exists in etcd. UUID collision or something is very very wrong.')
       raise Return(False)
+    except etcd.EtcdException:
+      logger.exception('Exception when writing realm %s to etcd', realm)
+      raise Return(False)

     raise Return(True)

@@ -266,7 +293,7 @@ class EphemeralBuilderManager(BaseManager):
       logger.debug('Sending build %s to newly ready component on realm %s',
                    job.job_details['build_uuid'], build_component.builder_realm)
       yield From(build_component.start_build(job))
-    except KeyError:
+    except (KeyError, etcd.EtcdKeyError):
       logger.debug('Builder is asking for more work, but work already completed')

   def build_component_disposed(self, build_component, timed_out):
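The pattern running through these hunks: a missing key is a normal outcome, while any other etcd failure is logged and reported as a soft scheduling failure. A standalone sketch with python-etcd (the endpoint is a placeholder):

import logging
import etcd

logger = logging.getLogger(__name__)
client = etcd.Client(host='127.0.0.1', port=4001)  # placeholder endpoint

def count_building_jobs(prefix='building/'):
  try:
    result = client.read(prefix, recursive=True)
    return sum(1 for child in result.children if not child.dir)
  except (KeyError, etcd.EtcdKeyError):
    return 0  # no jobs registered yet; not an error
  except etcd.EtcdException:
    logger.exception('Exception when reading job count from etcd')
    return None  # tell the caller etcd itself is unhealthy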

View file

@@ -143,12 +143,18 @@ class EC2Executor(BuilderExecutor):
       raise ExecutorException('EC2 started wrong number of instances!')

     launched = AsyncWrapper(reservation.instances[0])
-    yield From(launched.add_tags({
-      'Name': 'Quay Ephemeral Builder',
-      'Realm': realm,
-      'Token': token,
-      'BuildUUID': build_uuid,
-    }))
+
+    for i in range(0, 2):
+      try:
+        yield From(launched.add_tags({
+          'Name': 'Quay Ephemeral Builder',
+          'Realm': realm,
+          'Token': token,
+          'BuildUUID': build_uuid,
+        }))
+      except boto.exception.EC2ResponseError:
+        logger.exception('Failed to write EC2 tags (attempt #%s)', i)
+
     raise Return(launched.id)

   @coroutine
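Note that the loop above retries on failure but does not break after a success, so a successful first attempt is followed by a redundant second tag write. A variant of the same idea that stops on success (the helper name is ours, not the executor's):

import logging
import boto.exception

logger = logging.getLogger(__name__)

def tag_instance_with_retry(instance, tags, attempts=2):
  # EC2 tag writes can fail while the instance is still propagating;
  # retry briefly, but stop as soon as one write succeeds.
  for attempt in range(attempts):
    try:
      instance.add_tags(tags)
      return True
    except boto.exception.EC2ResponseError:
      logger.exception('Failed to write EC2 tags (attempt #%s)', attempt)
  return False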

View file

@@ -93,13 +93,8 @@ class BuilderServer(object):
     logger.debug('Starting server on port %s, with controller on port %s', websocket_port,
                  controller_port)

-    TASKS = [
-      Task(self._initialize(loop, host, websocket_port, controller_port, ssl)),
-      Task(self._queue_metrics_updater()),
-    ]
-
     try:
-      loop.run_until_complete(trollius.wait(TASKS))
+      loop.run_until_complete(self._initialize(loop, host, websocket_port, controller_port, ssl))
     except KeyboardInterrupt:
       pass
     finally:

@@ -155,6 +150,7 @@ class BuilderServer(object):
   @trollius.coroutine
   def _work_checker(self):
+    logger.debug('Initializing work checker')
     while self._current_status == BuildServerStatus.RUNNING:
       with database.CloseForLongOperation(app.config):
         yield From(trollius.sleep(WORK_CHECK_TIMEOUT))

@@ -175,23 +171,36 @@ class BuilderServer(object):
         continue

       logger.debug('Build job found. Checking for an avaliable worker.')
-      scheduled = yield From(self._lifecycle_manager.schedule(build_job))
+
+      try:
+        scheduled = yield From(self._lifecycle_manager.schedule(build_job))
+      except:
+        logger.exception('Exception when scheduling job')
+        self._current_status = BuildServerStatus.EXCEPTION
+        return
+
       if scheduled:
+        logger.debug('Marking build %s as scheduled', build_job.repo_build.uuid)
         status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid)
         status_handler.set_phase('build-scheduled')

         self._job_count = self._job_count + 1
-        logger.debug('Build job scheduled. Running: %s', self._job_count)
+        logger.debug('Build job %s scheduled. Running: %s', build_job.repo_build.uuid,
+                     self._job_count)
       else:
         logger.debug('All workers are busy. Requeuing.')
         self._queue.incomplete(job_item, restore_retry=True, retry_after=0)

   @trollius.coroutine
   def _queue_metrics_updater(self):
+    logger.debug('Initializing queue metrics updater')
     while self._current_status == BuildServerStatus.RUNNING:
-      yield From(trollius.sleep(30))
+      logger.debug('Writing metrics')
       self._queue.update_metrics()
+
+      logger.debug('Metrics going to sleep for 30 seconds')
+      yield From(trollius.sleep(30))

   @trollius.coroutine
   def _initialize(self, loop, host, websocket_port, controller_port, ssl=None):
     self._loop = loop

@@ -204,5 +213,8 @@ class BuilderServer(object):
     create_wsgi_server(self._controller_app, loop=loop, host=host, port=controller_port, ssl=ssl)
     yield From(loop.create_server(transport_factory, host, websocket_port, ssl=ssl))

+    # Initialize the metrics updater
+    trollius.async(self._queue_metrics_updater())
+
     # Initialize the work queue checker.
     yield From(self._work_checker())
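The metrics updater is now started from _initialize as a fire-and-forget coroutine instead of a top-level Task. A minimal sketch of that scheduling pattern on the same Python 2 trollius stack as the code above, with a stub in place of the real queue:

import trollius
from trollius import From

class StubQueue(object):
  def update_metrics(self):
    print('metrics tick')

@trollius.coroutine
def queue_metrics_updater(queue):
  while True:
    queue.update_metrics()
    yield From(trollius.sleep(30))

loop = trollius.get_event_loop()
trollius.async(queue_metrics_updater(StubQueue()))  # fire-and-forget, as in _initialize
loop.run_until_complete(trollius.sleep(1))  # the updater ticks alongside other work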

View file

@@ -3,3 +3,4 @@ workers = 4
 logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True
+timeout = 2000 # Because sync workers

View file

@@ -9,8 +9,7 @@ map $http_authorization $registry_bucket {
   default $http_authorization;
 }

-limit_req_zone $proxy_protocol_addr zone=webapp:10m rate=25r/s;
-limit_req_zone $proxy_protocol_addr zone=api:10m rate=1r/s;
-limit_req_zone $registry_bucket zone=repositories:10m rate=1r/s;
+limit_req_zone $proxy_protocol_addr zone=verbs:10m rate=2r/s;
+limit_req_zone $registry_bucket zone=repositories:10m rate=2r/s;

 limit_req_status 429;
 limit_req_log_level warn;

View file

@@ -18,8 +18,6 @@ proxy_set_header Transfer-Encoding $http_transfer_encoding;

 location / {
   proxy_pass http://web_app_server;
-
-  limit_req zone=webapp;
 }

 location /realtime {

@@ -28,6 +26,9 @@ location /realtime {
   proxy_request_buffering off;
 }

+# At the beginning and end of a push/pull, /v1/repositories is hit by the Docker
+# client. By rate-limiting just this endpoint, we can avoid accidentally
+# blocking pulls/pushes for images with many layers.
 location /v1/repositories/ {
   proxy_buffering off;

@@ -37,7 +38,7 @@ location /v1/repositories/ {
   proxy_read_timeout 2000;
   proxy_temp_path /var/log/nginx/proxy_temp 1 2;

-  limit_req zone=repositories;
+  limit_req zone=repositories burst=10;
 }

 location /v1/ {

@@ -59,7 +60,7 @@ location /c1/ {
   proxy_pass http://verbs_app_server;
   proxy_temp_path /var/log/nginx/proxy_temp 1 2;

-  limit_req zone=api;
+  limit_req zone=verbs burst=10;
 }

 location /static/ {

View file

@@ -527,7 +527,7 @@ class RepositoryBuild(BaseModel):
   trigger = ForeignKeyField(RepositoryBuildTrigger, null=True, index=True)
   pull_robot = QuayUserField(null=True, related_name='buildpullrobot')
   logs_archived = BooleanField(default=False)
-  queue_item = ForeignKeyField(QueueItem, null=True, index=True)
+  queue_id = CharField(null=True, index=True)

 class LogEntryKind(BaseModel):

View file

@ -0,0 +1,34 @@
"""Change build queue reference from foreign key to an id.
Revision ID: 707d5191eda
Revises: 4ef04c61fcf9
Create Date: 2015-02-23 12:36:33.814528
"""
# revision identifiers, used by Alembic.
revision = '707d5191eda'
down_revision = '4ef04c61fcf9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('repositorybuild', sa.Column('queue_id', sa.String(length=255), nullable=True))
op.create_index('repositorybuild_queue_id', 'repositorybuild', ['queue_id'], unique=False)
op.drop_constraint(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', type_='foreignkey')
op.drop_index('repositorybuild_queue_item_id', table_name='repositorybuild')
op.drop_column('repositorybuild', 'queue_item_id')
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('repositorybuild', sa.Column('queue_item_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
op.create_foreign_key(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', 'queueitem', ['queue_item_id'], ['id'])
op.create_index('repositorybuild_queue_item_id', 'repositorybuild', ['queue_item_id'], unique=False)
op.drop_index('repositorybuild_queue_id', table_name='repositorybuild')
op.drop_column('repositorybuild', 'queue_id')
### end Alembic commands ###

View file

@@ -541,7 +541,7 @@ def confirm_user_email(code):
   old_email = None
   new_email = code.new_email
-  if new_email:
+  if new_email and new_email != old_email:
     if find_user_by_email(new_email):
       raise DataModelException('E-mail address already used.')

@@ -903,6 +903,7 @@ def change_password(user, new_password):
     raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE)

   pw_hash = hash_password(new_password)
+  user.invalid_login_attempts = 0
   user.password_hash = pw_hash
   user.save()

@@ -1577,19 +1578,16 @@ def list_repository_tags(namespace_name, repository_name, include_hidden=False):

 def _garbage_collect_tags(namespace_name, repository_name):
-  to_delete = (RepositoryTag
-               .select(RepositoryTag.id)
-               .join(Repository)
-               .join(Namespace, on=(Repository.namespace_user == Namespace.id))
-               .where(Repository.name == repository_name, Namespace.username == namespace_name,
-                      ~(RepositoryTag.lifetime_end_ts >> None),
-                      (RepositoryTag.lifetime_end_ts + Namespace.removed_tag_expiration_s) <=
-                      int(time.time())))
+  # We do this without using a join to prevent holding read locks on the repository table
+  repo = _get_repository(namespace_name, repository_name)
+  now = int(time.time())

   (RepositoryTag
    .delete()
-   .where(RepositoryTag.id << to_delete)
+   .where(RepositoryTag.repository == repo,
+          ~(RepositoryTag.lifetime_end_ts >> None),
+          (RepositoryTag.lifetime_end_ts + repo.namespace_user.removed_tag_expiration_s) <= now)
    .execute())

 def garbage_collect_repository(namespace_name, repository_name):
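The collectibility rule encoded in that WHERE clause is easy to check in isolation: a tag is eligible once its timeline has ended and the namespace's grace window has passed. A worked example:

def is_collectible(lifetime_end_ts, removed_tag_expiration_s, now):
  # Alive tags (no end timestamp) are never collected; dead tags must also
  # wait out the namespace's removed_tag_expiration_s grace period.
  return (lifetime_end_ts is not None and
          lifetime_end_ts + removed_tag_expiration_s <= now)

assert is_collectible(lifetime_end_ts=1000, removed_tag_expiration_s=60, now=1060)
assert not is_collectible(lifetime_end_ts=1000, removed_tag_expiration_s=60, now=1059)
assert not is_collectible(lifetime_end_ts=None, removed_tag_expiration_s=60, now=10**9)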
@@ -1659,6 +1657,7 @@ def _garbage_collect_storage(storage_id_whitelist):
   logger.debug('Garbage collecting derived storage from candidates: %s', storage_id_whitelist)
   with config.app_config['DB_TRANSACTION_FACTORY'](db):
     # Find out which derived storages will be removed, and add them to the whitelist
+    # The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence
     orphaned_from_candidates = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
                                                            storage_id_whitelist,
                                                            (ImageStorage.id,)))

@@ -1698,22 +1697,32 @@ def _garbage_collect_storage(storage_id_whitelist):
     paths_to_remove = placements_query_to_paths_set(placements_to_remove.clone())

     # Remove the placements for orphaned storages
-    placements_subquery = list(placements_to_remove.clone().select(ImageStoragePlacement.id))
-    if len(placements_subquery) > 0:
-      (ImageStoragePlacement
-       .delete()
-       .where(ImageStoragePlacement.id << list(placements_subquery))
-       .execute())
+    placements_subquery = (placements_to_remove
+                           .clone()
+                           .select(ImageStoragePlacement.id)
+                           .alias('ps'))
+    inner = (ImageStoragePlacement
+             .select(placements_subquery.c.id)
+             .from_(placements_subquery))
+    placements_removed = (ImageStoragePlacement
+                          .delete()
+                          .where(ImageStoragePlacement.id << inner)
+                          .execute())
+    logger.debug('Removed %s image storage placements', placements_removed)

-    # Remove the all orphaned storages
-    orphaned_storages = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
-                                                    storage_id_whitelist,
-                                                    (ImageStorage.id,)))
-    if len(orphaned_storages) > 0:
-      (ImageStorage
-       .delete()
-       .where(ImageStorage.id << orphaned_storages)
-       .execute())
+    # Remove all orphaned storages
+    # The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence
+    orphaned_storages = orphaned_storage_query(ImageStorage.select(ImageStorage.id),
+                                               storage_id_whitelist,
+                                               (ImageStorage.id,)).alias('osq')
+    orphaned_storage_inner = (ImageStorage
+                              .select(orphaned_storages.c.id)
+                              .from_(orphaned_storages))
+    storages_removed = (ImageStorage
+                        .delete()
+                        .where(ImageStorage.id << orphaned_storage_inner)
+                        .execute())
+    logger.debug('Removed %s image storage records', storages_removed)

     # We are going to make the conscious decision to not delete image storage blobs inside
     # transactions.

@@ -1778,11 +1787,15 @@ def create_or_update_tag(namespace_name, repository_name, tag_name,

   now_ts = int(time.time())

+  created = RepositoryTag.create(repository=repo, image=image, name=tag_name,
+                                 lifetime_start_ts=now_ts)
+
   try:
     # When we move a tag, we really end the timeline of the old one and create a new one
     query = _tag_alive(RepositoryTag
                        .select()
-                       .where(RepositoryTag.repository == repo, RepositoryTag.name == tag_name))
+                       .where(RepositoryTag.repository == repo, RepositoryTag.name == tag_name,
+                              RepositoryTag.id != created.id))
     tag = query.get()
     tag.lifetime_end_ts = now_ts
     tag.save()

@@ -1790,8 +1803,7 @@ def create_or_update_tag(namespace_name, repository_name, tag_name,
     # No tag that needs to be ended
     pass

-  return RepositoryTag.create(repository=repo, image=image, name=tag_name,
-                              lifetime_start_ts=now_ts)
+  return created

 def delete_tag(namespace_name, repository_name, tag_name):

@@ -2494,7 +2506,7 @@ def confirm_team_invite(code, user):
   found.delete_instance()
   return (team, inviter)

-def cancel_repository_build(build):
+def cancel_repository_build(build, work_queue):
   with config.app_config['DB_TRANSACTION_FACTORY'](db):
     # Reload the build for update.
     try:

@@ -2502,22 +2514,14 @@ def cancel_repository_build(build):
     except RepositoryBuild.DoesNotExist:
       return False

-    if build.phase != BUILD_PHASE.WAITING or not build.queue_item:
+    if build.phase != BUILD_PHASE.WAITING or not build.queue_id:
       return False

-    # Load the build queue item for update.
-    try:
-      queue_item = db_for_update(QueueItem.select()
-                                 .where(QueueItem.id == build.queue_item.id)).get()
-    except QueueItem.DoesNotExist:
-      return False
-
-    # Check the queue item.
-    if not queue_item.available or queue_item.retries_remaining == 0:
-      return False
-
-    # Delete the queue item and build.
-    queue_item.delete_instance(recursive=True)
+    # Try to cancel the queue item.
+    if not work_queue.cancel(build.queue_id):
+      return False
+
+    # Delete the build row.
     build.delete_instance()
   return True

View file

@@ -82,10 +82,19 @@ class WorkQueue(object):
       self._reporter(self._currently_processing, running_count,
                      running_count + available_not_running_count)

+  def has_retries_remaining(self, item_id):
+    """ Returns whether the queue item with the given id has any retries remaining. If the
+        queue item does not exist, returns False. """
+    with self._transaction_factory(db):
+      try:
+        return QueueItem.get(id=item_id).retries_remaining > 0
+      except QueueItem.DoesNotExist:
+        return False
+
   def put(self, canonical_name_list, message, available_after=0, retries_remaining=5):
     """
     Put an item, if it shouldn't be processed for some number of seconds,
-    specify that amount as available_after.
+    specify that amount as available_after. Returns the ID of the queue item added.
     """

     params = {

@@ -98,7 +107,7 @@ class WorkQueue(object):
       params['available_after'] = available_date

     with self._transaction_factory(db):
-      return QueueItem.create(**params)
+      return str(QueueItem.create(**params).id)

   def get(self, processing_time=300):
     """

@@ -141,10 +150,32 @@ class WorkQueue(object):
       # Return a view of the queue item rather than an active db object
       return item

+  def cancel(self, item_id):
+    """ Attempts to cancel the queue item with the given ID from the queue. Returns true on success
+        and false if the queue item could not be canceled. A queue item can only be canceled if
+        it is available and has retries remaining.
+    """
+    with self._transaction_factory(db):
+      # Load the build queue item for update.
+      try:
+        queue_item = db_for_update(QueueItem.select()
+                                   .where(QueueItem.id == item_id)).get()
+      except QueueItem.DoesNotExist:
+        return False
+
+      # Check the queue item.
+      if not queue_item.available or queue_item.retries_remaining == 0:
+        return False
+
+      # Delete the queue item.
+      queue_item.delete_instance(recursive=True)
+      return True
+
   def complete(self, completed_item):
     with self._transaction_factory(db):
       completed_item_obj = self._item_by_id_for_update(completed_item.id)
-      completed_item_obj.delete_instance()
+      completed_item_obj.delete_instance(recursive=True)
       self._currently_processing = False

   def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
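Across these hunks the queue reference becomes an opaque string id: put() returns it, the build row stores it in a CharField, and cancel()/has_retries_remaining() look it up later. A rough sketch of the calling convention, assuming it runs inside the Quay codebase (json_data and build_request come from the surrounding build code):

from app import dockerfile_build_queue

# put() now returns the new item's id as a string...
queue_id = dockerfile_build_queue.put(['devtable', 'simple'], json_data, retries_remaining=3)

# ...which the build row stores directly instead of a foreign key.
build_request.queue_id = queue_id
build_request.save()

# Cancellation later succeeds only while the item is still available with retries left.
if dockerfile_build_queue.cancel(build_request.queue_id):
  build_request.delete_instance()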

View file

@@ -24,7 +24,7 @@ class LDAPConnection(object):
     self._conn = None

   def __enter__(self):
-    self._conn = ldap.initialize(self._ldap_uri)
+    self._conn = ldap.initialize(self._ldap_uri, trace_level=1)
     self._conn.simple_bind_s(self._user_dn, self._user_pw)
     return self._conn

View file

@@ -5,7 +5,7 @@ import datetime

 from flask import request, redirect

-from app import app, userfiles as user_files, build_logs, log_archive
+from app import app, userfiles as user_files, build_logs, log_archive, dockerfile_build_queue
 from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
                            require_repo_read, require_repo_write, validate_json_request,
                            ApiResource, internal_only, format_date, api, Unauthorized, NotFound,

@@ -79,7 +79,8 @@ def build_status_view(build_obj, can_write=False):
   # If the phase is internal error, return 'error' instead of the number if retries
   # on the queue item is 0.
   if phase == database.BUILD_PHASE.INTERNAL_ERROR:
-    if build_obj.queue_item is None or build_obj.queue_item.retries_remaining == 0:
+    retry = build_obj.queue_id and dockerfile_build_queue.has_retries_remaining(build_obj.queue_id)
+    if not retry:
       phase = database.BUILD_PHASE.ERROR

   logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config)

@@ -226,7 +227,7 @@ class RepositoryBuildResource(RepositoryParamResource):
     if build.repository.name != repository or build.repository.namespace_user.username != namespace:
       raise NotFound()

-    if model.cancel_repository_build(build):
+    if model.cancel_repository_build(build, dockerfile_build_queue):
       return 'Okay', 201
     else:
       raise InvalidRequest('Build is currently running or has finished')

View file

@@ -190,7 +190,7 @@ class SuperUserList(ApiResource):

     # If mailing is turned on, send the user a verification email.
     if features.MAILING:
-      confirmation = model.create_confirm_email_code(user, new_email=user.email)
+      confirmation = model.create_confirm_email_code(user)
       send_confirmation_email(user.username, user.email, confirmation.code)

     return {

View file

@@ -237,11 +237,11 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
     'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
   })

-  queue_item = dockerfile_build_queue.put([repository.namespace_user.username, repository.name],
-                                          json_data,
-                                          retries_remaining=3)
+  queue_id = dockerfile_build_queue.put([repository.namespace_user.username, repository.name],
+                                        json_data,
+                                        retries_remaining=3)

-  build_request.queue_item = queue_item
+  build_request.queue_id = queue_id
   build_request.save()

   # Add the build to the repo's log.

View file

@@ -60,7 +60,8 @@ def generate_headers(scope=GrantType.READ_REPOSITORY):
     if permission.can():
       # Generate a signed grant which expires here
-      signature = generate_signed_token(grants)
+      user_context = get_authenticated_user() and get_authenticated_user().username
+      signature = generate_signed_token(grants, user_context)
       response.headers['WWW-Authenticate'] = signature
       response.headers['X-Docker-Token'] = signature
     else:

@@ -74,9 +75,6 @@ def generate_headers(scope=GrantType.READ_REPOSITORY):
 @index.route('/users', methods=['POST'])
 @index.route('/users/', methods=['POST'])
 def create_user():
-  if not features.USER_CREATION:
-    abort(400, 'User creation is disabled. Please speak to your administrator.')
-
   user_data = request.get_json()
   if not user_data or not 'username' in user_data:
     abort(400, 'Missing username')

@@ -125,6 +123,9 @@ def create_user():
       abort(400, 'Invalid password.', issue='login-failure')

+  elif not features.USER_CREATION:
+    abort(400, 'User creation is disabled. Please speak to your administrator.')
+
   else:
     # New user case
     logger.debug('Creating user')

@@ -293,17 +294,8 @@ def get_repository_images(namespace, repository):
   if not repo:
     abort(404, message='Unknown repository', issue='unknown-repo')

-  all_images = []
-  logger.debug('Retrieving repository images')
-  for image in model.get_repository_images(namespace, repository):
-    new_image_view = {
-      'id': image.docker_image_id,
-      'checksum': image.storage.checksum,
-    }
-    all_images.append(new_image_view)
-
   logger.debug('Building repository image response')
-  resp = make_response(json.dumps(all_images), 200)
+  resp = make_response(json.dumps([]), 200)
   resp.mimetype = 'application/json'

   track_and_log('pull_repo', repo)

View file

@@ -92,7 +92,12 @@ class BuildQueueEvent(NotificationEvent):
       'build_id': build_uuid,
       'build_name': 'some-fake-build',
       'docker_tags': ['latest', 'foo', 'bar'],
-      'trigger_kind': 'GitHub'
+      'trigger_kind': 'GitHub',
+      'trigger_metadata': {
+        "default_branch": "master",
+        "ref": "refs/heads/somebranch",
+        "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d"
+      }
     }, subpage='/build?current=%s' % build_uuid)

   def get_summary(self, event_data, notification_data):

@@ -114,7 +119,12 @@ class BuildStartEvent(NotificationEvent):
       'build_id': build_uuid,
       'build_name': 'some-fake-build',
       'docker_tags': ['latest', 'foo', 'bar'],
-      'trigger_kind': 'GitHub'
+      'trigger_kind': 'GitHub',
+      'trigger_metadata': {
+        "default_branch": "master",
+        "ref": "refs/heads/somebranch",
+        "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d"
+      }
     }, subpage='/build?current=%s' % build_uuid)

   def get_summary(self, event_data, notification_data):

@@ -136,7 +146,13 @@ class BuildSuccessEvent(NotificationEvent):
       'build_id': build_uuid,
       'build_name': 'some-fake-build',
       'docker_tags': ['latest', 'foo', 'bar'],
-      'trigger_kind': 'GitHub'
+      'trigger_kind': 'GitHub',
+      'trigger_metadata': {
+        "default_branch": "master",
+        "ref": "refs/heads/somebranch",
+        "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d"
+      },
+      'image_id': '1245657346'
     }, subpage='/build?current=%s' % build_uuid)

   def get_summary(self, event_data, notification_data):

@@ -159,7 +175,12 @@ class BuildFailureEvent(NotificationEvent):
       'build_name': 'some-fake-build',
       'docker_tags': ['latest', 'foo', 'bar'],
       'trigger_kind': 'GitHub',
-      'error_message': 'This is a fake error message'
+      'error_message': 'This is a fake error message',
+      'trigger_metadata': {
+        "default_branch": "master",
+        "ref": "refs/heads/somebranch",
+        "commit_sha": "42d4a62c53350993ea41069e9f2cfdefb0df097d"
+      }
     }, subpage='/build?current=%s' % build_uuid)

   def get_summary(self, event_data, notification_data):

View file

@@ -9,7 +9,7 @@ from time import time

 from app import storage as store, image_diff_queue, app
 from auth.auth import process_auth, extract_namespace_repo_from_session
-from auth.auth_context import get_authenticated_user
+from auth.auth_context import get_authenticated_user, get_grant_user_context
 from util import checksums, changes
 from util.http import abort, exact_abort
 from auth.permissions import (ReadRepositoryPermission,

@@ -463,8 +463,9 @@ def put_image_json(namespace, repository, image_id):
   repo_image = model.get_repo_image_extended(namespace, repository, image_id)
   if not repo_image:
-    logger.debug('Image not found, creating image')
-    username = get_authenticated_user() and get_authenticated_user().username
+    username = (get_authenticated_user() and get_authenticated_user().username or
+                get_grant_user_context())
+    logger.debug('Image not found, creating image with initiating user context: %s', username)
     repo_image = model.find_create_or_link_image(image_id, repo, username, {},
                                                  store.preferred_locations[0])
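The username fallback above is a small pure function at heart; a sketch that makes the precedence explicit (an authenticated user wins, otherwise the username carried by the signed grant):

def resolve_initiating_username(authenticated_user, grant_user_context):
  # Prefer a fully authenticated user; fall back to the grant's user context
  # (e.g. a build worker pushing on a user's behalf with a signed grant).
  return (authenticated_user and authenticated_user.username) or grant_user_context

class FakeUser(object):
  username = 'devtable'

assert resolve_initiating_username(FakeUser(), None) == 'devtable'
assert resolve_initiating_username(None, 'devtable+builder') == 'devtable+builder'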

View file

@@ -18,7 +18,6 @@ from formats.squashed import SquashedDockerImage
 from formats.aci import ACIImage

-# pylint: disable=invalid-name
 verbs = Blueprint('verbs', __name__)
 logger = logging.getLogger(__name__)

@@ -100,11 +99,9 @@ def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_location
     done_uploading.save()

-# pylint: disable=too-many-locals
 def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
   permission = ReadRepositoryPermission(namespace, repository)

-  # pylint: disable=no-member
   if not permission.can() and not model.repository_is_public(namespace, repository):
     abort(403)

@@ -134,7 +131,6 @@ def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
   return (repo_image, tag_image, image_json)

-# pylint: disable=too-many-locals
 def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
   # Verify that the image exists and that we have access to it.
   store = Storage(app)

@@ -159,7 +155,6 @@ def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
   return make_response(signature_entry.signature)

-# pylint: disable=too-many-locals
 def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
   # Verify that the image exists and that we have access to it.
   store = Storage(app)

@@ -263,7 +258,6 @@ def os_arch_checker(os, arch):
 @verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
 @process_auth
-# pylint: disable=unused-argument
 def get_aci_signature(server, namespace, repository, tag, os, arch):
   return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
                               os=os, arch=arch)

@@ -271,7 +265,6 @@ def get_aci_signature(server, namespace, repository, tag, os, arch):
 @verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
 @process_auth
-# pylint: disable=unused-argument
 def get_aci_image(server, namespace, repository, tag, os, arch):
   return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
                     sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)

View file

@@ -5,13 +5,10 @@ from formats.tarimageformatter import TarImageFormatter
 import json
 import re

-# pylint: disable=bad-continuation
 class ACIImage(TarImageFormatter):
   """ Image formatter which produces an ACI-compatible TAR.
   """

-  # pylint: disable=too-many-arguments
   def stream_generator(self, namespace, repository, tag, synthetic_image_id,
                        layer_json, get_image_iterator, get_layer_iterator):
     # ACI Format (.tar):

@@ -181,7 +178,7 @@ class ACIImage(TarImageFormatter):
       "eventHandlers": [],
       "workingDirectory": config.get('WorkingDir', '') or '/',
       "environment": [{"name": key, "value": value}
-                      for (key, value) in [e.split('=') for e in config.get('Env')]],
+                      for (key, value) in [e.split('=') for e in config.get('Env', []) or []]],
       "isolators": ACIImage._build_isolators(config),
       "mountPoints": ACIImage._build_volumes(config),
       "ports": ACIImage._build_ports(config),

View file

@@ -19,7 +19,6 @@ class SquashedDockerImage(TarImageFormatter):
       command.
   """

-  # pylint: disable=too-many-arguments,too-many-locals
   def stream_generator(self, namespace, repository, tag, synthetic_image_id,
                        layer_json, get_image_iterator, get_layer_iterator):
     # Docker import V1 Format (.tar):

View file

@@ -31,7 +31,7 @@ module.exports = function(grunt) {
     },

     cssmin: {
-      '../static/dist/<%= pkg.name %>.css': ['../static/lib/**/*.css', '../static/css/*.css']
+      '../static/dist/<%= pkg.name %>.css': ['../static/lib/**/*.css', '../static/css/**/*.css']
     },

     uglify: {
View file

@@ -99,8 +99,12 @@ def __create_subtree(repo, structure, creator_username, parent):
     last_node_tags = [last_node_tags]

   for tag_name in last_node_tags:
-    model.create_or_update_tag(repo.namespace_user.username, repo.name, tag_name,
-                               new_image.docker_image_id)
+    tag = model.create_or_update_tag(repo.namespace_user.username, repo.name, tag_name,
+                                     new_image.docker_image_id)
+
+    if tag_name[0] == '#':
+      tag.lifetime_end_ts = int(time.time()) - 1
+      tag.save()

   for subtree in subtrees:
     __create_subtree(repo, subtree, creator_username, new_image)

@@ -360,6 +364,9 @@ def populate_database():
   __generate_repository(new_user_1, 'simple', 'Simple repository.', False,
                         [], (4, [], ['latest', 'prod']))

+  __generate_repository(new_user_1, 'history', 'Historical repository.', False,
+                        [], (4, [(2, [], 'latest'), (3, [], '#latest')], None))
+
   __generate_repository(new_user_1, 'complex',
                         'Complex repository with many branches and tags.',
                         False, [(new_user_2, 'read'), (dtrobot[0], 'read')],

View file

@@ -41,10 +41,10 @@ git+https://github.com/DevTable/anunidecode.git
 git+https://github.com/DevTable/avatar-generator.git
 git+https://github.com/DevTable/pygithub.git
 git+https://github.com/DevTable/container-cloud-config.git
-git+https://github.com/jplana/python-etcd.git
+git+https://github.com/DevTable/python-etcd.git
 gipc
 pyOpenSSL
 pygpgme
 cachetools
 mock
 psutil

View file

@@ -66,5 +66,5 @@ git+https://github.com/DevTable/anunidecode.git
 git+https://github.com/DevTable/avatar-generator.git
 git+https://github.com/DevTable/pygithub.git
 git+https://github.com/DevTable/container-cloud-config.git
+git+https://github.com/DevTable/python-etcd.git
 git+https://github.com/NateFerrero/oauth2lib.git
-git+https://github.com/jplana/python-etcd.git

View file

@ -0,0 +1,29 @@
.quay-service-status-indicator {
display: inline-block;
border-radius: 50%;
width: 12px;
height: 12px;
margin-right: 6px;
background: #eee;
vertical-align: middle
}
.quay-service-status-description {
vertical-align: middle;
}
.quay-service-status-indicator.none {
background: #2fcc66;
}
.quay-service-status-indicator.minor {
background: #f1c40f;
}
.quay-service-status-indicator.major {
background: #e67e22;
}
.quay-service-status-indicator.critical {
background: #e74c3c;
}

View file

@@ -23,6 +23,47 @@
   }
 }

+.announcement a {
+  color: lightblue;
+}
+
+.announcement {
+  position: absolute;
+  z-index: 9;
+  top: 0px;
+  left: 0px;
+  right: 0px;
+  display: block;
+  background: rgba(8, 61, 95, 0.6);
+  min-height: 45px;
+  text-align: center;
+  font-size: 14px;
+  line-height: 45px;
+  color: white;
+}
+
+.announcement.inline {
+  position: relative;
+}
+
+.announcement .spacer {
+  display: inline-block;
+  width: 45px;
+}
+
+.announcement img {
+  height: 45px;
+  padding-top: 6px;
+  padding-bottom: 6px;
+}
+
+.announcement .plus {
+  display: inline-block;
+  margin-left: 10px;
+  margin-right: 10px;
+}
+
 .scrollable-menu {
   max-height: 400px;
   overflow: auto;

@@ -496,6 +537,11 @@ i.toggle-icon:hover {
   width: 100%;
 }

+.docker-auth-dialog .download-cfg.not-supported {
+  font-size: 14px;
+  color: #ccc;
+}
+
 .docker-auth-dialog .download-cfg {
   float: left;
   padding-top: 6px;

@@ -1511,38 +1557,6 @@ i.toggle-icon:hover {
   margin-top: 20px;
 }

-.landing .announcement {
-  position: absolute;
-  z-index: 9;
-  top: 0px;
-  left: 0px;
-  right: 0px;
-  display: block;
-  background: rgba(8, 61, 95, 0.6);
-  min-height: 45px;
-  text-align: center;
-  font-size: 14px;
-  line-height: 45px;
-}
-
-.landing .announcement .spacer {
-  display: inline-block;
-  width: 45px;
-}
-
-.landing .announcement img {
-  height: 45px;
-  padding-top: 6px;
-  padding-bottom: 6px;
-}
-
-.landing .announcement .plus {
-  display: inline-block;
-  margin-left: 10px;
-  margin-right: 10px;
-}
-
 .landing {
   color: white;

View file

@@ -240,7 +240,7 @@
 <tr>
   <td>Mail Sender:</td>
   <td>
-    <input class="form-control" type="email" ng-model="config.DEFAULT_MAIL_SENDER"
+    <input class="form-control" type="email" ng-model="config.MAIL_DEFAULT_SENDER"
            placeholder="E-mail address"></span>
     <div class="help-text">
       E-mail address from which all e-mails are sent. If not specified,

View file

@@ -81,9 +81,19 @@
 </span>
 <input type="url" class="form-control" ng-model="currentConfig[field.name]" ng-switch-when="url" required>
 <input type="text" class="form-control" ng-model="currentConfig[field.name]" ng-switch-when="string" required>
-<input type="text" class="form-control" ng-model="currentConfig[field.name]" ng-switch-when="regex" required
-       ng-pattern="getPattern(field)"
-       placeholder="{{ field.placeholder }}">
+<div ng-switch-when="regex">
+  <input type="text" class="form-control" ng-model="currentConfig[field.name]"
+         ng-pattern="getPattern(field)"
+         placeholder="{{ field.placeholder }}"
+         ng-name="field.name"
+         id="{{ field.name }}"
+         required>
+  <div class="alert alert-warning" style="margin-top: 10px; margin-bottom: 10px"
+       ng-if="field.regex_fail_message && hasRegexMismatch(createForm.$error, field.name)">
+    <span ng-bind-html="field.regex_fail_message"></span>
+  </div>
+</div>
 <div class="entity-search" namespace="repository.namespace"
      placeholder="''"
      current-entity="currentConfig[field.name]"

@@ -91,7 +101,8 @@
      allowed-entities="['user', 'team', 'org']"
      ng-switch-when="entity"></div>

-<div ng-if="getHelpUrl(field, currentConfig)" style="margin-top: 10px">
+<div ng-if="getHelpUrl(field, currentConfig)"
+     style="margin-top: 10px; margin-bottom: 10px">
   See: <a href="{{ getHelpUrl(field, currentConfig) }}" target="_blank">{{ getHelpUrl(field, currentConfig) }}</a>
 </div>
 </div>

View file

@@ -32,6 +32,9 @@
   <i class="fa fa-download"></i>
   <a href="javascript:void(0)" ng-click="downloadCfg(shownRobot)">Download .dockercfg file</a>
 </span>
+<span class="download-cfg not-supported" ng-show="!isDownloadSupported()">
+  .dockercfg download not supported in this browser
+</span>
 <div class="clipboard-copied-message" style="display: none">
   Copied
 </div>

View file

@ -0,0 +1,7 @@
<div class="announcement inline" ng-show="indicator != 'none' && indicator != 'loading'">
<div ng-repeat="incident in incidents">
<span class="quay-service-status-indicator" ng-class="indicator"
ng-if="indicator != 'loading'"></span>
<a ng-href="{{ incident.shortlink }}" class="quay-service-status-description">{{ incident.name }}</a>
</div>
</div>

View file

@ -0,0 +1,6 @@
<span class="quay-service-status-element">
<span class="quay-service-status-indicator" ng-class="indicator"
ng-if="indicator != 'loading'"></span>
<span class="cor-loader-inline" ng-if="indicator == 'loading'"></span>
<a href="http://status.quay.io" class="quay-service-status-description">{{ description }}</a>
</span>

View file

@@ -0,0 +1,11 @@
+/**
+ * Adds an ng-name attribute which sets the name of a form field. Using the normal name field
+ * in Angular 1.3 works, but we're still on 1.2.
+ */
+angular.module('quay').directive('ngName', function () {
+  return function (scope, element, attr) {
+    scope.$watch(attr.ngName, function (name) {
+      element.attr('name', name);
+    });
+  };
+});
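
A quick illustration of what the directive buys us: as the comment notes, an interpolated name attribute on a form control does not work on Angular 1.2, so ngName watches the expression and writes the result onto the element. A minimal sketch, assuming the quay module is loaded on the page (the injector bootstrap below is illustrative, not part of this change):

// Sketch: compile an input whose name comes from a scope expression. The
// ngName watcher copies the evaluated value onto the element's name
// attribute once the first digest runs.
var injector = angular.injector(['ng', 'quay']);
injector.invoke(function($compile, $rootScope) {
  var scope = $rootScope.$new();
  scope.field = { name: 'room_id' };

  var input = $compile('<input type="text" ng-model="value" ng-name="field.name">')(scope);
  scope.$digest();

  console.log(input.attr('name'));  // "room_id"
});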

View file

@@ -38,6 +38,23 @@ angular.module('quay').directive('createExternalNotificationDialog', function ()
       $scope.unauthorizedEmail = false;
     };

+    $scope.hasRegexMismatch = function(err, fieldName) {
+      if (!err.pattern) {
+        return;
+      }
+
+      for (var i = 0; i < err.pattern.length; ++i) {
+        var current = err.pattern[i];
+        var value = current.$viewValue;
+        var elem = $element.find('#' + fieldName);
+        if (value == elem[0].value) {
+          return true;
+        }
+      }
+
+      return false;
+    };
+
     $scope.createNotification = function() {
       if (!$scope.currentConfig.email) {
         $scope.performCreateNotification();
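
Angular 1.2's createForm.$error.pattern is a list of the controls currently failing ng-pattern, and since dynamic control names are unreliable on 1.2 (hence the ngName directive), the dialog matches the failing control to a field by comparing view values. A standalone sketch of that matching logic, with a hand-built error object standing in for createForm.$error and the DOM lookup by id replaced by passing the field's current value directly:

// Reduced sketch of the matching logic above.
function hasRegexMismatch(err, fieldValue) {
  if (!err.pattern) {
    return false;
  }
  // err.pattern holds every control currently failing its ng-pattern.
  for (var i = 0; i < err.pattern.length; ++i) {
    if (err.pattern[i].$viewValue == fieldValue) {
      return true;
    }
  }
  return false;
}

var fakeError = { pattern: [{ $viewValue: 'Dev Room' }] };
console.log(hasRegexMismatch(fakeError, 'Dev Room')); // true: this field is the one failing
console.log(hasRegexMismatch(fakeError, '1234'));     // false: the mismatch belongs to another field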

View file

@@ -0,0 +1,22 @@
+/**
+ * An element which displays the current status of the service as an announcement bar.
+ */
+angular.module('quay').directive('quayServiceStatusBar', function () {
+  var directiveDefinitionObject = {
+    priority: 0,
+    templateUrl: '/static/directives/quay-service-status-bar.html',
+    replace: false,
+    transclude: false,
+    restrict: 'C',
+    scope: {},
+    controller: function($scope, $element, StatusService) {
+      $scope.indicator = 'loading';
+
+      StatusService.getStatus(function(data) {
+        $scope.indicator = data['status']['indicator'];
+        $scope.incidents = data['incidents'];
+      });
+    }
+  };
+  return directiveDefinitionObject;
+});

View file

@@ -0,0 +1,24 @@
+/**
+ * An element which displays the current status of the service.
+ */
+angular.module('quay').directive('quayServiceStatus', function () {
+  var directiveDefinitionObject = {
+    priority: 0,
+    templateUrl: '/static/directives/quay-service-status.html',
+    replace: false,
+    transclude: false,
+    restrict: 'C',
+    scope: {},
+    controller: function($scope, $element, StatusService) {
+      $scope.indicator = 'loading';
+      $scope.description = '';
+
+      StatusService.getStatus(function(data) {
+        $scope.indicator = data['status']['indicator'];
+        $scope.incidents = data['incidents'];
+        $scope.description = data['status']['description'];
+      });
+    }
+  };
+  return directiveDefinitionObject;
+});
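
Both directives read the same statuspage summary; only the bar also renders incidents. A sketch of the minimal payload shape the controllers expect, with hypothetical values (the field names come from the code above):

// Hypothetical summary payload, trimmed to the fields the controllers read.
var exampleSummary = {
  'status': {
    'indicator': 'minor',           // also used as the ng-class for the status dot
    'description': 'Partial System Outage'
  },
  'incidents': [
    { 'name': 'Registry slowness', 'shortlink': 'http://stspg.io/abc123' }
  ]
};

console.log(exampleSummary['status']['indicator']);   // "minor"
console.log(exampleSummary['status']['description']); // "Partial System Outage"
console.log(exampleSummary['incidents'][0]['name']);  // "Registry slowness"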

View file

@@ -523,7 +523,7 @@ ImageHistoryTree.prototype.pruneUnreferenced_ = function(node) {
   }

   if (!node.tags) {
-    return true;
+    return node.children.length == 0;
   }

   return (node.children.length == 0 && node.tags.length == 0);
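
The one-line change tightens the pruning rule: a node without a tags member was previously dropped outright, which could discard an untagged intermediate image whose surviving children are still referenced. A reduced sketch of the predicate on hand-built nodes (the standalone function name is hypothetical; in the real tree the check runs after the children have themselves been pruned):

function shouldPrune(node) {
  if (!node.tags) {
    return node.children.length == 0;
  }
  return (node.children.length == 0 && node.tags.length == 0);
}

console.log(shouldPrune({ children: [] }));                   // true: untagged leaf
console.log(shouldPrune({ children: [{}] }));                 // false: untagged, but still has children
console.log(shouldPrune({ children: [], tags: ['latest'] })); // false: tagged leaf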

View file

@@ -101,8 +101,11 @@ function(Config, Features) {
         'fields': [
           {
             'name': 'room_id',
-            'type': 'string',
-            'title': 'Room ID #'
+            'type': 'regex',
+            'title': 'Room ID #',
+            'regex': '^[0-9]+$',
+            'help_url': 'https://hipchat.com/admin/rooms',
+            'regex_fail_message': 'We require the HipChat room <b>number</b>, not name.'
           },
           {
             'name': 'notification_token',
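
Switching room_id from a plain string to the new regex field type lets the dialog reject HipChat room names client-side, with regex_fail_message explaining why. The pattern itself only admits a numeric room ID:

// The pattern from the field definition above.
var roomIdPattern = new RegExp('^[0-9]+$');

console.log(roomIdPattern.test('1234'));     // true: numeric room ID
console.log(roomIdPattern.test('Dev Room')); // false: surfaces regex_fail_message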

View file

@@ -120,7 +120,7 @@ function(KeyService, UserService, CookieService, ApiService, Features, Config) {
   };

   planService.getPlans = function(callback, opt_includePersonal) {
-    planService.verifyLoaded(function() {
+    planService.verifyLoaded(function(plans) {
       var filtered = [];
       for (var i = 0; i < plans.length; ++i) {
         var plan = plans[i];
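
The fix is the callback signature: the loop body references plans, but the anonymous function handed to verifyLoaded previously declared no parameter, so plans could only resolve against an outer binding. A reduced sketch under the assumption that verifyLoaded passes the loaded plan list to its callback; the stub and plan data below are hypothetical:

// Hypothetical stand-in for verifyLoaded: it hands the loaded plans to its callback.
function verifyLoaded(callback) {
  var loadedPlans = [{ title: 'Personal', price: 0 }, { title: 'Team', price: 25 }];
  callback(loadedPlans);
}

// With the parameter declared, the loop iterates the list it was given.
verifyLoaded(function(plans) {
  var filtered = [];
  for (var i = 0; i < plans.length; ++i) {
    filtered.push(plans[i].title);
  }
  console.log(filtered); // ["Personal", "Team"]
});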

View file

@@ -0,0 +1,40 @@
+/**
+ * Helper service for retrieving the statuspage status of the quay service.
+ */
+angular.module('quay').factory('StatusService', ['Features', function(Features) {
+  if (!Features.BILLING) {
+    return;
+  }
+
+  var STATUSPAGE_PAGE_ID = '8szqd6w4s277';
+  var STATUSPAGE_SRC = 'https://statuspage-production.s3.amazonaws.com/se-v2.js';
+
+  var statusPageHandler = null;
+  var statusPageData = null;
+  var callbacks = [];
+
+  var handleGotData = function(data) {
+    if (!data) { return; }
+    statusPageData = data;
+    for (var i = 0; i < callbacks.length; ++i) {
+      callbacks[i](data);
+    }
+    callbacks = [];
+  };
+
+  $.getScript(STATUSPAGE_SRC, function(){
+    statusPageHandler = new StatusPage.page({ page: STATUSPAGE_PAGE_ID });
+    statusPageHandler.summary({
+      success : handleGotData
+    });
+  });
+
+  var statusService = {};
+
+  statusService.getStatus = function(callback) {
+    callbacks.push(callback);
+    handleGotData(statusPageData);
+  };
+
+  return statusService;
+}]);
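
getStatus queues the callback and immediately re-drives handleGotData with whatever summary is already cached, so a consumer receives data whether it asks before or after the statuspage script finishes loading. A hypothetical consumer (the controller name is illustrative); note the factory returns undefined when BILLING is off, which is why the consumers in the templates sit behind quay-require="['BILLING']":

// Both callbacks fire exactly once: immediately if the summary is cached,
// or when the statuspage script's success handler delivers it.
angular.module('quay').controller('StatusDemoCtrl', ['$scope', 'StatusService',
  function($scope, StatusService) {
    StatusService.getStatus(function(data) {
      $scope.indicator = data['status']['indicator'];
    });
    StatusService.getStatus(function(data) {
      $scope.incidents = data['incidents'];
    });
  }]);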

View file

@@ -1,15 +1,4 @@
 <div class="jumbotron landing">
-  <div class="announcement">
-    <span class="hidden-xs-inline">
-      <img src="/static/img/white_horizontal.png" style="height: 40px">
-      <span class="plus">+</span>
-      <img src="/static/img/coreos-wordmark-horiz-white.svg">
-    </span>
-    <span class="spacer"></span>
-    Quay.io is now part of CoreOS! <a href="https://coreos.com/blog/CoreOS-enterprise-docker-registry/" target="_blank">Read the blog post.</a>
-  </div>
-
   <div class="landing-background" ng-class="user.anonymous ? 'landing': 'signedin'"></div>
   <div class="landing-filter" ng-class="user.anonymous ? 'landing': 'signedin'"></div>
   <div class="landing-content">

View file

@@ -96,6 +96,7 @@ mixpanel.init("{{ mixpanel_key }}", { track_pageview : false, debug: {{ is_debug
  <body ng-class="pageClass + ' ' + (user.anonymous ? 'anon' : 'signedin')" class="co-img-bg-network">
    <div id="co-l-footer-wrapper">
      <nav class="navbar navbar-default header-bar co-m-navbar co-fx-box-shadow" role="navigation"></nav>
+     <div class="quay-service-status-bar" quay-require="['BILLING']"></div>
      <div id="padding-container">
        <div id="co-l-view-container">
          <div ng-class="newLayout ? '' : 'main-panel co-fx-box-shadow-heavy'">
@@ -118,7 +119,9 @@ mixpanel.init("{{ mixpanel_key }}", { track_pageview : false, debug: {{ is_debug
              <li quay-require="['BILLING']"><a href="/security/" target="_self">Security</a></li>
              <li quay-require="['BILLING']"><a href="/about/" target="_self">About</a></li>
              <li><b><a href="{{ contact_href or '/contact/' }}" target="_self">Contact</a></b></li>
-             <li quay-require="['BILLING']"><b><a href="http://status.quay.io" target="_self">Service Status</a></b></li>
+             <li quay-require="['BILLING']">
+               <span class="quay-service-status"></span>
+             </li>
            </ul>
          </div>
          <div class="col-md-4">

Binary file not shown.

View file

@@ -1367,6 +1367,13 @@ class TestRepositoryBuildResource(ApiTestCase):
     self.assertEquals(1, len(json['builds']))
     self.assertEquals(uuid, json['builds'][0]['id'])

+    # Find the build's queue item.
+    build_ref = database.RepositoryBuild.get(uuid=uuid)
+    queue_item = database.QueueItem.get(id=build_ref.queue_id)
+
+    self.assertTrue(queue_item.available)
+    self.assertTrue(queue_item.retries_remaining > 0)
+
     # Cancel the build.
     self.deleteResponse(RepositoryBuildResource,
                         params=dict(repository=ADMIN_ACCESS_USER + '/simple', build_uuid=uuid),
@@ -1378,6 +1385,12 @@ class TestRepositoryBuildResource(ApiTestCase):
     self.assertEquals(0, len(json['builds']))

+    # Check for the build's queue item.
+    try:
+      database.QueueItem.get(id=build_ref.queue_id)
+      self.fail('QueueItem still exists for build')
+    except database.QueueItem.DoesNotExist:
+      pass
+
   def test_attemptcancel_scheduledbuild(self):
     self.login(ADMIN_ACCESS_USER)
@@ -1398,7 +1411,8 @@ class TestRepositoryBuildResource(ApiTestCase):
     self.assertEquals(uuid, json['builds'][0]['id'])

     # Set queue item to be picked up.
-    qi = database.QueueItem.get(id=1)
+    build_ref = database.RepositoryBuild.get(uuid=uuid)
+    qi = database.QueueItem.get(id=build_ref.queue_id)
     qi.available = False
     qi.save()

View file

@@ -93,7 +93,8 @@ def _validate_mailing(config):
   })

   test_mail = Mail(test_app)
-  test_msg = Message("Test e-mail from %s" % app.config['REGISTRY_TITLE'])
+  test_msg = Message("Test e-mail from %s" % app.config['REGISTRY_TITLE'],
+                     sender=config.get('MAIL_DEFAULT_SENDER'))
   test_msg.add_recipient(get_authenticated_user().email)
   test_mail.send(test_msg)

View file

@@ -1,14 +1,15 @@
 class ImageTreeNode(object):
   """ A node in the image tree. """
-  def __init__(self, image):
+  def __init__(self, image, child_map):
     self.image = image
     self.parent = None
-    self.children = []
     self.tags = []

-  def add_child(self, child):
-    self.children.append(child)
-    child.parent = self
+    self._child_map = child_map
+
+  @property
+  def children(self):
+    return self._child_map.get(str(self.image.id), [])

   def add_tag(self, tag):
     self.tags.append(tag)
@@ -18,8 +19,8 @@ class ImageTree(object):
   """ In-memory tree for easy traversal and lookup of images in a repository. """
   def __init__(self, all_images, all_tags, base_filter=None):
-    self._tag_map = {}
     self._image_map = {}
+    self._child_map = {}

     self._build(all_images, all_tags, base_filter)
@@ -33,18 +34,17 @@ class ImageTree(object):
       if image.id != base_filter and not str(base_filter) in ancestors:
         continue

-      self._image_map[image.id] = ImageTreeNode(image)
-
-    # Connect the nodes to their parents.
-    for image_node in self._image_map.values():
-      image = image_node.image
-      parent_image_id = image.ancestors.split('/')[-2] if image.ancestors else None
-      if not parent_image_id:
-        continue
-
-      parent_node = self._image_map.get(int(parent_image_id))
-      if parent_node is not None:
-        parent_node.add_child(image_node)
+      # Create the node for the image.
+      image_node = ImageTreeNode(image, self._child_map)
+      self._image_map[image.id] = image_node
+
+      # Add the node to the child map for its parent image (if any).
+      parent_image_id = image.ancestors.split('/')[-2] if image.ancestors else None
+      if parent_image_id:
+        if not parent_image_id in self._child_map:
+          self._child_map[parent_image_id] = []
+        self._child_map[parent_image_id].append(image_node)

     # Build the tag map.
     for tag in all_tags:
@@ -52,7 +52,6 @@ class ImageTree(object):
       if not image_node:
         continue

-      self._tag_map = image_node
       image_node.add_tag(tag.name)