Merge branch 'master' into v2
This commit is contained in: commit cf774e23df
74 changed files with 2123 additions and 1433 deletions
8  Bobfile
@@ -9,14 +9,8 @@ version = 1
 [[container]]
 name = "quay"
-Dockerfile = "Dockerfile.web"
+Dockerfile = "Dockerfile"
 project = "quay"
 tags = ["git:short"]
 
-[[container]]
-name = "builder"
-Dockerfile = "Dockerfile.buildworker"
-project = "builder"
-tags = ["git:short"]
-
 # vim:ft=toml
 
@@ -1,34 +1,21 @@
 # vim:ft=dockerfile
 
-###############################
-# BEGIN COMMON SECION
-###############################
-
-FROM phusion/baseimage:0.9.15
+FROM phusion/baseimage:0.9.16
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV HOME /root
 
 # Install the dependencies.
-RUN apt-get update # 11DEC2014
+RUN apt-get update # 29JAN2015
 
 # New ubuntu packages should be added as their own apt-get install lines below the existing install commands
-RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev
+RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev gpgme
 
 # Build the python dependencies
 ADD requirements.txt requirements.txt
 RUN virtualenv --distribute venv
 RUN venv/bin/pip install -r requirements.txt
 
-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev gpgme
-
-###############################
-# END COMMON SECION
-###############################
-
-# Remove SSH.
-RUN rm -rf /etc/service/sshd /etc/my_init.d/00_regen_ssh_host_keys.sh
-
 # Install the binary dependencies
 ADD binary_dependencies binary_dependencies
 RUN gdebi --n binary_dependencies/*.deb
@@ -41,6 +28,10 @@ RUN npm install -g grunt-cli
 ADD grunt grunt
 RUN cd grunt && npm install
 
+RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
+RUN apt-get autoremove -y
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
 # Add all of the files!
 ADD . .
 
@@ -65,14 +56,9 @@ ADD conf/init/buildmanager /etc/service/buildmanager
 RUN mkdir static/fonts static/ldn
 RUN venv/bin/python -m external_libraries
 
-RUN apt-get autoremove -y
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
 # Run the tests
 RUN TEST=true venv/bin/python -m unittest discover
 
-VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp"]
+VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]
 
-EXPOSE 443 80
+EXPOSE 443 8443 80
 
 CMD ["/sbin/my_init"]
@@ -1,49 +0,0 @@
-# vim:ft=dockerfile
-
-###############################
-# BEGIN COMMON SECION
-###############################
-
-FROM phusion/baseimage:0.9.15
-
-ENV DEBIAN_FRONTEND noninteractive
-ENV HOME /root
-
-# Install the dependencies.
-RUN apt-get update # 11DEC2014
-
-# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
-RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev
-
-# Build the python dependencies
-ADD requirements.txt requirements.txt
-RUN virtualenv --distribute venv
-RUN venv/bin/pip install -r requirements.txt
-
-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
-
-###############################
-# END COMMON SECION
-###############################
-
-RUN apt-get install -y lxc aufs-tools
-
-RUN usermod -v 100000-200000 -w 100000-200000 root
-
-ADD binary_dependencies/builder binary_dependencies/builder
-RUN gdebi --n binary_dependencies/builder/*.deb
-
-ADD . .
-
-ADD conf/init/svlogd_config /svlogd_config
-ADD conf/init/preplogsdir.sh /etc/my_init.d/
-ADD conf/init/tutumdocker /etc/service/tutumdocker
-ADD conf/init/dockerfilebuild /etc/service/dockerfilebuild
-
-RUN apt-get remove -y --auto-remove nodejs npm git phantomjs
-RUN apt-get autoremove -y
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-VOLUME ["/var/lib/docker", "/var/lib/lxc", "/conf/stack", "/var/log"]
-
-CMD ["/sbin/my_init"]
48  app.py
@@ -5,32 +5,31 @@ import yaml
 
 from flask import Flask as BaseFlask, Config as BaseConfig, request, Request
 from flask.ext.principal import Principal
-from flask.ext.login import LoginManager
+from flask.ext.login import LoginManager, UserMixin
 from flask.ext.mail import Mail
 
 import features
 
+from avatars.avatars import Avatar
 from storage import Storage
 from data import model
 from data import database
 from data.userfiles import Userfiles
 from data.users import UserAuthentication
-from util.analytics import Analytics
-from util.exceptionlog import Sentry
-from util.queuemetrics import QueueMetrics
-from util.names import urn_generator
-from util.oauth import GoogleOAuthConfig, GithubOAuthConfig
 from data.billing import Billing
 from data.buildlogs import BuildLogs
 from data.archivedlogs import LogArchive
-from data.queue import WorkQueue
 from data.userevent import UserEventsBuilderModule
-from avatars.avatars import Avatar
+from data.queue import WorkQueue
+from util.analytics import Analytics
+from util.exceptionlog import Sentry
+from util.names import urn_generator
+from util.oauth import GoogleOAuthConfig, GithubOAuthConfig
 from util.signing import Signer
+from util.queuemetrics import QueueMetrics
 
 
 # pylint: disable=invalid-name,too-many-public-methods,too-few-public-methods,too-many-ancestors
 
 
 class Config(BaseConfig):
 """ Flask config enhanced with a `from_yamlfile` method """
 
@@ -134,17 +133,18 @@ analytics = Analytics(app)
 billing = Billing(app)
 sentry = Sentry(app)
 build_logs = BuildLogs(app)
-queue_metrics = QueueMetrics(app)
 authentication = UserAuthentication(app)
 userevents = UserEventsBuilderModule(app)
 signer = Signer(app, OVERRIDE_CONFIG_DIRECTORY)
+queue_metrics = QueueMetrics(app)
 
+tf = app.config['DB_TRANSACTION_FACTORY']
 
 github_login = GithubOAuthConfig(app, 'GITHUB_LOGIN_CONFIG')
 github_trigger = GithubOAuthConfig(app, 'GITHUB_TRIGGER_CONFIG')
 google_login = GoogleOAuthConfig(app, 'GOOGLE_LOGIN_CONFIG')
 oauth_apps = [github_login, github_trigger, google_login]
 
-tf = app.config['DB_TRANSACTION_FACTORY']
 image_diff_queue = WorkQueue(app.config['DIFFS_QUEUE_NAME'], tf)
 dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf,
 reporter=queue_metrics.report)
@@ -154,5 +154,29 @@ database.configure(app.config)
 model.config.app_config = app.config
 model.config.store = storage
 
+@login_manager.user_loader
+def load_user(user_uuid):
+logger.debug('User loader loading deferred user with uuid: %s' % user_uuid)
+return LoginWrappedDBUser(user_uuid)
+
+class LoginWrappedDBUser(UserMixin):
+def __init__(self, user_uuid, db_user=None):
+self._uuid = user_uuid
+self._db_user = db_user
+
+def db_user(self):
+if not self._db_user:
+self._db_user = model.get_user_by_uuid(self._uuid)
+return self._db_user
+
+def is_authenticated(self):
+return self.db_user() is not None
+
+def is_active(self):
+return self.db_user().verified
+
+def get_id(self):
+return unicode(self._uuid)
+
 def get_app_url():
 return '%s://%s' % (app.config['PREFERRED_URL_SCHEME'], app.config['SERVER_HOSTNAME'])
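The LoginWrappedDBUser added above defers the database lookup until a request actually touches the user. The sketch below is a minimal, self-contained illustration of the flask-login contract it relies on: login_user() stores whatever get_id() returns in the session, and the @user_loader callback is later called with that same string to rebuild the user object lazily. SessionUser and the routes are hypothetical stand-ins, and the modern flask_login import path and str() are used here so the snippet runs on current versions; the diff itself uses the older flask.ext.login alias and Python 2's unicode().

from flask import Flask
from flask_login import LoginManager, UserMixin, current_user, login_user

app = Flask(__name__)
app.secret_key = 'not-a-real-secret'  # required for session cookies
login_manager = LoginManager(app)


class SessionUser(UserMixin):
  # Hypothetical stand-in for LoginWrappedDBUser: only the uuid is kept here,
  # so nothing needs to be loaded from a database until a caller asks for it.
  def __init__(self, user_uuid):
    self._uuid = user_uuid

  def get_id(self):
    return str(self._uuid)


@login_manager.user_loader
def load_user(user_uuid):
  # Called by flask-login on each request with the id stored by login_user().
  return SessionUser(user_uuid)


@app.route('/login')
def login():
  login_user(SessionUser('1234'))
  return 'logged in'


@app.route('/whoami')
def whoami():
  return current_user.get_id() or 'anonymous'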
Binary file not shown.
BIN  binary_dependencies/tengine_2.1.0-1_amd64.deb  Normal file
Binary file not shown.
2  build.sh  Executable file
@@ -0,0 +1,2 @@
+docker build -t quay.io/quay/quay:`git rev-parse --short HEAD` .
+echo quay.io/quay/quay:`git rev-parse --short HEAD`
27  buildman/asyncutil.py  Normal file
@@ -0,0 +1,27 @@
+from functools import partial, wraps
+from trollius import get_event_loop
+
+
+class AsyncWrapper(object):
+""" Wrapper class which will transform a syncronous library to one that can be used with
+trollius coroutines.
+"""
+def __init__(self, delegate, loop=None, executor=None):
+self._loop = loop if loop is not None else get_event_loop()
+self._delegate = delegate
+self._executor = executor
+
+def __getattr__(self, attrib):
+delegate_attr = getattr(self._delegate, attrib)
+
+if not callable(delegate_attr):
+return delegate_attr
+
+def wrapper(*args, **kwargs):
+""" Wraps the delegate_attr with primitives that will transform sync calls to ones shelled
+out to a thread pool.
+"""
+callable_delegate_attr = partial(delegate_attr, *args, **kwargs)
+return self._loop.run_in_executor(self._executor, callable_delegate_attr)
+
+return wrapper
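A minimal usage sketch of the AsyncWrapper added above, assuming only what the new module itself provides. SlowRegistry and its ping() method are hypothetical stand-ins for a blocking client (such as the synchronous etcd client the ephemeral build manager wraps later in this commit); the point is that attribute access on the wrapper returns a callable whose result must be awaited with trollius, because the real call runs on a thread-pool executor instead of the event loop.

import time

import trollius
from trollius import From, Return

from buildman.asyncutil import AsyncWrapper


class SlowRegistry(object):
  """ Hypothetical synchronous client: every call blocks its calling thread. """
  def ping(self):
    time.sleep(0.1)
    return 'pong'


@trollius.coroutine
def check(registry):
  # registry.ping() returns a future scheduled on the executor; From() waits on it
  # without blocking the event loop.
  result = yield From(registry.ping())
  raise Return(result)


if __name__ == '__main__':
  loop = trollius.get_event_loop()
  print(loop.run_until_complete(check(AsyncWrapper(SlowRegistry()))))  # pong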
@@ -6,6 +6,7 @@ import time
 from app import app, userfiles as user_files, build_logs, dockerfile_build_queue
 
 from buildman.manager.enterprise import EnterpriseManager
+from buildman.manager.ephemeral import EphemeralBuilderManager
 from buildman.server import BuilderServer
 
 from trollius import SSLContext
@@ -13,11 +14,17 @@ from trollius import SSLContext
 logger = logging.getLogger(__name__)
 
 BUILD_MANAGERS = {
-'enterprise': EnterpriseManager
+'enterprise': EnterpriseManager,
+'ephemeral': EphemeralBuilderManager,
 }
 
 EXTERNALLY_MANAGED = 'external'
 
+DEFAULT_WEBSOCKET_PORT = 8787
+DEFAULT_CONTROLLER_PORT = 8686
+
+LOG_FORMAT = "%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s"
+
 def run_build_manager():
 if not features.BUILD_SUPPORT:
 logger.debug('Building is disabled. Please enable the feature flag')
@@ -39,6 +46,19 @@ def run_build_manager():
 if manager_klass is None:
 return
 
+manager_hostname = os.environ.get('BUILDMAN_HOSTNAME',
+app.config.get('BUILDMAN_HOSTNAME',
+app.config['SERVER_HOSTNAME']))
+websocket_port = int(os.environ.get('BUILDMAN_WEBSOCKET_PORT',
+app.config.get('BUILDMAN_WEBSOCKET_PORT',
+DEFAULT_WEBSOCKET_PORT)))
+controller_port = int(os.environ.get('BUILDMAN_CONTROLLER_PORT',
+app.config.get('BUILDMAN_CONTROLLER_PORT',
+DEFAULT_CONTROLLER_PORT)))
+
+logger.debug('Will pass buildman hostname %s to builders for websocket connection',
+manager_hostname)
+
 logger.debug('Starting build manager with lifecycle "%s"', build_manager_config[0])
 ssl_context = None
 if os.environ.get('SSL_CONFIG'):
@@ -48,9 +68,10 @@ def run_build_manager():
 os.path.join(os.environ.get('SSL_CONFIG'), 'ssl.key'))
 
 server = BuilderServer(app.config['SERVER_HOSTNAME'], dockerfile_build_queue, build_logs,
-user_files, manager_klass)
-server.run('0.0.0.0', ssl=ssl_context)
+user_files, manager_klass, build_manager_config[1], manager_hostname)
+server.run('0.0.0.0', websocket_port, controller_port, ssl=ssl_context)
 
 if __name__ == '__main__':
-logging.basicConfig(level=logging.DEBUG)
+logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
+logging.getLogger('peewee').setLevel(logging.WARN)
 run_build_manager()
@@ -6,10 +6,10 @@ import trollius
 import re
 
 from autobahn.wamp.exception import ApplicationError
-from trollius.coroutines import From
 
 from buildman.server import BuildJobResult
 from buildman.component.basecomponent import BaseComponent
+from buildman.jobutil.buildjob import BuildJobLoadException
 from buildman.jobutil.buildpack import BuildPackage, BuildPackageException
 from buildman.jobutil.buildstatus import StatusHandler
 from buildman.jobutil.workererror import WorkerError
@@ -20,7 +20,7 @@ HEARTBEAT_DELTA = datetime.timedelta(seconds=30)
 HEARTBEAT_TIMEOUT = 10
 INITIAL_TIMEOUT = 25
 
-SUPPORTED_WORKER_VERSIONS = ['0.1-beta']
+SUPPORTED_WORKER_VERSIONS = ['0.1-beta', '0.2']
 
 logger = logging.getLogger(__name__)
 
@@ -39,13 +39,14 @@ class BuildComponent(BaseComponent):
 self.builder_realm = realm
 
 self.parent_manager = None
-self.server_hostname = None
+self.registry_hostname = None
 
 self._component_status = ComponentStatus.JOINING
 self._last_heartbeat = None
 self._current_job = None
 self._build_status = None
 self._image_info = None
+self._worker_version = None
 
 BaseComponent.__init__(self, config, **kwargs)
 
@@ -54,69 +55,89 @@ class BuildComponent(BaseComponent):
 
 def onJoin(self, details):
 logger.debug('Registering methods and listeners for component %s', self.builder_realm)
-yield From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
-yield From(self.register(self._ping, u'io.quay.buildworker.ping'))
-yield From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
-yield From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
+yield trollius.From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
+yield trollius.From(self.register(self._ping, u'io.quay.buildworker.ping'))
+yield trollius.From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
+yield trollius.From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
 
-self._set_status(ComponentStatus.WAITING)
+yield trollius.From(self._set_status(ComponentStatus.WAITING))
 
 def is_ready(self):
 """ Determines whether a build component is ready to begin a build. """
 return self._component_status == ComponentStatus.RUNNING
 
+@trollius.coroutine
 def start_build(self, build_job):
 """ Starts a build. """
+logger.debug('Starting build for component %s (worker version: %s)',
+self.builder_realm, self._worker_version)
+
 self._current_job = build_job
-self._build_status = StatusHandler(self.build_logs, build_job.repo_build())
+self._build_status = StatusHandler(self.build_logs, build_job.repo_build.uuid)
 self._image_info = {}
 
-self._set_status(ComponentStatus.BUILDING)
+yield trollius.From(self._set_status(ComponentStatus.BUILDING))
 
-# Retrieve the job's buildpack.
-buildpack_url = self.user_files.get_file_url(build_job.repo_build().resource_key,
+# Send the notification that the build has started.
+build_job.send_notification('build_start')
+
+# Parse the build configuration.
+try:
+build_config = build_job.build_config
+except BuildJobLoadException as irbe:
+self._build_failure('Could not load build job information', irbe)
+
+base_image_information = {}
+buildpack_url = self.user_files.get_file_url(build_job.repo_build.resource_key,
 requires_cors=False)
 
-logger.debug('Retreiving build package: %s', buildpack_url)
-buildpack = None
-try:
-buildpack = BuildPackage.from_url(buildpack_url)
-except BuildPackageException as bpe:
-self._build_failure('Could not retrieve build package', bpe)
-return
+# TODO(jschorr): Remove as soon as the fleet has been transitioned to 0.2.
+if self._worker_version == '0.1-beta':
+# Retrieve the job's buildpack.
+logger.debug('Retrieving build package: %s', buildpack_url)
+buildpack = None
+try:
+buildpack = BuildPackage.from_url(buildpack_url)
+except BuildPackageException as bpe:
+self._build_failure('Could not retrieve build package', bpe)
+raise trollius.Return()
 
 # Extract the base image information from the Dockerfile.
 parsed_dockerfile = None
 logger.debug('Parsing dockerfile')
 
-build_config = build_job.build_config()
-try:
-parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir'))
-except BuildPackageException as bpe:
-self._build_failure('Could not find Dockerfile in build package', bpe)
-return
+try:
+parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir'))
+except BuildPackageException as bpe:
+self._build_failure('Could not find Dockerfile in build package', bpe)
+raise trollius.Return()
 
 image_and_tag_tuple = parsed_dockerfile.get_image_and_tag()
 if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
 self._build_failure('Missing FROM line in Dockerfile')
-return
+raise trollius.Return()
 
 base_image_information = {
 'repository': image_and_tag_tuple[0],
 'tag': image_and_tag_tuple[1]
 }
 
 # Extract the number of steps from the Dockerfile.
 with self._build_status as status_dict:
 status_dict['total_commands'] = len(parsed_dockerfile.commands)
+else:
+# TODO(jschorr): This is a HACK to make sure the progress bar (sort of) continues working
+# until such time as we have the caching code in place.
+with self._build_status as status_dict:
+status_dict['total_commands'] = 25
 
 # Add the pull robot information, if any.
-if build_config.get('pull_credentials') is not None:
-base_image_information['username'] = build_config['pull_credentials'].get('username', '')
-base_image_information['password'] = build_config['pull_credentials'].get('password', '')
+if build_job.pull_credentials:
+base_image_information['username'] = build_job.pull_credentials.get('username', '')
+base_image_information['password'] = build_job.pull_credentials.get('password', '')
 
 # Retrieve the repository's fully qualified name.
-repo = build_job.repo_build().repository
+repo = build_job.repo_build.repository
 repository_name = repo.namespace_user.username + '/' + repo.name
 
 # Parse the build queue item into build arguments.
@@ -128,17 +149,17 @@ class BuildComponent(BaseComponent):
 # push_token: The token to use to push the built image.
 # tag_names: The name(s) of the tag(s) for the newly built image.
 # base_image: The image name and credentials to use to conduct the base image pull.
-# repository: The repository to pull.
-# tag: The tag to pull.
+# repository: The repository to pull (DEPRECATED 0.2)
+# tag: The tag to pull (DEPRECATED in 0.2)
 # username: The username for pulling the base image (if any).
 # password: The password for pulling the base image (if any).
 build_arguments = {
 'build_package': buildpack_url,
 'sub_directory': build_config.get('build_subdir', ''),
 'repository': repository_name,
-'registry': self.server_hostname,
-'pull_token': build_job.repo_build().access_token.code,
-'push_token': build_job.repo_build().access_token.code,
+'registry': self.registry_hostname,
+'pull_token': build_job.repo_build.access_token.code,
+'push_token': build_job.repo_build.access_token.code,
 'tag_names': build_config.get('docker_tags', ['latest']),
 'base_image': base_image_information,
 'cached_tag': build_job.determine_cached_tag() or ''
@@ -148,9 +169,7 @@ class BuildComponent(BaseComponent):
 logger.debug('Invoking build: %s', self.builder_realm)
 logger.debug('With Arguments: %s', build_arguments)
 
-return (self
-.call("io.quay.builder.build", **build_arguments)
-.add_done_callback(self._build_complete))
+self.call("io.quay.builder.build", **build_arguments).add_done_callback(self._build_complete)
 
 @staticmethod
 def _total_completion(statuses, total_images):
@@ -241,14 +260,14 @@ class BuildComponent(BaseComponent):
 def _build_failure(self, error_message, exception=None):
 """ Handles and logs a failed build. """
 self._build_status.set_error(error_message, {
-'internal_error': exception.message if exception else None
+'internal_error': str(exception) if exception else None
 })
 
-build_id = self._current_job.repo_build().uuid
+build_id = self._current_job.repo_build.uuid
 logger.warning('Build %s failed with message: %s', build_id, error_message)
 
 # Mark that the build has finished (in an error state)
-self._build_finished(BuildJobResult.ERROR)
+trollius.async(self._build_finished(BuildJobResult.ERROR))
 
 def _build_complete(self, result):
 """ Wraps up a completed build. Handles any errors and calls self._build_finished. """
@@ -256,7 +275,10 @@ class BuildComponent(BaseComponent):
 # Retrieve the result. This will raise an ApplicationError on any error that occurred.
 result.result()
 self._build_status.set_phase(BUILD_PHASE.COMPLETE)
-self._build_finished(BuildJobResult.COMPLETE)
+trollius.async(self._build_finished(BuildJobResult.COMPLETE))
 
+# Send the notification that the build has completed successfully.
+self._current_job.send_notification('build_success')
 except ApplicationError as aex:
 worker_error = WorkerError(aex.error, aex.kwargs.get('base_error'))
@@ -264,52 +286,66 @@ class BuildComponent(BaseComponent):
 self._build_status.set_error(worker_error.public_message(), worker_error.extra_data(),
 internal_error=worker_error.is_internal_error())
 
+# Send the notification that the build has failed.
+self._current_job.send_notification('build_failure',
+error_message=worker_error.public_message())
+
 # Mark the build as completed.
 if worker_error.is_internal_error():
-self._build_finished(BuildJobResult.INCOMPLETE)
+trollius.async(self._build_finished(BuildJobResult.INCOMPLETE))
 else:
-self._build_finished(BuildJobResult.ERROR)
+trollius.async(self._build_finished(BuildJobResult.ERROR))
 
+@trollius.coroutine
 def _build_finished(self, job_status):
 """ Alerts the parent that a build has completed and sets the status back to running. """
-self.parent_manager.job_completed(self._current_job, job_status, self)
+yield trollius.From(self.parent_manager.job_completed(self._current_job, job_status, self))
 self._current_job = None
 
 # Set the component back to a running state.
-self._set_status(ComponentStatus.RUNNING)
+yield trollius.From(self._set_status(ComponentStatus.RUNNING))
 
 @staticmethod
 def _ping():
 """ Ping pong. """
 return 'pong'
 
+@trollius.coroutine
 def _on_ready(self, token, version):
-if not version in SUPPORTED_WORKER_VERSIONS:
-logger.warning('Build component (token "%s") is running an out-of-date version: %s', version)
-return False
-
-if self._component_status != 'waiting':
+self._worker_version = version
+
+if not version in SUPPORTED_WORKER_VERSIONS:
+logger.warning('Build component (token "%s") is running an out-of-date version: %s', token,
+version)
+raise trollius.Return(False)
+
+if self._component_status != ComponentStatus.WAITING:
 logger.warning('Build component (token "%s") is already connected', self.expected_token)
-return False
+raise trollius.Return(False)
 
 if token != self.expected_token:
-logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token, token)
-return False
+logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token,
+token)
+raise trollius.Return(False)
 
-self._set_status(ComponentStatus.RUNNING)
+yield trollius.From(self._set_status(ComponentStatus.RUNNING))
 
 # Start the heartbeat check and updating loop.
 loop = trollius.get_event_loop()
 loop.create_task(self._heartbeat())
 logger.debug('Build worker %s is connected and ready', self.builder_realm)
-return True
+raise trollius.Return(True)
 
+@trollius.coroutine
 def _set_status(self, phase):
+if phase == ComponentStatus.RUNNING:
+yield trollius.From(self.parent_manager.build_component_ready(self))
+
 self._component_status = phase
 
 def _on_heartbeat(self):
 """ Updates the last known heartbeat. """
-self._last_heartbeat = datetime.datetime.now()
+self._last_heartbeat = datetime.datetime.utcnow()
 
 @trollius.coroutine
 def _heartbeat(self):
@@ -317,13 +353,13 @@ class BuildComponent(BaseComponent):
 and updating the heartbeat in the build status dictionary (if applicable). This allows
 the build system to catch crashes from either end.
 """
-yield From(trollius.sleep(INITIAL_TIMEOUT))
+yield trollius.From(trollius.sleep(INITIAL_TIMEOUT))
 
 while True:
 # If the component is no longer running or actively building, nothing more to do.
 if (self._component_status != ComponentStatus.RUNNING and
 self._component_status != ComponentStatus.BUILDING):
-return
+raise trollius.Return()
 
 # If there is an active build, write the heartbeat to its status.
 build_status = self._build_status
@@ -331,35 +367,36 @@ class BuildComponent(BaseComponent):
 with build_status as status_dict:
 status_dict['heartbeat'] = int(time.time())
 
 
 # Mark the build item.
 current_job = self._current_job
 if current_job is not None:
-self.parent_manager.job_heartbeat(current_job)
+yield trollius.From(self.parent_manager.job_heartbeat(current_job))
 
 # Check the heartbeat from the worker.
 logger.debug('Checking heartbeat on realm %s', self.builder_realm)
-if self._last_heartbeat and self._last_heartbeat < datetime.datetime.now() - HEARTBEAT_DELTA:
-self._timeout()
-return
+if (self._last_heartbeat and
+self._last_heartbeat < datetime.datetime.utcnow() - HEARTBEAT_DELTA):
+yield trollius.From(self._timeout())
+raise trollius.Return()
 
-yield From(trollius.sleep(HEARTBEAT_TIMEOUT))
+yield trollius.From(trollius.sleep(HEARTBEAT_TIMEOUT))
 
+@trollius.coroutine
 def _timeout(self):
-self._set_status(ComponentStatus.TIMED_OUT)
-logger.warning('Build component with realm %s has timed out', self.builder_realm)
-self._dispose(timed_out=True)
+if self._component_status == ComponentStatus.TIMED_OUT:
+raise trollius.Return()
+
+yield trollius.From(self._set_status(ComponentStatus.TIMED_OUT))
+logger.warning('Build component with realm %s has timed out', self.builder_realm)
 
-def _dispose(self, timed_out=False):
 # If we still have a running job, then it has not completed and we need to tell the parent
 # manager.
 if self._current_job is not None:
-if timed_out:
 self._build_status.set_error('Build worker timed out', internal_error=True)
 
 self.parent_manager.job_completed(self._current_job, BuildJobResult.INCOMPLETE, self)
 self._build_status = None
 self._current_job = None
 
 # Unregister the current component so that it cannot be invoked again.
-self.parent_manager.build_component_disposed(self, timed_out)
+self.parent_manager.build_component_disposed(self, True)
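Most of the churn in this file is mechanical: synchronous methods become trollius coroutines, plain return statements become raise trollius.Return(...), and bare From() becomes trollius.From(). The fragment below is a stand-alone sketch of that idiom under Python 2 semantics (a generator cannot return a value, so trollius smuggles it out through the Return exception); fetch_value and caller are made-up names, not part of the build manager.

import trollius


@trollius.coroutine
def fetch_value():
  # Wait on another coroutine/future without blocking the event loop.
  yield trollius.From(trollius.sleep(0.01))
  # The coroutine's "return value" travels inside the Return exception.
  raise trollius.Return(21)


@trollius.coroutine
def caller():
  value = yield trollius.From(fetch_value())
  raise trollius.Return(value * 2)


if __name__ == '__main__':
  loop = trollius.get_event_loop()
  print(loop.run_until_complete(caller()))  # 42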
@@ -1,6 +1,9 @@
+import json
+
+from cachetools import lru_cache
+from endpoints.notificationhelper import spawn_notification
 from data import model
 
-import json
 
 class BuildJobLoadException(Exception):
 """ Exception raised if a build job could not be instantiated for some reason. """
@@ -9,50 +12,69 @@ class BuildJobLoadException(Exception):
 class BuildJob(object):
 """ Represents a single in-progress build job. """
 def __init__(self, job_item):
-self._job_item = job_item
+self.job_item = job_item
 
 try:
-self._job_details = json.loads(job_item.body)
+self.job_details = json.loads(job_item.body)
 except ValueError:
 raise BuildJobLoadException(
-'Could not parse build queue item config with ID %s' % self._job_details['build_uuid']
+'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
 )
 
+def send_notification(self, kind, error_message=None):
+tags = self.build_config.get('docker_tags', ['latest'])
+event_data = {
+'build_id': self.repo_build.uuid,
+'build_name': self.repo_build.display_name,
+'docker_tags': tags,
+'trigger_id': self.repo_build.trigger.uuid,
+'trigger_kind': self.repo_build.trigger.service.name
+}
+
+if error_message is not None:
+event_data['error_message'] = error_message
+
+spawn_notification(self.repo_build.repository, kind, event_data,
+subpage='build?current=%s' % self.repo_build.uuid,
+pathargs=['build', self.repo_build.uuid])
+
+
+@lru_cache(maxsize=1)
+def _load_repo_build(self):
 try:
-self._repo_build = model.get_repository_build(self._job_details['build_uuid'])
+return model.get_repository_build(self.job_details['build_uuid'])
 except model.InvalidRepositoryBuildException:
 raise BuildJobLoadException(
-'Could not load repository build with ID %s' % self._job_details['build_uuid'])
+'Could not load repository build with ID %s' % self.job_details['build_uuid'])
 
+@property
+def repo_build(self):
+return self._load_repo_build()
+
+@property
+def pull_credentials(self):
+""" Returns the pull credentials for this job, or None if none. """
+return self.job_details.get('pull_credentials')
+
+@property
+def build_config(self):
 try:
-self._build_config = json.loads(self._repo_build.job_config)
+return json.loads(self.repo_build.job_config)
 except ValueError:
 raise BuildJobLoadException(
-'Could not parse repository build job config with ID %s' % self._job_details['build_uuid']
+'Could not parse repository build job config with ID %s' % self.job_details['build_uuid']
 )
 
 def determine_cached_tag(self):
 """ Returns the tag to pull to prime the cache or None if none. """
 # TODO(jschorr): Change this to use the more complicated caching rules, once we have caching
 # be a pull of things besides the constructed tags.
-tags = self._build_config.get('docker_tags', ['latest'])
-existing_tags = model.list_repository_tags(self._repo_build.repository.namespace_user.username,
-self._repo_build.repository.name)
+tags = self.build_config.get('docker_tags', ['latest'])
+existing_tags = model.list_repository_tags(self.repo_build.repository.namespace_user.username,
+self.repo_build.repository.name)
 
 cached_tags = set(tags) & set([tag.name for tag in existing_tags])
 if cached_tags:
 return list(cached_tags)[0]
 
 return None
 
-def job_item(self):
-""" Returns the job's queue item. """
-return self._job_item
-
-def repo_build(self):
-""" Returns the repository build DB row for the job. """
-return self._repo_build
-
-def build_config(self):
-""" Returns the parsed repository build config for the job. """
-return self._build_config
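BuildJob now exposes repo_build as a property backed by an @lru_cache(maxsize=1) loader instead of eagerly fetching the row in __init__, so the database is only hit when (and if) a caller asks for it. The sketch below shows the same memoized-property shape in isolation; Record and its _load() method are hypothetical, and the stdlib functools.lru_cache is used so the snippet runs without the cachetools dependency the diff imports.

from functools import lru_cache


class Record(object):
  loads = 0

  @lru_cache(maxsize=1)
  def _load(self):
    # Pretend this is an expensive database fetch; repeated access hits the cache.
    Record.loads += 1
    return {'id': 42}

  @property
  def data(self):
    return self._load()


record = Record()
assert record.data == record.data == {'id': 42}
assert Record.loads == 1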
@@ -1,12 +1,13 @@
 from data.database import BUILD_PHASE
+from data import model
+import datetime
 
 class StatusHandler(object):
 """ Context wrapper for writing status to build logs. """
 
-def __init__(self, build_logs, repository_build):
+def __init__(self, build_logs, repository_build_uuid):
 self._current_phase = None
-self._repository_build = repository_build
-self._uuid = repository_build.uuid
+self._uuid = repository_build_uuid
 self._build_logs = build_logs
 
 self._status = {
@@ -20,6 +21,8 @@ class StatusHandler(object):
 self.__exit__(None, None, None)
 
 def _append_log_message(self, log_message, log_type=None, log_data=None):
+log_data = log_data or {}
+log_data['datetime'] = str(datetime.datetime.now())
 self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data)
 
 def append_log(self, log_message, extra_data=None):
@@ -41,8 +44,12 @@ class StatusHandler(object):
 
 self._current_phase = phase
 self._append_log_message(phase, self._build_logs.PHASE, extra_data)
-self._repository_build.phase = phase
-self._repository_build.save()
+
+# Update the repository build with the new phase
+repo_build = model.get_repository_build(self._uuid)
+repo_build.phase = phase
+repo_build.save()
 
 return True
 
 def __enter__(self):
@@ -19,13 +19,19 @@ class WorkerError(object):
 'is_internal': True
 },
 
+'io.quay.builder.dockerfileissue': {
+'message': 'Could not find or parse Dockerfile',
+'show_base_error': True
+},
+
 'io.quay.builder.cannotpullbaseimage': {
 'message': 'Could not pull base image',
 'show_base_error': True
 },
 
 'io.quay.builder.internalerror': {
-'message': 'An internal error occurred while building. Please submit a ticket.'
+'message': 'An internal error occurred while building. Please submit a ticket.',
+'is_internal': True
 },
 
 'io.quay.builder.buildrunerror': {
@@ -1,12 +1,17 @@
+from trollius import coroutine
+
 class BaseManager(object):
 """ Base for all worker managers. """
 def __init__(self, register_component, unregister_component, job_heartbeat_callback,
-job_complete_callback):
+job_complete_callback, manager_hostname, heartbeat_period_sec):
 self.register_component = register_component
 self.unregister_component = unregister_component
 self.job_heartbeat_callback = job_heartbeat_callback
 self.job_complete_callback = job_complete_callback
+self.manager_hostname = manager_hostname
+self.heartbeat_period_sec = heartbeat_period_sec
 
+@coroutine
 def job_heartbeat(self, build_job):
 """ Method invoked to tell the manager that a job is still running. This method will be called
 every few minutes. """
@@ -25,26 +30,36 @@ class BaseManager(object):
 """
 raise NotImplementedError
 
-def schedule(self, build_job, loop):
+@coroutine
+def schedule(self, build_job):
 """ Schedules a queue item to be built. Returns True if the item was properly scheduled
 and False if all workers are busy.
 """
 raise NotImplementedError
 
-def initialize(self):
+def initialize(self, manager_config):
 """ Runs any initialization code for the manager. Called once the server is in a ready state.
 """
 raise NotImplementedError
 
+@coroutine
+def build_component_ready(self, build_component):
+""" Method invoked whenever a build component announces itself as ready.
+"""
+raise NotImplementedError
+
 def build_component_disposed(self, build_component, timed_out):
 """ Method invoked whenever a build component has been disposed. The timed_out boolean indicates
 whether the component's heartbeat timed out.
 """
 raise NotImplementedError
 
+@coroutine
 def job_completed(self, build_job, job_status, build_component):
 """ Method invoked once a job_item has completed, in some manner. The job_status will be
-one of: incomplete, error, complete. If incomplete, the job should be requeued.
+one of: incomplete, error, complete. Implementations of this method should call
+self.job_complete_callback with a status of Incomplete if they wish for the job to be
+automatically requeued.
 """
 raise NotImplementedError
 
@@ -5,7 +5,7 @@ from buildman.component.basecomponent import BaseComponent
 from buildman.component.buildcomponent import BuildComponent
 from buildman.manager.basemanager import BaseManager
 
-from trollius.coroutines import From
+from trollius import From, Return, coroutine
 
 REGISTRATION_REALM = 'registration'
 logger = logging.getLogger(__name__)
@@ -28,10 +28,15 @@ class DynamicRegistrationComponent(BaseComponent):
 
 class EnterpriseManager(BaseManager):
 """ Build manager implementation for the Enterprise Registry. """
-build_components = []
-shutting_down = False
 
-def initialize(self):
+def __init__(self, *args, **kwargs):
+self.ready_components = set()
+self.all_components = set()
+self.shutting_down = False
+
+super(EnterpriseManager, self).__init__(*args, **kwargs)
+
+def initialize(self, manager_config):
 # Add a component which is used by build workers for dynamic registration. Unlike
 # production, build workers in enterprise are long-lived and register dynamically.
 self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent)
@@ -45,30 +50,37 @@ class EnterpriseManager(BaseManager):
 """ Adds a new build component for an Enterprise Registry. """
 # Generate a new unique realm ID for the build worker.
 realm = str(uuid.uuid4())
-component = self.register_component(realm, BuildComponent, token="")
-self.build_components.append(component)
+new_component = self.register_component(realm, BuildComponent, token="")
+self.all_components.add(new_component)
 return realm
 
-def schedule(self, build_job, loop):
+@coroutine
+def schedule(self, build_job):
 """ Schedules a build for an Enterprise Registry. """
-if self.shutting_down:
-return False
+if self.shutting_down or not self.ready_components:
+raise Return(False)
 
-for component in self.build_components:
-if component.is_ready():
-loop.call_soon(component.start_build, build_job)
-return True
+component = self.ready_components.pop()
 
-return False
+yield From(component.start_build(build_job))
+
+raise Return(True)
+
+@coroutine
+def build_component_ready(self, build_component):
+self.ready_components.add(build_component)
 
 def shutdown(self):
 self.shutting_down = True
 
+@coroutine
 def job_completed(self, build_job, job_status, build_component):
 self.job_complete_callback(build_job, job_status)
 
 def build_component_disposed(self, build_component, timed_out):
-self.build_components.remove(build_component)
+self.all_components.remove(build_component)
+if build_component in self.ready_components:
+self.ready_components.remove(build_component)
 
 def num_workers(self):
-return len(self.build_components)
+return len(self.all_components)
 
buildman/manager/ephemeral.py (new file, 328 lines)
@@ -0,0 +1,328 @@
import logging
import etcd
import uuid
import calendar
import os.path
import json

from datetime import datetime, timedelta
from trollius import From, coroutine, Return, async
from concurrent.futures import ThreadPoolExecutor
from urllib3.exceptions import ReadTimeoutError, ProtocolError

from buildman.manager.basemanager import BaseManager
from buildman.manager.executor import PopenExecutor, EC2Executor
from buildman.component.buildcomponent import BuildComponent
from buildman.jobutil.buildjob import BuildJob
from buildman.asyncutil import AsyncWrapper
from util.morecollections import AttrDict


logger = logging.getLogger(__name__)


ETCD_DISABLE_TIMEOUT = 0


class EtcdAction(object):
  GET = 'get'
  SET = 'set'
  EXPIRE = 'expire'
  UPDATE = 'update'
  DELETE = 'delete'
  CREATE = 'create'
  COMPARE_AND_SWAP = 'compareAndSwap'
  COMPARE_AND_DELETE = 'compareAndDelete'


class EphemeralBuilderManager(BaseManager):
  """ Build manager implementation for the Enterprise Registry. """
  _executors = {
    'popen': PopenExecutor,
    'ec2': EC2Executor,
  }

  _etcd_client_klass = etcd.Client

  def __init__(self, *args, **kwargs):
    self._shutting_down = False

    self._manager_config = None
    self._async_thread_executor = None
    self._etcd_client = None

    self._etcd_realm_prefix = None
    self._etcd_builder_prefix = None

    self._component_to_job = {}
    self._job_uuid_to_component = {}
    self._component_to_builder = {}

    self._executor = None

    # Map of etcd keys being watched to the tasks watching them
    self._watch_tasks = {}

    super(EphemeralBuilderManager, self).__init__(*args, **kwargs)

  def _watch_etcd(self, etcd_key, change_callback, recursive=True):
    watch_task_key = (etcd_key, recursive)
    def callback_wrapper(changed_key_future):
      if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
        self._watch_etcd(etcd_key, change_callback)

      if changed_key_future.cancelled():
        # Due to lack of interest, tomorrow has been cancelled
        return

      try:
        etcd_result = changed_key_future.result()
      except (ReadTimeoutError, ProtocolError):
        return

      change_callback(etcd_result)

    if not self._shutting_down:
      watch_future = self._etcd_client.watch(etcd_key, recursive=recursive,
                                             timeout=ETCD_DISABLE_TIMEOUT)
      watch_future.add_done_callback(callback_wrapper)
      logger.debug('Scheduling watch of key: %s%s', etcd_key, '/*' if recursive else '')
      self._watch_tasks[watch_task_key] = async(watch_future)

  def _handle_builder_expiration(self, etcd_result):
    if etcd_result.action == EtcdAction.EXPIRE:
      # Handle the expiration
      logger.debug('Builder expired, clean up the old build node')
      job_metadata = json.loads(etcd_result._prev_node.value)

      if 'builder_id' in job_metadata:
        logger.info('Terminating expired build node.')
        async(self._executor.stop_builder(job_metadata['builder_id']))

  def _handle_realm_change(self, etcd_result):
    if etcd_result.action == EtcdAction.CREATE:
      # We must listen on the realm created by ourselves or another worker
      realm_spec = json.loads(etcd_result.value)
      self._register_realm(realm_spec)

    elif etcd_result.action == EtcdAction.DELETE or etcd_result.action == EtcdAction.EXPIRE:
      # We must stop listening for new connections on the specified realm, if we did not get the
      # connection
      realm_spec = json.loads(etcd_result._prev_node.value)
      build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
      component = self._job_uuid_to_component.pop(build_job.job_details['build_uuid'], None)
      if component is not None:
        # We were not the manager which the worker connected to, remove the bookkeeping for it
        logger.debug('Unregistering unused component on realm: %s', realm_spec['realm'])
        del self._component_to_job[component]
        del self._component_to_builder[component]
        self.unregister_component(component)

    else:
      logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)

  def _register_realm(self, realm_spec):
    logger.debug('Registering realm with manager: %s', realm_spec['realm'])
    component = self.register_component(realm_spec['realm'], BuildComponent,
                                        token=realm_spec['token'])
    build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
    self._component_to_job[component] = build_job
    self._component_to_builder[component] = realm_spec['builder_id']
    self._job_uuid_to_component[build_job.job_details['build_uuid']] = component

  @coroutine
  def _register_existing_realms(self):
    try:
      all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))
      for realm in all_realms.children:
        if not realm.dir:
          self._register_realm(json.loads(realm.value))
    except KeyError:
      # no realms have been registered yet
      pass

  def initialize(self, manager_config):
    logger.debug('Calling initialize')
    self._manager_config = manager_config

    executor_klass = self._executors.get(manager_config.get('EXECUTOR', ''), PopenExecutor)
    self._executor = executor_klass(manager_config.get('EXECUTOR_CONFIG', {}),
                                    self.manager_hostname)

    etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
    etcd_port = self._manager_config.get('ETCD_PORT', 2379)
    etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
    etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
    etcd_protocol = 'http' if etcd_auth is None else 'https'
    logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)

    worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
    self._async_thread_executor = ThreadPoolExecutor(worker_threads)
    self._etcd_client = AsyncWrapper(self._etcd_client_klass(host=etcd_host, port=etcd_port,
                                                             cert=etcd_auth, ca_cert=etcd_ca_cert,
                                                             protocol=etcd_protocol),
                                     executor=self._async_thread_executor)

    self._etcd_builder_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
    self._watch_etcd(self._etcd_builder_prefix, self._handle_builder_expiration)

    self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
    self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change)

    # Load components for all realms currently known to the cluster
    async(self._register_existing_realms())

  def setup_time(self):
    setup_time = self._manager_config.get('MACHINE_SETUP_TIME', 300)
    return setup_time

  def shutdown(self):
    logger.debug('Shutting down worker.')
    self._shutting_down = True

    for (etcd_key, _), task in self._watch_tasks.items():
      if not task.done():
        logger.debug('Canceling watch task for %s', etcd_key)
        task.cancel()

    if self._async_thread_executor is not None:
      logger.debug('Shutting down thread pool executor.')
      self._async_thread_executor.shutdown()

  @coroutine
  def schedule(self, build_job):
    build_uuid = build_job.job_details['build_uuid']
    logger.debug('Calling schedule with job: %s', build_uuid)

    # Check if there are worker slots available by checking the number of jobs in etcd
    allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
    try:
      building = yield From(self._etcd_client.read(self._etcd_builder_prefix, recursive=True))
      workers_alive = sum(1 for child in building.children if not child.dir)
    except KeyError:
      workers_alive = 0

    logger.debug('Total jobs: %s', workers_alive)

    if workers_alive >= allowed_worker_count:
      logger.info('Too many workers alive, unable to start new worker. %s >= %s', workers_alive,
                  allowed_worker_count)
      raise Return(False)

    job_key = self._etcd_job_key(build_job)

    # First try to take a lock for this job, meaning we will be responsible for its lifeline
    realm = str(uuid.uuid4())
    token = str(uuid.uuid4())
    ttl = self.setup_time()
    expiration = datetime.utcnow() + timedelta(seconds=ttl)

    machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200)
    max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration)

    payload = {
      'expiration': calendar.timegm(expiration.timetuple()),
      'max_expiration': calendar.timegm(max_expiration.timetuple()),
    }

    try:
      yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=False, ttl=ttl))
    except KeyError:
      # The job was already taken by someone else, we are probably a retry
      logger.error('Job already exists in etcd, are timeouts misconfigured or is the queue broken?')
      raise Return(False)

    logger.debug('Starting builder with executor: %s', self._executor)
    builder_id = yield From(self._executor.start_builder(realm, token, build_uuid))

    # Store the builder in etcd associated with the job id
    payload['builder_id'] = builder_id
    yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=True, ttl=ttl))

    # Store the realm spec which will allow any manager to accept this builder when it connects
    realm_spec = json.dumps({
      'realm': realm,
      'token': token,
      'builder_id': builder_id,
      'job_queue_item': build_job.job_item,
    })
    try:
      yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
                                         ttl=ttl))
    except KeyError:
      logger.error('Realm already exists in etcd. UUID collision or something is very very wrong.')
      raise Return(False)

    raise Return(True)

  @coroutine
  def build_component_ready(self, build_component):
    try:
      # Clean up the bookkeeping for allowing any manager to take the job
      job = self._component_to_job.pop(build_component)
      del self._job_uuid_to_component[job.job_details['build_uuid']]
      yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))

      logger.debug('Sending build %s to newly ready component on realm %s',
                   job.job_details['build_uuid'], build_component.builder_realm)
      yield From(build_component.start_build(job))
    except KeyError:
      logger.debug('Builder is asking for more work, but work already completed')

  def build_component_disposed(self, build_component, timed_out):
    logger.debug('Calling build_component_disposed.')

    # TODO make it so that I don't have to unregister the component if it timed out
    self.unregister_component(build_component)

  @coroutine
  def job_completed(self, build_job, job_status, build_component):
    logger.debug('Calling job_completed with status: %s', job_status)

    # Kill the ephemeral builder
    yield From(self._executor.stop_builder(self._component_to_builder.pop(build_component)))

    # Release the lock in etcd
    job_key = self._etcd_job_key(build_job)
    yield From(self._etcd_client.delete(job_key))

    self.job_complete_callback(build_job, job_status)

  @coroutine
  def job_heartbeat(self, build_job):
    # Extend the deadline in etcd
    job_key = self._etcd_job_key(build_job)
    build_job_metadata_response = yield From(self._etcd_client.read(job_key))
    build_job_metadata = json.loads(build_job_metadata_response.value)

    max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
    max_expiration_remaining = max_expiration - datetime.utcnow()
    max_expiration_sec = max(0, int(max_expiration_remaining.total_seconds()))

    ttl = min(self.heartbeat_period_sec * 2, max_expiration_sec)
    new_expiration = datetime.utcnow() + timedelta(seconds=ttl)

    payload = {
      'expiration': calendar.timegm(new_expiration.timetuple()),
      'builder_id': build_job_metadata['builder_id'],
      'max_expiration': build_job_metadata['max_expiration'],
    }

    yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=ttl))

    self.job_heartbeat_callback(build_job)

  def _etcd_job_key(self, build_job):
    """ Create a key which is used to track a job in etcd.
    """
    return os.path.join(self._etcd_builder_prefix, build_job.job_details['build_uuid'])

  def _etcd_realm_key(self, realm):
    """ Create a key which is used to track an incoming connection on a realm.
    """
    return os.path.join(self._etcd_realm_prefix, realm)

  def num_workers(self):
    """ Return the number of workers we're managing locally.
    """
    return len(self._component_to_builder)
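Aside (not part of the commit): the ephemeral manager keeps all of its coordination state in etcd. A sketch of the layout implied by schedule(), job_heartbeat() and the default prefixes above; the key names and payload fields come from the code, but the concrete values below are made up for illustration:

import json

ETCD_BUILDER_PREFIX = 'building/'   # one key per running job, TTL = setup_time()
ETCD_REALM_PREFIX = 'realm/'        # one key per builder that has not yet connected

job_key = ETCD_BUILDER_PREFIX + 'some-build-uuid'
job_payload = {
  'expiration': 1422500000,         # calendar.timegm(utcnow() + ttl)
  'max_expiration': 1422507200,     # calendar.timegm(utcnow() + MACHINE_MAX_TIME)
  'builder_id': 'i-0abc1234',       # added once the executor has started a machine
}

realm_key = ETCD_REALM_PREFIX + 'realm-uuid'
realm_payload = {
  'realm': 'realm-uuid',
  'token': 'token-uuid',
  'builder_id': 'i-0abc1234',
  'job_queue_item': {},             # raw queue item, so any manager can rebuild the BuildJob
}

print(json.dumps(job_payload))
print(json.dumps(realm_payload))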
buildman/manager/executor.py (new file, 237 lines)
@@ -0,0 +1,237 @@
import logging
import os
import uuid
import threading
import boto.ec2
import requests
import cachetools

from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop
from functools import partial

from buildman.asyncutil import AsyncWrapper


logger = logging.getLogger(__name__)


ONE_HOUR = 60*60

ENV = Environment(loader=FileSystemLoader('buildman/templates'))
TEMPLATE = ENV.get_template('cloudconfig.yaml')


class ExecutorException(Exception):
  """ Exception raised when there is a problem starting or stopping a builder.
  """
  pass


class BuilderExecutor(object):
  def __init__(self, executor_config, manager_hostname):
    self.executor_config = executor_config
    self.manager_hostname = manager_hostname

  """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for
      starting and stopping builders.
  """
  @coroutine
  def start_builder(self, realm, token, build_uuid):
    """ Create a builder with the specified config. Returns a unique id which can be used to manage
        the builder.
    """
    raise NotImplementedError

  @coroutine
  def stop_builder(self, builder_id):
    """ Stop a builder which is currently running.
    """
    raise NotImplementedError

  def get_manager_websocket_url(self):
    return 'ws://{0}:'

  def generate_cloud_config(self, realm, token, coreos_channel, manager_hostname,
                            quay_username=None, quay_password=None):
    if quay_username is None:
      quay_username = self.executor_config['QUAY_USERNAME']

    if quay_password is None:
      quay_password = self.executor_config['QUAY_PASSWORD']

    return TEMPLATE.render(
      realm=realm,
      token=token,
      quay_username=quay_username,
      quay_password=quay_password,
      manager_hostname=manager_hostname,
      coreos_channel=coreos_channel,
      worker_tag=self.executor_config['WORKER_TAG'],
    )


class EC2Executor(BuilderExecutor):
  """ Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud
      providers.
  """
  COREOS_STACK_URL = 'http://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_hvm.txt'

  def __init__(self, *args, **kwargs):
    self._loop = get_event_loop()
    super(EC2Executor, self).__init__(*args, **kwargs)

  def _get_conn(self):
    """ Creates an ec2 connection which can be used to manage instances.
    """
    return AsyncWrapper(boto.ec2.connect_to_region(
      self.executor_config['EC2_REGION'],
      aws_access_key_id=self.executor_config['AWS_ACCESS_KEY'],
      aws_secret_access_key=self.executor_config['AWS_SECRET_KEY'],
    ))

  @classmethod
  @cachetools.ttl_cache(ttl=ONE_HOUR)
  def _get_coreos_ami(cls, ec2_region, coreos_channel):
    """ Retrieve the CoreOS AMI id from the canonical listing.
    """
    stack_list_string = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).text
    stack_amis = dict([stack.split('=') for stack in stack_list_string.split('|')])
    return stack_amis[ec2_region]

  @coroutine
  def start_builder(self, realm, token, build_uuid):
    region = self.executor_config['EC2_REGION']
    channel = self.executor_config.get('COREOS_CHANNEL', 'stable')
    get_ami_callable = partial(self._get_coreos_ami, region, channel)
    coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable))
    user_data = self.generate_cloud_config(realm, token, channel, self.manager_hostname)

    logger.debug('Generated cloud config: %s', user_data)

    ec2_conn = self._get_conn()

    ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType(
      size=32,
      volume_type='gp2',
      delete_on_termination=True,
    )
    block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    block_devices['/dev/xvda'] = ssd_root_ebs

    interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
      subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'],
      groups=self.executor_config['EC2_SECURITY_GROUP_IDS'],
      associate_public_ip_address=True,
    )
    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)

    reservation = yield From(ec2_conn.run_instances(
      coreos_ami,
      instance_type=self.executor_config['EC2_INSTANCE_TYPE'],
      key_name=self.executor_config.get('EC2_KEY_NAME', None),
      user_data=user_data,
      instance_initiated_shutdown_behavior='terminate',
      block_device_map=block_devices,
      network_interfaces=interfaces,
    ))

    if not reservation.instances:
      raise ExecutorException('Unable to spawn builder instance.')
    elif len(reservation.instances) != 1:
      raise ExecutorException('EC2 started wrong number of instances!')

    launched = AsyncWrapper(reservation.instances[0])
    yield From(launched.add_tags({
      'Name': 'Quay Ephemeral Builder',
      'Realm': realm,
      'Token': token,
      'BuildUUID': build_uuid,
    }))
    raise Return(launched.id)

  @coroutine
  def stop_builder(self, builder_id):
    ec2_conn = self._get_conn()
    terminated_instances = yield From(ec2_conn.terminate_instances([builder_id]))
    if builder_id not in [si.id for si in terminated_instances]:
      raise ExecutorException('Unable to terminate instance: %s' % builder_id)


class PopenExecutor(BuilderExecutor):
  """ Implementation of BuilderExecutor which uses Popen to fork a quay-builder process.
  """
  def __init__(self, executor_config, manager_hostname):
    self._jobs = {}

    super(PopenExecutor, self).__init__(executor_config, manager_hostname)

  """ Executor which uses Popen to fork a quay-builder process.
  """
  @coroutine
  def start_builder(self, realm, token, build_uuid):
    # Now start a machine for this job, adding the machine id to the etcd information
    logger.debug('Forking process for build')
    import subprocess
    builder_env = {
      'TOKEN': token,
      'REALM': realm,
      'ENDPOINT': 'ws://localhost:8787',
      'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''),
      'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''),
      'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''),
    }

    logpipe = LogPipe(logging.INFO)
    spawned = subprocess.Popen('/Users/jake/bin/quay-builder', stdout=logpipe, stderr=logpipe,
                               env=builder_env)

    builder_id = str(uuid.uuid4())
    self._jobs[builder_id] = (spawned, logpipe)
    logger.debug('Builder spawned with id: %s', builder_id)
    raise Return(builder_id)

  @coroutine
  def stop_builder(self, builder_id):
    if builder_id not in self._jobs:
      raise ExecutorException('Builder id not being tracked by executor.')

    logger.debug('Killing builder with id: %s', builder_id)
    spawned, logpipe = self._jobs[builder_id]

    if spawned.poll() is None:
      spawned.kill()
    logpipe.close()


class LogPipe(threading.Thread):
  """ Adapted from http://codereview.stackexchange.com/a/17959
  """
  def __init__(self, level):
    """Setup the object with a logger and a loglevel
    and start the thread
    """
    threading.Thread.__init__(self)
    self.daemon = False
    self.level = level
    self.fd_read, self.fd_write = os.pipe()
    self.pipe_reader = os.fdopen(self.fd_read)
    self.start()

  def fileno(self):
    """Return the write file descriptor of the pipe
    """
    return self.fd_write

  def run(self):
    """Run the thread, logging everything.
    """
    for line in iter(self.pipe_reader.readline, ''):
      logging.log(self.level, line.strip('\n'))

    self.pipe_reader.close()

  def close(self):
    """Close the write end of the pipe.
    """
    os.close(self.fd_write)
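Aside (not part of the commit): a small usage sketch showing how LogPipe can be handed to subprocess.Popen so a child process's output ends up in the Python logger, which is exactly what PopenExecutor does with the quay-builder binary. The echo command below is just a stand-in:

import logging
import subprocess

from buildman.manager.executor import LogPipe

logging.basicConfig(level=logging.INFO)

logpipe = LogPipe(logging.INFO)  # Popen calls .fileno() and writes to the pipe's write end
proc = subprocess.Popen(['echo', 'hello from the builder'],
                        stdout=logpipe, stderr=logpipe)
proc.wait()
logpipe.close()  # close the write end so the reader thread exits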
@@ -12,7 +12,9 @@ from trollius.coroutines import From
 from datetime import timedelta

 from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
+from data import database
 from data.queue import WorkQueue
+from app import app

 logger = logging.getLogger(__name__)

@@ -21,8 +23,7 @@ TIMEOUT_PERIOD_MINUTES = 20
 JOB_TIMEOUT_SECONDS = 300
 MINIMUM_JOB_EXTENSION = timedelta(minutes=2)

-WEBSOCKET_PORT = 8787
-CONTROLLER_PORT = 8686
+HEARTBEAT_PERIOD_SEC = 30

 class BuildJobResult(object):
   """ Build job result enum """

@@ -34,14 +35,15 @@ class BuilderServer(object):
   """ Server which handles both HTTP and WAMP requests, managing the full state of the build
   controller.
   """
-  def __init__(self, server_hostname, queue, build_logs, user_files, lifecycle_manager_klass):
+  def __init__(self, registry_hostname, queue, build_logs, user_files, lifecycle_manager_klass,
+               lifecycle_manager_config, manager_hostname):
     self._loop = None
     self._current_status = 'starting'
     self._current_components = []
     self._job_count = 0

     self._session_factory = RouterSessionFactory(RouterFactory())
-    self._server_hostname = server_hostname
+    self._registry_hostname = registry_hostname
     self._queue = queue
     self._build_logs = build_logs
     self._user_files = user_files

@@ -49,8 +51,11 @@ class BuilderServer(object):
       self._register_component,
       self._unregister_component,
       self._job_heartbeat,
-      self._job_complete
+      self._job_complete,
+      manager_hostname,
+      HEARTBEAT_PERIOD_SEC,
     )
+    self._lifecycle_manager_config = lifecycle_manager_config

     self._shutdown_event = Event()
     self._current_status = 'running'

@@ -67,18 +72,17 @@ class BuilderServer(object):
     self._controller_app = controller_app

-  def run(self, host, ssl=None):
+  def run(self, host, websocket_port, controller_port, ssl=None):
     logger.debug('Initializing the lifecycle manager')
-    self._lifecycle_manager.initialize()
+    self._lifecycle_manager.initialize(self._lifecycle_manager_config)

     logger.debug('Initializing all members of the event loop')
     loop = trollius.get_event_loop()
-    trollius.Task(self._initialize(loop, host, ssl))

-    logger.debug('Starting server on port %s, with controller on port %s', WEBSOCKET_PORT,
-                 CONTROLLER_PORT)
+    logger.debug('Starting server on port %s, with controller on port %s', websocket_port,
+                 controller_port)
     try:
-      loop.run_forever()
+      loop.run_until_complete(self._initialize(loop, host, websocket_port, controller_port, ssl))
     except KeyboardInterrupt:
       pass
     finally:

@@ -102,7 +106,7 @@ class BuilderServer(object):
     component.parent_manager = self._lifecycle_manager
     component.build_logs = self._build_logs
     component.user_files = self._user_files
-    component.server_hostname = self._server_hostname
+    component.registry_hostname = self._registry_hostname

     self._current_components.append(component)
     self._session_factory.add(component)

@@ -116,32 +120,32 @@ class BuilderServer(object):
     self._session_factory.remove(component)

   def _job_heartbeat(self, build_job):
-    WorkQueue.extend_processing(build_job.job_item(), seconds_from_now=JOB_TIMEOUT_SECONDS,
-                                retry_count=1, minimum_extension=MINIMUM_JOB_EXTENSION)
+    self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS,
+                                  minimum_extension=MINIMUM_JOB_EXTENSION)

   def _job_complete(self, build_job, job_status):
     if job_status == BuildJobResult.INCOMPLETE:
-      self._queue.incomplete(build_job.job_item(), restore_retry=True, retry_after=30)
-    elif job_status == BuildJobResult.ERROR:
-      self._queue.incomplete(build_job.job_item(), restore_retry=False)
+      self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
     else:
-      self._queue.complete(build_job.job_item())
+      self._queue.complete(build_job.job_item)

     self._job_count = self._job_count - 1

     if self._current_status == 'shutting_down' and not self._job_count:
       self._shutdown_event.set()

-    # TODO(jschorr): check for work here?
-
   @trollius.coroutine
   def _work_checker(self):
     while self._current_status == 'running':
-      logger.debug('Checking for more work for %d active workers', self._lifecycle_manager.num_workers())
+      with database.CloseForLongOperation(app.config):
+        yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
+
+      logger.debug('Checking for more work for %d active workers',
+                   self._lifecycle_manager.num_workers())
+
       job_item = self._queue.get(processing_time=self._lifecycle_manager.setup_time())
       if job_item is None:
         logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT)
-        yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
         continue

       try:

@@ -149,20 +153,21 @@ class BuilderServer(object):
       except BuildJobLoadException as irbe:
         logger.exception(irbe)
         self._queue.incomplete(job_item, restore_retry=False)
+        continue

       logger.debug('Build job found. Checking for an avaliable worker.')
-      if self._lifecycle_manager.schedule(build_job, self._loop):
+      scheduled = yield From(self._lifecycle_manager.schedule(build_job))
+      if scheduled:
         self._job_count = self._job_count + 1
         logger.debug('Build job scheduled. Running: %s', self._job_count)
       else:
         logger.debug('All workers are busy. Requeuing.')
         self._queue.incomplete(job_item, restore_retry=True, retry_after=0)
-
-      yield From(trollius.sleep(WORK_CHECK_TIMEOUT))

   @trollius.coroutine
-  def _initialize(self, loop, host, ssl=None):
+  def _initialize(self, loop, host, websocket_port, controller_port, ssl=None):
     self._loop = loop

     # Create the WAMP server.

@@ -170,8 +175,8 @@ class BuilderServer(object):
     transport_factory.setProtocolOptions(failByDrop=True)

     # Initialize the controller server and the WAMP server
-    create_wsgi_server(self._controller_app, loop=loop, host=host, port=CONTROLLER_PORT, ssl=ssl)
-    yield From(loop.create_server(transport_factory, host, WEBSOCKET_PORT, ssl=ssl))
+    create_wsgi_server(self._controller_app, loop=loop, host=host, port=controller_port, ssl=ssl)
+    yield From(loop.create_server(transport_factory, host, websocket_port, ssl=ssl))

     # Initialize the work queue checker.
     yield From(self._work_checker())
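Aside (not part of the commit): a hypothetical wiring sketch for the new BuilderServer signature. The constructor argument order and the run() parameters come from the diff above; the hostnames, config values, module path of BuilderServer, and the None placeholders are assumptions for illustration only:

from buildman.server import BuilderServer  # assumption: module path of the class shown above
from buildman.manager.ephemeral import EphemeralBuilderManager

# Stand-ins for the real application objects (work queue, build logs, user files).
queue = build_logs = user_files = None

lifecycle_config = {
  'EXECUTOR': 'ec2',
  'EXECUTOR_CONFIG': {'EC2_REGION': 'us-east-1'},
  'ALLOWED_WORKER_COUNT': 2,
}

server = BuilderServer('registry.example.com',        # registry_hostname
                       queue, build_logs, user_files,
                       EphemeralBuilderManager,
                       lifecycle_config,
                       'buildman.example.com')        # manager_hostname

# Needs a reachable etcd and the rest of the app to actually serve builds.
server.run('0.0.0.0', websocket_port=8787, controller_port=8686, ssl=None)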
buildman/templates/cloudconfig.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
#cloud-config

ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz Joey's Mac
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 jimmyzelinskie
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNvw8qo9m8np7yQ/Smv/oklM8bo8VyNRZriGYBDuolWDL/mZpYCQnZJXphQo7RFdNABYistikjJlBuuwUohLf2uSq0iKoFa2TgwI43wViWzvuzU4nA02/ITD5BZdmWAFNyIoqeB50Ol4qUgDwLAZ+7Kv7uCi6chcgr9gTi99jY3GHyZjrMiXMHGVGi+FExFuzhVC2drKjbz5q6oRfQeLtNfG4psl5GU3MQU6FkX4fgoCx0r9R48/b7l4+TT7pWblJQiRfeldixu6308vyoTUEHasdkU3/X0OTaGz/h5XqTKnGQc6stvvoED3w+L3QFp0H5Z8sZ9stSsitmCBrmbcKZ jakemoshenko

write_files:
- path: /root/overrides.list
  permission: '0644'
  content: |
    REALM={{ realm }}
    TOKEN={{ token }}
    SERVER=wss://{{ manager_hostname }}

coreos:
  update:
    reboot-strategy: off
    group: {{ coreos_channel }}

  units:
  - name: quay-builder.service
    command: start
    content: |
      [Unit]
      Description=Quay builder container
      Author=Jake Moshenko
      After=docker.service

      [Service]
      TimeoutStartSec=600
      TimeoutStopSec=2000
      ExecStartPre=/usr/bin/docker login -u {{ quay_username }} -p {{ quay_password }} -e unused quay.io
      ExecStart=/usr/bin/docker run --rm --net=host --name quay-builder --privileged --env-file /root/overrides.list -v /var/run/docker.sock:/var/run/docker.sock -v /usr/share/ca-certificates:/etc/ssl/certs quay.io/coreos/registry-build-worker:{{ worker_tag }}
      ExecStop=/usr/bin/docker stop quay-builder
      ExecStopPost=/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff"
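Aside (not part of the commit): a sketch of how executor.py renders this template before passing the result to EC2 as instance user data. The loader path and keyword names are the ones used in executor.py above; the values are made up, and this assumes it is run from the repository root:

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('buildman/templates'))
template = env.get_template('cloudconfig.yaml')

user_data = template.render(
  realm='realm-uuid',
  token='token-uuid',
  quay_username='example+builder',      # assumption: a robot account with pull access
  quay_password='not-a-real-password',
  manager_hostname='buildman.example.com',
  coreos_channel='stable',
  worker_tag='latest',
)
print(user_data)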
@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 server {
     listen 80 default_server;
     server_name _;

@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 types_hash_max_size 2048;
 include /usr/local/nginx/conf/mime.types.default;

@@ -30,4 +32,4 @@ upstream build_manager_controller_server {

 upstream build_manager_websocket_server {
     server localhost:8787;
 }
@@ -1,2 +0,0 @@ (file deleted)
#!/bin/sh
exec svlogd /var/log/dockerfilebuild/

@@ -1,6 +0,0 @@ (file deleted)
#! /bin/bash

sv start tutumdocker || exit 1

cd /
venv/bin/python -m workers.dockerfilebuild

@@ -1,2 +0,0 @@ (file deleted)
#!/bin/sh
exec svlogd /var/log/tutumdocker/
@@ -1,96 +0,0 @@ (file deleted)
#!/bin/bash

# First, make sure that cgroups are mounted correctly.
CGROUP=/sys/fs/cgroup

[ -d $CGROUP ] ||
  mkdir $CGROUP

mountpoint -q $CGROUP ||
  mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
    echo "Could not make a tmpfs mount. Did you use -privileged?"
    exit 1
  }

if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
then
  mount -t securityfs none /sys/kernel/security || {
    echo "Could not mount /sys/kernel/security."
    echo "AppArmor detection and -privileged mode might break."
  }
fi

# Mount the cgroup hierarchies exactly as they are in the parent system.
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
do
  [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
  mountpoint -q $CGROUP/$SUBSYS ||
    mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS

  # The two following sections address a bug which manifests itself
  # by a cryptic "lxc-start: no ns_cgroup option specified" when
  # trying to start containers withina container.
  # The bug seems to appear when the cgroup hierarchies are not
  # mounted on the exact same directories in the host, and in the
  # container.

  # Named, control-less cgroups are mounted with "-o name=foo"
  # (and appear as such under /proc/<pid>/cgroup) but are usually
  # mounted on a directory named "foo" (without the "name=" prefix).
  # Systemd and OpenRC (and possibly others) both create such a
  # cgroup. To avoid the aforementioned bug, we symlink "foo" to
  # "name=foo". This shouldn't have any adverse effect.
  echo $SUBSYS | grep -q ^name= && {
    NAME=$(echo $SUBSYS | sed s/^name=//)
    ln -s $SUBSYS $CGROUP/$NAME
  }

  # Likewise, on at least one system, it has been reported that
  # systemd would mount the CPU and CPU accounting controllers
  # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
  # but on a directory called "cpu,cpuacct" (note the inversion
  # in the order of the groups). This tries to work around it.
  [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
done

# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
grep -q :devices: /proc/1/cgroup ||
  echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
grep -qw devices /proc/1/cgroup ||
  echo "WARNING: it looks like the 'devices' cgroup is not mounted."

# Now, close extraneous file descriptors.
pushd /proc/self/fd >/dev/null
for FD in *
do
  case "$FD" in
  # Keep stdin/stdout/stderr
  [012])
    ;;
  # Nuke everything else
  *)
    eval exec "$FD>&-"
    ;;
  esac
done
popd >/dev/null


# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start.
rm -rf /var/run/docker.pid

chmod 777 /var/lib/lxc
chmod 777 /var/lib/docker


# If we were given a PORT environment variable, start as a simple daemon;
# otherwise, spawn a shell as well
if [ "$PORT" ]
then
  exec docker -d -H 0.0.0.0:$PORT
else
  docker -d -D -e lxc 2>&1
fi
@@ -1,8 +1,12 @@
+# vim: ft=nginx
+
 include root-base.conf;

 http {
   include http-base.conf;

+  include rate-limiting.conf;
+
   server {
     include server-base.conf;

@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 include root-base.conf;

 http {

@@ -5,6 +7,8 @@ http {

   include hosted-http-base.conf;

+  include rate-limiting.conf;
+
   server {
     include server-base.conf;

@@ -18,4 +22,20 @@ http {
     ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
     ssl_prefer_server_ciphers on;
   }
+
+  server {
+    include proxy-protocol.conf;
+
+    include proxy-server-base.conf;
+
+    listen 8443 default proxy_protocol;
+
+    ssl on;
+    ssl_certificate ./stack/ssl.cert;
+    ssl_certificate_key ./stack/ssl.key;
+    ssl_session_timeout 5m;
+    ssl_protocols SSLv3 TLSv1;
+    ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
+    ssl_prefer_server_ciphers on;
+  }
 }
conf/proxy-protocol.conf (new file, 8 lines)
@@ -0,0 +1,8 @@
# vim: ft=nginx

set_real_ip_from 0.0.0.0/0;
real_ip_header proxy_protocol;
log_format elb_pp '$proxy_protocol_addr - $remote_user [$time_local] '
                  '"$request" $status $body_bytes_sent '
                  '"$http_referer" "$http_user_agent"';
access_log /var/log/nginx/nginx.access.log elb_pp;
conf/proxy-server-base.conf (new file, 91 lines)
@@ -0,0 +1,91 @@
# vim: ft=nginx

client_body_temp_path /var/log/nginx/client_body 1 2;
server_name _;

keepalive_timeout 5;

if ($args ~ "_escaped_fragment_") {
  rewrite ^ /snapshot$uri;
}

proxy_set_header X-Forwarded-For $proxy_protocol_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_redirect off;

proxy_set_header Transfer-Encoding $http_transfer_encoding;

location / {
  proxy_pass http://web_app_server;

  limit_req zone=webapp burst=25 nodelay;
}

location /realtime {
  proxy_pass http://web_app_server;
  proxy_buffering off;
  proxy_request_buffering off;
}

location /v1/repositories/ {
  proxy_buffering off;

  proxy_request_buffering off;

  proxy_pass http://registry_app_server;
  proxy_read_timeout 2000;
  proxy_temp_path /var/log/nginx/proxy_temp 1 2;

  client_max_body_size 20G;

  limit_req zone=repositories burst=5 nodelay;
}

location /v1/ {
  proxy_buffering off;

  proxy_request_buffering off;

  proxy_pass http://registry_app_server;
  proxy_read_timeout 2000;
  proxy_temp_path /var/log/nginx/proxy_temp 1 2;

  client_max_body_size 20G;
}

location /c1/ {
  proxy_buffering off;

  proxy_request_buffering off;

  proxy_pass http://verbs_app_server;
  proxy_read_timeout 2000;
  proxy_temp_path /var/log/nginx/proxy_temp 1 2;

  limit_req zone=api burst=5 nodelay;
}

location /static/ {
  # checks for static file, if not found proxy to app
  alias /static/;
}

location /v1/_ping {
  add_header Content-Type text/plain;
  add_header X-Docker-Registry-Version 0.6.0;
  add_header X-Docker-Registry-Standalone 0;
  return 200 'true';
}

location ~ ^/b1/controller(/?)(.*) {
  proxy_pass http://build_manager_controller_server/$2;
  proxy_read_timeout 2000;
}

location ~ ^/b1/socket(/?)(.*) {
  proxy_pass http://build_manager_websocket_server/$2;
  proxy_http_version 1.1;
  proxy_set_header Upgrade $http_upgrade;
  proxy_set_header Connection "upgrade";
}
conf/rate-limiting.conf (new file, 7 lines)
@@ -0,0 +1,7 @@
# vim: ft=nginx

limit_req_zone $proxy_protocol_addr zone=webapp:10m rate=25r/s;
limit_req_zone $proxy_protocol_addr zone=repositories:10m rate=1r/s;
limit_req_zone $proxy_protocol_addr zone=api:10m rate=1r/s;
limit_req_status 429;
limit_req_log_level warn;
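Aside (not part of the commit): with limit_req_status 429 configured above, a client that exceeds the per-address rate simply receives HTTP 429 and should back off and retry. A small illustrative client-side sketch; the URL is a placeholder:

import time
import requests

def get_with_backoff(url, attempts=5):
  resp = None
  for attempt in range(attempts):
    resp = requests.get(url)
    if resp.status_code != 429:
      return resp
    time.sleep(2 ** attempt)  # exponential backoff before retrying
  return resp

print(get_with_backoff('https://quay.example.com/v1/_ping').status_code)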
@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 pid /tmp/nginx.pid;
 error_log /var/log/nginx/nginx.error.log;

@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 client_body_temp_path /var/log/nginx/client_body 1 2;
 server_name _;
@@ -29,6 +29,16 @@ SCHEME_RANDOM_FUNCTION = {
   'postgresql+psycopg2': fn.Random,
 }

+def real_for_update(query):
+  return query.for_update()
+
+def null_for_update(query):
+  return query
+
+SCHEME_SPECIALIZED_FOR_UPDATE = {
+  'sqlite': null_for_update,
+}
+
 class CallableProxy(Proxy):
   def __call__(self, *args, **kwargs):
     if self.obj is None:

@@ -68,6 +78,15 @@ class UseThenDisconnect(object):
 db = Proxy()
 read_slave = Proxy()
 db_random_func = CallableProxy()
+db_for_update = CallableProxy()
+
+
+def validate_database_url(url, connect_timeout=5):
+  driver = _db_from_url(url, {
+    'connect_timeout': connect_timeout
+  })
+  driver.connect()
+  driver.close()

 def _db_from_url(url, db_kwargs):

@@ -82,6 +101,10 @@ def _db_from_url(url, db_kwargs):
   if parsed_url.password:
     db_kwargs['password'] = parsed_url.password

+  # Note: sqlite does not support connect_timeout.
+  if parsed_url.drivername == 'sqlite' and 'connect_timeout' in db_kwargs:
+    del db_kwargs['connect_timeout']
+
   return SCHEME_DRIVERS[parsed_url.drivername](parsed_url.database, **db_kwargs)

@@ -93,6 +116,8 @@ def configure(config_object):

   parsed_write_uri = make_url(write_db_uri)
   db_random_func.initialize(SCHEME_RANDOM_FUNCTION[parsed_write_uri.drivername])
+  db_for_update.initialize(SCHEME_SPECIALIZED_FOR_UPDATE.get(parsed_write_uri.drivername,
+                                                             real_for_update))

   read_slave_uri = config_object.get('DB_READ_SLAVE_URI', None)
   if read_slave_uri is not None:

@@ -122,8 +147,9 @@ def close_db_filter(_):

 class QuayUserField(ForeignKeyField):
-  def __init__(self, allows_robots=False, *args, **kwargs):
+  def __init__(self, allows_robots=False, robot_null_delete=False, *args, **kwargs):
     self.allows_robots = allows_robots
+    self.robot_null_delete = robot_null_delete
     if not 'rel_model' in kwargs:
       kwargs['rel_model'] = User

@@ -157,7 +183,11 @@ class User(BaseModel):
     for query, fk in self.dependencies(search_nullable=True):
       if isinstance(fk, QuayUserField) and fk.allows_robots:
         model = fk.model_class
-        model.delete().where(query).execute()
+
+        if fk.robot_null_delete:
+          model.update(**{fk.name: None}).where(query).execute()
+        else:
+          model.delete().where(query).execute()

     # Delete the instance itself.
     super(User, self).delete_instance(recursive=False, delete_nullable=False)

@@ -477,7 +507,7 @@ class LogEntry(BaseModel):
   kind = ForeignKeyField(LogEntryKind, index=True)
   account = QuayUserField(index=True, related_name='account')
   performer = QuayUserField(allows_robots=True, index=True, null=True,
-                            related_name='performer')
+                            related_name='performer', robot_null_delete=True)
   repository = ForeignKeyField(Repository, index=True, null=True)
   datetime = DateTimeField(default=datetime.now, index=True)
   ip = CharField(null=True)
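Aside (not part of the commit): db_for_update gives callers SELECT ... FOR UPDATE on databases that support row locks (via peewee's for_update()), and is a no-op on SQLite per SCHEME_SPECIALIZED_FOR_UPDATE above. A minimal sketch of the intended usage; the transaction helper here is simplified (the real code uses the app's DB_TRANSACTION_FACTORY), and the rename logic is illustrative:

from data.database import db, db_for_update, User

def rename_user(user_id, new_username):
  with db.transaction():
    # Lock the row for the duration of the transaction so a concurrent writer
    # cannot rename or delete the same user underneath us.
    user = db_for_update(User.select().where(User.id == user_id)).get()
    user.username = new_username
    user.save()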
@ -15,7 +15,7 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
|
||||||
RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
|
RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
|
||||||
DerivedImageStorage, ImageStorageTransformation, random_string_generator,
|
DerivedImageStorage, ImageStorageTransformation, random_string_generator,
|
||||||
db, BUILD_PHASE, QuayUserField, ImageStorageSignature,
|
db, BUILD_PHASE, QuayUserField, ImageStorageSignature,
|
||||||
ImageStorageSignatureKind)
|
ImageStorageSignatureKind, validate_database_url, db_for_update)
|
||||||
from peewee import JOIN_LEFT_OUTER, fn
|
from peewee import JOIN_LEFT_OUTER, fn
|
||||||
from util.validation import (validate_username, validate_email, validate_password,
|
from util.validation import (validate_username, validate_email, validate_password,
|
||||||
INVALID_PASSWORD_MESSAGE)
|
INVALID_PASSWORD_MESSAGE)
|
||||||
|
@ -296,6 +296,9 @@ def delete_robot(robot_username):
|
||||||
|
|
||||||
|
|
||||||
def _list_entity_robots(entity_name):
|
def _list_entity_robots(entity_name):
|
||||||
|
""" Return the list of robots for the specified entity. This MUST return a query, not a
|
||||||
|
materialized list so that callers can use db_for_update.
|
||||||
|
"""
|
||||||
return (User
|
return (User
|
||||||
.select()
|
.select()
|
||||||
.join(FederatedLogin)
|
.join(FederatedLogin)
|
||||||
|
@ -904,14 +907,17 @@ def change_password(user, new_password):
|
||||||
delete_notifications_by_kind(user, 'password_required')
|
delete_notifications_by_kind(user, 'password_required')
|
||||||
|
|
||||||
|
|
||||||
def change_username(user, new_username):
|
def change_username(user_id, new_username):
|
||||||
(username_valid, username_issue) = validate_username(new_username)
|
(username_valid, username_issue) = validate_username(new_username)
|
||||||
if not username_valid:
|
if not username_valid:
|
||||||
raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))
|
raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))
|
||||||
|
|
||||||
with config.app_config['DB_TRANSACTION_FACTORY'](db):
|
with config.app_config['DB_TRANSACTION_FACTORY'](db):
|
||||||
|
# Reload the user for update
|
||||||
|
user = db_for_update(User.select().where(User.id == user_id)).get()
|
||||||
|
|
||||||
# Rename the robots
|
# Rename the robots
|
||||||
for robot in _list_entity_robots(user.username):
|
for robot in db_for_update(_list_entity_robots(user.username)):
|
||||||
_, robot_shortname = parse_robot_username(robot.username)
|
_, robot_shortname = parse_robot_username(robot.username)
|
||||||
new_robot_name = format_robot_username(new_username, robot_shortname)
|
new_robot_name = format_robot_username(new_username, robot_shortname)
|
||||||
robot.username = new_robot_name
|
robot.username = new_robot_name
|
||||||
@@ -1252,9 +1258,9 @@ def _find_or_link_image(existing_image, repository, username, translations, pref
     storage.locations = {placement.location.name
                          for placement in storage.imagestorageplacement_set}

     new_image = Image.create(docker_image_id=existing_image.docker_image_id,
                              repository=repository, storage=storage,
                              ancestors=new_image_ancestry)

     logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
     translations[existing_image.id] = new_image.id

@@ -1433,7 +1439,7 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
                    Image.docker_image_id == docker_image_id))

   try:
-    fetched = query.get()
+    fetched = db_for_update(query).get()
   except Image.DoesNotExist:
     raise DataModelException('No image with specified id and repository')

@@ -2287,11 +2293,20 @@ def delete_user(user):
   # TODO: also delete any repository data associated


-def check_health():
+def check_health(app_config):
+  # Attempt to connect to the database first. If the DB is not responding,
+  # using the validate_database_url will timeout quickly, as opposed to
+  # making a normal connect which will just hang (thus breaking the health
+  # check).
+  try:
+    validate_database_url(app_config['DB_URI'], connect_timeout=3)
+  except Exception:
+    logger.exception('Could not connect to the database')
+    return False
+
   # We will connect to the db, check that it contains some log entry kinds
   try:
-    found_count = LogEntryKind.select().count()
-    return found_count > 0
+    return bool(list(LogEntryKind.select().limit(1)))
   except:
     return False

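Note: the fast-fail behaviour above relies on validate_database_url() honouring a short connect timeout so the health endpoint never hangs on a dead database. That helper is not part of this hunk; the snippet below only illustrates the fast-fail idea with the standard library (Python 2, matching the codebase) and is not the real validate_database_url.

# Illustration of the fast-fail idea only; the real validate_database_url() lives in
# data.database and is not shown in this diff.
import socket
from urlparse import urlparse   # Python 2, as used elsewhere in this codebase

def database_reachable(db_uri, connect_timeout=3):
  parsed = urlparse(db_uri)
  host = parsed.hostname or 'localhost'
  port = parsed.port or 3306    # assumption: fall back to the MySQL port
  try:
    sock = socket.create_connection((host, port), timeout=connect_timeout)
    sock.close()
    return True
  except (socket.error, socket.timeout):
    return False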
@@ -1,6 +1,6 @@
 from datetime import datetime, timedelta

-from data.database import QueueItem, db
+from data.database import QueueItem, db, db_for_update
 from util.morecollections import AttrDict


@@ -31,16 +31,24 @@ class WorkQueue(object):
                              QueueItem.processing_expires > now,
                              QueueItem.queue_name ** name_match_query))

-  def _available_jobs(self, now, name_match_query, running_query):
+  def _available_jobs(self, now, name_match_query):
     return (QueueItem
             .select()
             .where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now,
                    ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
-                   QueueItem.retries_remaining > 0, ~(QueueItem.queue_name << running_query)))
+                   QueueItem.retries_remaining > 0))
+
+  def _available_jobs_not_running(self, now, name_match_query, running_query):
+    return (self
+            ._available_jobs(now, name_match_query)
+            .where(~(QueueItem.queue_name << running_query)))

   def _name_match_query(self):
     return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)

+  def _item_by_id_for_update(self, queue_id):
+    return db_for_update(QueueItem.select().where(QueueItem.id == queue_id)).get()
+
   def update_metrics(self):
     if self._reporter is None:
       return
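Note: the split of _available_jobs_not_running() out of _available_jobs() leans on peewee's query composition: chained .where() clauses are AND-ed together, `**` is (I)LIKE and `<<` is IN. The following self-contained sketch shows the same composition pattern with a stand-in QueueItem model (SQLite and the model fields here are placeholders, not the repository's real schema module).

# Sketch of the composition pattern above (peewee 2.x style); queries are built but
# deliberately not executed so the example stays backend-agnostic.
from datetime import datetime
from peewee import (SqliteDatabase, Model, CharField, DateTimeField, BooleanField,
                    IntegerField)

db = SqliteDatabase(':memory:')

class QueueItem(Model):
  queue_name = CharField()
  available = BooleanField(default=True)
  available_after = DateTimeField(default=datetime.utcnow)
  processing_expires = DateTimeField(null=True)
  retries_remaining = IntegerField(default=5)

  class Meta:
    database = db

def available_jobs(now, name_match_query):
  # Base query: anything that could be worked on right now.
  return (QueueItem
          .select()
          .where(QueueItem.queue_name ** name_match_query,
                 QueueItem.available_after <= now,
                 ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
                 QueueItem.retries_remaining > 0))

def available_jobs_not_running(now, name_match_query, running_query):
  # One extra .where(): exclude queue names present in the running subquery.
  return available_jobs(now, name_match_query).where(~(QueueItem.queue_name << running_query))

db.create_tables([QueueItem], safe=True)
running = QueueItem.select(QueueItem.queue_name).where(QueueItem.available == False)
not_running = available_jobs_not_running(datetime.utcnow(), 'builds/%', running)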
@@ -52,7 +60,7 @@ class WorkQueue(object):
     running_query = self._running_jobs(now, name_match_query)
     running_count = running_query.distinct().count()

-    avialable_query = self._available_jobs(now, name_match_query, running_query)
+    avialable_query = self._available_jobs_not_running(now, name_match_query, running_query)
     available_count = avialable_query.select(QueueItem.queue_name).distinct().count()

     self._reporter(self._currently_processing, running_count, running_count + available_count)
@@ -78,19 +86,26 @@ class WorkQueue(object):
   def get(self, processing_time=300):
     """
     Get an available item and mark it as unavailable for the default of five
-    minutes.
+    minutes. The result of this method must always be composed of simple
+    python objects which are JSON serializable for network portability reasons.
     """
     now = datetime.utcnow()

     name_match_query = self._name_match_query()

-    with self._transaction_factory(db):
-      running = self._running_jobs(now, name_match_query)
-      avail = self._available_jobs(now, name_match_query, running)
+    running = self._running_jobs(now, name_match_query)
+    avail = self._available_jobs_not_running(now, name_match_query, running)

     item = None
     try:
-      db_item = avail.order_by(QueueItem.id).get()
+      db_item_candidate = avail.order_by(QueueItem.id).get()
+
+      with self._transaction_factory(db):
+        still_available_query = (db_for_update(self
+                                               ._available_jobs(now, name_match_query)
+                                               .where(QueueItem.id == db_item_candidate.id)))
+
+        db_item = still_available_query.get()
         db_item.available = False
         db_item.processing_expires = now + timedelta(seconds=processing_time)
         db_item.retries_remaining -= 1
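Note: the hunk above changes get() to a two-phase claim: pick a candidate row with a cheap read outside any transaction, then re-select that exact row with db_for_update inside a transaction before marking it unavailable, so two workers racing for the same item cannot both claim it. A condensed sketch of the same pattern, using the names from the diff (the trailing save is assumed to happen later in the real method):

# Condensed sketch of the claim pattern introduced above, not the full get() method.
candidate = avail.order_by(QueueItem.id).get()        # cheap read, no lock held

with self._transaction_factory(db):
  # Re-check availability while holding a row lock; raises QueueItem.DoesNotExist
  # if another worker claimed the item in the meantime.
  locked = db_for_update(self._available_jobs(now, name_match_query)
                         .where(QueueItem.id == candidate.id)).get()
  locked.available = False
  locked.processing_expires = now + timedelta(seconds=processing_time)
  locked.retries_remaining -= 1
  locked.save()  # assumption: the real method persists the claim further down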
@@ -102,22 +117,22 @@ class WorkQueue(object):
         })

         self._currently_processing = True
     except QueueItem.DoesNotExist:
       self._currently_processing = False

     # Return a view of the queue item rather than an active db object
     return item

   def complete(self, completed_item):
     with self._transaction_factory(db):
-      completed_item_obj = QueueItem.get(QueueItem.id == completed_item.id)
+      completed_item_obj = self._item_by_id_for_update(completed_item.id)
       completed_item_obj.delete_instance()
       self._currently_processing = False

   def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
     with self._transaction_factory(db):
       retry_date = datetime.utcnow() + timedelta(seconds=retry_after)
-      incomplete_item_obj = QueueItem.get(QueueItem.id == incomplete_item.id)
+      incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
       incomplete_item_obj.available_after = retry_date
       incomplete_item_obj.available = True
@@ -127,16 +142,12 @@ class WorkQueue(object):
       incomplete_item_obj.save()
       self._currently_processing = False

-  @staticmethod
-  def extend_processing(queue_item_info, seconds_from_now, retry_count=None,
-                        minimum_extension=MINIMUM_EXTENSION):
-    queue_item = QueueItem.get(QueueItem.id == queue_item_info.id)
-    new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
+  def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION):
+    with self._transaction_factory(db):
+      queue_item = self._item_by_id_for_update(item.id)
+      new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)

       # Only actually write the new expiration to the db if it moves the expiration some minimum
       if new_expiration - queue_item.processing_expires > minimum_extension:
-        if retry_count is not None:
-          queue_item.retries_remaining = retry_count
-
-        queue_item.processing_expires = new_expiration
-        queue_item.save()
+        queue_item.processing_expires = new_expiration
+        queue_item.save()
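Note: extend_processing is now an instance method that reloads the item under a row lock and only writes the new expiration when it moves by at least minimum_extension. A hypothetical caller might use it as a heartbeat for long-running jobs; the sketch below uses the signature from the diff, while the job/handler objects are purely illustrative.

# Hypothetical worker loop: keep a long job alive by pushing its processing deadline
# out while it makes progress (extend_processing signature taken from the diff above).
import time

def run_job_with_heartbeat(work_queue, job_item, job, heartbeat_seconds=30):
  while not job.is_finished():          # illustrative job object, not from the repo
    job.do_next_step()
    # The queue skips the write if the extension is smaller than MINIMUM_EXTENSION.
    work_queue.extend_processing(job_item, seconds_from_now=300)
    time.sleep(heartbeat_seconds)
  work_queue.complete(job_item)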
@@ -4,6 +4,12 @@
   <meta name="viewport" content="width=device-width" />
   <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
   <title>{{ subject }}</title>
+
+  {% if action_metadata %}
+  <script type="application/ld+json">
+    {{ action_metadata }}
+  </script>
+  {% endif %}
 </head>
 <body bgcolor="#FFFFFF" style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; width: 100% !important; height: 100%; margin: 0; padding: 0;"><style type="text/css">
 @media only screen and (max-width: 600px) {
@@ -72,8 +72,8 @@ def build_status_view(build_obj, can_write=False):
   # minutes. If not, then the build timed out.
   if phase != database.BUILD_PHASE.COMPLETE and phase != database.BUILD_PHASE.ERROR:
     if status is not None and 'heartbeat' in status and status['heartbeat']:
-      heartbeat = datetime.datetime.fromtimestamp(status['heartbeat'])
-      if datetime.datetime.now() - heartbeat > datetime.timedelta(minutes=1):
+      heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
+      if datetime.datetime.utcnow() - heartbeat > datetime.timedelta(minutes=1):
         phase = database.BUILD_PHASE.INTERNAL_ERROR

   logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config)
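Note: the switch to utcfromtimestamp/utcnow keeps both sides of the heartbeat-age comparison in UTC. Mixing fromtimestamp (local wall-clock) with utcnow() skews the age by the host's UTC offset, which the small example below makes visible:

# On any host that is not at UTC, the two conversions of the same epoch value differ
# by the local UTC offset, which is exactly the skew the fix above removes.
import datetime
import time

epoch = time.time()
local_dt = datetime.datetime.fromtimestamp(epoch)      # local wall-clock time
utc_dt = datetime.datetime.utcfromtimestamp(epoch)     # UTC

skew = local_dt - utc_dt
print('apparent heartbeat skew when mixing local and UTC: %s' % skew)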
@@ -246,7 +246,7 @@ class User(ApiResource):
         # Username already used
         raise request_error(message='Username is already in use')

-      model.change_username(user, new_username)
+      model.change_username(user.id, new_username)

     except model.InvalidPasswordException, ex:
       raise request_error(exception=ex)
@@ -4,14 +4,17 @@ import json
 import string
 import datetime

+# Register the various exceptions via decorators.
+import endpoints.decorated
+
 from flask import make_response, render_template, request, abort, session
-from flask.ext.login import login_user, UserMixin
+from flask.ext.login import login_user
 from flask.ext.principal import identity_changed
 from random import SystemRandom

 from data import model
 from data.database import db
-from app import app, login_manager, dockerfile_build_queue, notification_queue, oauth_apps
+from app import app, oauth_apps, dockerfile_build_queue, LoginWrappedDBUser

 from auth.permissions import QuayDeferredPermissionUser
 from auth import scopes

@@ -21,7 +24,6 @@ from functools import wraps
 from config import getFrontendVisibleConfig
 from external_libraries import get_external_javascript, get_external_css
 from endpoints.notificationhelper import spawn_notification
-from util.useremails import CannotSendEmailException

 import features

@@ -84,34 +86,8 @@ def param_required(param_name):
   return wrapper


-@login_manager.user_loader
-def load_user(user_uuid):
-  logger.debug('User loader loading deferred user with uuid: %s' % user_uuid)
-  return _LoginWrappedDBUser(user_uuid)
-
-
-class _LoginWrappedDBUser(UserMixin):
-  def __init__(self, user_uuid, db_user=None):
-    self._uuid = user_uuid
-    self._db_user = db_user
-
-  def db_user(self):
-    if not self._db_user:
-      self._db_user = model.get_user_by_uuid(self._uuid)
-    return self._db_user
-
-  def is_authenticated(self):
-    return self.db_user() is not None
-
-  def is_active(self):
-    return self.db_user().verified
-
-  def get_id(self):
-    return unicode(self._uuid)
-
-
 def common_login(db_user):
-  if login_user(_LoginWrappedDBUser(db_user.uuid, db_user)):
+  if login_user(LoginWrappedDBUser(db_user.uuid, db_user)):
     logger.debug('Successfully signed in as: %s (%s)' % (db_user.username, db_user.uuid))
     new_identity = QuayDeferredPermissionUser(db_user.uuid, 'user_uuid', {scopes.DIRECT_LOGIN})
     identity_changed.send(app, identity=new_identity)
@@ -121,17 +97,6 @@ def common_login(db_user):
     logger.debug('User could not be logged in, inactive?.')
     return False


-@app.errorhandler(model.DataModelException)
-def handle_dme(ex):
-  logger.exception(ex)
-  return make_response(json.dumps({'message': ex.message}), 400)
-
-@app.errorhandler(CannotSendEmailException)
-def handle_emailexception(ex):
-  message = 'Could not send email. Please contact an administrator and report this problem.'
-  return make_response(json.dumps({'message': message}), 400)
-
 def random_string():
   random = SystemRandom()
   return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(8)])
@@ -248,7 +213,7 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
   dockerfile_build_queue.put([repository.namespace_user.username, repository.name], json.dumps({
     'build_uuid': build_request.uuid,
     'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
-  }), retries_remaining=1)
+  }), retries_remaining=3)

   # Add the build to the repo's log.
   metadata = {
endpoints/decorated.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+import logging
+import json
+
+from flask import make_response
+from app import app
+from util.useremails import CannotSendEmailException
+from data import model
+
+logger = logging.getLogger(__name__)
+
+@app.errorhandler(model.DataModelException)
+def handle_dme(ex):
+  logger.exception(ex)
+  return make_response(json.dumps({'message': ex.message}), 400)
+
+@app.errorhandler(CannotSendEmailException)
+def handle_emailexception(ex):
+  message = 'Could not send email. Please contact an administrator and report this problem.'
+  return make_response(json.dumps({'message': message}), 400)
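Note: these handlers moved out of endpoints/common.py into a module that is imported purely for its decorator side effects ("import endpoints.decorated" earlier in this diff). A stripped-down sketch of the same pattern against a plain Flask app follows; the module, app import, and exception class here are illustrative, not names from the repository.

# errors.py (illustrative): registering the handlers happens at import time.
import json
import logging

from flask import make_response
from myapp import app  # hypothetical application module

logger = logging.getLogger(__name__)

class DataError(Exception):
  pass

@app.errorhandler(DataError)
def handle_data_error(ex):
  logger.exception(ex)
  return make_response(json.dumps({'message': str(ex)}), 400)

# Elsewhere, once, near startup:
#   import errors  # noqa -- imported only so the handlers above get registered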
@@ -380,6 +380,11 @@ def get_search():
   resp.mimetype = 'application/json'
   return resp

+# Note: This is *not* part of the Docker index spec. This is here for our own health check,
+# since we have nginx handle the _ping below.
+@index.route('/_internal_ping')
+def internal_ping():
+  return make_response('true', 200)

 @index.route('/_ping')
 @index.route('/_ping')
@@ -137,6 +137,10 @@ def get_image_layer(namespace, repository, image_id, headers):
   if permission.can() or model.repository_is_public(namespace, repository):
     profile.debug('Looking up repo image')
     repo_image = model.get_repo_image_extended(namespace, repository, image_id)
+    if not repo_image:
+      profile.debug('Image not found')
+      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
+            image_id=image_id)

     profile.debug('Looking up the layer path')
     try:

@@ -157,7 +161,7 @@ def get_image_layer(namespace, repository, image_id, headers):

       return Response(store.stream_read(repo_image.storage.locations, path), headers=headers)
     except (IOError, AttributeError):
-      profile.debug('Image not found')
+      profile.exception('Image layer data not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)

@@ -180,6 +184,7 @@ def put_image_layer(namespace, repository, image_id):
     uuid = repo_image.storage.uuid
     json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
   except (IOError, AttributeError):
+    profile.exception('Exception when retrieving image data')
     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
           image_id=image_id)

@@ -226,7 +226,7 @@ class GithubBuildTrigger(BuildTrigger):
           'personal': False,
           'repos': repo_list,
           'info': {
-            'name': org.name,
+            'name': org.name or org.login,
             'avatar_url': org.avatar_url
           }
         })
@@ -6,7 +6,7 @@ from flask import (abort, redirect, request, url_for, make_response, Response,
 from avatar_generator import Avatar
 from flask.ext.login import current_user
 from urlparse import urlparse
-from health.healthcheck import HealthCheck
+from health.healthcheck import get_healthchecker

 from data import model
 from data.model.oauth import DatabaseAuthorizationProvider

@@ -27,6 +27,9 @@ import features

 logger = logging.getLogger(__name__)

+# Capture the unverified SSL errors.
+logging.captureWarnings(True)
+
 web = Blueprint('web', __name__)

 STATUS_TAGS = app.config['STATUS_TAGS']
@@ -161,33 +164,27 @@ def v1():
   return index('')


+# TODO(jschorr): Remove this mirrored endpoint once we migrate ELB.
 @web.route('/health', methods=['GET'])
+@web.route('/health/instance', methods=['GET'])
 @no_cache
-def health():
-  db_healthy = model.check_health()
-  buildlogs_healthy = build_logs.check_health()
-
-  check = HealthCheck.get_check(app.config['HEALTH_CHECKER'][0], app.config['HEALTH_CHECKER'][1])
-  (data, is_healthy) = check.conduct_healthcheck(db_healthy, buildlogs_healthy)
-
-  response = jsonify(dict(data=data, is_healthy=is_healthy))
-  response.status_code = 200 if is_healthy else 503
-
+def instance_health():
+  checker = get_healthchecker(app)
+  (data, status_code) = checker.check_instance()
+  response = jsonify(dict(data=data, status_code=status_code))
+  response.status_code = status_code
   return response


+# TODO(jschorr): Remove this mirrored endpoint once we migrate pingdom.
 @web.route('/status', methods=['GET'])
+@web.route('/health/endtoend', methods=['GET'])
 @no_cache
-def status():
-  db_healthy = model.check_health()
-  buildlogs_healthy = build_logs.check_health()
-
-  response = jsonify({
-    'db_healthy': db_healthy,
-    'buildlogs_healthy': buildlogs_healthy,
-    'is_testing': app.config['TESTING'],
-  })
-  response.status_code = 200 if db_healthy and buildlogs_healthy else 503
-
+def endtoend_health():
+  checker = get_healthchecker(app)
+  (data, status_code) = checker.check_endtoend()
+  response = jsonify(dict(data=data, status_code=status_code))
+  response.status_code = status_code
   return response

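Note: /health/instance mirrors /health and /health/endtoend mirrors /status; both now return the checker's data dict plus the HTTP status code chosen by the checker. A small external probe for the new endpoints could look like the following (the hostname is a placeholder, and the response layout is taken from the jsonify call above):

# Simple external probe for the new health endpoints.
import requests

BASE = 'https://quay.example.com'  # assumption: replace with the real hostname

for path in ('/health/instance', '/health/endtoend'):
  resp = requests.get(BASE + path, timeout=5)
  body = resp.json()
  print('%s -> %s %s' % (path, resp.status_code, body.get('data', {}).get('services')))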
@@ -25,7 +25,7 @@ module.exports = function(grunt) {
       },
     },
     build: {
-      src: ['../static/lib/**/*.js', '../static/js/*.js', '../static/dist/template-cache.js'],
+      src: ['../static/lib/**/*.js', '../static/js/**/*.js', '../static/dist/template-cache.js'],
       dest: '../static/dist/<%= pkg.name %>.js'
     }
   },
@@ -1,47 +1,84 @@
 import boto.rds2
 import logging

+from health.services import check_all_services
+
 logger = logging.getLogger(__name__)

-class HealthCheck(object):
-  def __init__(self):
-    pass
+def get_healthchecker(app):
+  """ Returns a HealthCheck instance for the given app. """
+  return HealthCheck.get_checker(app)

-  def conduct_healthcheck(self, db_healthy, buildlogs_healthy):
+
+class HealthCheck(object):
+  def __init__(self, app):
+    self.app = app
+
+  def check_instance(self):
     """
-    Conducts any custom healthcheck work, returning a dict representing the HealthCheck
-    output and a boolean indicating whether the instance is healthy.
+    Conducts a check on this specific instance, returning a dict representing the HealthCheck
+    output and a number indicating the health check response code.
     """
-    raise NotImplementedError
+    service_statuses = check_all_services(self.app)
+    return self.get_instance_health(service_statuses)
+
+  def check_endtoend(self):
+    """
+    Conducts a check on all services, returning a dict representing the HealthCheck
+    output and a number indicating the health check response code.
+    """
+    service_statuses = check_all_services(self.app)
+    return self.calculate_overall_health(service_statuses)
+
+  def get_instance_health(self, service_statuses):
+    """
+    For the given service statuses, returns a dict representing the HealthCheck
+    output and a number indicating the health check response code. By default,
+    this simply ensures that all services are reporting as healthy.
+    """
+    return self.calculate_overall_health(service_statuses)
+
+  def calculate_overall_health(self, service_statuses, skip=None, notes=None):
+    """ Returns true if and only if all the given service statuses report as healthy. """
+    is_healthy = True
+    notes = notes or []
+
+    for service_name in service_statuses:
+      if skip and service_name in skip:
+        notes.append('%s skipped in compute health' % service_name)
+        continue
+
+      is_healthy = is_healthy and service_statuses[service_name]
+
+    data = {
+      'services': service_statuses,
+      'notes': notes,
+      'is_testing': self.app.config['TESTING']
+    }
+
+    return (data, 200 if is_healthy else 503)

   @classmethod
-  def get_check(cls, name, parameters):
+  def get_checker(cls, app):
+    name = app.config['HEALTH_CHECKER'][0]
+    parameters = app.config['HEALTH_CHECKER'][1] or {}
+
     for subc in cls.__subclasses__():
       if subc.check_name() == name:
-        return subc(**parameters)
+        return subc(app, **parameters)

     raise Exception('Unknown health check with name %s' % name)


 class LocalHealthCheck(HealthCheck):
-  def __init__(self):
-    pass
-
   @classmethod
   def check_name(cls):
     return 'LocalHealthCheck'

-  def conduct_healthcheck(self, db_healthy, buildlogs_healthy):
-    data = {
-      'db_healthy': db_healthy,
-      'buildlogs_healthy': buildlogs_healthy
-    }
-
-    return (data, db_healthy and buildlogs_healthy)
-

 class ProductionHealthCheck(HealthCheck):
-  def __init__(self, access_key, secret_key):
+  def __init__(self, app, access_key, secret_key):
+    super(ProductionHealthCheck, self).__init__(app)
     self.access_key = access_key
     self.secret_key = secret_key

@@ -49,36 +86,38 @@ class ProductionHealthCheck(HealthCheck):
   def check_name(cls):
     return 'ProductionHealthCheck'

-  def conduct_healthcheck(self, db_healthy, buildlogs_healthy):
-    data = {
-      'db_healthy': db_healthy,
-      'buildlogs_healthy': buildlogs_healthy
-    }
-
-    # Only report unhealthy if the machine cannot connect to the DB. Redis isn't required for
-    # mission critical/high avaliability operations.
+  def get_instance_health(self, service_statuses):
+    # Note: We skip the redis check because if redis is down, we don't want ELB taking the
+    # machines out of service. Redis is not considered a high avaliability-required service.
+    skip = ['redis']
+    notes = []
+
+    # If the database is marked as unhealthy, check the status of RDS directly. If RDS is
+    # reporting as available, then the problem is with this instance. Otherwise, the problem is
+    # with RDS, and so we skip the DB status so we can keep this machine as 'healthy'.
+    db_healthy = service_statuses['database']
     if not db_healthy:
-      # If the database is marked as unhealthy, check the status of RDS directly. If RDS is
-      # reporting as available, then the problem is with this instance. Otherwise, the problem is
-      # with RDS, and we can keep this machine as 'healthy'.
-      is_rds_working = False
-      try:
-        region = boto.rds2.connect_to_region('us-east-1',
-          aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key)
-        response = region.describe_db_instances()['DescribeDBInstancesResponse']
-        result = response['DescribeDBInstancesResult']
-        instances = result['DBInstances']
-        status = instances[0]['DBInstanceStatus']
-        is_rds_working = status == 'available'
-      except:
-        logger.exception("Exception while checking RDS status")
-        pass
-
-      data['db_available_checked'] = True
-      data['db_available_status'] = is_rds_working
-
-      # If RDS is down, then we still report the machine as healthy, so that it can handle
-      # requests once RDS comes back up.
-      return (data, not is_rds_working)
-
-    return (data, db_healthy)
+      rds_status = self._get_rds_status()
+      notes.append('DB reports unhealthy; RDS status: %s' % rds_status)
+
+      # If the RDS is in any state but available, then we skip the DB check since it will
+      # fail and bring down the instance.
+      if rds_status != 'available':
+        skip.append('database')
+
+    return self.calculate_overall_health(service_statuses, skip=skip, notes=notes)
+
+  def _get_rds_status(self):
+    """ Returns the status of the RDS instance as reported by AWS. """
+    try:
+      region = boto.rds2.connect_to_region('us-east-1',
+        aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key)
+      response = region.describe_db_instances()['DescribeDBInstancesResponse']
+      result = response['DescribeDBInstancesResult']
+      instances = result['DBInstances']
+      status = instances[0]['DBInstanceStatus']
+      return status
+    except:
+      logger.exception("Exception while checking RDS status")
+      return 'error'
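Note: get_checker() above selects the checker by the HEALTH_CHECKER config entry, a (name, parameters) pair, matched against check_name() of HealthCheck subclasses and constructed with the app plus the parameters. A hedged sketch of how a deployment would pick a checker and how a custom one would plug in (the subclass below is hypothetical):

# In the stack config, per get_checker() above:
#   HEALTH_CHECKER = ('LocalHealthCheck', {})
#   HEALTH_CHECKER = ('ProductionHealthCheck', {'access_key': '...', 'secret_key': '...'})
#
# Hypothetical custom checker; discovery goes through HealthCheck.__subclasses__().
class CustomHealthCheck(HealthCheck):
  @classmethod
  def check_name(cls):
    return 'CustomHealthCheck'

  def get_instance_health(self, service_statuses):
    # Treat redis as advisory for per-instance checks, same idea as ProductionHealthCheck.
    return self.calculate_overall_health(service_statuses, skip=['redis'])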
health/services.py (new file, 46 lines)
@@ -0,0 +1,46 @@
+import logging
+
+from data import model
+from app import build_logs
+
+logger = logging.getLogger(__name__)
+
+
+def _check_registry_gunicorn(app):
+  """ Returns the status of the registry gunicorn workers. """
+  # Compute the URL for checking the registry endpoint. We append a port if and only if the
+  # hostname contains one.
+  client = app.config['HTTPCLIENT']
+  hostname_parts = app.config['SERVER_HOSTNAME'].split(':')
+  port = ''
+  if len(hostname_parts) == 2:
+    port = ':' + hostname_parts[1]
+
+  registry_url = '%s://localhost%s/v1/_internal_ping' % (app.config['PREFERRED_URL_SCHEME'], port)
+  try:
+    return client.get(registry_url, verify=False, timeout=2).status_code == 200
+  except Exception:
+    logger.exception('Exception when checking registry health: %s', registry_url)
+    return False
+
+
+def _check_database(app):
+  """ Returns the status of the database, as accessed from this instance. """
+  return model.check_health(app.config)
+
+
+def _check_redis(app):
+  """ Returns the status of Redis, as accessed from this instance. """
+  return build_logs.check_health()
+
+
+_SERVICES = {
+  'registry_gunicorn': _check_registry_gunicorn,
+  'database': _check_database,
+  'redis': _check_redis
+}
+
+def check_all_services(app):
+  """ Returns a dictionary containing the status of all the services defined. """
+  status = {}
+  for name in _SERVICES:
+    status[name] = _SERVICES[name](app)
+
+  return status
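Note: check_all_services() simply maps every entry in _SERVICES over the app, so extending the health check means adding another one-argument callable to the dict. The example below is purely hypothetical; the config key and the storage engine's probe method are assumptions, not part of this change.

# Hypothetical extra check, following the same shape as the entries above.
def _check_storage(app):
  """ Returns whether the configured storage engine answers a trivial request. """
  try:
    storage = app.config['STORAGE']        # assumption: engine object kept in app config
    return storage.exists('health-probe')  # assumption: an exists()-style probe method
  except Exception:
    logger.exception('Exception when checking storage health')
    return False

_SERVICES['storage'] = _check_storage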
local-setup-osx.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -e
+
+# Install Docker and C libraries on which Python libraries are dependent
+brew update
+brew install boot2docker docker libevent libmagic postgresql
+
+# Some OSX installs don't have /usr/include, which is required for finding SASL headers for our LDAP library
+if [ ! -e /usr/include ]; then
+  sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include
+fi
+
+# Install Python dependencies
+sudo pip install -r requirements.txt
+
+# Put the local testing config in place
+git clone git@github.com:coreos-inc/quay-config.git ../quay-config
+ln -s ../../quay-config/local conf/stack
@@ -7,7 +7,6 @@ from endpoints.index import index
 from endpoints.tags import tags
 from endpoints.registry import registry

-
 application.register_blueprint(index, url_prefix='/v1')
 application.register_blueprint(tags, url_prefix='/v1')
 application.register_blueprint(registry, url_prefix='/v1')
@@ -1,4 +1,4 @@
-autobahn
+autobahn==0.9.3-3
 aiowsgi
 trollius
 peewee

@@ -40,5 +40,8 @@ git+https://github.com/DevTable/aniso8601-fake.git
 git+https://github.com/DevTable/anunidecode.git
 git+https://github.com/DevTable/avatar-generator.git
 git+https://github.com/DevTable/pygithub.git
+git+https://github.com/jplana/python-etcd.git
 gipc
 pygpgme
+cachetools
+mock
@@ -8,24 +8,22 @@ Jinja2==2.7.3
 LogentriesLogger==0.2.1
 Mako==1.0.0
 MarkupSafe==0.23
-Pillow==2.6.1
-PyMySQL==0.6.2
-PyPDF2==1.23
+Pillow==2.7.0
+PyMySQL==0.6.3
+PyPDF2==1.24
 PyYAML==3.11
 SQLAlchemy==0.9.8
+WebOb==1.4
 Werkzeug==0.9.6
-alembic==0.7.0
-git+https://github.com/DevTable/aniso8601-fake.git
-git+https://github.com/DevTable/anunidecode.git
-git+https://github.com/DevTable/avatar-generator.git
-git+https://github.com/DevTable/pygithub.git
 aiowsgi==0.3
+alembic==0.7.4
 autobahn==0.9.3-3
 backports.ssl-match-hostname==3.4.0.2
 beautifulsoup4==4.3.2
 blinker==1.3
-boto==2.34.0
-docker-py==0.6.0
+boto==2.35.1
+cachetools==1.0.0
+docker-py==0.7.1
 ecdsa==0.11
 futures==2.2.0
 gevent==1.0.1
@@ -36,27 +34,34 @@ hiredis==0.1.5
 html5lib==0.999
 itsdangerous==0.24
 jsonschema==2.4.0
-marisa-trie==0.6
-mixpanel-py==3.2.0
-git+https://github.com/NateFerrero/oauth2lib.git
-paramiko==1.15.1
-peewee==2.4.3
+marisa-trie==0.7
+mixpanel-py==3.2.1
+mock==1.0.1
+paramiko==1.15.2
+peewee==2.4.5
 psycopg2==2.5.4
 py-bcrypt==0.4
 pycrypto==2.6.1
-python-dateutil==2.2
-python-ldap==2.4.18
+python-dateutil==2.4.0
+python-ldap==2.4.19
 python-magic==0.4.6
-pytz==2014.9
 pygpgme==0.3
+pytz==2014.10
 raven==5.1.1
 redis==2.10.3
 reportlab==2.7
-requests==2.4.3
-six==1.8.0
-stripe==1.19.1
-trollius==1.0.3
+requests==2.5.1
+six==1.9.0
+stripe==1.20.1
+trollius==1.0.4
 tzlocal==1.1.2
-websocket-client==0.21.0
+waitress==0.8.9
+websocket-client==0.23.0
 wsgiref==0.1.2
 xhtml2pdf==0.0.6
+git+https://github.com/DevTable/aniso8601-fake.git
+git+https://github.com/DevTable/anunidecode.git
+git+https://github.com/DevTable/avatar-generator.git
+git+https://github.com/DevTable/pygithub.git
+git+https://github.com/NateFerrero/oauth2lib.git
+git+https://github.com/jplana/python-etcd.git
@@ -1096,12 +1096,6 @@ i.toggle-icon:hover {
   border: 1px dashed #ccc;
 }

-.new-repo .initialize-repo .init-description {
-  color: #444;
-  font-size: 12px;
-  text-align: center;
-}
-
 .new-repo .initialize-repo .file-drop {
   margin: 10px;
 }

@@ -1666,7 +1660,7 @@ i.toggle-icon:hover {
   padding-left: 70px;
 }

-.landing-page .twitter-tweet .avatar img {
+.landing-page .twitter-tweet .twitter-avatar img {
   border-radius: 4px;
   border: 2px solid rgb(70, 70, 70);
   width: 50px;
@@ -4915,3 +4909,20 @@ i.slack-icon {
 #gen-token input[type="checkbox"] {
   margin-right: 10px;
 }
+
+.dockerfile-build-form table td {
+  vertical-align: top;
+  white-space: nowrap;
+}
+
+.dockerfile-build-form input[type="file"] {
+  margin: 0px;
+}
+
+.dockerfile-build-form .help-text {
+  font-size: 13px;
+  color: #aaa;
+  margin-bottom: 20px;
+  padding-left: 22px;
+}
@@ -15,7 +15,7 @@
   </div>
   <div class="dockerfile-build-form" repository="repository" upload-failed="handleBuildFailed(message)"
        build-started="handleBuildStarted(build)" build-failed="handleBuildFailed(message)" start-now="startCounter"
-       has-dockerfile="hasDockerfile" uploading="uploading" building="building"></div>
+       is-ready="hasDockerfile" uploading="uploading" building="building"></div>
 </div>
 <div class="modal-footer">
   <button type="button" class="btn btn-primary" ng-click="startBuild()" ng-disabled="building || uploading || !hasDockerfile">Start Build</button>
@@ -11,9 +11,44 @@
   </div>

   <div class="container" ng-show="!uploading && !building">
-    <div class="init-description">
-      Upload a <b>Dockerfile</b> or an archive (<code>.zip</code> or <code>.tar.gz</code>) containing a Dockerfile <b>in the root directory</b>
-    </div>
-    <input id="file-drop" class="file-drop" type="file" file-present="internal.hasDockerfile">
+    <table>
+      <tr>
+        <td style="vertical-align: middle;">Dockerfile or <code>.tar.gz</code> or <code>.zip</code>:</td>
+        <td><input id="file-drop" class="file-drop" type="file" file-present="internal.hasDockerfile">
+      </tr>
+      <tr>
+        <td></td>
+        <td>
+          <div class="help-text">If an archive, the Dockerfile must be at the root</div>
+        </td>
+      </tr>
+      <tr>
+        <td>Base Image Pull Credentials:</td>
+        <td>
+          <!-- Select credentials -->
+          <div class="btn-group btn-group-sm">
+            <button type="button" class="btn btn-default"
+                    ng-class="is_public ? 'active btn-info' : ''"
+                    ng-click="is_public = true">
+              None
+            </button>
+            <button type="button" class="btn btn-default"
+                    ng-class="is_public ? '' : 'active btn-info'"
+                    ng-click="is_public = false">
+              <i class="fa fa-wrench"></i>
+              Robot account
+            </button>
+          </div>
+
+          <!-- Robot Select -->
+          <div ng-show="!is_public" style="margin-top: 10px">
+            <div class="entity-search" namespace="repository.namespace"
+                 placeholder="'Select robot account for pulling...'"
+                 current-entity="pull_entity"
+                 allowed-entities="['robot']"></div>
+          </div>
+        </td>
+      </tr>
+    </table>
   </div>
 </div>
@@ -4,7 +4,7 @@
     ≡
   </button>
   <a class="navbar-brand" href="/" target="{{ appLinkTarget() }}">
-    <span id="quay-logo" style="background-image: url('{{ getEnterpriseLogo() }}')"></span>
+    <span id="quay-logo" ng-style="{'background-image': 'url(' + getEnterpriseLogo() + ')'}"></span>
   </a>
 </div>
@@ -4,7 +4,7 @@
   </p>
   <div class="attribute">
     <span class="info-wrap">
-      <img ng-src="/static/img/default-twitter.png">
+      <span class="twitter-avatar"><img ng-src="{{ avatarUrl }}" fallback-src="/static/img/default-twitter.png"></span>
       <span class="info">
         <span class="author">{{ authorName }} (@{{authorUser}})</span>
         <a class="reference" ng-href="{{ messageUrl }}">{{ messageDate }}</a>
@@ -4354,6 +4354,8 @@ quayApp.directive('entitySearch', function () {

         if (classes.length > 1) {
           classes[classes.length - 1] = 'or ' + classes[classes.length - 1];
+        } else if (classes.length == 0) {
+          return '<div class="tt-empty">No matching entities found</div>';
         }

         var class_string = '';

@@ -4439,7 +4441,6 @@ quayApp.directive('entitySearch', function () {

       $scope.$watch('namespace', function(namespace) {
         if (!namespace) { return; }
-
         $scope.isAdmin = UserService.isNamespaceAdmin(namespace);
         $scope.isOrganization = !!UserService.getOrganization(namespace);
       });
@@ -6229,7 +6230,7 @@ quayApp.directive('dockerfileBuildForm', function () {
       scope: {
         'repository': '=repository',
         'startNow': '=startNow',
-        'hasDockerfile': '=hasDockerfile',
+        'isReady': '=isReady',
         'uploadFailed': '&uploadFailed',
         'uploadStarted': '&uploadStarted',
         'buildStarted': '&buildStarted',

@@ -6240,6 +6241,8 @@ quayApp.directive('dockerfileBuildForm', function () {
       },
       controller: function($scope, $element, ApiService) {
         $scope.internal = {'hasDockerfile': false};
+        $scope.pull_entity = null;
+        $scope.is_public = true;

         var handleBuildFailed = function(message) {
           message = message || 'Dockerfile build failed to start';

@@ -6313,8 +6316,12 @@ quayApp.directive('dockerfileBuildForm', function () {
             'file_id': fileId
           };

+          if (!$scope.is_public && $scope.pull_entity) {
+            data['pull_robot'] = $scope.pull_entity['name'];
+          }
+
           var params = {
-            'repository': repo.namespace + '/' + repo.name
+            'repository': repo.namespace + '/' + repo.name,
           };

           ApiService.requestRepoBuild(data, params).then(function(resp) {

@@ -6392,9 +6399,13 @@ quayApp.directive('dockerfileBuildForm', function () {
           });
         };

-        $scope.$watch('internal.hasDockerfile', function(d) {
-          $scope.hasDockerfile = d;
-        });
+        var checkIsReady = function() {
+          $scope.isReady = $scope.internal.hasDockerfile && ($scope.is_public || $scope.pull_entity);
+        };
+
+        $scope.$watch('pull_entity', checkIsReady);
+        $scope.$watch('is_public', checkIsReady);
+        $scope.$watch('internal.hasDockerfile', checkIsReady);

         $scope.$watch('startNow', function() {
           if ($scope.startNow && $scope.repository && !$scope.uploading && !$scope.building) {
@@ -207,7 +207,7 @@
   </li>

   <li>
-    <div class="twitter-view" avatar-url="https://pbs.twimg.com/profile_images/2578175278/ykn3l9ktfdy1hia5odij_bigger.jpeg"
+    <div class="twitter-view" avatar-url="https://pbs.twimg.com/profile_images/483391930147954688/pvJAHzy__bigger.jpeg"
          author-name="Frank Macreery" author-user="fancyremarker" message-url="https://twitter.com/fancyremarker/statuses/448528623692025857"
          message-date="March 25, 2014">
       <a href="https://twitter.com/quayio">@quayio</a> releases Docker build flair! <a href="http://t.co/72ULgveLj4">pic.twitter.com/72ULgveLj4</a>
@@ -143,9 +143,9 @@
   <div class="section-title">Upload <span ng-if="repo.initialize == 'dockerfile'">Dockerfile</span><span ng-if="repo.initialize == 'zipfile'">Archive</span></div>
   <div style="padding-top: 20px;">
     <div class="initialize-repo">
-      <div class="dockerfile-build-form" repository="createdForBuild" upload-failed="handleBuildFailed(message)"
+      <div class="dockerfile-build-form" repository="createdForBuild || repo" upload-failed="handleBuildFailed(message)"
            build-started="handleBuildStarted()" build-failed="handleBuildFailed(message)" start-now="createdForBuild"
-           has-dockerfile="hasDockerfile" uploading="uploading" building="building"></div>
+           is-ready="hasDockerfile" uploading="uploading" building="building"></div>
     </div>
   </div>
 </div>
@@ -2,7 +2,7 @@
 <div class="team-view container">
   <div class="organization-header" organization="organization" team-name="teamname">
     <div ng-show="canEditMembers" class="side-controls">
-      <div class="hidden-sm hidden-xs">
+      <div class="hidden-xs">
         <button class="btn btn-success"
                 id="showAddMember"
                 data-title="Add Team Member"

@@ -82,7 +82,7 @@
 </table>

 <div ng-show="canEditMembers">
-  <div ng-if-media="'(max-width: 560px)'">
+  <div ng-if-media="'(max-width: 767px)'">
     <div ng-include="'/static/directives/team-view-add.html'"></div>
   </div>
 </div>
@ -8,82 +8,99 @@
|
||||||
<meta name="description" content="Privacy policy for Quay - Hosted private docker repository">
|
<meta name="description" content="Privacy policy for Quay - Hosted private docker repository">
|
||||||
{% endblock %}
|
{% endblock %}
|
||||||
|
|
||||||
|
{% block added_stylesheets %}
|
||||||
|
<style>
|
||||||
|
dt.section {
|
||||||
|
font-variant: small-caps;
|
||||||
|
font-size: 1.4em;
|
||||||
|
margin-bottom: 10px;
|
||||||
|
}
|
||||||
|
h2 { font-variant: small-caps; }
|
||||||
|
</style>
|
||||||
|
{% endblock %}
|
||||||
|
|
||||||
{% block body_content %}
|
{% block body_content %}
|
||||||
<div class="container privacy-policy">
|
<div class="container privacy-policy">
|
||||||
|
|
||||||
<h2>Privacy Policy</h2>
|
<h2>CoreOS Privacy Policy</h2>
|
||||||
|
<h4>Last Revised: February 2, 2015</h4>
|
||||||
|
<p>Welcome to Quay.io from CoreOS, Inc. (“<strong>CoreOS</strong>”, “<strong>we</strong>”, “<strong>us</strong>” or “<strong>our</strong>”).</p>
|
||||||
|
<p>This privacy policy explains how we collect, use and disclose information about you when you use any of the websites owned or operated by CoreOS (the “<strong>Sites</strong>”) and any of the online products and services that link to this privacy policy (collectively, the “<strong>Services</strong>”) or when you otherwise interact with us. By using any of our Services, you consent to our collection, use and disclosure of your information as described in this privacy policy.</p>
|
||||||
|
<p>The Services allow users to store, manage, and retrieve container repositories.</p>
|
||||||
|
<p>We may change this privacy policy from time-to-time. If we make changes, we will notify you by revising the date at the top of the policy and, in some cases, we will provide you with additional notice (such as adding a statement to our homepage or sending you an email notification). We encourage you to review the privacy policy periodically to stay informed about our practices and the ways you can help protect your privacy.</p>
|
||||||
<dl>
|
<dl>
|
||||||
<dt>What information do we collect?</dt>
|
<dt class="section">Collection of Information</dt>
|
||||||
|
|
||||||
|
<dt>Information You Provide to Us</dt>
|
||||||
<dd>
|
<dd>
|
||||||
We collect information from you when you register on our site or subscribe to the service..
|
We collect information you directly give us. For example, we collect information about you when you sign up for one of our Services, participate in any interactive features of the Services, fill out a form, give feedback, ideas or submissions about any of the Services, communicate with us via third party social media sites, request customer support or otherwise communicate with us. The types of information we may collect include your email address, username, your credit/debit card information and any other information you choose to provide. For information as to how to restrict the collection of contact information, please see the “<a href="#your-choices">Your Choices</a>” section below. If you choose not to provide certain information, we may not be able to provide certain of our Services to you or certain features of our Services may be unavailable or work differently.
|
||||||
When ordering or registering on our site, as appropriate, you may be asked to enter your: e-mail address, mailing address or credit card information. You may, however, visit the public portion of our site anonymously.
|
|
||||||
</dd>
|
</dd>
|
||||||
|
|
||||||
<dt>What do we use your information for?</dt>
|
<dt>Information We Collect Automatically When You Use the Services</dt>
|
||||||
<dd>Any of the information we collect from you may be used in one of the following ways:
|
<dd>
|
||||||
<ul>
|
When you access or use our Services (or certain portions of the Services), we automatically collect certain information about you. This information includes:
|
||||||
<li>To personalize your experience(your information helps us to better respond to your individual needs)</li>
|
<ul>
|
||||||
<li>
|
<li><strong>Log Information:</strong> We log information about your use of the Services, including the type of device you use, access times, IP address, pages viewed, and the page you visited before navigating to one of our Services. We use this information for analytic and product improvement purposes.</li>
|
||||||
To improve our website<br>
|
<li><strong>Device Information:</strong> We collect information about the computer you use to access our Services, including the hardware model, operating system and version and unique device identifiers.</li>
|
||||||
(we continually strive to improve our website offerings based on the information and feedback we receive from you)</li>
|
<li><strong>Information Collected by Cookies and Other Tracking Technologies:</strong> We use various technologies to collect information, and this may include cookies and web beacons. Cookies are small data files stored on your hard drive or in device memory. Web beacons (also known as “tracking pixels”) are non-visible electronic images. These technologies are used for analytic and product improvement purposes, such as seeing which areas and features of our Services are popular and determining whether an email has been opened and acted upon. For more information about cookies, and how to disable them, please see “<a href="#your-choices">Your Choices</a>” below.</li>
|
||||||
<li>
|
</ul>
|
||||||
To improve customer service<br>
|
</dd>
|
||||||
(your information helps us to more effectively respond to your customer service requests and support needs)
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
To process transactions<br>
|
|
||||||
Your information, whether public or private, will not be sold, exchanged, transferred, or given to any other company for any reason whatsoever, without your consent, other than for the express purpose of delivering the purchased product or service requested.
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
To send periodic emails<br>
|
|
||||||
The email address you provide for order processing may be used to send you information and updates pertaining to your order, as well as occasional company news, updates, and related product or service information.<br>
|
|
||||||
Note: If at any time you would like to unsubscribe from receiving future emails, we include detailed unsubscribe instructions at the bottom of each email.
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt>How do we protect your information?</dt>
|
<dt>Information We Collect From Other Sources</dt>
|
||||||
<dd>
|
<dd>
|
||||||
We implement a variety of security measures to maintain the safety of your personal information when you place an order or enter, submit, or access your personal information.
|
We may also obtain information from other sources and combine that with information we collect through our Services. For example, if you create or log into your account through a site like Google.com or GitHub.com, we will have access to certain information from that site, such as your name, account information and friends lists, in accordance with the authorization procedures determined by these sites.
|
||||||
We offer the use of a secure server. All supplied sensitive/credit information is transmitted via Secure Socket Layer (SSL) technology and then encrypted into our payment gateway provider's database, where it is accessible only by those authorized with special access rights to such systems, who are required to keep the information confidential.
|
</dd>
|
||||||
After a transaction, your private information (credit cards, social security numbers, financials, etc.) will be kept on file for more than 60 days in order to continue subscription billing.
|
<dt class="section">Use of Information</dt>
|
||||||
</dd>
|
<dd>We may use information about you for various purposes, including to:
|
||||||
<dt>Do we use cookies?</dt>
|
<ul>
|
||||||
<dd>
|
<li>Provide, deliver, maintain, test and improve our Services;</li>
|
||||||
Yes. Cookies are small files that a site or its service provider transfers to your computer's hard drive through your Web browser (if you allow) that enable the site's or service provider's systems to recognize your browser and capture and remember certain information.
|
<li>Send you technical notices, updates, confirmations, security alerts and support and administrative messages;</li>
|
||||||
We use cookies to understand and save your preferences for future visits and compile aggregate data about site traffic and site interaction so that we can offer better site experiences and tools in the future. We may contract with third-party service providers to assist us in better understanding our site visitors. These service providers are not permitted to use the information collected on our behalf except to help us conduct and improve our business.
|
<li>Respond to your comments, questions and requests and provide customer service;</li>
|
||||||
</dd>
|
<li>Communicate with you about products, services, offers, promotions, rewards and events offered by CoreOS and others, and provide news and information we think will be of interest to you;</li>
|
||||||
<dt>Do we disclose any information to outside parties?</dt>
|
<li>Monitor and analyze trends, usage and activities in connection with our Services and improve our Services;</li>
|
||||||
<dd>
|
<li>Detect, investigate and prevent any suspected breaches of the terms applicable to the use of our Services (including, our Sites); and</li>
|
||||||
We do not sell, trade, or otherwise transfer your personally identifiable information to outside parties. This does not include trusted third parties who assist us in operating our website, conducting our business, or servicing you, so long as those parties agree to keep this information confidential. We may also release your information when we believe release is appropriate to comply with the law, enforce our site policies, or protect our or others' rights, property, or safety. However, non-personally identifiable visitor information may be provided to other parties for marketing, advertising, or other uses.
|
<li>Link or combine with information we get from others to help understand your needs and provide you with better service.</li>
|
||||||
</dd>
|
</ul>
|
||||||
<dt>Third party links</dt>
|
CoreOS is based in the United States, and the information we collect is governed by U.S. law. By accessing or using any of our Services or otherwise providing information to us, you consent to the processing and transfer of information in and to the U.S. and other countries.
|
||||||
<dd>
|
</dd>
|
||||||
Occasionally, at our discretion, we may include or offer third party products or services on our website. These third party sites have separate and independent privacy policies. We therefore have no responsibility or liability for the content and activities of these linked sites. Nonetheless, we seek to protect the integrity of our site and welcome any feedback about these sites.
|
<dt class="section">Sharing of Information</dt>
|
||||||
</dd>
|
<dd>
|
||||||
<dt>California Online Privacy Protection Act Compliance</dt>
|
We may share information about you as follows or as otherwise described in this Privacy Policy:
|
||||||
<dd>
|
<ul>
|
||||||
Because we value your privacy, we have taken the necessary precautions to be in compliance with the California Online Privacy Protection Act. We therefore will not distribute your personal information to outside parties without your consent.
|
<li>With vendors, consultants and other service providers who need access to such information to carry out work on our behalf;</li>
|
||||||
As part of the California Online Privacy Protection Act, all users of our site may make changes to their information at any time by logging into the service and modifying their Account Settings and Payment Information.
|
<li>In response to a request for information if we believe disclosure is in accordance with any applicable law, regulation or legal process, or as otherwise required by any applicable law, rule or regulation;</li>
|
||||||
</dd>
|
<li>If we believe your actions are inconsistent with the spirit or language of our user agreements or policies, or to protect the rights, property and safety of CoreOS or others;</li>
|
||||||
<dt>Children's Online Privacy Protection Act Compliance</dt>
|
<li>In connection with, or during negotiations of, any financing with respect to CoreOS;</li>
|
||||||
<dd>
|
<li>In connection with, or during negotiations of, any merger, sale of CoreOS’ assets or acquisition of all or a portion of our business to another company; and</li>
|
||||||
We are in compliance with the requirements of COPPA (the Children's Online Privacy Protection Act); we do not collect any information from anyone under 13 years of age. Our website, products and services are all directed to people who are at least 13 years old.
|
<li>With your consent or at your direction, including if we notify you through any of the Services that the information you provide will be shared in a particular manner and you provide such information.</li>
|
||||||
</dd>
|
</ul>
|
||||||
<dt>Terms and Conditions </dt>
|
We may also share aggregated or anonymized information that does not directly identify you.
|
||||||
<dd>
|
</dd>
|
||||||
Please also visit our Terms and Conditions section establishing the use, disclaimers, and limitations of liability governing the use of our website at https://quay.io/tos.
|
<dt class="section">Security</dt>
|
||||||
</dd>
|
<dd>
|
||||||
<dt>Your Consent</dt>
|
We take reasonable measures to help protect information about you from loss, theft, misuse and unauthorized access, disclosure, alteration and destruction.
|
||||||
<dd>
|
</dd>
|
||||||
By using our site, you consent to our privacy policy.
|
<dt class="section">Analytics Services</dt>
|
||||||
</dd>
|
<dd>
|
||||||
<dt>Changes to our Privacy Policy</dt>
|
We may allow others to provide analytics services in connection with the Services (or portions of the Services). These entities may use cookies, web beacons and other technologies to collect information about your use of the Services and other websites, including your IP address, web browser, pages viewed, time spent on pages, links clicked and conversion information. We and others may use this information to, among other things, analyze and track data, determine the popularity of certain content, personalize the user experience, and better understand your activity.
|
||||||
<dd>
|
</dd>
|
||||||
If we decide to change our privacy policy, we will post those changes on this page.
|
<dt class="section"><a id="your-choices"></a>Your Choices</dt>
|
||||||
If you have any questions or concerns about our privacy policy, please direct them to the following email address:
|
<dt>Account Information</dt>
|
||||||
<a href="mailto:support@quay.io">support@quay.io</a>
|
<dd>
|
||||||
</dd>
|
If you wish to delete your account, please contact support at <a href="mailto:support@quay.io">support@quay.io</a>. Note that we may retain certain information as required by law or for legitimate business purposes as may be necessary to fulfill the purposes identified in the privacy policy. We may also retain cached or archived copies of information (including, location information) about you for a certain period of time.
</dd>
<dt>Cookies</dt>
<dd>
Most web browsers are set to accept cookies by default. If you prefer, you can usually choose to set your browser to remove or reject browser cookies. Please note that if you choose to remove or reject cookies, this could affect the availability and functionality of certain of the Services.
</dd>
<dt>Promotional Communications</dt>
<dd>
You may opt out of receiving promotional communications from CoreOS by following the instructions in those communications. If you opt out, we may still send you non-promotional communications, such as those about your account or our ongoing business relations.
</dd>
<dt>Contact Us</dt>
<dd>
If you have any questions or concerns about this privacy policy or any privacy issues, please email us at <a href="mailto:partners@coreos.com">partners@coreos.com</a>.
</dd>
</dl>
</div>
</div>
{% endblock %}

@@ -8,94 +8,164 @@
<meta name="description" content="Terms of service for Quay - Hosted private docker repository">
{% endblock %}

{% block added_stylesheets %}
<style>
ol {
  padding-left: 20px;
}
table {
  border-width: 0px;
  margin-bottom: 20px;
  margin-top: 20px;
}
dt {
  text-decoration: underline;
  font-weight: normal;
}
</style>
{% endblock %}

{% block body_content %}
|
{% block body_content %}
|
||||||
<div class="tos container">
|
<div class="tos container">
|
||||||
<h2>Terms of Service</h2>
|
<h2>CoreOS Terms of Service</h2>
|
||||||
<p>The following terms and conditions govern all use of the Quay.io website and all content, services and products available at or through the website. The Website is owned and operated by DevTable, LLC. (“DevTable”). The Website is offered subject to your acceptance without modification of all of the terms and conditions contained herein and all other operating rules, policies (including, without limitation, Quay.io’s Privacy Policy) and procedures that may be published from time to time on this Site by DevTable (collectively, the “Agreement”).</p>
|
<h4>Last Revised: February 5, 2015</h4>
|
||||||
<p>Please read this Agreement carefully before accessing or using the Website. By accessing or using any part of the web site, you agree to become bound by the terms and conditions of this agreement. If you do not agree to all the terms and conditions of this agreement, then you may not access the Website or use any services. If these terms and conditions are considered an offer by DevTable, acceptance is expressly limited to these terms. The Website is available only to individuals who are at least 13 years old.</p>
|
|
||||||
|
<p>These Quay.io Terms of Service (these “<strong>Terms</strong>”) apply to the features and functions provided by CoreOS, Inc. (“<strong>CoreOS</strong>,” “<strong>our</strong>,” or “<strong>we</strong>”) via quay.io (the “<strong>Site</strong>”) (collectively, the “<strong>Services</strong>”). By accessing or using the Services, you agree to be bound by these Terms. If you do not agree to these Terms, do not use any of the Services. The “<strong>Effective Date</strong>” of these Terms is the date you first access any of the Services.</p>
|
||||||
|
<p>If you are accessing the Services in your capacity as an employee, consultant or agent of a company (or other entity), you represent that you are an employee, consultant or agent of such company (or other entity) and you have the authority to agree (and be legally bound) on behalf of such company (or other entity) to all of the terms and conditions of these Terms.</p>
|
||||||
|
<p>For the purpose of these Terms, you and, if applicable, such company (or other entity) constitutes “<strong>Customer</strong>” or “<strong>you</strong>”.</p>
|
||||||
|
<p>CoreOS reserves the right to change or modify any of the terms and conditions contained in these Terms (or any policy or guideline of CoreOS) at any time and in its sole discretion by providing notice that these Terms have been modified. Such notice may be provided by sending an email, posting a notice on the Site, posting the revised Terms on the Site and revising the date at the top of these Terms or such other form of notice as determined by CoreOS. Any changes or modifications will be effective 30 days after providing notice that these Terms have been modified (the “<strong>Notice Period</strong>”). Your continued use of any of the Services following the Notice Period will constitute your acceptance of such changes or modifications. Therefore, you should review these Terms whenever you access the Services and at least every 30 days to make sure that you understand the terms and conditions that will apply to your use of the Services.</p>
|
||||||
|
<p>These terms form a binding agreement between you and CoreOS.</p>
|
||||||
|
|
||||||
<ol>
|
<ol>
|
||||||
<li>
|
<li>
|
||||||
<strong>Your Quay.io Account.</strong> If you create an account on the Website, you are responsible for maintaining the security of your account, and you are fully responsible for all activities that occur under the account and any other actions taken in connection with the account. You must immediately notify DevTable of any unauthorized uses of your account or any other breaches of security. DevTable will not be liable for any acts or omissions by You, including any damages of any kind incurred as a result of such acts or omissions.
|
<strong>Privacy</strong>
|
||||||
|
<p>Please see CoreOS’ privacy policy at <a href="/privacy">https://quay.io/privacy</a> for information about how CoreOS collects, uses and discloses information about users of the Site and the Services.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Responsibility of Contributors.</strong> If you share your repository, publish images, code or content, or otherwise make (or allow any third party to make) material available by means of the Website (any such material, “Content”), You are entirely responsible for the content of, and any harm resulting from, that Content. That is the case regardless of whether the Content in question constitutes text, graphics, an audio file, or computer software. By making Content available, you represent and warrant that:
|
<strong>Registration</strong>
|
||||||
|
<p>In order to access the Services, you must complete the CoreOS registration form provided via the Site. During the registration process, you must select a CoreOS package which includes: (a) the monthly or annual period during which you can access the Services (the “<strong>Subscription Period</strong>”); and (b) the monthly or annual fee you must pay to CoreOS in exchange for your rights to the Services (the “<strong>Subscription Fees</strong>”). All such information is incorporated into these Terms by reference.</p>
|
||||||
|
<p>You agree to: (a) provide accurate, current and complete information about you as may be prompted by the registration forms via the Site (“<strong>Registration Data</strong>”); (b) maintain the security of your password; (c) maintain and promptly update the Registration Data, and any other information you provide to CoreOS, to keep it accurate, current and complete; and (d) accept all risks of unauthorized access to the Registration Data and any other information you provide to CoreOS.</p>
|
||||||
|
<p>You are responsible for safeguarding the password that you use to access the Services, and you agree to be fully responsible for activities or transactions that relate to your account and password.</p>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<strong>Services</strong>
|
||||||
|
<p>Subject to the terms and conditions of these Terms, CoreOS grants you a limited, non-transferable, non-exclusive and revocable right and license to access and use the Services.</p>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<strong>Restrictions</strong>
|
||||||
|
<p>Except as expressly authorized by these Terms, you may not (a) modify, disclose, alter, translate or create derivative works of the Services, (b) license, sublicense, resell, distribute, lease, rent, lend, transfer, assign or otherwise dispose of the Services (or any components thereof), (c) use the Services to store or transmit any viruses, software routines or other code designed to permit unauthorized access, to disable, erase or otherwise harm software, hardware or data, or to perform any other harmful actions, (d) build a competitive product or service, or copy any features or functions of the Services, (e) interfere with or disrupt the integrity or performance of the Services, (f) disclose to any third party any performance information or analysis relating to the Services, (g) remove, alter or obscure any proprietary notices in or on the Services, including copyright notices, or (h) cause or permit any third party to do any of the foregoing.</p>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<strong>Your Responsibilities</strong>
|
||||||
|
<p>If you share your repository, publish images, code or content, or otherwise make (or allow any third party to make) material available by means of the Site (“<strong>Content</strong>”), you are entirely responsible for such Content and any harm resulting from it. That is the case regardless of whether the Content in question constitutes text, graphics, an audio file, or computer software. By making Content available, you represent and warrant that:</p>
|
||||||
<ul>
|
<ul>
|
||||||
<li>
|
<li>the downloading, copying and use of the Content will not infringe, violate or misappropriate any Intellectual Property Rights of any third party;</li>
|
||||||
the downloading, copying and use of the Content will not infringe the proprietary rights, including but not limited to the copyright, patent, trademark or trade secret rights, of any third party;
|
<li>if your employer has rights to intellectual property you create, you have either (a) received permission from your employer to post or make available the Content, including but not limited to any software, or (b) secured from your employer a waiver as to all rights in or to the Content;</li>
|
||||||
</li>
|
<li>you have fully complied with any third-party licenses relating to the Content, and have done all things necessary to successfully pass through to end users any required terms;</li>
|
||||||
<li>
|
<li>the Content does not contain or install any viruses, worms, malware, Trojan horses or other harmful or destructive content;</li>
|
||||||
if your employer has rights to intellectual property you create, you have either (i) received permission from your employer to post or make available the Content, including but not limited to any software, or (ii) secured from your employer a waiver as to all rights in or to the Content;
|
<li>the Content is not spam, is not randomly-generated, and does not contain unethical or unwanted commercial content designed to drive traffic to third party sites or boost the search engine rankings of third party sites, or to further unlawful acts (such as phishing) or mislead recipients as to the source of the material (such as spoofing);</li>
|
||||||
</li>
|
<li>the Content does not contain threats or incite violence, and does not violate the privacy or publicity rights of any third party;</li>
|
||||||
<li>
|
<li>your Content is not getting advertised via unwanted electronic messages such as spam links on newsgroups, email lists, other blogs and web sites, and similar unsolicited promotional methods;</li>
|
||||||
you have fully complied with any third-party licenses relating to the Content, and have done all things necessary to successfully pass through to end users any required terms;
|
<li>your Content is not named in a manner that misleads your readers into thinking that you are another person or company. For example, your Content’s URL or name is not the name of a person other than yourself or company other than your own; and</li>
|
||||||
</li>
|
<li>you have, in the case of Content that includes computer code, accurately categorized and/or described the type, nature, uses and effects of the materials, whether requested to do so by CoreOS or otherwise.</li>
|
||||||
<li>
|
</ul>
|
||||||
the Content does not contain or install any viruses, worms, malware, Trojan horses or other harmful or destructive content;
|
<p>By submitting Content or computer code to CoreOS for inclusion in your repositories, you grant CoreOS a world-wide, royalty-free, and non-exclusive license to reproduce, modify, adapt and publish the Content solely for the purpose of providing the services you request. If you delete Content, CoreOS will use reasonable efforts to remove it from the Services, but you acknowledge that caching or references to the Content may not be made immediately unavailable.</p>
|
||||||
</li>
|
<p>Without limiting any of those representations or warranties, CoreOS has the right (though not the obligation) to, in CoreOS’ sole discretion (a) refuse or remove any content that, in CoreOS’ reasonable opinion, violates any CoreOS policy or is in any way harmful or objectionable, or (b) terminate or deny access to and use of the Site to any individual or entity for any reason, in CoreOS’ sole discretion. CoreOS will have no obligation to provide a refund of any amounts previously paid.</p>
|
||||||
<li>
|
|
||||||
the Content is not spam, is not randomly-generated, and does not contain unethical or unwanted commercial content designed to drive traffic to third party sites or boost the search engine rankings of third party sites, or to further unlawful acts (such as phishing) or mislead recipients as to the source of the material (such as spoofing);
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
the Content does not contain threats or incite violence, and does not violate the privacy or publicity rights of any third party;
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
your Content is not getting advertised via unwanted electronic messages such as spam links on newsgroups, email lists, other blogs and web sites, and similar unsolicited promotional methods;
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
your Content is not named in a manner that misleads your readers into thinking that you are another person or company. For example, your Content’s URL or name is not the name of a person other than yourself or company other than your own; and
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
you have, in the case of Content that includes computer code, accurately categorized and/or described the type, nature, uses and effects of the materials, whether requested to do so by DevTable or otherwise.
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
By submitting Content or computer code to DevTable for inclusion in your Repositories, you grant DevTable a world-wide, royalty-free, and non-exclusive license to reproduce, modify, adapt and publish the Content solely for the purpose of providing the services you request. If you delete Content, DevTable will use reasonable efforts to remove it from the Service, but you acknowledge that caching or references to the Content may not be made immediately unavailable.
|
<strong>Fees and Payment Terms</strong>
|
||||||
|
<p>In exchange for your rights to the Services, you will pay to CoreOS the Subscription Fees. The Subscription Fees do not include taxes, and the Subscription Fees are payable in advance in accordance with your Quay.io Plan.</p>
|
||||||
|
<p>Unless CoreOS states otherwise, all payments must be made (a) in U.S. Dollars; and (b) by payment card via an authorized CoreOS payment processor. If you pay via a payment card, you hereby (i) authorize CoreOS (or its authorized payment processor) to make automatic recurring charges to your designated payment card number in the applicable amount of the Subscription Fees on an annual or monthly basis (as applicable) for the duration of the Subscription Period, (ii) represent and warrant that you are authorized to use and have fees charged to the payment card number you provide to CoreOS, and (iii) understand that you may withdraw this consent by emailing CoreOS at <a href="mailto:support@quay.io">support@quay.io</a>. <strong>Accounts can be canceled at any time in the Plan and Usage section of your Account Settings. No refunds will be issued (unless expressly stated otherwise).</strong></p>
|
||||||
|
<p>Notwithstanding any terms to the contrary in these Terms, CoreOS, at its sole discretion, may modify its pricing during any Subscription Period and such modifications will be effective as of the directly subsequent Subscription Period.</p>
|
||||||
|
<p>Interest on any late payments will accrue at the rate of 1.5% per month, or the highest rate permitted by law, whichever is lower, from the date such amount is due until the date such amount is paid in full. You will be responsible for, and will pay all sales and similar taxes on, all license fees and similar fees levied upon the provision of the Services provided under these Terms, excluding only taxes based solely on CoreOS’ net income. You will indemnify and hold CoreOS harmless from and against any and all such taxes and related amounts levied upon the provision of the Services and any costs associated with the collection or withholding thereof, including penalties and interest.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
Without limiting any of those representations or warranties, DevTable has the right (though not the obligation) to, in DevTable’s sole discretion (i) refuse or remove any content that, in DevTable’s reasonable opinion, violates any DevTable policy or is in any way harmful or objectionable, or (ii) terminate or deny access to and use of the Website to any individual or entity for any reason, in DevTable’s sole discretion. DevTable will have no obligation to provide a refund of any amounts previously paid.
|
<strong>Disclaimer</strong>
|
||||||
|
<p>COREOS DISCLAIMS ANY AND ALL REPRESENTATIONS OR WARRANTIES (EXPRESS OR IMPLIED, ORAL OR WRITTEN) WITH RESPECT TO THESE TERMS, SERVICES AND ANY OPEN SOURCE SOFTWARE (AS DEFINED BELOW), WHETHER ALLEGED TO ARISE BY OPERATION OF LAW, BY REASON OF CUSTOM OR USAGE IN THE TRADE, BY COURSE OF DEALING OR OTHERWISE. NOTWITHSTANDING ANY TERMS TO THE CONTRARY IN THESE TERMS, COMPANY ACKNOWLEDGES AND AGREES THAT COREOS MAY MODIFY THE FEATURES OF THE SERVICES FROM TIME-TO-TIME AT COREOS’ SOLE DISCRETION.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Payment and Renewal.</strong>
|
<strong>Indemnification Obligations</strong>
|
||||||
<dl>
|
<p>You agree, at your sole expense, to defend, indemnify and hold CoreOS (and its directors, officers, employees, consultants and agents) harmless from and against any and all actual or threatened suits, actions, proceedings (at law or in equity), claims, damages, payments, deficiencies, fines, judgments, settlements, liabilities, losses, costs and expenses (including, but not limited to, reasonable attorneys’ fees, costs, penalties, interest and disbursements) for any death, injury, property damage caused by, arising out of, resulting from, attributable to or in any way incidental to any of your Content or any actual or alleged breach of any of your obligations under these Terms (including, but not limited to, any actual or alleged breach of any of your representations or warranties as set forth in these Terms).</p>
|
||||||
<dt>General Terms.</dt>
|
|
||||||
<dd>Paid services beyond the initial trial are available on the Website (any such services, an “Account”). By maintaining an Account you agree to pay DevTable the monthly or annual subscription fees indicated for that service. Payments will be charged on a pre-pay basis on the day you sign up for a plan and will cover the use of that service for a monthly or annual subscription period as indicated. Account fees are not refundable.</dd>
|
|
||||||
<dt>Automatic Renewal.</dt>
|
|
||||||
<dd>Unless you notify DevTable before the end of the applicable subscription period that you want to cancel an Account, your Account subscription will automatically renew and you authorize us to collect the then-applicable annual or monthly subscription fee for such Account (as well as any taxes) using any credit card or other payment mechanism we have on record for you. Accounts can be canceled at any time in the Payment Information section of your User Settings.</dd>
|
|
||||||
</dl>
|
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Responsibility of Website Visitors.</strong> DevTable has not reviewed, and cannot review, all of the material, including computer software, submitted to the Service, and cannot therefore be responsible for that material’s content, use or effects. By operating the Website, DevTable does not represent or imply that it endorses the material there posted, or that it believes such material to be accurate, useful or non-harmful. You are responsible for taking precautions as necessary to protect yourself and your computer systems from viruses, worms, Trojan horses, and other harmful or destructive content. The Website may contain content that is offensive, indecent, or otherwise objectionable, as well as content containing technical inaccuracies, typographical mistakes, and other errors. The Website may also contain material that violates the privacy or publicity rights, or infringes the intellectual property and other proprietary rights, of third parties, or the downloading, copying or use of which is subject to additional terms and conditions, stated or unstated. DevTable disclaims any responsibility for any harm resulting from the use by visitors of the Website, or from any downloading by those visitors of content there posted. </li>
|
<strong>Limitation of Liability</strong>
|
||||||
|
<p>IN NO EVENT WILL (A) COREOS’ TOTAL LIABILITY ARISING OUT OF OR RELATED TO THESE TERMS EXCEED THE TOTAL AMOUNT PAID BY YOU TO COREOS UNDER THESE TERMS IN THE SIX MONTHS IMMEDIATELY PRIOR TO THE ACCRUAL OF THE FIRST CLAIM, AND (B) COREOS BE LIABLE TO YOU OR ANY THIRD PARTY FOR ANY LOSS OF PROFITS, LOSS OF USE, LOSS OF REVENUE, LOSS OF GOODWILL, ANY INTERRUPTION OF BUSINESS, OR ANY INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, PUNITIVE OR CONSEQUENTIAL DAMAGES OF ANY KIND ARISING OUT OF, OR IN CONNECTION WITH THESE TERMS, WHETHER IN CONTRACT, TORT, STRICT LIABILITY OR OTHERWISE, EVEN IF SUCH PARTY HAS BEEN ADVISED OR IS OTHERWISE AWARE OF THE POSSIBILITY OF SUCH DAMAGES. MULTIPLE CLAIMS WILL NOT EXPAND THIS LIMITATION. THIS SECTION (LIMITATION OF LIABILITY) WILL BE GIVEN FULL EFFECT EVEN IF ANY REMEDY SPECIFIED IN THESE TERMS IS DEEMED TO HAVE FAILED OF ITS ESSENTIAL PURPOSE.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Content Posted on Other Websites.</strong> We have not reviewed, and cannot review, all of the material, including computer software, made available through the websites and webpages to which Quay.io links, and that link to Quay.io. DevTable does not have any control over those non-DevTable websites and webpages, and is not responsible for their contents or their use. By linking to a non-DevTable website or webpage, DevTable does not represent or imply that it endorses such website or webpage. You are responsible for taking precautions as necessary to protect yourself and your computer systems from viruses, worms, Trojan horses, and other harmful or destructive content. DevTable disclaims any responsibility for any harm resulting from your use of non-DevTable websites and webpages. </li>
|
<strong>Ownership</strong>
|
||||||
|
<p>As between the parties and subject to Section 5 (Your Responsibilities), you own all right, title and interest in and to the Content and any and all Intellectual Property Rights (as defined below) embodied in or related to the foregoing. As between the parties and subject to Section 3 (Services), CoreOS owns all right, title and interest in and to the Services and any and all Intellectual Property Rights (as defined below) embodied in or related to the foregoing. CoreOS reserves all rights not expressly granted in these Terms, and no licenses are granted by CoreOS to you or any other party under these Terms, whether by implication, estoppel or otherwise, except as expressly set forth in these Terms. For the purpose of these Terms, “<strong>Intellectual Property Rights</strong>” means all patents, copyrights, moral rights, trademarks, trade secrets and any other form of intellectual property rights recognized in any jurisdiction, including applications and registrations for any of the foregoing.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Copyright Infringement and DMCA Policy.</strong> As DevTable asks others to respect its intellectual property rights, it respects the intellectual property rights of others. If you believe that material located on or linked to by Quay.io violates your copyright, you are encouraged to notify DevTable in accordance with the provisions of the Digital Millennium Copyright Act (“DMCA”). DevTable will respond to all such notices, including as required or appropriate by removing the infringing material or disabling all links to the infringing material. DevTable will terminate a visitor’s access to and use of the Website if, under appropriate circumstances, the visitor is determined to be a repeat infringer of the copyrights or other intellectual property rights of DevTable or others. In the case of such termination, DevTable will have no obligation to provide a refund of any amounts previously paid to DevTable. </li>
|
<strong>Term, Termination and Effect of Termination</strong>
|
||||||
|
<p>Unless earlier terminated as set forth in these Terms, the term of these Terms commences upon the Effective Date and continues for the Subscription Period, and thereafter the term of these Terms automatically renews for one or more additional Subscription Periods unless a party terminates these Terms with no less than 15 days' advance written notice prior to the close of the then-current term. Further, CoreOS may terminate or deny access to and use of the Services if CoreOS reasonably believes you have violated any of the terms or conditions of these Terms. Upon any termination of these Terms, your rights to the Services will immediately cease.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Intellectual Property.</strong> This Agreement does not transfer from DevTable to you any DevTable or third party intellectual property, and all right, title and interest in and to such property will remain (as between the parties) solely with DevTable. DevTable, Quay.io, the Quay.io logo, and all other trademarks, service marks, graphics and logos used in connection with Quay.io, or the Website are trademarks or registered trademarks of DevTable or DevTable’s licensors. Other trademarks, service marks, graphics and logos used in connection with the Website may be the trademarks of other third parties. Your use of the Website grants you no right or license to reproduce or otherwise use any DevTable or third-party trademarks.
|
<strong>Copyright Policy</strong>
|
||||||
|
<p>CoreOS users may report to CoreOS content that appears on or via the Site or Services that they think violates these Terms, and CoreOS may remove such content, suspend or terminate the account of the user who posted such content and/or take additional action to enforce these Terms against such user.</p>
|
||||||
|
<p>Also, in accordance with the Digital Millennium Copyright Act (DMCA) and other applicable law, CoreOS has adopted a policy of terminating, in appropriate circumstances and at our discretion, account holders who are deemed to be repeat infringers. CoreOS also may, at its discretion, limit access to the Services and terminate the accounts of any users who infringe any intellectual property rights of others, whether or not there is any repeat infringement.</p>
|
||||||
|
<p>If you think that anything on the Services infringes upon any copyright that you own or control, you may file a notification with CoreOS’ Designated Agent as set forth below:</p>
|
||||||
<table border="0">
<tr><td>Designated Agent:</td><td>DMCA Agent</td></tr>
<tr><td>Address of Designated Agent:</td><td>3043 Mission Street, San Francisco, CA 94110</td></tr>
<tr><td>Telephone Number of Designated Agent:</td><td>(800) 774-3507</td></tr>
<tr><td>Fax Number of Designated Agent:</td><td>(415) 580-7362</td></tr>
<tr><td>Email Address of Designated Agent:</td><td>support@quay.io</td></tr>
</table>
<p>Please see <a href="http://www.copyright.gov/title17/92chap5.html#512">17 U.S.C. § 512(c)(3)</a> for the requirements of a proper notification. If you knowingly misrepresent that any material or activity is infringing, you may be liable for any damages, including costs and attorneys’ fees, CoreOS or the alleged infringer incurs because we relied on the misrepresentation when removing or disabling access to the material or activity.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Changes.</strong> DevTable reserves the right, at its sole discretion, to modify or replace any part of this Agreement. It is your responsibility to check this Agreement periodically for changes. Your continued use of or access to the Website following the posting of any changes to this Agreement constitutes acceptance of those changes. DevTable may also, in the future, offer new services and/or features through the Website (including, the release of new tools and resources). Such new features and/or services shall be subject to the terms and conditions of this Agreement.
|
<strong>Feedback</strong>
|
||||||
|
<p>Any suggestions, comments, or other feedback provided by you to CoreOS with respect to the Services or CoreOS (collectively, “<strong>Feedback</strong>”) will constitute confidential information of CoreOS. CoreOS will be free to use, disclose, reproduce, license, and otherwise distribute and exploit the Feedback provided to it as it sees fit, entirely without obligation or restriction of any kind, on account of intellectual property rights or otherwise.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Termination.</strong> DevTable may terminate your access to all or any part of the Website at any time, with or without cause, with or without notice, effective immediately. If you wish to terminate this Agreement or your Quay.io account (if you have one), you may simply discontinue using the Website. All provisions of this Agreement which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability.
|
<strong>Links</strong>
|
||||||
|
<p>You are granted a limited, non-exclusive right to create a text hyperlink to the Services for noncommercial purposes, provided such link does not portray CoreOS or any of its products and services in a false, misleading, derogatory, or defamatory manner and that the linking site does not contain any material that is offensive, illegal, harassing, or otherwise objectionable. This limited right may be revoked at any time. CoreOS makes no claim or representation regarding, and accepts no responsibility for, the quality, content, nature, or reliability of third-party sites accessible by link from the Services or Site. CoreOS provides these links to you only as a convenience, and the inclusion of any link does not imply affiliation, endorsement, or adoption by CoreOS of the corresponding site or any information contained in (or made available via) that site. When you leave the Site, CoreOS’ terms and policies no longer govern. You should review the applicable terms and policies, including privacy and data-gathering practices, of any site to which you navigate from the Site.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Disclaimer of Warranties.</strong> The Website is provided “as is”. DevTable and its suppliers and licensors hereby disclaim all warranties of any kind, express or implied, including, without limitation, the warranties of merchantability, fitness for a particular purpose and non-infringement. Neither DevTable nor its suppliers and licensors, makes any warranty that the Website will be error free or that access thereto will be continuous or uninterrupted. You understand that you download from, or otherwise obtain content or services through, the Website at your own discretion and risk.
|
<strong>Trademarks</strong>
|
||||||
|
<p>CoreOS’ name, trademarks, logos, and any other CoreOS product, service name, or slogan included in the Site are property of CoreOS and may not be copied, imitated, or used (in whole or in part) without CoreOS’ prior written consent. The look and feel of the Site, including all custom graphics, button icons, and scripts constitute service marks, trademarks, or trade dress of CoreOS and may not be copied, imitated, or used (in whole or in part) without CoreOS’ prior written consent. All other trademarks, registered trademarks, product names, and company names or logos mentioned in the Site (“<strong>Third-Party Trademarks</strong>”) are the property of their respective owners, and the use of such Third-Party Trademarks inures to the benefit of each owner. The use of such Third-Party Trademarks is intended to denote interoperability and does not constitute an affiliation by CoreOS and its licensors with such company or an endorsement or approval by such company of CoreOS or its licensors or their respective products or services.</p>
|
||||||
</li>
|
</li>
|
||||||
<li>
|
<li>
|
||||||
<strong>Limitation of Liability.</strong> In no event will DevTable, or its suppliers or licensors, be liable with respect to any subject matter of this agreement under any contract, negligence, strict liability or other legal or equitable theory for: (i) any special, incidental or consequential damages; (ii) the cost of procurement for substitute products or services; (iii) for interruption of use or loss or corruption of data; or (iv) for any amounts that exceed the fees paid by you to DevTable under this agreement during the twelve (12) month period prior to the cause of action. DevTable shall have no liability for any failure or delay due to matters beyond their reasonable control. The foregoing shall not apply to the extent prohibited by applicable law.
|
<strong>General Provisions</strong>
|
||||||
</li>
|
<p/>
|
||||||
<li>
|
<dl>
|
||||||
<strong>General Representation and Warranty.</strong> You represent and warrant that (i) your use of the Website will be in strict accordance with the Quay.io Privacy Policy, with this Agreement and with all applicable laws and regulations (including without limitation any local laws or regulations in your country, state, city, or other governmental area, regarding online conduct and acceptable content, and including all applicable laws regarding the transmission of technical data exported from the United States or the country in which you reside) and (ii) your use of the Website will not infringe or misappropriate the intellectual property rights of any third party.
|
<dt>Entire Agreement</dt>
|
||||||
</li>
|
<dd>
|
||||||
<li>
|
These Terms (together with all terms incorporated by reference) are the complete and exclusive statement of the mutual understanding of the parties and supersede and cancel all previous written and oral agreements and communications relating to the subject matter of these Terms.
|
||||||
<strong>Indemnification.</strong> You agree to indemnify and hold harmless DevTable, its contractors, and its licensors, and their respective directors, officers, employees and agents from and against any and all claims and expenses, including attorneys’ fees, arising out of your use of the Website, including but not limited to your violation of this Agreement.
|
</dd>
|
||||||
</li>
|
<dt>Governing Law and Venue</dt>
|
||||||
<li>
|
<dd>
|
||||||
<strong>Miscellaneous.</strong> This Agreement constitutes the entire agreement between DevTable and you concerning the subject matter hereof, and they may only be modified by a written amendment signed by an authorized executive of DevTable, or by the posting by DevTable of a revised version. Except to the extent applicable law, if any, provides otherwise, this Agreement, any access to or use of the Website will be governed by the laws of the state of New York, U.S.A., excluding its conflict of law provisions, and the proper venue for any disputes arising out of or relating to any of the same will be the state and federal courts located in New York County, New York. The prevailing party in any action or proceeding to enforce this Agreement shall be entitled to costs and attorneys’ fees. If any part of this Agreement is held invalid or unenforceable, that part will be construed to reflect the parties’ original intent, and the remaining portions will remain in full force and effect. A waiver by either party of any term or condition of this Agreement or any breach thereof, in any one instance, will not waive such term or condition or any subsequent breach thereof. You may assign your rights under this Agreement to any party that consents to, and agrees to be bound by, its terms and conditions; DevTable may assign its rights under this Agreement without condition. This Agreement will be binding upon and will inure to the benefit of the parties, their successors and permitted assigns.
|
These Terms will be governed by and construed in accordance with the laws of the State of California applicable to agreements made and to be entirely performed within the State of California, without resort to its conflict of law provisions. The federal court in San Mateo County, California will be the jurisdiction in which any suits should be filed if they relate to these Terms. Prior to the filing or initiation of any action or proceeding relating to these Terms, the parties must participate in good faith mediation in San Mateo County, California. If a party initiates any proceeding regarding these Terms, the prevailing party to such proceeding is entitled to reasonable attorneys’ fees and costs for claims arising out of these Terms.
|
||||||
|
</dd>
|
||||||
|
<dt>Publicity</dt>
|
||||||
|
<dd>
|
||||||
|
You consent to CoreOS’ use of your name and/or logo on the CoreOS website, identifying you as a customer of CoreOS and describing your use of the Services, notwithstanding any terms to the contrary in these Terms. You agree that CoreOS may issue a press release identifying you as a customer of CoreOS.
|
||||||
|
</dd>
|
||||||
|
<dt>Assignment</dt>
|
||||||
|
<dd>
|
||||||
|
Neither these Terms nor any right or duty under these Terms may be transferred, assigned or delegated by you, by operation of law or otherwise, without the prior written consent of CoreOS, and any attempted transfer, assignment or delegation without such consent will be void and without effect. CoreOS may freely transfer, assign or delegate these Terms or its rights and duties under these Terms. Subject to the foregoing, these Terms will be binding upon and will inure to the benefit of the parties and their respective representatives, heirs, administrators, successors and permitted assigns.
|
||||||
|
</dd>
|
||||||
|
<dt>Amendments and Waivers</dt>
|
||||||
|
<dd>
|
||||||
|
Unless expressly stated otherwise in your standard service terms, no modification, addition or deletion, or waiver of any rights under these Terms will be binding on a party unless clearly understood by the parties to be a modification or waiver and signed by a duly authorized representative of each party. No failure or delay (in whole or in part) on the part of a party to exercise any right or remedy hereunder will operate as a waiver thereof or affect any other right or remedy. All rights and remedies hereunder are cumulative and are not exclusive of any other rights or remedies provided hereunder or by law. The waiver of one breach or default or any delay in exercising any rights will not constitute a waiver of any subsequent breach or default.
|
||||||
|
</dd>
|
||||||
|
<dt>Electronic Communications</dt>
|
||||||
|
<dd>
|
||||||
|
CoreOS may choose to electronically deliver all communications with you, which may include email to the email address you provide to CoreOS. CoreOS’ electronic communications to you may transmit or convey information about action taken on your request, portions of your request that may be incomplete or require additional explanation, any notices required under applicable law and any other notices. You agree to do business electronically with CoreOS and to receive electronically all current and future notices, disclosures, communications and information and that the aforementioned electronic communications satisfy any legal requirement that such communications be in writing. An electronic notice will be deemed to have been received on the day of receipt as evidenced by such email.
|
||||||
|
</dd>
|
||||||
|
<dt>Severability</dt>
|
||||||
|
<dd>
|
||||||
|
If any provision of these Terms is invalid, illegal, or incapable of being enforced by any rule of law or public policy, all other provisions of these Terms will nonetheless remain in full force and effect so long as the economic and legal substance of the transactions contemplated by these Terms is not affected in any manner adverse to any party. Upon such determination that any provision is invalid, illegal, or incapable of being enforced, the parties will negotiate in good faith to modify these Terms so as to effect the original intent of the parties as closely as possible in an acceptable manner to the end that the transactions contemplated hereby are fulfilled.
|
||||||
|
</dd>
|
||||||
|
<dt>Force Majeure</dt>
|
||||||
|
<dd>
|
||||||
|
Except for payments due under these Terms, neither party will be responsible for any failure to perform or delay attributable in whole or in part to any cause beyond its reasonable control, including, but not limited to, acts of God (fire, storm, floods, earthquakes, etc.), civil disturbances, disruption of telecommunications, disruption of power or other essential services, interruption or termination of service by any service providers being used by CoreOS to host the Services or to link its servers to the Internet, labor disturbances, vandalism, cable cut, computer viruses or other similar occurrences, or any malicious or unlawful acts of any third party.
|
||||||
|
</dd>
|
||||||
|
<dt>Notice for California Users</dt>
|
||||||
|
<dd>
|
||||||
|
If you are a California resident, you may have these Terms mailed to you electronically by sending a letter to the foregoing address with your electronic mail address and a request for these Terms. Under California Civil Code Section 1789.3, California Website users are entitled to the following specific consumer rights notice: The Complaint Assistance Unit of the Division of Consumer Services of the California Department of Consumer Affairs may be contacted in writing at 1625 N. Market Blvd., Suite S-202, Sacramento, California 95834, or by telephone at (800) 952-5210.
|
||||||
|
</dd>
|
||||||
|
</dl>
|
||||||
</li>
</ol>
</div>
@@ -1965,6 +1965,9 @@ class TestOrgRobots(ApiTestCase):
    pull_robot = model.get_user(membername)
    model.create_build_trigger(repo, 'fakeservice', 'sometoken', user, pull_robot=pull_robot)

    # Add some log entries for the robot.
    model.log_action('pull_repo', ORGANIZATION, performer=pull_robot, repository=repo)

    # Delete the robot and verify it works.
    self.deleteResponse(OrgRobot,
                        params=dict(orgname=ORGANIZATION, robot_shortname='bender'))
235
test/test_buildman.py
Normal file
@@ -0,0 +1,235 @@
import unittest
import etcd
import os.path
import time
import json

from trollius import coroutine, get_event_loop, From, Future, sleep, Return
from mock import Mock
from threading import Event
from urllib3.exceptions import ReadTimeoutError

from buildman.manager.executor import BuilderExecutor
from buildman.manager.ephemeral import EphemeralBuilderManager, EtcdAction
from buildman.server import BuildJobResult
from buildman.component.buildcomponent import BuildComponent


BUILD_UUID = 'deadbeef-dead-beef-dead-deadbeefdead'
REALM_ID = '1234-realm'


def async_test(f):
  def wrapper(*args, **kwargs):
    coro = coroutine(f)
    future = coro(*args, **kwargs)
    loop = get_event_loop()
    loop.run_until_complete(future)
  return wrapper

class TestEphemeral(unittest.TestCase):
  def __init__(self, *args, **kwargs):
    self.etcd_client_mock = None
    self.etcd_wait_event = Event()
    self.test_executor = None
    super(TestEphemeral, self).__init__(*args, **kwargs)

  def _create_mock_etcd_client(self, *args, **kwargs):
    def hang_until_event(*args, **kwargs):
      time.sleep(.01) # 10ms to simulate network latency
      self.etcd_wait_event.wait()

    self.etcd_client_mock = Mock(spec=etcd.Client, name='etcd.Client')
    self.etcd_client_mock.watch = Mock(side_effect=hang_until_event)
    return self.etcd_client_mock

  def _create_completed_future(self, result=None):
    def inner(*args, **kwargs):
      new_future = Future()
      new_future.set_result(result)
      return new_future
    return inner

  def _create_mock_executor(self, *args, **kwargs):
    self.test_executor = Mock(spec=BuilderExecutor)
    self.test_executor.start_builder = Mock(side_effect=self._create_completed_future('123'))
    self.test_executor.stop_builder = Mock(side_effect=self._create_completed_future())
    return self.test_executor

  def _create_build_job(self):
    mock_job = Mock()
    mock_job.job_details = {
      'build_uuid': BUILD_UUID,
    }
    mock_job.job_item = {
      'body': json.dumps(mock_job.job_details),
      'id': 1,
    }
    return mock_job

  def setUp(self):
    EphemeralBuilderManager._executors['test'] = self._create_mock_executor

    self.old_etcd_client_klass = EphemeralBuilderManager._etcd_client_klass
    EphemeralBuilderManager._etcd_client_klass = self._create_mock_etcd_client
    self.etcd_wait_event.clear()

    self.register_component_callback = Mock()
    self.unregister_component_callback = Mock()
    self.job_heartbeat_callback = Mock()
    self.job_complete_callback = Mock()

    self.manager = EphemeralBuilderManager(
      self.register_component_callback,
      self.unregister_component_callback,
      self.job_heartbeat_callback,
      self.job_complete_callback,
      '127.0.0.1',
      30,
    )

    self.manager.initialize({'EXECUTOR': 'test'})

    self.mock_job = self._create_build_job()
    self.mock_job_key = os.path.join('building/', BUILD_UUID)

  def tearDown(self):
    self.etcd_wait_event.set()

    self.manager.shutdown()

    del EphemeralBuilderManager._executors['test']
    EphemeralBuilderManager._etcd_client_klass = self.old_etcd_client_klass

  @coroutine
  def _setup_job_for_managers(self):
    # Test that we are watching the realm location before anything else happens
    self.etcd_client_mock.watch.assert_any_call('realm/', recursive=True, timeout=0)

    self.etcd_client_mock.read = Mock(side_effect=KeyError)
    test_component = Mock(spec=BuildComponent)
    test_component.builder_realm = REALM_ID
    test_component.start_build = Mock(side_effect=self._create_completed_future())
    self.register_component_callback.return_value = test_component

    # Ask for a builder to be scheduled
    is_scheduled = yield From(self.manager.schedule(self.mock_job))
|
||||||
|
|
||||||
|
self.assertTrue(is_scheduled)
|
||||||
|
|
||||||
|
self.etcd_client_mock.read.assert_called_once_with('building/', recursive=True)
|
||||||
|
self.assertEqual(self.test_executor.start_builder.call_count, 1)
|
||||||
|
self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)
|
||||||
|
self.assertEqual(self.etcd_client_mock.write.call_args_list[1][0][0], self.mock_job_key)
|
||||||
|
|
||||||
|
# Right now the job is not registered with any managers because etcd has not accepted the job
|
||||||
|
self.assertEqual(self.register_component_callback.call_count, 0)
|
||||||
|
|
||||||
|
realm_created = Mock(spec=etcd.EtcdResult)
|
||||||
|
realm_created.action = EtcdAction.CREATE
|
||||||
|
realm_created.key = os.path.join('realm/', REALM_ID)
|
||||||
|
realm_created.value = json.dumps({
|
||||||
|
'realm': REALM_ID,
|
||||||
|
'token': 'beef',
|
||||||
|
'builder_id': '123',
|
||||||
|
'job_queue_item': self.mock_job.job_item,
|
||||||
|
})
|
||||||
|
|
||||||
|
self.manager._handle_realm_change(realm_created)
|
||||||
|
|
||||||
|
self.assertEqual(self.register_component_callback.call_count, 1)
|
||||||
|
|
||||||
|
raise Return(test_component)
|
||||||
|
|
||||||
|
@async_test
|
||||||
|
def test_schedule_and_complete(self):
|
||||||
|
# Test that a job is properly registered with all of the managers
|
||||||
|
test_component = yield From(self._setup_job_for_managers())
|
||||||
|
|
||||||
|
# Take the job ourselves
|
||||||
|
yield From(self.manager.build_component_ready(test_component))
|
||||||
|
|
||||||
|
self.etcd_client_mock.delete.assert_called_once_with(os.path.join('realm/', REALM_ID))
|
||||||
|
self.etcd_client_mock.delete.reset_mock()
|
||||||
|
|
||||||
|
# Finish the job
|
||||||
|
yield From(self.manager.job_completed(self.mock_job, BuildJobResult.COMPLETE, test_component))
|
||||||
|
|
||||||
|
self.assertEqual(self.test_executor.stop_builder.call_count, 1)
|
||||||
|
self.etcd_client_mock.delete.assert_called_once_with(self.mock_job_key)
|
||||||
|
|
||||||
|
@async_test
|
||||||
|
def test_another_manager_takes_job(self):
|
||||||
|
# Prepare a job to be taken by another manager
|
||||||
|
test_component = yield From(self._setup_job_for_managers())
|
||||||
|
|
||||||
|
realm_deleted = Mock(spec=etcd.EtcdResult)
|
||||||
|
realm_deleted.action = EtcdAction.DELETE
|
||||||
|
realm_deleted.key = os.path.join('realm/', REALM_ID)
|
||||||
|
|
||||||
|
realm_deleted._prev_node = Mock(spec=etcd.EtcdResult)
|
||||||
|
realm_deleted._prev_node.value = json.dumps({
|
||||||
|
'realm': REALM_ID,
|
||||||
|
'token': 'beef',
|
||||||
|
'builder_id': '123',
|
||||||
|
'job_queue_item': self.mock_job.job_item,
|
||||||
|
})
|
||||||
|
|
||||||
|
self.manager._handle_realm_change(realm_deleted)
|
||||||
|
|
||||||
|
self.unregister_component_callback.assert_called_once_with(test_component)
|
||||||
|
|
||||||
|
@async_test
|
||||||
|
def test_expiring_worker(self):
|
||||||
|
# Test that we are watching before anything else happens
|
||||||
|
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True, timeout=0)
|
||||||
|
|
||||||
|
# Send a signal to the callback that a worker has expired
|
||||||
|
expired_result = Mock(spec=etcd.EtcdResult)
|
||||||
|
expired_result.action = EtcdAction.EXPIRE
|
||||||
|
expired_result.key = self.mock_job_key
|
||||||
|
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
|
||||||
|
expired_result._prev_node.value = json.dumps({'builder_id': '1234'})
|
||||||
|
|
||||||
|
self.manager._handle_builder_expiration(expired_result)
|
||||||
|
|
||||||
|
yield From(sleep(.01))
|
||||||
|
|
||||||
|
self.test_executor.stop_builder.assert_called_once_with('1234')
|
||||||
|
self.assertEqual(self.test_executor.stop_builder.call_count, 1)
|
||||||
|
|
||||||
|
@async_test
|
||||||
|
def test_change_worker(self):
|
||||||
|
# Send a signal to the callback that a worker key has been changed
|
||||||
|
set_result = Mock(spec=etcd.EtcdResult)
|
||||||
|
set_result.action = 'set'
|
||||||
|
set_result.key = self.mock_job_key
|
||||||
|
|
||||||
|
self.manager._handle_builder_expiration(set_result)
|
||||||
|
|
||||||
|
yield From(sleep(.01))
|
||||||
|
|
||||||
|
self.assertEqual(self.test_executor.stop_builder.call_count, 0)
|
||||||
|
|
||||||
|
@async_test
|
||||||
|
def test_heartbeat_response(self):
|
||||||
|
expiration_timestamp = time.time() + 60
|
||||||
|
builder_result = Mock(spec=etcd.EtcdResult)
|
||||||
|
builder_result.value = json.dumps({
|
||||||
|
'builder_id': '123',
|
||||||
|
'expiration': expiration_timestamp,
|
||||||
|
'max_expiration': expiration_timestamp,
|
||||||
|
})
|
||||||
|
self.etcd_client_mock.read = Mock(return_value=builder_result)
|
||||||
|
|
||||||
|
yield From(self.manager.job_heartbeat(self.mock_job))
|
||||||
|
|
||||||
|
# Wait for threads to complete
|
||||||
|
yield From(sleep(.01))
|
||||||
|
|
||||||
|
self.job_heartbeat_callback.assert_called_once_with(self.mock_job)
|
||||||
|
self.assertEqual(self.etcd_client_mock.write.call_count, 1)
|
||||||
|
self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
|
@ -162,3 +162,8 @@ class TestQueue(QueueTestCase):
|
||||||
one = self.queue.get()
|
one = self.queue.get()
|
||||||
self.assertNotEqual(None, one)
|
self.assertNotEqual(None, one)
|
||||||
self.assertEqual(self.TEST_MESSAGE_1, one.body)
|
self.assertEqual(self.TEST_MESSAGE_1, one.body)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
|
from tempfile import NamedTemporaryFile
|
||||||
|
|
||||||
from config import DefaultConfig
|
from config import DefaultConfig
|
||||||
|
|
||||||
|
@ -13,10 +14,13 @@ class FakeTransaction(object):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
TEST_DB_FILE = NamedTemporaryFile(delete=True)
|
||||||
|
|
||||||
|
|
||||||
class TestConfig(DefaultConfig):
|
class TestConfig(DefaultConfig):
|
||||||
TESTING = True
|
TESTING = True
|
||||||
|
|
||||||
DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///:memory:')
|
DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///{0}'.format(TEST_DB_FILE.name))
|
||||||
DB_CONNECTION_ARGS = {
|
DB_CONNECTION_ARGS = {
|
||||||
'threadlocals': True,
|
'threadlocals': True,
|
||||||
'autorollback': True
|
'autorollback': True
|
||||||
|
|
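For context on the DB_URI change above: an in-memory SQLite database exists only inside the connection that created it, so a multi-connection (threadlocals) test run would see an empty schema on every new connection, while a file-backed temporary database is shared. A self-contained illustration of that difference (not part of this commit):

import sqlite3
from tempfile import NamedTemporaryFile

# Two :memory: connections are two separate, empty databases.
mem_a = sqlite3.connect(':memory:')
mem_b = sqlite3.connect(':memory:')
mem_a.execute('CREATE TABLE t (x INTEGER)')
print [r[0] for r in mem_b.execute(
    "SELECT name FROM sqlite_master WHERE type='table'")]   # [] -- table not visible

# Two connections to the same temporary file share one database.
tmp = NamedTemporaryFile(delete=True)
file_a = sqlite3.connect(tmp.name)
file_b = sqlite3.connect(tmp.name)
file_a.execute('CREATE TABLE t (x INTEGER)')
file_a.commit()
print [r[0] for r in file_b.execute(
    "SELECT name FROM sqlite_master WHERE type='table'")]   # ['t'] -- table visible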
27
tools/sendresetemail.py
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
from app import app
|
||||||
|
|
||||||
|
from util.useremails import send_recovery_email
|
||||||
|
|
||||||
|
from data import model
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
from flask import Flask, current_app
|
||||||
|
from flask_mail import Mail
|
||||||
|
|
||||||
|
def sendReset(username):
|
||||||
|
user = model.get_user(username)
|
||||||
|
if not user:
|
||||||
|
print 'No user found'
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
with app.app_context():
|
||||||
|
code = model.create_reset_password_email_code(user.email)
|
||||||
|
send_recovery_email(user.email, code.code)
|
||||||
|
print 'Email sent to %s' % (user.email)
|
||||||
|
|
||||||
|
parser = argparse.ArgumentParser(description='Sends a reset email')
|
||||||
|
parser.add_argument('username', help='The username')
|
||||||
|
args = parser.parse_args()
|
||||||
|
sendReset(args.username)
|
|
@ -1,8 +1,9 @@
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
from multiprocessing import Process, Queue
|
from Queue import Queue
|
||||||
from mixpanel import Consumer, Mixpanel
|
from threading import Thread
|
||||||
|
from mixpanel import BufferedConsumer, Mixpanel
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
@ -17,24 +18,23 @@ class MixpanelQueingConsumer(object):
|
||||||
self._mp_queue.put(json.dumps([endpoint, json_message]))
|
self._mp_queue.put(json.dumps([endpoint, json_message]))
|
||||||
|
|
||||||
|
|
||||||
class SendToMixpanel(Process):
|
class SendToMixpanel(Thread):
|
||||||
def __init__(self, request_queue):
|
def __init__(self, request_queue):
|
||||||
Process.__init__(self)
|
Thread.__init__(self)
|
||||||
|
self.daemon = True
|
||||||
|
|
||||||
self._mp_queue = request_queue
|
self._mp_queue = request_queue
|
||||||
self._consumer = Consumer()
|
self._consumer = BufferedConsumer()
|
||||||
self.daemon = True
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
logger.debug('Starting mixpanel sender process.')
|
logger.debug('Starting mixpanel sender process.')
|
||||||
while True:
|
while True:
|
||||||
mp_request = self._mp_queue.get()
|
mp_request = self._mp_queue.get()
|
||||||
logger.debug('Got queued mixpanel reqeust.')
|
logger.debug('Got queued mixpanel request.')
|
||||||
try:
|
try:
|
||||||
self._consumer.send(*json.loads(mp_request))
|
self._consumer.send(*json.loads(mp_request))
|
||||||
except:
|
except:
|
||||||
# Make sure we don't crash if Mixpanel request fails.
|
logger.exception('Failed to send Mixpanel request.')
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class FakeMixpanel(object):
|
class FakeMixpanel(object):
|
||||||
|
|
|
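The change above swaps the multiprocessing Process for a daemon Thread that shares an in-process Queue with the producer, and the plain Consumer for a BufferedConsumer. A self-contained sketch of the same producer/consumer shape (illustrative names only; the real consumer is mixpanel's BufferedConsumer, which is not shown here):

import json
import time
from Queue import Queue
from threading import Thread

class PrintingConsumer(object):
    # Stand-in for mixpanel.BufferedConsumer in this sketch.
    def send(self, endpoint, message):
        print 'send to %s: %s' % (endpoint, message)

class Sender(Thread):
    def __init__(self, request_queue):
        Thread.__init__(self)
        self.daemon = True                 # do not block interpreter shutdown
        self._queue = request_queue
        self._consumer = PrintingConsumer()

    def run(self):
        while True:
            payload = self._queue.get()    # blocks until the producer enqueues
            try:
                self._consumer.send(*json.loads(payload))
            except Exception:
                pass                       # a bad request must not kill the sender

requests = Queue()
Sender(requests).start()
requests.put(json.dumps(['track', {'event': 'test_event'}]))
time.sleep(0.1)                            # give the daemon thread a moment to drain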
@ -1,7 +1,9 @@
|
||||||
import logging
|
import logging
|
||||||
import boto
|
import boto
|
||||||
|
|
||||||
from multiprocessing import Process, Queue
|
from Queue import Queue
|
||||||
|
from threading import Thread
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -12,6 +14,7 @@ class NullReporter(object):
|
||||||
|
|
||||||
|
|
||||||
class QueueingCloudWatchReporter(object):
|
class QueueingCloudWatchReporter(object):
|
||||||
|
""" QueueingCloudWatchReporter reports metrics to the "SendToCloudWatch" process """
|
||||||
def __init__(self, request_queue, namespace, need_capacity_name, build_percent_name):
|
def __init__(self, request_queue, namespace, need_capacity_name, build_percent_name):
|
||||||
self._namespace = namespace
|
self._namespace = namespace
|
||||||
self._need_capacity_name = need_capacity_name
|
self._need_capacity_name = need_capacity_name
|
||||||
|
@ -34,26 +37,37 @@ class QueueingCloudWatchReporter(object):
|
||||||
unit='Percent')
|
unit='Percent')
|
||||||
|
|
||||||
|
|
||||||
class SendToCloudWatch(Process):
|
class SendToCloudWatch(Thread):
|
||||||
|
""" SendToCloudWatch loops indefinitely and pulls metrics off of a queue then sends it to
|
||||||
|
CloudWatch. """
|
||||||
def __init__(self, request_queue, aws_access_key, aws_secret_key):
|
def __init__(self, request_queue, aws_access_key, aws_secret_key):
|
||||||
Process.__init__(self)
|
Thread.__init__(self)
|
||||||
|
self.daemon = True
|
||||||
|
|
||||||
self._aws_access_key = aws_access_key
|
self._aws_access_key = aws_access_key
|
||||||
self._aws_secret_key = aws_secret_key
|
self._aws_secret_key = aws_secret_key
|
||||||
self._put_metrics_queue = request_queue
|
self._put_metrics_queue = request_queue
|
||||||
self.daemon = True
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
logger.debug('Starting cloudwatch sender process.')
|
try:
|
||||||
connection = boto.connect_cloudwatch(self._aws_access_key, self._aws_secret_key)
|
logger.debug('Starting CloudWatch sender process.')
|
||||||
|
connection = boto.connect_cloudwatch(self._aws_access_key, self._aws_secret_key)
|
||||||
|
except:
|
||||||
|
logger.exception('Failed to connect to CloudWatch.')
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
put_metric_args, kwargs = self._put_metrics_queue.get()
|
put_metric_args, kwargs = self._put_metrics_queue.get()
|
||||||
logger.debug('Got queued put metrics reqeust.')
|
logger.debug('Got queued put metrics request.')
|
||||||
connection.put_metric_data(*put_metric_args, **kwargs)
|
try:
|
||||||
|
connection.put_metric_data(*put_metric_args, **kwargs)
|
||||||
|
except:
|
||||||
|
logger.exception('Failed to write to CloudWatch')
|
||||||
|
|
||||||
|
|
||||||
class QueueMetrics(object):
|
class QueueMetrics(object):
|
||||||
def __init__(self, app=None):
|
def __init__(self, app=None):
|
||||||
self.app = app
|
self.app = app
|
||||||
|
self.sender = None
|
||||||
if app is not None:
|
if app is not None:
|
||||||
self.state = self.init_app(app)
|
self.state = self.init_app(app)
|
||||||
else:
|
else:
|
||||||
|
@ -72,8 +86,7 @@ class QueueMetrics(object):
|
||||||
request_queue = Queue()
|
request_queue = Queue()
|
||||||
reporter = QueueingCloudWatchReporter(request_queue, namespace, req_capacity_name,
|
reporter = QueueingCloudWatchReporter(request_queue, namespace, req_capacity_name,
|
||||||
build_percent_name)
|
build_percent_name)
|
||||||
sender = SendToCloudWatch(request_queue, access_key, secret_key)
|
self.sender = SendToCloudWatch(request_queue, access_key, secret_key)
|
||||||
sender.start()
|
|
||||||
else:
|
else:
|
||||||
reporter = NullReporter()
|
reporter = NullReporter()
|
||||||
|
|
||||||
|
@ -82,5 +95,11 @@ class QueueMetrics(object):
|
||||||
app.extensions['queuemetrics'] = reporter
|
app.extensions['queuemetrics'] = reporter
|
||||||
return reporter
|
return reporter
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
logger.debug('Asked to start CloudWatch reporter')
|
||||||
|
if self.sender is not None:
|
||||||
|
logger.debug('Starting CloudWatch reporter')
|
||||||
|
self.sender.start()
|
||||||
|
|
||||||
def __getattr__(self, name):
|
def __getattr__(self, name):
|
||||||
return getattr(self.state, name, None)
|
return getattr(self.state, name, None)
|
||||||
|
|
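The QueueMetrics changes above keep the sender thread unstarted until run() is called explicitly, so reporting only happens in a process that opts in (web.py does this below). A minimal, self-contained sketch of that deferred-start pattern, with illustrative names:

import time
from Queue import Queue
from threading import Thread

class MiniReporter(object):
    def __init__(self):
        self._queue = Queue()
        self.sender = Thread(target=self._drain)
        self.sender.daemon = True     # built here, deliberately not started yet

    def report(self, value):
        self._queue.put(value)        # cheap for callers; nothing is sent yet

    def run(self):
        if self.sender is not None:
            self.sender.start()       # only a process that calls run() reports

    def _drain(self):
        while True:
            value = self._queue.get()
            print 'would put_metric_data:', value

reporter = MiniReporter()
reporter.report(42)                   # queued before the sender is running
reporter.run()
time.sleep(0.1)                       # let the daemon thread drain the queue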
|
@ -1,5 +1,6 @@
|
||||||
import logging
|
import logging
|
||||||
import traceback
|
import traceback
|
||||||
|
import json
|
||||||
|
|
||||||
from flask.ext.mail import Message
|
from flask.ext.mail import Message
|
||||||
|
|
||||||
|
@ -13,7 +14,42 @@ template_env = get_template_env("emails")
|
||||||
class CannotSendEmailException(Exception):
|
class CannotSendEmailException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def send_email(recipient, subject, template_file, parameters):
|
class GmailAction(object):
|
||||||
|
""" Represents an action that can be taken in Gmail in response to the email. """
|
||||||
|
def __init__(self, metadata):
|
||||||
|
self.metadata = metadata
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def confirm(name, url, description):
|
||||||
|
return GmailAction({
|
||||||
|
"@context": "http://schema.org",
|
||||||
|
"@type": "EmailMessage",
|
||||||
|
"action": {
|
||||||
|
"@type": 'ConfirmAction',
|
||||||
|
"name": name,
|
||||||
|
"handler": {
|
||||||
|
"@type": "HttpActionHandler",
|
||||||
|
"url": get_app_url() + '/' + url
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": description
|
||||||
|
})
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def view(name, url, description):
|
||||||
|
return GmailAction({
|
||||||
|
"@context": "http://schema.org",
|
||||||
|
"@type": "EmailMessage",
|
||||||
|
"action": {
|
||||||
|
"@type": 'ViewAction',
|
||||||
|
"name": name,
|
||||||
|
"url": get_app_url() + '/' + url
|
||||||
|
},
|
||||||
|
"description": description
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
def send_email(recipient, subject, template_file, parameters, action=None):
|
||||||
app_title = app.config['REGISTRY_TITLE_SHORT']
|
app_title = app.config['REGISTRY_TITLE_SHORT']
|
||||||
app_url = get_app_url()
|
app_url = get_app_url()
|
||||||
|
|
||||||
|
@ -29,7 +65,8 @@ def send_email(recipient, subject, template_file, parameters):
|
||||||
'app_logo': 'https://quay.io/static/img/quay-logo.png', # TODO: make this pull from config
|
'app_logo': 'https://quay.io/static/img/quay-logo.png', # TODO: make this pull from config
|
||||||
'app_url': app_url,
|
'app_url': app_url,
|
||||||
'app_title': app_title,
|
'app_title': app_title,
|
||||||
'app_link': app_link_handler
|
'app_link': app_link_handler,
|
||||||
|
'action_metadata': json.dumps(action.metadata) if action else None
|
||||||
})
|
})
|
||||||
|
|
||||||
rendered_html = template_env.get_template(template_file + '.html').render(parameters)
|
rendered_html = template_env.get_template(template_file + '.html').render(parameters)
|
||||||
|
@ -61,25 +98,34 @@ def send_change_email(username, email, token):
|
||||||
})
|
})
|
||||||
|
|
||||||
def send_confirmation_email(username, email, token):
|
def send_confirmation_email(username, email, token):
|
||||||
|
action = GmailAction.confirm('Confirm E-mail', 'confirm?code=' + token,
|
||||||
|
'Verification of e-mail address')
|
||||||
|
|
||||||
send_email(email, 'Please confirm your e-mail address', 'confirmemail', {
|
send_email(email, 'Please confirm your e-mail address', 'confirmemail', {
|
||||||
'username': username,
|
'username': username,
|
||||||
'token': token
|
'token': token
|
||||||
})
|
}, action=action)
|
||||||
|
|
||||||
def send_repo_authorization_email(namespace, repository, email, token):
|
def send_repo_authorization_email(namespace, repository, email, token):
|
||||||
|
action = GmailAction.confirm('Verify E-mail', 'authrepoemail?code=' + token,
|
||||||
|
'Verification of e-mail address')
|
||||||
|
|
||||||
subject = 'Please verify your e-mail address for repository %s/%s' % (namespace, repository)
|
subject = 'Please verify your e-mail address for repository %s/%s' % (namespace, repository)
|
||||||
send_email(email, subject, 'repoauthorizeemail', {
|
send_email(email, subject, 'repoauthorizeemail', {
|
||||||
'namespace': namespace,
|
'namespace': namespace,
|
||||||
'repository': repository,
|
'repository': repository,
|
||||||
'token': token
|
'token': token
|
||||||
})
|
}, action=action)
|
||||||
|
|
||||||
def send_recovery_email(email, token):
|
def send_recovery_email(email, token):
|
||||||
|
action = GmailAction.view('Recover Account', 'recovery?code=' + token,
|
||||||
|
'Recovery of an account')
|
||||||
|
|
||||||
subject = 'Account recovery'
|
subject = 'Account recovery'
|
||||||
send_email(email, subject, 'recovery', {
|
send_email(email, subject, 'recovery', {
|
||||||
'email': email,
|
'email': email,
|
||||||
'token': token
|
'token': token
|
||||||
})
|
}, action=action)
|
||||||
|
|
||||||
def send_payment_failed(email, username):
|
def send_payment_failed(email, username):
|
||||||
send_email(email, 'Subscription Payment Failure', 'paymentfailure', {
|
send_email(email, 'Subscription Payment Failure', 'paymentfailure', {
|
||||||
|
@ -87,12 +133,15 @@ def send_payment_failed(email, username):
|
||||||
})
|
})
|
||||||
|
|
||||||
def send_org_invite_email(member_name, member_email, orgname, team, adder, code):
|
def send_org_invite_email(member_name, member_email, orgname, team, adder, code):
|
||||||
|
action = GmailAction.view('Join %s' % team, 'confirminvite?code=' + code,
|
||||||
|
'Invitation to join a team')
|
||||||
|
|
||||||
send_email(member_email, 'Invitation to join team', 'teaminvite', {
|
send_email(member_email, 'Invitation to join team', 'teaminvite', {
|
||||||
'inviter': adder,
|
'inviter': adder,
|
||||||
'token': code,
|
'token': code,
|
||||||
'organization': orgname,
|
'organization': orgname,
|
||||||
'teamname': team
|
'teamname': team
|
||||||
})
|
}, action=action)
|
||||||
|
|
||||||
|
|
||||||
def send_invoice_email(email, contents):
|
def send_invoice_email(email, contents):
|
||||||
|
|
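For the action metadata introduced above to have any effect, Gmail expects the schema.org JSON to be embedded in the message HTML as an application/ld+json script. The email templates themselves are not part of this diff, so the following is only a hedged sketch of how a Jinja2 template could interpolate the new action_metadata parameter:

# Hypothetical template fragment; the real confirmemail.html template is not shown in this diff.
from jinja2 import Template

fragment = Template(u"""
{% if action_metadata %}
<script type="application/ld+json">
{{ action_metadata | safe }}
</script>
{% endif %}
<p>Welcome, {{ username }}! Please confirm your e-mail address.</p>
""")

print fragment.render(
    username='someuser',
    action_metadata='{"@context": "http://schema.org", "@type": "EmailMessage"}')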
4
web.py
|
@ -1,7 +1,7 @@
|
||||||
import logging
|
import logging
|
||||||
import logging.config
|
import logging.config
|
||||||
|
|
||||||
from app import app as application
|
from app import app as application, queue_metrics
|
||||||
|
|
||||||
from endpoints.api import api_bp
|
from endpoints.api import api_bp
|
||||||
from endpoints.web import web
|
from endpoints.web import web
|
||||||
|
@ -9,6 +9,8 @@ from endpoints.webhooks import webhooks
|
||||||
from endpoints.realtime import realtime
|
from endpoints.realtime import realtime
|
||||||
from endpoints.callbacks import callback
|
from endpoints.callbacks import callback
|
||||||
|
|
||||||
|
# Start the cloudwatch reporting.
|
||||||
|
queue_metrics.run()
|
||||||
|
|
||||||
application.register_blueprint(web)
|
application.register_blueprint(web)
|
||||||
application.register_blueprint(callback, url_prefix='/oauth2')
|
application.register_blueprint(callback, url_prefix='/oauth2')
|
||||||
|
|
|
@ -1,704 +0,0 @@
|
||||||
import logging.config
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import argparse
|
|
||||||
import os
|
|
||||||
import requests
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
import shutil
|
|
||||||
import tarfile
|
|
||||||
|
|
||||||
from docker import Client
|
|
||||||
from docker.utils import kwargs_from_env
|
|
||||||
from docker.errors import APIError
|
|
||||||
from tempfile import TemporaryFile, mkdtemp
|
|
||||||
from zipfile import ZipFile
|
|
||||||
from functools import partial
|
|
||||||
from datetime import datetime, timedelta
|
|
||||||
from threading import Event
|
|
||||||
from uuid import uuid4
|
|
||||||
from collections import defaultdict
|
|
||||||
from requests.exceptions import ConnectionError
|
|
||||||
|
|
||||||
from data import model
|
|
||||||
from data.database import BUILD_PHASE
|
|
||||||
from workers.worker import Worker, WorkerUnhealthyException, JobException
|
|
||||||
from app import userfiles as user_files, build_logs, sentry, dockerfile_build_queue
|
|
||||||
from endpoints.notificationhelper import spawn_notification
|
|
||||||
from util.safetar import safe_extractall
|
|
||||||
from util.dockerfileparse import parse_dockerfile, ParsedDockerfile, serialize_dockerfile
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
TIMEOUT_PERIOD_MINUTES = 20
|
|
||||||
CACHE_EXPIRATION_PERIOD_HOURS = 24
|
|
||||||
NO_TAGS = ['<none>:<none>']
|
|
||||||
RESERVATION_TIME = (TIMEOUT_PERIOD_MINUTES + 5) * 60
|
|
||||||
|
|
||||||
def build_docker_args():
|
|
||||||
args = kwargs_from_env()
|
|
||||||
if 'tls' in args and os.environ.get('IGNORE_TLS_ISSUES', False):
|
|
||||||
args['tls'].verify = False
|
|
||||||
return args
|
|
||||||
|
|
||||||
|
|
||||||
def matches_system_error(status_str):
|
|
||||||
""" Returns true if the given status string matches a known system error in the
|
|
||||||
Docker builder.
|
|
||||||
"""
|
|
||||||
KNOWN_MATCHES = ['lxc-start: invalid', 'lxc-start: failed to', 'lxc-start: Permission denied']
|
|
||||||
|
|
||||||
for match in KNOWN_MATCHES:
|
|
||||||
# 10 because we might have a Unix control code at the start.
|
|
||||||
found = status_str.find(match[0:len(match) + 10])
|
|
||||||
if found >= 0 and found <= 10:
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class StatusWrapper(object):
|
|
||||||
def __init__(self, build_uuid):
|
|
||||||
self._uuid = build_uuid
|
|
||||||
self._status = {
|
|
||||||
'total_commands': None,
|
|
||||||
'current_command': None,
|
|
||||||
'push_completion': 0.0,
|
|
||||||
'pull_completion': 0.0,
|
|
||||||
}
|
|
||||||
|
|
||||||
self.__exit__(None, None, None)
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
return self._status
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, value, traceback):
|
|
||||||
build_logs.set_status(self._uuid, self._status)
|
|
||||||
|
|
||||||
|
|
||||||
class _IncompleteJsonError(Exception):
|
|
||||||
def __init__(self, start_from):
|
|
||||||
self.start_from = start_from
|
|
||||||
|
|
||||||
|
|
||||||
class _StreamingJSONDecoder(json.JSONDecoder):
|
|
||||||
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
|
|
||||||
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
|
|
||||||
|
|
||||||
def decode(self, s, _w=WHITESPACE.match):
|
|
||||||
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
|
|
||||||
instance containing a JSON document)
|
|
||||||
|
|
||||||
"""
|
|
||||||
start_from = 0
|
|
||||||
while start_from < len(s):
|
|
||||||
try:
|
|
||||||
obj, end = self.raw_decode(s[start_from:], idx=_w(s[start_from:], 0).end())
|
|
||||||
except ValueError:
|
|
||||||
raise _IncompleteJsonError(start_from)
|
|
||||||
end = _w(s[start_from:], end).end()
|
|
||||||
start_from += end
|
|
||||||
yield obj
|
|
||||||
|
|
||||||
|
|
||||||
class StreamingDockerClient(Client):
|
|
||||||
def _stream_helper(self, response):
|
|
||||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
|
||||||
content_buf = ''
|
|
||||||
for content in response.iter_content(chunk_size=256):
|
|
||||||
content_buf += content
|
|
||||||
try:
|
|
||||||
for val in json.loads(content_buf, cls=_StreamingJSONDecoder):
|
|
||||||
yield val
|
|
||||||
content_buf = ''
|
|
||||||
except _IncompleteJsonError as exc:
|
|
||||||
content_buf = content_buf[exc.start_from:]
|
|
||||||
|
|
||||||
|
|
||||||
class DockerfileBuildContext(object):
|
|
||||||
def __init__(self, build_context_dir, dockerfile_subdir, repo, tag_names,
|
|
||||||
push_token, build_uuid, cache_size_gb, pull_credentials=None):
|
|
||||||
self._build_dir = build_context_dir
|
|
||||||
self._dockerfile_subdir = dockerfile_subdir
|
|
||||||
self._repo = repo
|
|
||||||
self._tag_names = tag_names
|
|
||||||
self._push_token = push_token
|
|
||||||
self._status = StatusWrapper(build_uuid)
|
|
||||||
self._build_logger = partial(build_logs.append_log_message, build_uuid)
|
|
||||||
self._pull_credentials = pull_credentials
|
|
||||||
self._cache_size_gb = cache_size_gb
|
|
||||||
|
|
||||||
# Note: We have two different clients here because we (potentially) login
|
|
||||||
# with both, but with different credentials that we do not want shared between
|
|
||||||
# the build and push operations.
|
|
||||||
self._push_cl = StreamingDockerClient(timeout=1200, **build_docker_args())
|
|
||||||
self._build_cl = StreamingDockerClient(timeout=1200, **build_docker_args())
|
|
||||||
|
|
||||||
dockerfile_path = os.path.join(self._build_dir, dockerfile_subdir,
|
|
||||||
'Dockerfile')
|
|
||||||
if not os.path.exists(dockerfile_path):
|
|
||||||
raise RuntimeError('Build job did not contain a Dockerfile.')
|
|
||||||
|
|
||||||
# Compute the number of steps
|
|
||||||
with open(dockerfile_path, 'r') as dockerfileobj:
|
|
||||||
self._parsed_dockerfile = parse_dockerfile(dockerfileobj.read())
|
|
||||||
|
|
||||||
self.__inject_quay_repo_env(self._parsed_dockerfile, repo)
|
|
||||||
self._num_steps = len(self._parsed_dockerfile.commands)
|
|
||||||
|
|
||||||
with open(dockerfile_path, 'w') as dockerfileobj:
|
|
||||||
dockerfileobj.write(serialize_dockerfile(self._parsed_dockerfile))
|
|
||||||
|
|
||||||
logger.debug('Will build and push to repo %s with tags named: %s', self._repo,
|
|
||||||
self._tag_names)
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
try:
|
|
||||||
self.__cleanup_containers()
|
|
||||||
self.__cleanup_images()
|
|
||||||
self.__prune_cache()
|
|
||||||
except APIError:
|
|
||||||
sentry.client.captureException()
|
|
||||||
message = 'Docker installation is no longer healthy.'
|
|
||||||
logger.exception(message)
|
|
||||||
raise WorkerUnhealthyException(message)
|
|
||||||
|
|
||||||
return self
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, value, traceback):
|
|
||||||
shutil.rmtree(self._build_dir)
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.__cleanup_containers()
|
|
||||||
except APIError:
|
|
||||||
sentry.client.captureException()
|
|
||||||
message = 'Docker installation is no longer healthy.'
|
|
||||||
logger.exception(message)
|
|
||||||
raise WorkerUnhealthyException(message)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __inject_quay_repo_env(parsed_dockerfile, quay_reponame):
|
|
||||||
env_command = {
|
|
||||||
'command': 'ENV',
|
|
||||||
'parameters': 'QUAY_REPOSITORY %s' % quay_reponame
|
|
||||||
}
|
|
||||||
for index, command in reversed(list(enumerate(parsed_dockerfile.commands))):
|
|
||||||
if command['command'] == 'FROM':
|
|
||||||
new_command_index = index + 1
|
|
||||||
logger.debug('Injecting env command at dockerfile index: %s', new_command_index)
|
|
||||||
parsed_dockerfile.commands.insert(new_command_index, env_command)
|
|
||||||
break
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __total_completion(statuses, total_images):
|
|
||||||
percentage_with_sizes = float(len(statuses.values()))/total_images
|
|
||||||
sent_bytes = sum([status['current'] for status in statuses.values()])
|
|
||||||
total_bytes = sum([status['total'] for status in statuses.values()])
|
|
||||||
return float(sent_bytes)/total_bytes*percentage_with_sizes
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __monitor_completion(status_stream, required_message, status_updater, status_completion_key,
|
|
||||||
num_images=0):
|
|
||||||
images = {}
|
|
||||||
for status in status_stream:
|
|
||||||
logger.debug('%s: %s', status_completion_key, status)
|
|
||||||
if 'status' in status:
|
|
||||||
status_msg = status['status']
|
|
||||||
|
|
||||||
if status_msg == required_message:
|
|
||||||
if 'progressDetail' in status and 'id' in status:
|
|
||||||
image_id = status['id']
|
|
||||||
detail = status['progressDetail']
|
|
||||||
|
|
||||||
if 'current' in detail and 'total' in detail:
|
|
||||||
images[image_id] = detail
|
|
||||||
with status_updater as status_update:
|
|
||||||
status_update[status_completion_key] = \
|
|
||||||
DockerfileBuildContext.__total_completion(images, max(len(images), num_images))
|
|
||||||
|
|
||||||
elif 'errorDetail' in status:
|
|
||||||
message = 'Error pushing image.'
|
|
||||||
if 'message' in status['errorDetail']:
|
|
||||||
message = str(status['errorDetail']['message'])
|
|
||||||
|
|
||||||
raise RuntimeError(message)
|
|
||||||
|
|
||||||
def pull(self):
|
|
||||||
image_and_tag_tuple = self._parsed_dockerfile.get_image_and_tag()
|
|
||||||
if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
|
|
||||||
self._build_logger('Missing FROM command in Dockerfile', build_logs.ERROR)
|
|
||||||
raise JobException('Missing FROM command in Dockerfile')
|
|
||||||
|
|
||||||
image_and_tag = ':'.join(image_and_tag_tuple)
|
|
||||||
|
|
||||||
# Login with the specified credentials (if any).
|
|
||||||
if self._pull_credentials:
|
|
||||||
logger.debug('Logging in with pull credentials: %s@%s',
|
|
||||||
self._pull_credentials['username'], self._pull_credentials['registry'])
|
|
||||||
|
|
||||||
self._build_logger('Pulling base image: %s' % image_and_tag, log_data={
|
|
||||||
'phasestep': 'login',
|
|
||||||
'username': self._pull_credentials['username'],
|
|
||||||
'registry': self._pull_credentials['registry']
|
|
||||||
})
|
|
||||||
|
|
||||||
self._build_cl.login(self._pull_credentials['username'], self._pull_credentials['password'],
|
|
||||||
registry=self._pull_credentials['registry'], reauth=True)
|
|
||||||
else:
|
|
||||||
self._build_logger('Pulling base image: %s' % image_and_tag, log_data={
|
|
||||||
'phasestep': 'pull',
|
|
||||||
'repo_url': image_and_tag
|
|
||||||
})
|
|
||||||
|
|
||||||
pull_status = self._build_cl.pull(image_and_tag, stream=True)
|
|
||||||
|
|
||||||
self.__monitor_completion(pull_status, 'Downloading', self._status, 'pull_completion')
|
|
||||||
|
|
||||||
def build(self, reservation_extension_method):
|
|
||||||
# Start the build itself.
|
|
||||||
logger.debug('Starting build.')
|
|
||||||
|
|
||||||
with self._status as status:
|
|
||||||
status['total_commands'] = self._num_steps
|
|
||||||
|
|
||||||
logger.debug('Building to tags named: %s', self._tag_names)
|
|
||||||
context_path = os.path.join(self._build_dir, self._dockerfile_subdir)
|
|
||||||
|
|
||||||
logger.debug('Final context path: %s exists: %s', context_path,
|
|
||||||
os.path.exists(context_path))
|
|
||||||
|
|
||||||
build_status = self._build_cl.build(path=context_path, stream=True)
|
|
||||||
|
|
||||||
current_step = 0
|
|
||||||
built_image = None
|
|
||||||
for status in build_status:
|
|
||||||
fully_unwrapped = ""
|
|
||||||
if isinstance(status, dict):
|
|
||||||
keys_to_extract = ['error', 'status', 'stream']
|
|
||||||
for key in keys_to_extract:
|
|
||||||
if key in status:
|
|
||||||
fully_unwrapped = status[key]
|
|
||||||
break
|
|
||||||
|
|
||||||
if not fully_unwrapped:
|
|
||||||
logger.debug('Status dict did not have any extractable keys and was: %s', status)
|
|
||||||
elif isinstance(status, basestring):
|
|
||||||
fully_unwrapped = status
|
|
||||||
|
|
||||||
status_str = str(fully_unwrapped.encode('utf-8'))
|
|
||||||
|
|
||||||
# Check for system errors when building.
|
|
||||||
# DISABLED: LXC is super flaky, but this is causing build nodes to spasm.
|
|
||||||
#if matches_system_error(status_str):
|
|
||||||
# raise WorkerUnhealthyException(status_str)
|
|
||||||
|
|
||||||
logger.debug('Status: %s', status_str)
|
|
||||||
step_increment = re.search(r'Step ([0-9]+) :', status_str)
|
|
||||||
if step_increment:
|
|
||||||
self._build_logger(status_str, build_logs.COMMAND)
|
|
||||||
current_step = int(step_increment.group(1))
|
|
||||||
logger.debug('Step now: %s/%s', current_step, self._num_steps)
|
|
||||||
with self._status as status_update:
|
|
||||||
status_update['current_command'] = current_step
|
|
||||||
|
|
||||||
# Tell the queue that we're making progress every time we advance a step
|
|
||||||
reservation_extension_method(RESERVATION_TIME)
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
self._build_logger(status_str)
|
|
||||||
|
|
||||||
complete = re.match(r'Successfully built ([a-z0-9]+)$', status_str)
|
|
||||||
if complete:
|
|
||||||
built_image = complete.group(1)
|
|
||||||
logger.debug('Final image ID is: %s', built_image)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Get the image count
|
|
||||||
if not built_image:
|
|
||||||
return
|
|
||||||
|
|
||||||
return built_image
|
|
||||||
|
|
||||||
def push(self, built_image):
|
|
||||||
# Login to the registry
|
|
||||||
host = re.match(r'([a-z0-9.:]+)/.+/.+$', self._repo)
|
|
||||||
if not host:
|
|
||||||
raise RuntimeError('Invalid repo name: %s' % self._repo)
|
|
||||||
|
|
||||||
for protocol in ['https', 'http']:
|
|
||||||
registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
|
|
||||||
logger.debug('Attempting login to registry: %s', registry_endpoint)
|
|
||||||
|
|
||||||
try:
|
|
||||||
self._push_cl.login('$token', self._push_token, registry=registry_endpoint)
|
|
||||||
break
|
|
||||||
except APIError:
|
|
||||||
pass # Probably the wrong protocol
|
|
||||||
|
|
||||||
for tag in self._tag_names:
|
|
||||||
logger.debug('Tagging image %s as %s:%s', built_image, self._repo, tag)
|
|
||||||
self._push_cl.tag(built_image, self._repo, tag)
|
|
||||||
|
|
||||||
history = self._push_cl.history(built_image)
|
|
||||||
num_images = len(history)
|
|
||||||
|
|
||||||
logger.debug('Pushing to repo %s', self._repo)
|
|
||||||
resp = self._push_cl.push(self._repo, stream=True)
|
|
||||||
self.__monitor_completion(resp, 'Pushing', self._status, 'push_completion', num_images)
|
|
||||||
|
|
||||||
def __cleanup_containers(self):
|
|
||||||
# First clean up any containers that might be holding the images
|
|
||||||
for running in self._build_cl.containers(quiet=True):
|
|
||||||
logger.debug('Killing container: %s', running['Id'])
|
|
||||||
self._build_cl.kill(running['Id'])
|
|
||||||
|
|
||||||
# Next, remove all of the containers (which should all now be killed)
|
|
||||||
for container in self._build_cl.containers(all=True, quiet=True):
|
|
||||||
logger.debug('Removing container: %s', container['Id'])
|
|
||||||
self._build_cl.remove_container(container['Id'])
|
|
||||||
|
|
||||||
def __cleanup_images(self):
|
|
||||||
""" Remove tags on internal nodes, and remove images older than the expiratino time. """
|
|
||||||
ids_to_images, ids_to_children = self.__compute_image_graph()
|
|
||||||
|
|
||||||
# Untag all internal nodes, which are usually the base images
|
|
||||||
for internal_id in ids_to_children.keys():
|
|
||||||
internal = ids_to_images[internal_id]
|
|
||||||
if internal['RepoTags'] != NO_TAGS:
|
|
||||||
for tag_name in internal['RepoTags']:
|
|
||||||
self._build_cl.remove_image(tag_name)
|
|
||||||
|
|
||||||
# Make sure all of the leaves have gibberish tags, and remove those older than our expiration
|
|
||||||
leaves = set(ids_to_images.keys()) - set(ids_to_children.keys())
|
|
||||||
now = datetime.now()
|
|
||||||
for leaf_id in leaves:
|
|
||||||
leaf = ids_to_images[leaf_id]
|
|
||||||
|
|
||||||
created = datetime.fromtimestamp(leaf['Created'])
|
|
||||||
expiration = created + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS)
|
|
||||||
if expiration > now:
|
|
||||||
# Assign a new tag as a uuid to preserve this image
|
|
||||||
new_tag = str(uuid4())
|
|
||||||
self._build_cl.tag(leaf['Id'], new_tag)
|
|
||||||
|
|
||||||
# Remove all of the existing tags
|
|
||||||
if leaf['RepoTags'] != NO_TAGS:
|
|
||||||
for tag_name in leaf['RepoTags']:
|
|
||||||
self._build_cl.remove_image(tag_name)
|
|
||||||
|
|
||||||
def __prune_cache(self):
|
|
||||||
""" Remove the oldest leaf image until the cache size is the desired size. """
|
|
||||||
|
|
||||||
logger.debug('Pruning cache to size(gb): %s', self._cache_size_gb)
|
|
||||||
while self.__compute_cache_size_gb() > self._cache_size_gb:
|
|
||||||
logger.debug('Locating the oldest image in the cache to prune.')
|
|
||||||
# Find the oldest tagged image and remove it
|
|
||||||
oldest_creation_time = datetime.max
|
|
||||||
oldest_image = None
|
|
||||||
for image in self._build_cl.images():
|
|
||||||
created = datetime.fromtimestamp(image['Created'])
|
|
||||||
if created < oldest_creation_time:
|
|
||||||
oldest_creation_time = created
|
|
||||||
oldest_image = image
|
|
||||||
|
|
||||||
logger.debug('Removing oldest image from cache: %s', oldest_image['Id'])
|
|
||||||
# Remove all tags on the oldest image
|
|
||||||
if oldest_image['RepoTags'] == NO_TAGS:
|
|
||||||
# Remove the image id directly since there are no tags
|
|
||||||
self._build_cl.remove_image(oldest_image['Id'])
|
|
||||||
else:
|
|
||||||
# Remove all tags
|
|
||||||
for tag_name in oldest_image['RepoTags']:
|
|
||||||
self._build_cl.remove_image(tag_name)
|
|
||||||
|
|
||||||
def __compute_cache_size_gb(self):
|
|
||||||
all_images = self._build_cl.images(all=True)
|
|
||||||
size_in_bytes = sum([img['Size'] for img in all_images])
|
|
||||||
size_in_gb = float(size_in_bytes)/1024/1024/1024
|
|
||||||
logger.debug('Computed cache size(gb) of: %s', size_in_gb)
|
|
||||||
return size_in_gb
|
|
||||||
|
|
||||||
def __compute_image_graph(self):
|
|
||||||
all_images = self._build_cl.images(all=True)
|
|
||||||
|
|
||||||
ids_to_images = {}
|
|
||||||
ids_to_children = defaultdict(list)
|
|
||||||
for image in all_images:
|
|
||||||
if image['ParentId'] != '':
|
|
||||||
ids_to_children[image['ParentId']].append(image)
|
|
||||||
ids_to_images[image['Id']] = image
|
|
||||||
|
|
||||||
return (ids_to_images, ids_to_children)
|
|
||||||
|
|
||||||
|
|
||||||
class DockerfileBuildWorker(Worker):
|
|
||||||
def __init__(self, cache_size_gb, *vargs, **kwargs):
|
|
||||||
super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)
|
|
||||||
|
|
||||||
self._mime_processors = {
|
|
||||||
'application/zip': DockerfileBuildWorker.__prepare_zip,
|
|
||||||
'application/x-zip-compressed': DockerfileBuildWorker.__prepare_zip,
|
|
||||||
'text/plain': DockerfileBuildWorker.__prepare_dockerfile,
|
|
||||||
'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
|
|
||||||
'application/x-tar': DockerfileBuildWorker.__prepare_tarball,
|
|
||||||
'application/gzip': DockerfileBuildWorker.__prepare_tarball,
|
|
||||||
'application/x-gzip': DockerfileBuildWorker.__prepare_tarball,
|
|
||||||
}
|
|
||||||
|
|
||||||
self._timeout = Event()
|
|
||||||
self._cache_size_gb = cache_size_gb
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __prepare_zip(request_file):
|
|
||||||
build_dir = mkdtemp(prefix='docker-build-')
|
|
||||||
|
|
||||||
# Save the zip file to temp somewhere
|
|
||||||
with TemporaryFile() as zip_file:
|
|
||||||
zip_file.write(request_file.content)
|
|
||||||
to_extract = ZipFile(zip_file)
|
|
||||||
to_extract.extractall(build_dir)
|
|
||||||
|
|
||||||
return build_dir
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __prepare_dockerfile(request_file):
|
|
||||||
build_dir = mkdtemp(prefix='docker-build-')
|
|
||||||
dockerfile_path = os.path.join(build_dir, "Dockerfile")
|
|
||||||
with open(dockerfile_path, 'w') as dockerfile:
|
|
||||||
dockerfile.write(request_file.content)
|
|
||||||
|
|
||||||
return build_dir
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __prepare_tarball(request_file):
|
|
||||||
build_dir = mkdtemp(prefix='docker-build-')
|
|
||||||
|
|
||||||
# Save the zip file to temp somewhere
|
|
||||||
with tarfile.open(mode='r|*', fileobj=request_file.raw) as tar_stream:
|
|
||||||
safe_extractall(tar_stream, build_dir)
|
|
||||||
|
|
||||||
return build_dir
|
|
||||||
|
|
||||||
def watchdog(self):
|
|
||||||
logger.debug('Running build watchdog code.')
|
|
||||||
try:
|
|
||||||
docker_cl = Client(**build_docker_args())
|
|
||||||
|
|
||||||
# Iterate the running containers and kill ones that have been running more than 20 minutes
|
|
||||||
for container in docker_cl.containers():
|
|
||||||
start_time = datetime.fromtimestamp(container['Created'])
|
|
||||||
running_time = datetime.now() - start_time
|
|
||||||
if running_time > timedelta(minutes=TIMEOUT_PERIOD_MINUTES):
|
|
||||||
logger.warning('Container has been running too long: %s with command: %s',
|
|
||||||
container['Id'], container['Command'])
|
|
||||||
docker_cl.kill(container['Id'])
|
|
||||||
self._timeout.set()
|
|
||||||
|
|
||||||
except ConnectionError as exc:
|
|
||||||
logger.exception('Watchdog exception')
|
|
||||||
raise WorkerUnhealthyException(exc.message)
|
|
||||||
|
|
||||||
def process_queue_item(self, job_details):
|
|
||||||
self._timeout.clear()
|
|
||||||
|
|
||||||
# Make sure we have more information for debugging problems
|
|
||||||
sentry.client.user_context(job_details)
|
|
||||||
|
|
||||||
repository_build = model.get_repository_build(job_details['build_uuid'])
|
|
||||||
|
|
||||||
pull_credentials = job_details.get('pull_credentials', None)
|
|
||||||
|
|
||||||
job_config = json.loads(repository_build.job_config)
|
|
||||||
|
|
||||||
resource_url = user_files.get_file_url(repository_build.resource_key, requires_cors=False)
|
|
||||||
tag_names = job_config['docker_tags']
|
|
||||||
build_subdir = job_config['build_subdir']
|
|
||||||
|
|
||||||
# TODO remove the top branch when there are no more jobs with a repository config
|
|
||||||
if 'repository' in job_config:
|
|
||||||
repo = job_config['repository']
|
|
||||||
else:
|
|
||||||
repo = '%s/%s/%s' % (job_config['registry'],
|
|
||||||
repository_build.repository.namespace_user.username,
|
|
||||||
repository_build.repository.name)
|
|
||||||
|
|
||||||
access_token = repository_build.access_token.code
|
|
||||||
|
|
||||||
log_appender = partial(build_logs.append_log_message, repository_build.uuid)
|
|
||||||
|
|
||||||
# Lookup and save the version of docker being used.
|
|
||||||
try:
|
|
||||||
docker_cl = Client(**build_docker_args())
|
|
||||||
docker_version = docker_cl.version().get('Version', '')
|
|
||||||
except ConnectionError as exc:
|
|
||||||
logger.exception('Initial connection exception')
|
|
||||||
raise WorkerUnhealthyException(exc.message)
|
|
||||||
|
|
||||||
dash = docker_version.find('-')
|
|
||||||
|
|
||||||
# Strip any -tutum or whatever off of the version.
|
|
||||||
if dash > 0:
|
|
||||||
docker_version = docker_version[:dash]
|
|
||||||
|
|
||||||
log_appender('initializing', build_logs.PHASE, log_data={
|
|
||||||
'docker_version': docker_version
|
|
||||||
})
|
|
||||||
|
|
||||||
log_appender('Docker version: %s' % docker_version)
|
|
||||||
|
|
||||||
start_msg = ('Starting job with resource url: %s repo: %s' % (resource_url, repo))
|
|
||||||
logger.debug(start_msg)
|
|
||||||
|
|
||||||
docker_resource = requests.get(resource_url, stream=True)
|
|
||||||
c_type = docker_resource.headers['content-type']
|
|
||||||
|
|
||||||
if ';' in c_type:
|
|
||||||
c_type = c_type.split(';')[0]
|
|
||||||
|
|
||||||
filetype_msg = ('Request to build type: %s with repo: %s and tags: %s' %
|
|
||||||
(c_type, repo, tag_names))
|
|
||||||
logger.info(filetype_msg)
|
|
||||||
log_appender(filetype_msg)
|
|
||||||
|
|
||||||
# Spawn a notification that the build has started.
|
|
||||||
event_data = {
|
|
||||||
'build_id': repository_build.uuid,
|
|
||||||
'build_name': repository_build.display_name,
|
|
||||||
'docker_tags': tag_names,
|
|
||||||
'trigger_id': repository_build.trigger.uuid,
|
|
||||||
'trigger_kind': repository_build.trigger.service.name
|
|
||||||
}
|
|
||||||
|
|
||||||
spawn_notification(repository_build.repository, 'build_start', event_data,
|
|
||||||
subpage='build?current=%s' % repository_build.uuid,
|
|
||||||
pathargs=['build', repository_build.uuid])
|
|
||||||
|
|
||||||
|
|
||||||
# Setup a handler for spawning failure messages.
|
|
||||||
def spawn_failure(message, event_data):
|
|
||||||
event_data['error_message'] = message
|
|
||||||
spawn_notification(repository_build.repository, 'build_failure', event_data,
|
|
||||||
subpage='build?current=%s' % repository_build.uuid,
|
|
||||||
pathargs=['build', repository_build.uuid])
|
|
||||||
|
|
||||||
if c_type not in self._mime_processors:
|
|
||||||
log_appender('error', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.ERROR
|
|
||||||
repository_build.save()
|
|
||||||
message = 'Unknown mime-type: %s' % c_type
|
|
||||||
log_appender(message, build_logs.ERROR)
|
|
||||||
spawn_failure(message, event_data)
|
|
||||||
raise JobException(message)
|
|
||||||
|
|
||||||
# Try to build the build directory package from the buildpack.
|
|
||||||
log_appender('unpacking', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.UNPACKING
|
|
||||||
repository_build.save()
|
|
||||||
|
|
||||||
build_dir = None
|
|
||||||
try:
|
|
||||||
build_dir = self._mime_processors[c_type](docker_resource)
|
|
||||||
except Exception as ex:
|
|
||||||
cur_message = ex.message or 'Error while unpacking build package'
|
|
||||||
log_appender(cur_message, build_logs.ERROR)
|
|
||||||
spawn_failure(cur_message, event_data)
|
|
||||||
raise JobException(cur_message)
|
|
||||||
|
|
||||||
# Start the build process.
|
|
||||||
try:
|
|
||||||
with DockerfileBuildContext(build_dir, build_subdir, repo, tag_names, access_token,
|
|
||||||
repository_build.uuid, self._cache_size_gb,
|
|
||||||
pull_credentials) as build_ctxt:
|
|
||||||
log_appender('pulling', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.PULLING
|
|
||||||
repository_build.save()
|
|
||||||
build_ctxt.pull()
|
|
||||||
|
|
||||||
self.extend_processing(RESERVATION_TIME)
|
|
||||||
|
|
||||||
log_appender('building', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.BUILDING
|
|
||||||
repository_build.save()
|
|
||||||
built_image = build_ctxt.build(self.extend_processing)
|
|
||||||
|
|
||||||
if not built_image:
|
|
||||||
log_appender('error', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.ERROR
|
|
||||||
repository_build.save()
|
|
||||||
|
|
||||||
message = 'Unable to build dockerfile.'
|
|
||||||
if self._timeout.is_set():
|
|
||||||
message = 'Build step was terminated after %s minutes.' % TIMEOUT_PERIOD_MINUTES
|
|
||||||
|
|
||||||
log_appender(message, build_logs.ERROR)
|
|
||||||
raise JobException(message)
|
|
||||||
|
|
||||||
self.extend_processing(RESERVATION_TIME)
|
|
||||||
|
|
||||||
log_appender('pushing', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.PUSHING
|
|
||||||
repository_build.save()
|
|
||||||
|
|
||||||
build_ctxt.push(built_image)
|
|
||||||
|
|
||||||
log_appender('complete', build_logs.PHASE)
|
|
||||||
repository_build.phase = BUILD_PHASE.COMPLETE
|
|
||||||
repository_build.save()
|
|
||||||
|
|
||||||
# Spawn a notification that the build has completed.
|
|
||||||
spawn_notification(repository_build.repository, 'build_success', event_data,
|
|
||||||
subpage='build?current=%s' % repository_build.uuid,
|
|
||||||
pathargs=['build', repository_build.uuid])
|
|
||||||
|
|
||||||
except WorkerUnhealthyException as exc:
|
|
||||||
# Spawn a notification that the build has failed.
|
|
||||||
log_appender('Worker has become unhealthy. Will retry shortly.', build_logs.ERROR)
|
|
||||||
spawn_failure(exc.message, event_data)
|
|
||||||
|
|
||||||
# Raise the exception to the queue.
|
|
||||||
raise exc
|
|
||||||
|
|
||||||
except JobException as exc:
|
|
||||||
# Spawn a notification that the build has failed.
|
|
||||||
spawn_failure(exc.message, event_data)
|
|
||||||
|
|
||||||
# Raise the exception to the queue.
|
|
||||||
raise exc
|
|
||||||
|
|
||||||
except ConnectionError as exc:
|
|
||||||
# A connection exception means the worker has become unhealthy (Docker is down)
|
|
||||||
# so we re-raise as that exception.
|
|
||||||
logger.exception('Build connection exception')
|
|
||||||
log_appender('Docker daemon has gone away. Will retry shortly.', build_logs.ERROR)
|
|
||||||
raise WorkerUnhealthyException(exc.message)
|
|
||||||
|
|
||||||
except Exception as exc:
|
|
||||||
# Spawn a notification that the build has failed.
|
|
||||||
spawn_failure(exc.message, event_data)
|
|
||||||
|
|
||||||
# Write the error to the logs.
|
|
||||||
sentry.client.captureException()
|
|
||||||
log_appender('error', build_logs.PHASE)
|
|
||||||
logger.exception('Exception when processing request.')
|
|
||||||
repository_build.phase = BUILD_PHASE.ERROR
|
|
||||||
repository_build.save()
|
|
||||||
log_appender(str(exc), build_logs.ERROR)
|
|
||||||
|
|
||||||
# Raise the exception to the queue.
|
|
||||||
raise JobException(str(exc))
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
desc = 'Worker daemon to monitor dockerfile build'
|
|
||||||
parser = argparse.ArgumentParser(description=desc)
|
|
||||||
parser.add_argument('--cachegb', default=20, type=float,
|
|
||||||
help='Maximum cache size in gigabytes.')
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
worker = DockerfileBuildWorker(args.cachegb, dockerfile_build_queue,
|
|
||||||
reservation_seconds=RESERVATION_TIME)
|
|
||||||
worker.start(start_status_server_port=8000)
|
|
|
@ -98,7 +98,7 @@ class Worker(object):
|
||||||
def extend_processing(self, seconds_from_now):
|
def extend_processing(self, seconds_from_now):
|
||||||
with self._current_item_lock:
|
with self._current_item_lock:
|
||||||
if self.current_queue_item is not None:
|
if self.current_queue_item is not None:
|
||||||
WorkQueue.extend_processing(self.current_queue_item, seconds_from_now)
|
self._queue.extend_processing(self.current_queue_item, seconds_from_now)
|
||||||
|
|
||||||
def run_watchdog(self):
|
def run_watchdog(self):
|
||||||
logger.debug('Running watchdog.')
|
logger.debug('Running watchdog.')
|
||||||
|
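Presumably the extend_processing change above matters because the old call went through the class rather than the worker's own queue instance; if extend_processing is an ordinary instance method, as the new call implies, the old form would hand the queue item to Python 2 as the missing self. A small, self-contained illustration of that difference (illustrative WorkQueue, not Quay's real one):

class WorkQueue(object):
    # Illustrative stand-in for the real queue class.
    def extend_processing(self, item, seconds_from_now):
        print 'extending %r by %s seconds' % (item, seconds_from_now)

queue = WorkQueue()
item = 'queue-item-1'

queue.extend_processing(item, 60)           # new form: bound call on the instance

try:
    WorkQueue.extend_processing(item, 60)   # old form: unbound call, item is not a WorkQueue
except TypeError as exc:
    print 'unbound call fails:', exc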
|