Merge master changes

Joseph Schorr 2015-02-05 13:11:16 -05:00
commit 9dfe523615
46 changed files with 1635 additions and 1232 deletions

View file

@@ -9,14 +9,8 @@ version = 1
[[container]]
name = "quay"
Dockerfile = "Dockerfile.web"
Dockerfile = "Dockerfile"
project = "quay"
tags = ["git:short"]
[[container]]
name = "builder"
Dockerfile = "Dockerfile.buildworker"
project = "builder"
tags = ["git:short"]
# vim:ft=toml

View file

@@ -1,16 +1,12 @@
# vim:ft=dockerfile
###############################
# BEGIN COMMON SECTION
###############################
FROM phusion/baseimage:0.9.15
FROM phusion/baseimage:0.9.16
ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root
# Install the dependencies.
RUN apt-get update # 11DEC2014
RUN apt-get update # 29JAN2015
# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev
@@ -20,15 +16,6 @@ ADD requirements.txt requirements.txt
RUN virtualenv --distribute venv
RUN venv/bin/pip install -r requirements.txt
RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
###############################
# END COMMON SECTION
###############################
# Remove SSH.
RUN rm -rf /etc/service/sshd /etc/my_init.d/00_regen_ssh_host_keys.sh
# Install the binary dependencies
ADD binary_dependencies binary_dependencies
RUN gdebi --n binary_dependencies/*.deb
@@ -41,6 +28,10 @@ RUN npm install -g grunt-cli
ADD grunt grunt
RUN cd grunt && npm install
RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
RUN apt-get autoremove -y
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Add all of the files!
ADD . .
@@ -65,14 +56,9 @@ ADD conf/init/buildmanager /etc/service/buildmanager
RUN mkdir static/fonts static/ldn
RUN venv/bin/python -m external_libraries
RUN apt-get autoremove -y
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Run the tests
RUN TEST=true venv/bin/python -m unittest discover
VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp"]
VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]
EXPOSE 443 80
CMD ["/sbin/my_init"]
EXPOSE 443 8443 80

View file

@@ -1,49 +0,0 @@
# vim:ft=dockerfile
###############################
# BEGIN COMMON SECTION
###############################
FROM phusion/baseimage:0.9.15
ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root
# Install the dependencies.
RUN apt-get update # 11DEC2014
# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev
# Build the python dependencies
ADD requirements.txt requirements.txt
RUN virtualenv --distribute venv
RUN venv/bin/pip install -r requirements.txt
RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
###############################
# END COMMON SECTION
###############################
RUN apt-get install -y lxc aufs-tools
RUN usermod -v 100000-200000 -w 100000-200000 root
ADD binary_dependencies/builder binary_dependencies/builder
RUN gdebi --n binary_dependencies/builder/*.deb
ADD . .
ADD conf/init/svlogd_config /svlogd_config
ADD conf/init/preplogsdir.sh /etc/my_init.d/
ADD conf/init/tutumdocker /etc/service/tutumdocker
ADD conf/init/dockerfilebuild /etc/service/dockerfilebuild
RUN apt-get remove -y --auto-remove nodejs npm git phantomjs
RUN apt-get autoremove -y
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
VOLUME ["/var/lib/docker", "/var/lib/lxc", "/conf/stack", "/var/log"]
CMD ["/sbin/my_init"]

build.sh Executable file
View file

@@ -0,0 +1,2 @@
docker build -t quay.io/quay/quay:`git rev-parse --short HEAD` .
echo quay.io/quay/quay:`git rev-parse --short HEAD`

buildman/asyncutil.py Normal file
View file

@@ -0,0 +1,27 @@
from functools import partial, wraps
from trollius import get_event_loop
class AsyncWrapper(object):
""" Wrapper class which will transform a syncronous library to one that can be used with
trollius coroutines.
"""
def __init__(self, delegate, loop=None, executor=None):
self._loop = loop if loop is not None else get_event_loop()
self._delegate = delegate
self._executor = executor
def __getattr__(self, attrib):
delegate_attr = getattr(self._delegate, attrib)
if not callable(delegate_attr):
return delegate_attr
def wrapper(*args, **kwargs):
""" Wraps the delegate_attr with primitives that will transform sync calls to ones shelled
out to a thread pool.
"""
callable_delegate_attr = partial(delegate_attr, *args, **kwargs)
return self._loop.run_in_executor(self._executor, callable_delegate_attr)
return wrapper
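
Note on usage: a minimal sketch of driving the wrapper above from a trollius coroutine, assuming a hypothetical synchronous SlowClient and the module path shown in this diff. Attribute access returns a callable that runs the real method on the thread pool, handing the event loop a future to await.

from concurrent.futures import ThreadPoolExecutor
from trollius import From, coroutine, get_event_loop
from buildman.asyncutil import AsyncWrapper

class SlowClient(object):
    """ Hypothetical synchronous client, for illustration only. """
    def fetch(self, key):
        return 'value-for-' + key

@coroutine
def demo():
    client = AsyncWrapper(SlowClient(), executor=ThreadPoolExecutor(2))
    # The blocking fetch() runs on the pool; the coroutine just waits.
    value = yield From(client.fetch('some-key'))
    print(value)  # value-for-some-key

get_event_loop().run_until_complete(demo())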

View file

@@ -6,6 +6,7 @@ import time
from app import app, userfiles as user_files, build_logs, dockerfile_build_queue
from buildman.manager.enterprise import EnterpriseManager
from buildman.manager.ephemeral import EphemeralBuilderManager
from buildman.server import BuilderServer
from trollius import SSLContext
@@ -13,11 +14,17 @@ from trollius import SSLContext
logger = logging.getLogger(__name__)
BUILD_MANAGERS = {
'enterprise': EnterpriseManager
'enterprise': EnterpriseManager,
'ephemeral': EphemeralBuilderManager,
}
EXTERNALLY_MANAGED = 'external'
DEFAULT_WEBSOCKET_PORT = 8787
DEFAULT_CONTROLLER_PORT = 8686
LOG_FORMAT = "%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s"
def run_build_manager():
if not features.BUILD_SUPPORT:
logger.debug('Building is disabled. Please enable the feature flag')
@@ -41,6 +48,19 @@ def run_build_manager():
if manager_klass is None:
return
manager_hostname = os.environ.get('BUILDMAN_HOSTNAME',
app.config.get('BUILDMAN_HOSTNAME',
app.config['SERVER_HOSTNAME']))
websocket_port = int(os.environ.get('BUILDMAN_WEBSOCKET_PORT',
app.config.get('BUILDMAN_WEBSOCKET_PORT',
DEFAULT_WEBSOCKET_PORT)))
controller_port = int(os.environ.get('BUILDMAN_CONTROLLER_PORT',
app.config.get('BUILDMAN_CONTROLLER_PORT',
DEFAULT_CONTROLLER_PORT)))
logger.debug('Will pass buildman hostname %s to builders for websocket connection',
manager_hostname)
logger.debug('Starting build manager with lifecycle "%s"', build_manager_config[0])
ssl_context = None
if os.environ.get('SSL_CONFIG'):
@@ -50,9 +70,10 @@ def run_build_manager():
os.path.join(os.environ.get('SSL_CONFIG'), 'ssl.key'))
server = BuilderServer(app.config['SERVER_HOSTNAME'], dockerfile_build_queue, build_logs,
user_files, manager_klass)
server.run('0.0.0.0', ssl=ssl_context)
user_files, manager_klass, build_manager_config[1], manager_hostname)
server.run('0.0.0.0', websocket_port, controller_port, ssl=ssl_context)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
logging.getLogger('peewee').setLevel(logging.WARN)
run_build_manager()
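
The three port lookups above share one precedence rule: environment variable first, then the Flask app config, then a hard-coded default. A condensed sketch of that pattern (resolve_port is a hypothetical helper, not part of the diff):

import os

DEFAULT_WEBSOCKET_PORT = 8787

def resolve_port(name, config, default):
    # Environment wins, then app config, then the default.
    return int(os.environ.get(name, config.get(name, default)))

# websocket_port = resolve_port('BUILDMAN_WEBSOCKET_PORT', app.config,
#                               DEFAULT_WEBSOCKET_PORT)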

View file

@@ -6,10 +6,10 @@ import trollius
import re
from autobahn.wamp.exception import ApplicationError
from trollius.coroutines import From
from buildman.server import BuildJobResult
from buildman.component.basecomponent import BaseComponent
from buildman.jobutil.buildjob import BuildJobLoadException
from buildman.jobutil.buildpack import BuildPackage, BuildPackageException
from buildman.jobutil.buildstatus import StatusHandler
from buildman.jobutil.workererror import WorkerError
@@ -20,7 +20,7 @@ HEARTBEAT_DELTA = datetime.timedelta(seconds=30)
HEARTBEAT_TIMEOUT = 10
INITIAL_TIMEOUT = 25
SUPPORTED_WORKER_VERSIONS = ['0.1-beta']
SUPPORTED_WORKER_VERSIONS = ['0.1-beta', '0.2']
logger = logging.getLogger(__name__)
@@ -39,13 +39,14 @@ class BuildComponent(BaseComponent):
self.builder_realm = realm
self.parent_manager = None
self.server_hostname = None
self.registry_hostname = None
self._component_status = ComponentStatus.JOINING
self._last_heartbeat = None
self._current_job = None
self._build_status = None
self._image_info = None
self._worker_version = None
BaseComponent.__init__(self, config, **kwargs)
@@ -54,69 +55,89 @@ class BuildComponent(BaseComponent):
def onJoin(self, details):
logger.debug('Registering methods and listeners for component %s', self.builder_realm)
yield From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
yield From(self.register(self._ping, u'io.quay.buildworker.ping'))
yield From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
yield From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
yield trollius.From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
yield trollius.From(self.register(self._ping, u'io.quay.buildworker.ping'))
yield trollius.From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
yield trollius.From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
self._set_status(ComponentStatus.WAITING)
yield trollius.From(self._set_status(ComponentStatus.WAITING))
def is_ready(self):
""" Determines whether a build component is ready to begin a build. """
return self._component_status == ComponentStatus.RUNNING
@trollius.coroutine
def start_build(self, build_job):
""" Starts a build. """
logger.debug('Starting build for component %s (worker version: %s)',
self.builder_realm, self._worker_version)
self._current_job = build_job
self._build_status = StatusHandler(self.build_logs, build_job.repo_build())
self._build_status = StatusHandler(self.build_logs, build_job.repo_build.uuid)
self._image_info = {}
self._set_status(ComponentStatus.BUILDING)
yield trollius.From(self._set_status(ComponentStatus.BUILDING))
# Retrieve the job's buildpack.
buildpack_url = self.user_files.get_file_url(build_job.repo_build().resource_key,
# Send the notification that the build has started.
build_job.send_notification('build_start')
# Parse the build configuration.
try:
build_config = build_job.build_config
except BuildJobLoadException as irbe:
self._build_failure('Could not load build job information', irbe)
base_image_information = {}
buildpack_url = self.user_files.get_file_url(build_job.repo_build.resource_key,
requires_cors=False)
logger.debug('Retreiving build package: %s', buildpack_url)
buildpack = None
try:
buildpack = BuildPackage.from_url(buildpack_url)
except BuildPackageException as bpe:
self._build_failure('Could not retrieve build package', bpe)
return
# TODO(jschorr): Remove as soon as the fleet has been transitioned to 0.2.
if self._worker_version == '0.1-beta':
# Retrieve the job's buildpack.
logger.debug('Retrieving build package: %s', buildpack_url)
buildpack = None
try:
buildpack = BuildPackage.from_url(buildpack_url)
except BuildPackageException as bpe:
self._build_failure('Could not retrieve build package', bpe)
raise trollius.Return()
# Extract the base image information from the Dockerfile.
parsed_dockerfile = None
logger.debug('Parsing dockerfile')
# Extract the base image information from the Dockerfile.
parsed_dockerfile = None
logger.debug('Parsing dockerfile')
build_config = build_job.build_config()
try:
parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir'))
except BuildPackageException as bpe:
self._build_failure('Could not find Dockerfile in build package', bpe)
return
try:
parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir'))
except BuildPackageException as bpe:
self._build_failure('Could not find Dockerfile in build package', bpe)
raise trollius.Return()
image_and_tag_tuple = parsed_dockerfile.get_image_and_tag()
if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
self._build_failure('Missing FROM line in Dockerfile')
return
image_and_tag_tuple = parsed_dockerfile.get_image_and_tag()
if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
self._build_failure('Missing FROM line in Dockerfile')
raise trollius.Return()
base_image_information = {
'repository': image_and_tag_tuple[0],
'tag': image_and_tag_tuple[1]
}
base_image_information = {
'repository': image_and_tag_tuple[0],
'tag': image_and_tag_tuple[1]
}
# Extract the number of steps from the Dockerfile.
with self._build_status as status_dict:
status_dict['total_commands'] = len(parsed_dockerfile.commands)
# Extract the number of steps from the Dockerfile.
with self._build_status as status_dict:
status_dict['total_commands'] = len(parsed_dockerfile.commands)
else:
# TODO(jschorr): This is a HACK to make sure the progress bar (sort of) continues working
# until such time as we have the caching code in place.
with self._build_status as status_dict:
status_dict['total_commands'] = 25
# Add the pull robot information, if any.
if build_config.get('pull_credentials') is not None:
base_image_information['username'] = build_config['pull_credentials'].get('username', '')
base_image_information['password'] = build_config['pull_credentials'].get('password', '')
if build_job.pull_credentials:
base_image_information['username'] = build_job.pull_credentials.get('username', '')
base_image_information['password'] = build_job.pull_credentials.get('password', '')
# Retrieve the repository's fully qualified name.
repo = build_job.repo_build().repository
repo = build_job.repo_build.repository
repository_name = repo.namespace_user.username + '/' + repo.name
# Parse the build queue item into build arguments.
@@ -128,17 +149,17 @@ class BuildComponent(BaseComponent):
# push_token: The token to use to push the built image.
# tag_names: The name(s) of the tag(s) for the newly built image.
# base_image: The image name and credentials to use to conduct the base image pull.
# repository: The repository to pull.
# tag: The tag to pull.
# repository: The repository to pull (DEPRECATED 0.2)
# tag: The tag to pull (DEPRECATED in 0.2)
# username: The username for pulling the base image (if any).
# password: The password for pulling the base image (if any).
build_arguments = {
'build_package': buildpack_url,
'sub_directory': build_config.get('build_subdir', ''),
'repository': repository_name,
'registry': self.server_hostname,
'pull_token': build_job.repo_build().access_token.code,
'push_token': build_job.repo_build().access_token.code,
'registry': self.registry_hostname,
'pull_token': build_job.repo_build.access_token.code,
'push_token': build_job.repo_build.access_token.code,
'tag_names': build_config.get('docker_tags', ['latest']),
'base_image': base_image_information,
'cached_tag': build_job.determine_cached_tag() or ''
@@ -148,9 +169,7 @@ class BuildComponent(BaseComponent):
logger.debug('Invoking build: %s', self.builder_realm)
logger.debug('With Arguments: %s', build_arguments)
return (self
.call("io.quay.builder.build", **build_arguments)
.add_done_callback(self._build_complete))
self.call("io.quay.builder.build", **build_arguments).add_done_callback(self._build_complete)
@staticmethod
def _total_completion(statuses, total_images):
@@ -241,14 +260,14 @@ class BuildComponent(BaseComponent):
def _build_failure(self, error_message, exception=None):
""" Handles and logs a failed build. """
self._build_status.set_error(error_message, {
'internal_error': exception.message if exception else None
'internal_error': str(exception) if exception else None
})
build_id = self._current_job.repo_build().uuid
build_id = self._current_job.repo_build.uuid
logger.warning('Build %s failed with message: %s', build_id, error_message)
# Mark that the build has finished (in an error state)
self._build_finished(BuildJobResult.ERROR)
trollius.async(self._build_finished(BuildJobResult.ERROR))
def _build_complete(self, result):
""" Wraps up a completed build. Handles any errors and calls self._build_finished. """
@@ -256,7 +275,10 @@ class BuildComponent(BaseComponent):
# Retrieve the result. This will raise an ApplicationError on any error that occurred.
result.result()
self._build_status.set_phase(BUILD_PHASE.COMPLETE)
self._build_finished(BuildJobResult.COMPLETE)
trollius.async(self._build_finished(BuildJobResult.COMPLETE))
# Send the notification that the build has completed successfully.
self._current_job.send_notification('build_success')
except ApplicationError as aex:
worker_error = WorkerError(aex.error, aex.kwargs.get('base_error'))
@@ -264,52 +286,66 @@ class BuildComponent(BaseComponent):
self._build_status.set_error(worker_error.public_message(), worker_error.extra_data(),
internal_error=worker_error.is_internal_error())
# Send the notification that the build has failed.
self._current_job.send_notification('build_failure',
error_message=worker_error.public_message())
# Mark the build as completed.
if worker_error.is_internal_error():
self._build_finished(BuildJobResult.INCOMPLETE)
trollius.async(self._build_finished(BuildJobResult.INCOMPLETE))
else:
self._build_finished(BuildJobResult.ERROR)
trollius.async(self._build_finished(BuildJobResult.ERROR))
@trollius.coroutine
def _build_finished(self, job_status):
""" Alerts the parent that a build has completed and sets the status back to running. """
self.parent_manager.job_completed(self._current_job, job_status, self)
yield trollius.From(self.parent_manager.job_completed(self._current_job, job_status, self))
self._current_job = None
# Set the component back to a running state.
self._set_status(ComponentStatus.RUNNING)
yield trollius.From(self._set_status(ComponentStatus.RUNNING))
@staticmethod
def _ping():
""" Ping pong. """
return 'pong'
@trollius.coroutine
def _on_ready(self, token, version):
if not version in SUPPORTED_WORKER_VERSIONS:
logger.warning('Build component (token "%s") is running an out-of-date version: %s', version)
return False
self._worker_version = version
if self._component_status != 'waiting':
if not version in SUPPORTED_WORKER_VERSIONS:
logger.warning('Build component (token "%s") is running an out-of-date version: %s', token,
version)
raise trollius.Return(False)
if self._component_status != ComponentStatus.WAITING:
logger.warning('Build component (token "%s") is already connected', self.expected_token)
return False
raise trollius.Return(False)
if token != self.expected_token:
logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token, token)
return False
logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token,
token)
raise trollius.Return(False)
self._set_status(ComponentStatus.RUNNING)
yield trollius.From(self._set_status(ComponentStatus.RUNNING))
# Start the heartbeat check and updating loop.
loop = trollius.get_event_loop()
loop.create_task(self._heartbeat())
logger.debug('Build worker %s is connected and ready', self.builder_realm)
return True
raise trollius.Return(True)
@trollius.coroutine
def _set_status(self, phase):
if phase == ComponentStatus.RUNNING:
yield trollius.From(self.parent_manager.build_component_ready(self))
self._component_status = phase
def _on_heartbeat(self):
""" Updates the last known heartbeat. """
self._last_heartbeat = datetime.datetime.now()
self._last_heartbeat = datetime.datetime.utcnow()
@trollius.coroutine
def _heartbeat(self):
@@ -317,13 +353,13 @@ class BuildComponent(BaseComponent):
and updating the heartbeat in the build status dictionary (if applicable). This allows
the build system to catch crashes from either end.
"""
yield From(trollius.sleep(INITIAL_TIMEOUT))
yield trollius.From(trollius.sleep(INITIAL_TIMEOUT))
while True:
# If the component is no longer running or actively building, nothing more to do.
if (self._component_status != ComponentStatus.RUNNING and
self._component_status != ComponentStatus.BUILDING):
return
raise trollius.Return()
# If there is an active build, write the heartbeat to its status.
build_status = self._build_status
@@ -331,35 +367,36 @@ class BuildComponent(BaseComponent):
with build_status as status_dict:
status_dict['heartbeat'] = int(time.time())
# Mark the build item.
current_job = self._current_job
if current_job is not None:
self.parent_manager.job_heartbeat(current_job)
yield trollius.From(self.parent_manager.job_heartbeat(current_job))
# Check the heartbeat from the worker.
logger.debug('Checking heartbeat on realm %s', self.builder_realm)
if self._last_heartbeat and self._last_heartbeat < datetime.datetime.now() - HEARTBEAT_DELTA:
self._timeout()
return
if (self._last_heartbeat and
self._last_heartbeat < datetime.datetime.utcnow() - HEARTBEAT_DELTA):
yield trollius.From(self._timeout())
raise trollius.Return()
yield From(trollius.sleep(HEARTBEAT_TIMEOUT))
yield trollius.From(trollius.sleep(HEARTBEAT_TIMEOUT))
@trollius.coroutine
def _timeout(self):
self._set_status(ComponentStatus.TIMED_OUT)
logger.warning('Build component with realm %s has timed out', self.builder_realm)
self._dispose(timed_out=True)
if self._component_status == ComponentStatus.TIMED_OUT:
raise trollius.Return()
yield trollius.From(self._set_status(ComponentStatus.TIMED_OUT))
logger.warning('Build component with realm %s has timed out', self.builder_realm)
def _dispose(self, timed_out=False):
# If we still have a running job, then it has not completed and we need to tell the parent
# manager.
if self._current_job is not None:
if timed_out:
self._build_status.set_error('Build worker timed out', internal_error=True)
self._build_status.set_error('Build worker timed out', internal_error=True)
self.parent_manager.job_completed(self._current_job, BuildJobResult.INCOMPLETE, self)
self._build_status = None
self._current_job = None
# Unregister the current component so that it cannot be invoked again.
self.parent_manager.build_component_disposed(self, timed_out)
self.parent_manager.build_component_disposed(self, True)
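
Many of the changes above swap plain return statements for raise trollius.Return(...). Under trollius, the Python 2 backport of asyncio used throughout this file, a @coroutine is a generator, and a Python 2 generator cannot return a value, so results are delivered by raising Return. A self-contained sketch:

from trollius import Return, coroutine, get_event_loop

SUPPORTED_WORKER_VERSIONS = ['0.1-beta', '0.2']

@coroutine
def is_supported(version):
    # `return False` would be a SyntaxError in a Python 2 generator.
    if version not in SUPPORTED_WORKER_VERSIONS:
        raise Return(False)
    raise Return(True)

print(get_event_loop().run_until_complete(is_supported('0.2')))  # True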

View file

@@ -1,6 +1,9 @@
import json
from cachetools import lru_cache
from endpoints.notificationhelper import spawn_notification
from data import model
import json
class BuildJobLoadException(Exception):
""" Exception raised if a build job could not be instantiated for some reason. """
@@ -9,50 +12,69 @@ class BuildJobLoadException(Exception):
class BuildJob(object):
""" Represents a single in-progress build job. """
def __init__(self, job_item):
self._job_item = job_item
self.job_item = job_item
try:
self._job_details = json.loads(job_item.body)
self.job_details = json.loads(job_item.body)
except ValueError:
raise BuildJobLoadException(
'Could not parse build queue item config with ID %s' % self._job_details['build_uuid']
'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
)
def send_notification(self, kind, error_message=None):
tags = self.build_config.get('docker_tags', ['latest'])
event_data = {
'build_id': self.repo_build.uuid,
'build_name': self.repo_build.display_name,
'docker_tags': tags,
'trigger_id': self.repo_build.trigger.uuid,
'trigger_kind': self.repo_build.trigger.service.name
}
if error_message is not None:
event_data['error_message'] = error_message
spawn_notification(self.repo_build.repository, kind, event_data,
subpage='build?current=%s' % self.repo_build.uuid,
pathargs=['build', self.repo_build.uuid])
@lru_cache(maxsize=1)
def _load_repo_build(self):
try:
self._repo_build = model.get_repository_build(self._job_details['build_uuid'])
return model.get_repository_build(self.job_details['build_uuid'])
except model.InvalidRepositoryBuildException:
raise BuildJobLoadException(
'Could not load repository build with ID %s' % self._job_details['build_uuid'])
'Could not load repository build with ID %s' % self.job_details['build_uuid'])
@property
def repo_build(self):
return self._load_repo_build()
@property
def pull_credentials(self):
""" Returns the pull credentials for this job, or None if none. """
return self.job_details.get('pull_credentials')
@property
def build_config(self):
try:
self._build_config = json.loads(self._repo_build.job_config)
return json.loads(self.repo_build.job_config)
except ValueError:
raise BuildJobLoadException(
'Could not parse repository build job config with ID %s' % self._job_details['build_uuid']
'Could not parse repository build job config with ID %s' % self.job_details['build_uuid']
)
def determine_cached_tag(self):
""" Returns the tag to pull to prime the cache or None if none. """
# TODO(jschorr): Change this to use the more complicated caching rules, once we have caching
# be a pull of things besides the constructed tags.
tags = self._build_config.get('docker_tags', ['latest'])
existing_tags = model.list_repository_tags(self._repo_build.repository.namespace_user.username,
self._repo_build.repository.name)
tags = self.build_config.get('docker_tags', ['latest'])
existing_tags = model.list_repository_tags(self.repo_build.repository.namespace_user.username,
self.repo_build.repository.name)
cached_tags = set(tags) & set([tag.name for tag in existing_tags])
if cached_tags:
return list(cached_tags)[0]
return None
def job_item(self):
""" Returns the job's queue item. """
return self._job_item
def repo_build(self):
""" Returns the repository build DB row for the job. """
return self._repo_build
def build_config(self):
""" Returns the parsed repository build config for the job. """
return self._build_config
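
The refactor above replaces eagerly stored attributes with lazy, memoized lookups: lru_cache(maxsize=1) caches the single result of _load_repo_build, and the repo_build property simply delegates to it. A reduced sketch of the same pattern, using the cachetools decorator imported in this diff (Record and its fields are illustrative):

from cachetools import lru_cache

class Record(object):
    @lru_cache(maxsize=1)
    def _load(self):
        print('expensive database fetch (runs once)')
        return {'build_uuid': 'abc123'}

    @property
    def details(self):
        return self._load()

r = Record()
r.details  # performs the fetch
r.details  # served from the one-entry cache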

View file

@@ -1,12 +1,13 @@
from data.database import BUILD_PHASE
from data import model
import datetime
class StatusHandler(object):
""" Context wrapper for writing status to build logs. """
def __init__(self, build_logs, repository_build):
def __init__(self, build_logs, repository_build_uuid):
self._current_phase = None
self._repository_build = repository_build
self._uuid = repository_build.uuid
self._uuid = repository_build_uuid
self._build_logs = build_logs
self._status = {
@@ -20,6 +21,8 @@ class StatusHandler(object):
self.__exit__(None, None, None)
def _append_log_message(self, log_message, log_type=None, log_data=None):
log_data = log_data or {}
log_data['datetime'] = str(datetime.datetime.now())
self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data)
def append_log(self, log_message, extra_data=None):
@@ -41,8 +44,12 @@ class StatusHandler(object):
self._current_phase = phase
self._append_log_message(phase, self._build_logs.PHASE, extra_data)
self._repository_build.phase = phase
self._repository_build.save()
# Update the repository build with the new phase
repo_build = model.get_repository_build(self._uuid)
repo_build.phase = phase
repo_build.save()
return True
def __enter__(self):
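
StatusHandler doubles as a context manager: entering hands back the mutable status dictionary, and exiting flushes it to the build log store. A usage sketch matching the call sites above (build_logs and build_uuid stand in for real objects):

status = StatusHandler(build_logs, build_uuid)
with status as status_dict:
    # Mutations made inside the block are persisted on exit.
    status_dict['total_commands'] = 25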

View file

@@ -19,13 +19,19 @@ class WorkerError(object):
'is_internal': True
},
'io.quay.builder.dockerfileissue': {
'message': 'Could not find or parse Dockerfile',
'show_base_error': True
},
'io.quay.builder.cannotpullbaseimage': {
'message': 'Could not pull base image',
'show_base_error': True
},
'io.quay.builder.internalerror': {
'message': 'An internal error occurred while building. Please submit a ticket.'
'message': 'An internal error occurred while building. Please submit a ticket.',
'is_internal': True
},
'io.quay.builder.buildrunerror': {

View file

@@ -1,12 +1,17 @@
from trollius import coroutine
class BaseManager(object):
""" Base for all worker managers. """
def __init__(self, register_component, unregister_component, job_heartbeat_callback,
job_complete_callback):
job_complete_callback, manager_hostname, heartbeat_period_sec):
self.register_component = register_component
self.unregister_component = unregister_component
self.job_heartbeat_callback = job_heartbeat_callback
self.job_complete_callback = job_complete_callback
self.manager_hostname = manager_hostname
self.heartbeat_period_sec = heartbeat_period_sec
@coroutine
def job_heartbeat(self, build_job):
""" Method invoked to tell the manager that a job is still running. This method will be called
every few minutes. """
@@ -25,26 +30,36 @@ class BaseManager(object):
"""
raise NotImplementedError
def schedule(self, build_job, loop):
@coroutine
def schedule(self, build_job):
""" Schedules a queue item to be built. Returns True if the item was properly scheduled
and False if all workers are busy.
"""
raise NotImplementedError
def initialize(self):
def initialize(self, manager_config):
""" Runs any initialization code for the manager. Called once the server is in a ready state.
"""
raise NotImplementedError
@coroutine
def build_component_ready(self, build_component):
""" Method invoked whenever a build component announces itself as ready.
"""
raise NotImplementedError
def build_component_disposed(self, build_component, timed_out):
""" Method invoked whenever a build component has been disposed. The timed_out boolean indicates
whether the component's heartbeat timed out.
"""
raise NotImplementedError
@coroutine
def job_completed(self, build_job, job_status, build_component):
""" Method invoked once a job_item has completed, in some manner. The job_status will be
one of: incomplete, error, complete. If incomplete, the job should be requeued.
one of: incomplete, error, complete. Implementations of this method should call
self.job_complete_callback with a status of Incomplete if they wish for the job to be
automatically requeued.
"""
raise NotImplementedError
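
Because several hooks are now trollius coroutines, subclasses must mark their overrides accordingly. A minimal illustrative stub (not part of the diff) that satisfies the updated interface by declining all work:

from trollius import Return, coroutine
from buildman.manager.basemanager import BaseManager

class NoopManager(BaseManager):
    """ Illustrative stub: accepts no jobs. """
    def initialize(self, manager_config):
        pass

    @coroutine
    def schedule(self, build_job):
        raise Return(False)  # report every worker as busy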

View file

@@ -5,7 +5,7 @@ from buildman.component.basecomponent import BaseComponent
from buildman.component.buildcomponent import BuildComponent
from buildman.manager.basemanager import BaseManager
from trollius.coroutines import From
from trollius import From, Return, coroutine
REGISTRATION_REALM = 'registration'
logger = logging.getLogger(__name__)
@@ -28,10 +28,15 @@ class DynamicRegistrationComponent(BaseComponent):
class EnterpriseManager(BaseManager):
""" Build manager implementation for the Enterprise Registry. """
build_components = []
shutting_down = False
def initialize(self):
def __init__(self, *args, **kwargs):
self.ready_components = set()
self.all_components = set()
self.shutting_down = False
super(EnterpriseManager, self).__init__(*args, **kwargs)
def initialize(self, manager_config):
# Add a component which is used by build workers for dynamic registration. Unlike
# production, build workers in enterprise are long-lived and register dynamically.
self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent)
@@ -45,30 +50,37 @@ class EnterpriseManager(BaseManager):
""" Adds a new build component for an Enterprise Registry. """
# Generate a new unique realm ID for the build worker.
realm = str(uuid.uuid4())
component = self.register_component(realm, BuildComponent, token="")
self.build_components.append(component)
new_component = self.register_component(realm, BuildComponent, token="")
self.all_components.add(new_component)
return realm
def schedule(self, build_job, loop):
@coroutine
def schedule(self, build_job):
""" Schedules a build for an Enterprise Registry. """
if self.shutting_down:
return False
if self.shutting_down or not self.ready_components:
raise Return(False)
for component in self.build_components:
if component.is_ready():
loop.call_soon(component.start_build, build_job)
return True
component = self.ready_components.pop()
return False
yield From(component.start_build(build_job))
raise Return(True)
@coroutine
def build_component_ready(self, build_component):
self.ready_components.add(build_component)
def shutdown(self):
self.shutting_down = True
@coroutine
def job_completed(self, build_job, job_status, build_component):
self.job_complete_callback(build_job, job_status)
def build_component_disposed(self, build_component, timed_out):
self.build_components.remove(build_component)
self.all_components.remove(build_component)
if build_component in self.ready_components:
self.ready_components.remove(build_component)
def num_workers(self):
return len(self.build_components)
return len(self.all_components)

View file

@@ -0,0 +1,328 @@
import logging
import etcd
import uuid
import calendar
import os.path
import json
from datetime import datetime, timedelta
from trollius import From, coroutine, Return, async
from concurrent.futures import ThreadPoolExecutor
from urllib3.exceptions import ReadTimeoutError, ProtocolError
from buildman.manager.basemanager import BaseManager
from buildman.manager.executor import PopenExecutor, EC2Executor
from buildman.component.buildcomponent import BuildComponent
from buildman.jobutil.buildjob import BuildJob
from buildman.asyncutil import AsyncWrapper
from util.morecollections import AttrDict
logger = logging.getLogger(__name__)
ETCD_DISABLE_TIMEOUT = 0
class EtcdAction(object):
GET = 'get'
SET = 'set'
EXPIRE = 'expire'
UPDATE = 'update'
DELETE = 'delete'
CREATE = 'create'
COMPARE_AND_SWAP = 'compareAndSwap'
COMPARE_AND_DELETE = 'compareAndDelete'
class EphemeralBuilderManager(BaseManager):
""" Build manager implementation for the Enterprise Registry. """
_executors = {
'popen': PopenExecutor,
'ec2': EC2Executor,
}
_etcd_client_klass = etcd.Client
def __init__(self, *args, **kwargs):
self._shutting_down = False
self._manager_config = None
self._async_thread_executor = None
self._etcd_client = None
self._etcd_realm_prefix = None
self._etcd_builder_prefix = None
self._component_to_job = {}
self._job_uuid_to_component = {}
self._component_to_builder = {}
self._executor = None
# Map of etcd keys being watched to the tasks watching them
self._watch_tasks = {}
super(EphemeralBuilderManager, self).__init__(*args, **kwargs)
def _watch_etcd(self, etcd_key, change_callback, recursive=True):
watch_task_key = (etcd_key, recursive)
def callback_wrapper(changed_key_future):
if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
self._watch_etcd(etcd_key, change_callback)
if changed_key_future.cancelled():
# Due to lack of interest, tomorrow has been cancelled
return
try:
etcd_result = changed_key_future.result()
except (ReadTimeoutError, ProtocolError):
return
change_callback(etcd_result)
if not self._shutting_down:
watch_future = self._etcd_client.watch(etcd_key, recursive=recursive,
timeout=ETCD_DISABLE_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)
logger.debug('Scheduling watch of key: %s%s', etcd_key, '/*' if recursive else '')
self._watch_tasks[watch_task_key] = async(watch_future)
def _handle_builder_expiration(self, etcd_result):
if etcd_result.action == EtcdAction.EXPIRE:
# Handle the expiration
logger.debug('Builder expired, clean up the old build node')
job_metadata = json.loads(etcd_result._prev_node.value)
if 'builder_id' in job_metadata:
logger.info('Terminating expired build node.')
async(self._executor.stop_builder(job_metadata['builder_id']))
def _handle_realm_change(self, etcd_result):
if etcd_result.action == EtcdAction.CREATE:
# We must listen on the realm created by ourselves or another worker
realm_spec = json.loads(etcd_result.value)
self._register_realm(realm_spec)
elif etcd_result.action == EtcdAction.DELETE or etcd_result.action == EtcdAction.EXPIRE:
# We must stop listening for new connections on the specified realm, if we did not get the
# connection
realm_spec = json.loads(etcd_result._prev_node.value)
build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
component = self._job_uuid_to_component.pop(build_job.job_details['build_uuid'], None)
if component is not None:
# We were not the manager which the worker connected to, remove the bookkeeping for it
logger.debug('Unregistering unused component on realm: %s', realm_spec['realm'])
del self._component_to_job[component]
del self._component_to_builder[component]
self.unregister_component(component)
else:
logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)
def _register_realm(self, realm_spec):
logger.debug('Registering realm with manager: %s', realm_spec['realm'])
component = self.register_component(realm_spec['realm'], BuildComponent,
token=realm_spec['token'])
build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
self._component_to_job[component] = build_job
self._component_to_builder[component] = realm_spec['builder_id']
self._job_uuid_to_component[build_job.job_details['build_uuid']] = component
@coroutine
def _register_existing_realms(self):
try:
all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))
for realm in all_realms.children:
if not realm.dir:
self._register_realm(json.loads(realm.value))
except KeyError:
# no realms have been registered yet
pass
def initialize(self, manager_config):
logger.debug('Calling initialize')
self._manager_config = manager_config
executor_klass = self._executors.get(manager_config.get('EXECUTOR', ''), PopenExecutor)
self._executor = executor_klass(manager_config.get('EXECUTOR_CONFIG', {}),
self.manager_hostname)
etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
etcd_port = self._manager_config.get('ETCD_PORT', 2379)
etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)
worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
self._async_thread_executor = ThreadPoolExecutor(worker_threads)
self._etcd_client = AsyncWrapper(self._etcd_client_klass(host=etcd_host, port=etcd_port,
cert=etcd_auth, ca_cert=etcd_ca_cert,
protocol=etcd_protocol),
executor=self._async_thread_executor)
self._etcd_builder_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
self._watch_etcd(self._etcd_builder_prefix, self._handle_builder_expiration)
self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change)
# Load components for all realms currently known to the cluster
async(self._register_existing_realms())
def setup_time(self):
setup_time = self._manager_config.get('MACHINE_SETUP_TIME', 300)
return setup_time
def shutdown(self):
logger.debug('Shutting down worker.')
self._shutting_down = True
for (etcd_key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', etcd_key)
task.cancel()
if self._async_thread_executor is not None:
logger.debug('Shutting down thread pool executor.')
self._async_thread_executor.shutdown()
@coroutine
def schedule(self, build_job):
build_uuid = build_job.job_details['build_uuid']
logger.debug('Calling schedule with job: %s', build_uuid)
# Check if there are worker slots available by checking the number of jobs in etcd
allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
try:
building = yield From(self._etcd_client.read(self._etcd_builder_prefix, recursive=True))
workers_alive = sum(1 for child in building.children if not child.dir)
except KeyError:
workers_alive = 0
logger.debug('Total jobs: %s', workers_alive)
if workers_alive >= allowed_worker_count:
logger.info('Too many workers alive, unable to start new worker. %s >= %s', workers_alive,
allowed_worker_count)
raise Return(False)
job_key = self._etcd_job_key(build_job)
# First try to take a lock for this job, meaning we will be responsible for its lifeline
realm = str(uuid.uuid4())
token = str(uuid.uuid4())
ttl = self.setup_time()
expiration = datetime.utcnow() + timedelta(seconds=ttl)
machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200)
max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration)
payload = {
'expiration': calendar.timegm(expiration.timetuple()),
'max_expiration': calendar.timegm(max_expiration.timetuple()),
}
try:
yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=False, ttl=ttl))
except KeyError:
# The job was already taken by someone else, we are probably a retry
logger.error('Job already exists in etcd, are timeouts misconfigured or is the queue broken?')
raise Return(False)
logger.debug('Starting builder with executor: %s', self._executor)
builder_id = yield From(self._executor.start_builder(realm, token, build_uuid))
# Store the builder in etcd associated with the job id
payload['builder_id'] = builder_id
yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=True, ttl=ttl))
# Store the realm spec which will allow any manager to accept this builder when it connects
realm_spec = json.dumps({
'realm': realm,
'token': token,
'builder_id': builder_id,
'job_queue_item': build_job.job_item,
})
try:
yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
ttl=ttl))
except KeyError:
logger.error('Realm already exists in etcd. UUID collision or something is very very wrong.')
raise Return(False)
raise Return(True)
@coroutine
def build_component_ready(self, build_component):
try:
# Clean up the bookkeeping for allowing any manager to take the job
job = self._component_to_job.pop(build_component)
del self._job_uuid_to_component[job.job_details['build_uuid']]
yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))
logger.debug('Sending build %s to newly ready component on realm %s',
job.job_details['build_uuid'], build_component.builder_realm)
yield From(build_component.start_build(job))
except KeyError:
logger.debug('Builder is asking for more work, but work already completed')
def build_component_disposed(self, build_component, timed_out):
logger.debug('Calling build_component_disposed.')
# TODO make it so that I don't have to unregister the component if it timed out
self.unregister_component(build_component)
@coroutine
def job_completed(self, build_job, job_status, build_component):
logger.debug('Calling job_completed with status: %s', job_status)
# Kill the ephemeral builder
yield From(self._executor.stop_builder(self._component_to_builder.pop(build_component)))
# Release the lock in etcd
job_key = self._etcd_job_key(build_job)
yield From(self._etcd_client.delete(job_key))
self.job_complete_callback(build_job, job_status)
@coroutine
def job_heartbeat(self, build_job):
# Extend the deadline in etcd
job_key = self._etcd_job_key(build_job)
build_job_metadata_response = yield From(self._etcd_client.read(job_key))
build_job_metadata = json.loads(build_job_metadata_response.value)
max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
max_expiration_remaining = max_expiration - datetime.utcnow()
max_expiration_sec = max(0, int(max_expiration_remaining.total_seconds()))
ttl = min(self.heartbeat_period_sec * 2, max_expiration_sec)
new_expiration = datetime.utcnow() + timedelta(seconds=ttl)
payload = {
'expiration': calendar.timegm(new_expiration.timetuple()),
'builder_id': build_job_metadata['builder_id'],
'max_expiration': build_job_metadata['max_expiration'],
}
yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=ttl))
self.job_heartbeat_callback(build_job)
def _etcd_job_key(self, build_job):
""" Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_builder_prefix, build_job.job_details['build_uuid'])
def _etcd_realm_key(self, realm):
""" Create a key which is used to track an incoming connection on a realm.
"""
return os.path.join(self._etcd_realm_prefix, realm)
def num_workers(self):
""" Return the number of workers we're managing locally.
"""
return len(self._component_to_builder)
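
The heartbeat bookkeeping in job_heartbeat reduces to one computation: the refreshed TTL is twice the heartbeat period, clamped so the etcd key can never outlive the machine's absolute max_expiration deadline. Isolated for clarity (next_ttl is a hypothetical helper, not part of the diff):

from datetime import datetime

HEARTBEAT_PERIOD_SEC = 30

def next_ttl(max_expiration, now=None):
    # Twice the heartbeat period, but never past the hard deadline.
    now = now or datetime.utcnow()
    remaining = max(0, int((max_expiration - now).total_seconds()))
    return min(HEARTBEAT_PERIOD_SEC * 2, remaining)

# With 45 seconds left before max_expiration this returns 45, not 60,
# so the key expires together with the machine's hard deadline.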

View file

@@ -0,0 +1,237 @@
import logging
import os
import uuid
import threading
import boto.ec2
import requests
import cachetools
from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop
from functools import partial
from buildman.asyncutil import AsyncWrapper
logger = logging.getLogger(__name__)
ONE_HOUR = 60*60
ENV = Environment(loader=FileSystemLoader('buildman/templates'))
TEMPLATE = ENV.get_template('cloudconfig.yaml')
class ExecutorException(Exception):
""" Exception raised when there is a problem starting or stopping a builder.
"""
pass
class BuilderExecutor(object):
def __init__(self, executor_config, manager_hostname):
self.executor_config = executor_config
self.manager_hostname = manager_hostname
""" Interface which can be plugged into the EphemeralNodeManager to provide a strategy for
starting and stopping builders.
"""
@coroutine
def start_builder(self, realm, token, build_uuid):
""" Create a builder with the specified config. Returns a unique id which can be used to manage
the builder.
"""
raise NotImplementedError
@coroutine
def stop_builder(self, builder_id):
""" Stop a builder which is currently running.
"""
raise NotImplementedError
def get_manager_websocket_url(self):
return 'ws://{0}:'
def generate_cloud_config(self, realm, token, coreos_channel, manager_hostname,
quay_username=None, quay_password=None):
if quay_username is None:
quay_username = self.executor_config['QUAY_USERNAME']
if quay_password is None:
quay_password = self.executor_config['QUAY_PASSWORD']
return TEMPLATE.render(
realm=realm,
token=token,
quay_username=quay_username,
quay_password=quay_password,
manager_hostname=manager_hostname,
coreos_channel=coreos_channel,
worker_tag=self.executor_config['WORKER_TAG'],
)
class EC2Executor(BuilderExecutor):
""" Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud
providers.
"""
COREOS_STACK_URL = 'http://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_hvm.txt'
def __init__(self, *args, **kwargs):
self._loop = get_event_loop()
super(EC2Executor, self).__init__(*args, **kwargs)
def _get_conn(self):
""" Creates an ec2 connection which can be used to manage instances.
"""
return AsyncWrapper(boto.ec2.connect_to_region(
self.executor_config['EC2_REGION'],
aws_access_key_id=self.executor_config['AWS_ACCESS_KEY'],
aws_secret_access_key=self.executor_config['AWS_SECRET_KEY'],
))
@classmethod
@cachetools.ttl_cache(ttl=ONE_HOUR)
def _get_coreos_ami(cls, ec2_region, coreos_channel):
""" Retrieve the CoreOS AMI id from the canonical listing.
"""
stack_list_string = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).text
stack_amis = dict([stack.split('=') for stack in stack_list_string.split('|')])
return stack_amis[ec2_region]
@coroutine
def start_builder(self, realm, token, build_uuid):
region = self.executor_config['EC2_REGION']
channel = self.executor_config.get('COREOS_CHANNEL', 'stable')
get_ami_callable = partial(self._get_coreos_ami, region, channel)
coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable))
user_data = self.generate_cloud_config(realm, token, channel, self.manager_hostname)
logger.debug('Generated cloud config: %s', user_data)
ec2_conn = self._get_conn()
ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType(
size=32,
volume_type='gp2',
delete_on_termination=True,
)
block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping()
block_devices['/dev/xvda'] = ssd_root_ebs
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'],
groups=self.executor_config['EC2_SECURITY_GROUP_IDS'],
associate_public_ip_address=True,
)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
reservation = yield From(ec2_conn.run_instances(
coreos_ami,
instance_type=self.executor_config['EC2_INSTANCE_TYPE'],
key_name=self.executor_config.get('EC2_KEY_NAME', None),
user_data=user_data,
instance_initiated_shutdown_behavior='terminate',
block_device_map=block_devices,
network_interfaces=interfaces,
))
if not reservation.instances:
raise ExecutorException('Unable to spawn builder instance.')
elif len(reservation.instances) != 1:
raise ExecutorException('EC2 started wrong number of instances!')
launched = AsyncWrapper(reservation.instances[0])
yield From(launched.add_tags({
'Name': 'Quay Ephemeral Builder',
'Realm': realm,
'Token': token,
'BuildUUID': build_uuid,
}))
raise Return(launched.id)
@coroutine
def stop_builder(self, builder_id):
ec2_conn = self._get_conn()
terminated_instances = yield From(ec2_conn.terminate_instances([builder_id]))
if builder_id not in [si.id for si in terminated_instances]:
raise ExecutorException('Unable to terminate instance: %s' % builder_id)
class PopenExecutor(BuilderExecutor):
""" Implementation of BuilderExecutor which uses Popen to fork a quay-builder process.
"""
def __init__(self, executor_config, manager_hostname):
self._jobs = {}
super(PopenExecutor, self).__init__(executor_config, manager_hostname)
""" Executor which uses Popen to fork a quay-builder process.
"""
@coroutine
def start_builder(self, realm, token, build_uuid):
# Now start a machine for this job, adding the machine id to the etcd information
logger.debug('Forking process for build')
import subprocess
builder_env = {
'TOKEN': token,
'REALM': realm,
'ENDPOINT': 'ws://localhost:8787',
'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''),
'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''),
'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''),
}
logpipe = LogPipe(logging.INFO)
spawned = subprocess.Popen('/Users/jake/bin/quay-builder', stdout=logpipe, stderr=logpipe,
env=builder_env)
builder_id = str(uuid.uuid4())
self._jobs[builder_id] = (spawned, logpipe)
logger.debug('Builder spawned with id: %s', builder_id)
raise Return(builder_id)
@coroutine
def stop_builder(self, builder_id):
if builder_id not in self._jobs:
raise ExecutorException('Builder id not being tracked by executor.')
logger.debug('Killing builder with id: %s', builder_id)
spawned, logpipe = self._jobs[builder_id]
if spawned.poll() is None:
spawned.kill()
logpipe.close()
class LogPipe(threading.Thread):
""" Adapted from http://codereview.stackexchange.com/a/17959
"""
def __init__(self, level):
"""Setup the object with a logger and a loglevel
and start the thread
"""
threading.Thread.__init__(self)
self.daemon = False
self.level = level
self.fd_read, self.fd_write = os.pipe()
self.pipe_reader = os.fdopen(self.fd_read)
self.start()
def fileno(self):
"""Return the write file descriptor of the pipe
"""
return self.fd_write
def run(self):
"""Run the thread, logging everything.
"""
for line in iter(self.pipe_reader.readline, ''):
logging.log(self.level, line.strip('\n'))
self.pipe_reader.close()
def close(self):
"""Close the write end of the pipe.
"""
os.close(self.fd_write)
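
A short usage sketch for LogPipe, assuming the class above is importable: since it exposes fileno(), it can be passed directly as a subprocess's stdout/stderr, and the reader thread forwards each line to the logging module.

import logging
import subprocess

logging.basicConfig(level=logging.INFO)

pipe = LogPipe(logging.INFO)
proc = subprocess.Popen(['echo', 'hello from the builder'],
                        stdout=pipe, stderr=pipe)
proc.wait()
pipe.close()  # close the write end so the reader thread can finish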

View file

@@ -12,7 +12,9 @@ from trollius.coroutines import From
from datetime import timedelta
from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
from data import database
from data.queue import WorkQueue
from app import app
logger = logging.getLogger(__name__)
@@ -21,8 +23,7 @@ TIMEOUT_PERIOD_MINUTES = 20
JOB_TIMEOUT_SECONDS = 300
MINIMUM_JOB_EXTENSION = timedelta(minutes=2)
WEBSOCKET_PORT = 8787
CONTROLLER_PORT = 8686
HEARTBEAT_PERIOD_SEC = 30
class BuildJobResult(object):
""" Build job result enum """
@@ -34,14 +35,15 @@ class BuilderServer(object):
""" Server which handles both HTTP and WAMP requests, managing the full state of the build
controller.
"""
def __init__(self, server_hostname, queue, build_logs, user_files, lifecycle_manager_klass):
def __init__(self, registry_hostname, queue, build_logs, user_files, lifecycle_manager_klass,
lifecycle_manager_config, manager_hostname):
self._loop = None
self._current_status = 'starting'
self._current_components = []
self._job_count = 0
self._session_factory = RouterSessionFactory(RouterFactory())
self._server_hostname = server_hostname
self._registry_hostname = registry_hostname
self._queue = queue
self._build_logs = build_logs
self._user_files = user_files
@@ -49,8 +51,11 @@ class BuilderServer(object):
self._register_component,
self._unregister_component,
self._job_heartbeat,
self._job_complete
self._job_complete,
manager_hostname,
HEARTBEAT_PERIOD_SEC,
)
self._lifecycle_manager_config = lifecycle_manager_config
self._shutdown_event = Event()
self._current_status = 'running'
@@ -67,18 +72,17 @@ class BuilderServer(object):
self._controller_app = controller_app
def run(self, host, ssl=None):
def run(self, host, websocket_port, controller_port, ssl=None):
logger.debug('Initializing the lifecycle manager')
self._lifecycle_manager.initialize()
self._lifecycle_manager.initialize(self._lifecycle_manager_config)
logger.debug('Initializing all members of the event loop')
loop = trollius.get_event_loop()
trollius.Task(self._initialize(loop, host, ssl))
logger.debug('Starting server on port %s, with controller on port %s', WEBSOCKET_PORT,
CONTROLLER_PORT)
logger.debug('Starting server on port %s, with controller on port %s', websocket_port,
controller_port)
try:
loop.run_forever()
loop.run_until_complete(self._initialize(loop, host, websocket_port, controller_port, ssl))
except KeyboardInterrupt:
pass
finally:
@@ -102,7 +106,7 @@ class BuilderServer(object):
component.parent_manager = self._lifecycle_manager
component.build_logs = self._build_logs
component.user_files = self._user_files
component.server_hostname = self._server_hostname
component.registry_hostname = self._registry_hostname
self._current_components.append(component)
self._session_factory.add(component)
@@ -116,32 +120,32 @@ class BuilderServer(object):
self._session_factory.remove(component)
def _job_heartbeat(self, build_job):
WorkQueue.extend_processing(build_job.job_item(), seconds_from_now=JOB_TIMEOUT_SECONDS,
retry_count=1, minimum_extension=MINIMUM_JOB_EXTENSION)
self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS,
minimum_extension=MINIMUM_JOB_EXTENSION)
def _job_complete(self, build_job, job_status):
if job_status == BuildJobResult.INCOMPLETE:
self._queue.incomplete(build_job.job_item(), restore_retry=True, retry_after=30)
elif job_status == BuildJobResult.ERROR:
self._queue.incomplete(build_job.job_item(), restore_retry=False)
self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
else:
self._queue.complete(build_job.job_item())
self._queue.complete(build_job.job_item)
self._job_count = self._job_count - 1
if self._current_status == 'shutting_down' and not self._job_count:
self._shutdown_event.set()
# TODO(jschorr): check for work here?
@trollius.coroutine
def _work_checker(self):
while self._current_status == 'running':
logger.debug('Checking for more work for %d active workers', self._lifecycle_manager.num_workers())
with database.CloseForLongOperation(app.config):
yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
logger.debug('Checking for more work for %d active workers',
self._lifecycle_manager.num_workers())
job_item = self._queue.get(processing_time=self._lifecycle_manager.setup_time())
if job_item is None:
logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT)
yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
continue
try:
@@ -149,20 +153,21 @@ class BuilderServer(object):
except BuildJobLoadException as irbe:
logger.exception(irbe)
self._queue.incomplete(job_item, restore_retry=False)
continue
logger.debug('Build job found. Checking for an available worker.')
if self._lifecycle_manager.schedule(build_job, self._loop):
scheduled = yield From(self._lifecycle_manager.schedule(build_job))
if scheduled:
self._job_count = self._job_count + 1
logger.debug('Build job scheduled. Running: %s', self._job_count)
else:
logger.debug('All workers are busy. Requeuing.')
self._queue.incomplete(job_item, restore_retry=True, retry_after=0)
yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
@trollius.coroutine
def _initialize(self, loop, host, ssl=None):
def _initialize(self, loop, host, websocket_port, controller_port, ssl=None):
self._loop = loop
# Create the WAMP server.
@@ -170,8 +175,8 @@ class BuilderServer(object):
transport_factory.setProtocolOptions(failByDrop=True)
# Initialize the controller server and the WAMP server
create_wsgi_server(self._controller_app, loop=loop, host=host, port=CONTROLLER_PORT, ssl=ssl)
yield From(loop.create_server(transport_factory, host, WEBSOCKET_PORT, ssl=ssl))
create_wsgi_server(self._controller_app, loop=loop, host=host, port=controller_port, ssl=ssl)
yield From(loop.create_server(transport_factory, host, websocket_port, ssl=ssl))
# Initialize the work queue checker.
yield From(self._work_checker())
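
The reshaped polling loop now sleeps first, pops a queue item, and asks the coroutine-based lifecycle manager to schedule it, requeuing with the retry restored when every worker is busy. Its skeleton, stripped of logging and error handling (a sketch, not the literal method):

@trollius.coroutine
def _work_checker_sketch(self):
    while self._current_status == 'running':
        yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
        job_item = self._queue.get(processing_time=self._lifecycle_manager.setup_time())
        if job_item is None:
            continue
        scheduled = yield From(self._lifecycle_manager.schedule(BuildJob(job_item)))
        if scheduled:
            self._job_count += 1
        else:
            self._queue.incomplete(job_item, restore_retry=True, retry_after=0)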

View file

@@ -0,0 +1,35 @@
#cloud-config
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz Joey's Mac
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 jimmyzelinskie
write_files:
- path: /root/overrides.list
permission: '0644'
content: |
REALM={{ realm }}
TOKEN={{ token }}
SERVER=wss://{{ manager_hostname }}
coreos:
update:
reboot-strategy: off
group: {{ coreos_channel }}
units:
- name: quay-builder.service
command: start
content: |
[Unit]
Description=Quay builder container
Author=Jake Moshenko
After=docker.service
[Service]
TimeoutStartSec=600
TimeoutStopSec=2000
ExecStartPre=/usr/bin/docker login -u {{ quay_username }} -p {{ quay_password }} -e unused quay.io
ExecStart=/usr/bin/docker run --rm --net=host --name quay-builder --privileged --env-file /root/overrides.list -v /var/run/docker.sock:/var/run/docker.sock -v /usr/share/ca-certificates:/etc/ssl/certs quay.io/coreos/registry-build-worker:{{ worker_tag }}
ExecStop=/usr/bin/docker stop quay-builder
ExecStopPost=/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff"

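The `{{ realm }}`, `{{ token }}` and similar placeholders above imply this cloud-config is rendered server-side before being handed to the provider. A sketch of how such a template could be filled in with Jinja2; the variable values here are invented for illustration:

import jinja2

CLOUD_CONFIG_TEMPLATE = """\
write_files:
- path: /root/overrides.list
  content: |
    REALM={{ realm }}
    TOKEN={{ token }}
    SERVER=wss://{{ manager_hostname }}
"""

rendered = jinja2.Template(CLOUD_CONFIG_TEMPLATE).render(
    realm='builder-realm-1',              # hypothetical values
    token='registration-token',
    manager_hostname='quay.example.com',
)
print(rendered)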
View file

@ -1,2 +0,0 @@
#!/bin/sh
exec svlogd /var/log/dockerfilebuild/

View file

@ -1,6 +0,0 @@
#! /bin/bash
sv start tutumdocker || exit 1
cd /
venv/bin/python -m workers.dockerfilebuild

View file

@ -1,2 +0,0 @@
#!/bin/sh
exec svlogd /var/log/tutumdocker/

View file

@ -1,96 +0,0 @@
#!/bin/bash
# First, make sure that cgroups are mounted correctly.
CGROUP=/sys/fs/cgroup
[ -d $CGROUP ] ||
mkdir $CGROUP
mountpoint -q $CGROUP ||
mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
echo "Could not make a tmpfs mount. Did you use -privileged?"
exit 1
}
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
then
mount -t securityfs none /sys/kernel/security || {
echo "Could not mount /sys/kernel/security."
echo "AppArmor detection and -privileged mode might break."
}
fi
# Mount the cgroup hierarchies exactly as they are in the parent system.
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
do
[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
mountpoint -q $CGROUP/$SUBSYS ||
mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
# The two following sections address a bug which manifests itself
# by a cryptic "lxc-start: no ns_cgroup option specified" when
# trying to start containers within a container.
# The bug seems to appear when the cgroup hierarchies are not
# mounted on the exact same directories in the host, and in the
# container.
# Named, control-less cgroups are mounted with "-o name=foo"
# (and appear as such under /proc/<pid>/cgroup) but are usually
# mounted on a directory named "foo" (without the "name=" prefix).
# Systemd and OpenRC (and possibly others) both create such a
# cgroup. To avoid the aforementioned bug, we symlink "foo" to
# "name=foo". This shouldn't have any adverse effect.
echo $SUBSYS | grep -q ^name= && {
NAME=$(echo $SUBSYS | sed s/^name=//)
ln -s $SUBSYS $CGROUP/$NAME
}
# Likewise, on at least one system, it has been reported that
# systemd would mount the CPU and CPU accounting controllers
# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
# but on a directory called "cpu,cpuacct" (note the inversion
# in the order of the groups). This tries to work around it.
[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
done
# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
grep -q :devices: /proc/1/cgroup ||
echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
grep -qw devices /proc/1/cgroup ||
echo "WARNING: it looks like the 'devices' cgroup is not mounted."
# Now, close extraneous file descriptors.
pushd /proc/self/fd >/dev/null
for FD in *
do
case "$FD" in
# Keep stdin/stdout/stderr
[012])
;;
# Nuke everything else
*)
eval exec "$FD>&-"
;;
esac
done
popd >/dev/null
# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start.
rm -rf /var/run/docker.pid
chmod 777 /var/lib/lxc
chmod 777 /var/lib/docker
# If we were given a PORT environment variable, start as a simple daemon;
# otherwise, spawn a shell as well
if [ "$PORT" ]
then
exec docker -d -H 0.0.0.0:$PORT
else
docker -d -D -e lxc 2>&1
fi

View file

@ -22,4 +22,20 @@ http {
ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
ssl_prefer_server_ciphers on;
}
server {
include proxy-protocol.conf;
include proxy-server-base.conf;
listen 8443 default proxy_protocol;
ssl on;
ssl_certificate ./stack/ssl.cert;
ssl_certificate_key ./stack/ssl.key;
ssl_session_timeout 5m;
ssl_protocols SSLv3 TLSv1;
ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
ssl_prefer_server_ciphers on;
}
}

8 conf/proxy-protocol.conf Normal file
View file

@ -0,0 +1,8 @@
# vim: ft=nginx
set_real_ip_from 0.0.0.0/0;
real_ip_header proxy_protocol;
log_format elb_pp '$proxy_protocol_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
access_log /var/log/nginx/nginx.access.log elb_pp;

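Since the 8443 listener above is declared with `proxy_protocol`, plain clients must prefix the connection with a PROXY protocol v1 header or nginx will reject it. A rough way to poke the listener from Python (host and port are assumptions for a local test):

import socket

# PROXY protocol v1 header, as an ELB or haproxy would prepend it:
# PROXY TCP4 <client ip> <proxy ip> <client port> <dest port>\r\n
header = b'PROXY TCP4 203.0.113.7 10.0.0.1 51234 8443\r\n'

sock = socket.create_connection(('localhost', 8443))  # assumed endpoint
sock.sendall(header)
# Whatever follows is the real client traffic; nginx reports 203.0.113.7
# as $proxy_protocol_addr, which feeds the elb_pp log format above.
sock.close()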
View file

@ -0,0 +1,91 @@
# vim: ft=nginx
client_body_temp_path /var/log/nginx/client_body 1 2;
server_name _;
keepalive_timeout 5;
if ($args ~ "_escaped_fragment_") {
rewrite ^ /snapshot$uri;
}
proxy_set_header X-Forwarded-For $proxy_protocol_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header Transfer-Encoding $http_transfer_encoding;
location / {
proxy_pass http://web_app_server;
limit_req zone=webapp burst=25 nodelay;
}
location /realtime {
proxy_pass http://web_app_server;
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/repositories/ {
proxy_buffering off;
proxy_request_buffering off;
proxy_pass http://registry_app_server;
proxy_read_timeout 2000;
proxy_temp_path /var/log/nginx/proxy_temp 1 2;
client_max_body_size 20G;
limit_req zone=repositories burst=5 nodelay;
}
location /v1/ {
proxy_buffering off;
proxy_request_buffering off;
proxy_pass http://registry_app_server;
proxy_read_timeout 2000;
proxy_temp_path /var/log/nginx/proxy_temp 1 2;
client_max_body_size 20G;
}
location /c1/ {
proxy_buffering off;
proxy_request_buffering off;
proxy_pass http://verbs_app_server;
proxy_read_timeout 2000;
proxy_temp_path /var/log/nginx/proxy_temp 1 2;
limit_req zone=api burst=5 nodelay;
}
location /static/ {
# checks for static file, if not found proxy to app
alias /static/;
}
location /v1/_ping {
add_header Content-Type text/plain;
add_header X-Docker-Registry-Version 0.6.0;
add_header X-Docker-Registry-Standalone 0;
return 200 'true';
}
location ~ ^/b1/controller(/?)(.*) {
proxy_pass http://build_manager_controller_server/$2;
proxy_read_timeout 2000;
}
location ~ ^/b1/socket(/?)(.*) {
proxy_pass http://build_manager_websocket_server/$2;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}

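The two `/b1/...` locations above strip their prefix via the second capture group before proxying. A small Python restatement of that rewrite, useful for sanity-checking which upstream path a request lands on (upstream names are taken from the config; the function itself is illustrative only):

import re

def upstream_url(request_path):
    # Mirrors: location ~ ^/b1/controller(/?)(.*)  ->  proxy_pass .../$2
    m = re.match(r'^/b1/controller(/?)(.*)', request_path)
    if m:
        return 'http://build_manager_controller_server/' + m.group(2)
    m = re.match(r'^/b1/socket(/?)(.*)', request_path)
    if m:
        return 'ws://build_manager_websocket_server/' + m.group(2)
    return None

print(upstream_url('/b1/controller/status'))  # .../status
print(upstream_url('/b1/socket/realm-abc'))   # ws://.../realm-abc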
View file

@ -1,6 +1,7 @@
# vim: ft=nginx
limit_req_zone $binary_remote_addr zone=webapp:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=api:10m rate=1r/s;
limit_req_zone $proxy_protocol_addr zone=webapp:10m rate=25r/s;
limit_req_zone $proxy_protocol_addr zone=repositories:10m rate=1r/s;
limit_req_zone $proxy_protocol_addr zone=api:10m rate=1r/s;
limit_req_status 429;
limit_req_log_level warn;

View file

@ -21,8 +21,6 @@ proxy_set_header Transfer-Encoding $http_transfer_encoding;
location / {
proxy_pass http://web_app_server;
#limit_req zone=webapp burst=10 nodelay;
}
location /realtime {
@ -41,8 +39,6 @@ location /v1/ {
proxy_temp_path /var/log/nginx/proxy_temp 1 2;
client_max_body_size 20G;
#limit_req zone=api burst=5 nodelay;
}
location /c1/ {
@ -53,8 +49,6 @@ location /c1/ {
proxy_pass http://verbs_app_server;
proxy_read_timeout 2000;
proxy_temp_path /var/log/nginx/proxy_temp 1 2;
#limit_req zone=api burst=5 nodelay;
}
location /static/ {

View file

@ -29,6 +29,16 @@ SCHEME_RANDOM_FUNCTION = {
'postgresql+psycopg2': fn.Random,
}
def real_for_update(query):
return query.for_update()
def null_for_update(query):
return query
SCHEME_SPECIALIZED_FOR_UPDATE = {
'sqlite': null_for_update,
}
class CallableProxy(Proxy):
def __call__(self, *args, **kwargs):
if self.obj is None:
@ -68,6 +78,7 @@ class UseThenDisconnect(object):
db = Proxy()
read_slave = Proxy()
db_random_func = CallableProxy()
db_for_update = CallableProxy()
def validate_database_url(url, connect_timeout=5):
@ -105,6 +116,8 @@ def configure(config_object):
parsed_write_uri = make_url(write_db_uri)
db_random_func.initialize(SCHEME_RANDOM_FUNCTION[parsed_write_uri.drivername])
db_for_update.initialize(SCHEME_SPECIALIZED_FOR_UPDATE.get(parsed_write_uri.drivername,
real_for_update))
read_slave_uri = config_object.get('DB_READ_SLAVE_URI', None)
if read_slave_uri is not None:

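The hunk above dispatches `FOR UPDATE` on the driver name: real row locking everywhere except SQLite, which has no `SELECT ... FOR UPDATE`. A condensed, runnable restatement of that dispatch (the peewee `query.for_update()` call is only reached with a real query object):

def real_for_update(query):
    return query.for_update()   # peewee emits SELECT ... FOR UPDATE

def null_for_update(query):
    return query                # SQLite: no row-level locks to request

SCHEME_SPECIALIZED_FOR_UPDATE = {'sqlite': null_for_update}

def pick_for_update(drivername):
    return SCHEME_SPECIALIZED_FOR_UPDATE.get(drivername, real_for_update)

print(pick_for_update('sqlite') is null_for_update)  # True
print(pick_for_update('mysql') is real_for_update)   # True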
View file

@ -14,7 +14,7 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
ExternalNotificationEvent, ExternalNotificationMethod,
RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
DerivedImageStorage, ImageStorageTransformation, random_string_generator,
db, BUILD_PHASE, QuayUserField, validate_database_url)
db, BUILD_PHASE, QuayUserField, validate_database_url, db_for_update)
from peewee import JOIN_LEFT_OUTER, fn
from util.validation import (validate_username, validate_email, validate_password,
INVALID_PASSWORD_MESSAGE)
@ -295,6 +295,9 @@ def delete_robot(robot_username):
def _list_entity_robots(entity_name):
""" Return the list of robots for the specified entity. This MUST return a query, not a
materialized list so that callers can use db_for_update.
"""
return (User
.select()
.join(FederatedLogin)
@ -903,14 +906,17 @@ def change_password(user, new_password):
delete_notifications_by_kind(user, 'password_required')
def change_username(user, new_username):
def change_username(user_id, new_username):
(username_valid, username_issue) = validate_username(new_username)
if not username_valid:
raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Reload the user for update
user = db_for_update(User.select().where(User.id == user_id)).get()
# Rename the robots
for robot in _list_entity_robots(user.username):
for robot in db_for_update(_list_entity_robots(user.username)):
_, robot_shortname = parse_robot_username(robot.username)
new_robot_name = format_robot_username(new_username, robot_shortname)
robot.username = new_robot_name
@ -1251,9 +1257,9 @@ def _find_or_link_image(existing_image, repository, username, translations, pref
storage.locations = {placement.location.name
for placement in storage.imagestorageplacement_set}
new_image = Image.create(docker_image_id=existing_image.docker_image_id,
repository=repository, storage=storage,
ancestors=new_image_ancestry)
new_image = Image.create(docker_image_id=existing_image.docker_image_id,
repository=repository, storage=storage,
ancestors=new_image_ancestry)
logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
translations[existing_image.id] = new_image.id
@ -1403,7 +1409,7 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
Image.docker_image_id == docker_image_id))
try:
fetched = query.get()
fetched = db_for_update(query).get()
except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository')

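The docstring above insists `_list_entity_robots` return a query rather than a materialized list because `db_for_update` can only add locking to a query that has not yet executed. A toy model of the distinction (the `Query` class here is a stand-in, not peewee):

class Query(object):
    """Stand-in for a lazy peewee query: nothing runs until iterated."""
    def __init__(self, rows, locked=False):
        self.rows, self.locked = rows, locked
    def for_update(self):
        return Query(self.rows, locked=True)   # still lazy, now locking
    def __iter__(self):
        return iter(self.rows)

def db_for_update(query):
    return query.for_update()

def _list_entity_robots(entity_name):
    # Returns a Query, never a list, so callers may still add FOR UPDATE.
    return Query(['%s+builder' % entity_name, '%s+deploy' % entity_name])

locked = db_for_update(_list_entity_robots('acme'))   # hypothetical entity
print(locked.locked, list(locked))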
View file

@ -1,6 +1,6 @@
from datetime import datetime, timedelta
from data.database import QueueItem, db
from data.database import QueueItem, db, db_for_update
from util.morecollections import AttrDict
@ -31,16 +31,24 @@ class WorkQueue(object):
QueueItem.processing_expires > now,
QueueItem.queue_name ** name_match_query))
def _available_jobs(self, now, name_match_query, running_query):
def _available_jobs(self, now, name_match_query):
return (QueueItem
.select()
.where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now,
((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
QueueItem.retries_remaining > 0, ~(QueueItem.queue_name << running_query)))
QueueItem.retries_remaining > 0))
def _available_jobs_not_running(self, now, name_match_query, running_query):
return (self
._available_jobs(now, name_match_query)
.where(~(QueueItem.queue_name << running_query)))
def _name_match_query(self):
return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)
def _item_by_id_for_update(self, queue_id):
return db_for_update(QueueItem.select().where(QueueItem.id == queue_id)).get()
def update_metrics(self):
if self._reporter is None:
return
@ -52,7 +60,7 @@ class WorkQueue(object):
running_query = self._running_jobs(now, name_match_query)
running_count = running_query.distinct().count()
available_query = self._available_jobs(now, name_match_query, running_query)
available_query = self._available_jobs_not_running(now, name_match_query, running_query)
available_count = available_query.select(QueueItem.queue_name).distinct().count()
self._reporter(self._currently_processing, running_count, running_count + available_count)
@ -78,19 +86,26 @@ class WorkQueue(object):
def get(self, processing_time=300):
"""
Get an available item and mark it as unavailable for the default of five
minutes.
minutes. The result of this method must always be composed of simple
python objects which are JSON serializable for network portability reasons.
"""
now = datetime.utcnow()
name_match_query = self._name_match_query()
with self._transaction_factory(db):
running = self._running_jobs(now, name_match_query)
avail = self._available_jobs(now, name_match_query, running)
running = self._running_jobs(now, name_match_query)
avail = self._available_jobs_not_running(now, name_match_query, running)
item = None
try:
db_item = avail.order_by(QueueItem.id).get()
item = None
try:
db_item_candidate = avail.order_by(QueueItem.id).get()
with self._transaction_factory(db):
still_available_query = (db_for_update(self
._available_jobs(now, name_match_query)
.where(QueueItem.id == db_item_candidate.id)))
db_item = still_available_query.get()
db_item.available = False
db_item.processing_expires = now + timedelta(seconds=processing_time)
db_item.retries_remaining -= 1
@ -102,22 +117,22 @@ class WorkQueue(object):
})
self._currently_processing = True
except QueueItem.DoesNotExist:
self._currently_processing = False
except QueueItem.DoesNotExist:
self._currently_processing = False
# Return a view of the queue item rather than an active db object
return item
# Return a view of the queue item rather than an active db object
return item
def complete(self, completed_item):
with self._transaction_factory(db):
completed_item_obj = QueueItem.get(QueueItem.id == completed_item.id)
completed_item_obj = self._item_by_id_for_update(completed_item.id)
completed_item_obj.delete_instance()
self._currently_processing = False
def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
with self._transaction_factory(db):
retry_date = datetime.utcnow() + timedelta(seconds=retry_after)
incomplete_item_obj = QueueItem.get(QueueItem.id == incomplete_item.id)
incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
incomplete_item_obj.available_after = retry_date
incomplete_item_obj.available = True
@ -127,16 +142,12 @@ class WorkQueue(object):
incomplete_item_obj.save()
self._currently_processing = False
@staticmethod
def extend_processing(queue_item_info, seconds_from_now, retry_count=None,
minimum_extension=MINIMUM_EXTENSION):
queue_item = QueueItem.get(QueueItem.id == queue_item_info.id)
new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION):
with self._transaction_factory(db):
queue_item = self._item_by_id_for_update(item.id)
new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
# Only actually write the new expiration to the db if it moves the expiration some minimum
if new_expiration - queue_item.processing_expires > minimum_extension:
if retry_count is not None:
queue_item.retries_remaining = retry_count
queue_item.processing_expires = new_expiration
queue_item.save()
# Only actually write the new expiration to the db if it moves the expiration some minimum
if new_expiration - queue_item.processing_expires > minimum_extension:
queue_item.processing_expires = new_expiration
queue_item.save()

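The reworked `get()` above scans for a candidate without any lock, then opens a transaction and re-selects that row `FOR UPDATE` before claiming it, so two workers racing for the same item cannot both win. The shape of that double-check, reduced to plain Python with a lock standing in for the transaction:

import threading
from datetime import datetime, timedelta

_lock = threading.Lock()   # stands in for transaction + SELECT ... FOR UPDATE

def claim(items, processing_time=300):
    now = datetime.utcnow()
    # Unlocked scan for the lowest-id available candidate.
    avail = [i for i in items if i['available'] and i['retries_remaining'] > 0]
    if not avail:
        return None
    candidate = min(avail, key=lambda i: i['id'])
    with _lock:
        # Re-check under the lock: another worker may have claimed the
        # item between the scan and the lock acquisition.
        if not candidate['available']:
            return None
        candidate['available'] = False
        candidate['processing_expires'] = now + timedelta(seconds=processing_time)
        candidate['retries_remaining'] -= 1
        return dict(candidate)   # detached copy, not a live DB row

queue = [{'id': 1, 'available': True, 'retries_remaining': 3}]
print(claim(queue)['id'])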
View file

@ -72,8 +72,8 @@ def build_status_view(build_obj, can_write=False):
# minutes. If not, then the build timed out.
if phase != database.BUILD_PHASE.COMPLETE and phase != database.BUILD_PHASE.ERROR:
if status is not None and 'heartbeat' in status and status['heartbeat']:
heartbeat = datetime.datetime.fromtimestamp(status['heartbeat'])
if datetime.datetime.now() - heartbeat > datetime.timedelta(minutes=1):
heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
if datetime.datetime.utcnow() - heartbeat > datetime.timedelta(minutes=1):
phase = database.BUILD_PHASE.INTERNAL_ERROR
logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config)

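The `utcfromtimestamp`/`utcnow` switch above keeps both sides of the heartbeat comparison in UTC; mixing `fromtimestamp` with `utcnow()` (or vice versa) skews the measured age by the server's UTC offset. A quick illustration with an arbitrary epoch value:

import datetime

ts = 1422900000  # arbitrary example epoch timestamp

local = datetime.datetime.fromtimestamp(ts)   # server-local wall clock
utc = datetime.datetime.utcfromtimestamp(ts)  # UTC wall clock

# On a UTC-5 machine this prints a 5-hour gap; comparing a local now()
# against a UTC heartbeat shifts the apparent age by that offset, which
# can falsely trip (or never trip) the one-minute timeout above.
print(local - utc)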
View file

@ -246,7 +246,7 @@ class User(ApiResource):
# Username already used
raise request_error(message='Username is already in use')
model.change_username(user, new_username)
model.change_username(user.id, new_username)
except model.InvalidPasswordException, ex:
raise request_error(exception=ex)

View file

@ -211,7 +211,7 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
dockerfile_build_queue.put([repository.namespace_user.username, repository.name], json.dumps({
'build_uuid': build_request.uuid,
'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
}), retries_remaining=1)
}), retries_remaining=3)
# Add the build to the repo's log.
metadata = {

View file

@ -137,6 +137,10 @@ def get_image_layer(namespace, repository, image_id, headers):
if permission.can() or model.repository_is_public(namespace, repository):
profile.debug('Looking up repo image')
repo_image = model.get_repo_image_extended(namespace, repository, image_id)
if not repo_image:
profile.debug('Image not found')
abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id)
profile.debug('Looking up the layer path')
try:
@ -157,7 +161,7 @@ def get_image_layer(namespace, repository, image_id, headers):
return Response(store.stream_read(repo_image.storage.locations, path), headers=headers)
except (IOError, AttributeError):
profile.debug('Image not found')
profile.exception('Image layer data not found')
abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id)
@ -180,6 +184,7 @@ def put_image_layer(namespace, repository, image_id):
uuid = repo_image.storage.uuid
json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
except (IOError, AttributeError):
profile.exception('Exception when retrieving image data')
abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id)

19 local-setup-osx.sh Executable file
View file

@ -0,0 +1,19 @@
#!/bin/bash
set -e
# Install Docker and C libraries on which Python libraries are dependent
brew update
brew install boot2docker docker libevent libmagic postgresql
# Some OSX installs don't have /usr/include, which is required for finding SASL headers for our LDAP library
if [ ! -e /usr/include ]; then
sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include
fi
# Install Python dependencies
sudo pip install -r requirements.txt
# Put the local testing config in place
git clone git@github.com:coreos-inc/quay-config.git ../quay-config
ln -s ../../quay-config/local conf/stack

View file

@ -40,5 +40,8 @@ git+https://github.com/DevTable/aniso8601-fake.git
git+https://github.com/DevTable/anunidecode.git
git+https://github.com/DevTable/avatar-generator.git
git+https://github.com/DevTable/pygithub.git
git+https://github.com/jplana/python-etcd.git
gipc
pyOpenSSL
cachetools
mock

View file

@ -22,6 +22,7 @@ backports.ssl-match-hostname==3.4.0.2
beautifulsoup4==4.3.2
blinker==1.3
boto==2.35.1
cachetools==1.0.0
docker-py==0.7.1
ecdsa==0.11
futures==2.2.0
@ -35,6 +36,7 @@ itsdangerous==0.24
jsonschema==2.4.0
marisa-trie==0.7
mixpanel-py==3.2.1
mock==1.0.1
paramiko==1.15.2
peewee==2.4.5
psycopg2==2.5.4
@ -62,3 +64,4 @@ git+https://github.com/DevTable/anunidecode.git
git+https://github.com/DevTable/avatar-generator.git
git+https://github.com/DevTable/pygithub.git
git+https://github.com/NateFerrero/oauth2lib.git
git+https://github.com/jplana/python-etcd.git

View file

@ -4,7 +4,7 @@
&equiv;
</button>
<a class="navbar-brand" href="/" target="{{ appLinkTarget() }}">
<span id="quay-logo" style="background-image: url('{{ getEnterpriseLogo() }}')"></span>
<span id="quay-logo" ng-style="{'background-image': 'url(' + getEnterpriseLogo() + ')'}"></span>
</a>
</div>

View file

@ -8,82 +8,99 @@
<meta name="description" content="Privacy policy for Quay - Hosted private docker repository">
{% endblock %}
{% block added_stylesheets %}
<style>
dt.section {
font-variant: small-caps;
font-size: 1.4em;
margin-bottom: 10px;
}
h2 { font-variant: small-caps; }
</style>
{% endblock %}
{% block body_content %}
<div class="container privacy-policy">
<h2>Privacy Policy</h2>
<h2>CoreOS Privacy Policy</h2>
<h4>Last Revised: February 2, 2015</h4>
<p>Welcome to Quay.io from CoreOS, Inc. (“<strong>CoreOS</strong>”, “<strong>we</strong>”, “<strong>us</strong>” or “<strong>our</strong>”).</p>
<p>This privacy policy explains how we collect, use and disclose information about you when you use any of the websites owned or operated by CoreOS (the “<strong>Sites</strong>”) and any of the online products and services that link to this privacy policy (collectively, the “<strong>Services</strong>”) or when you otherwise interact with us. By using any of our Services, you consent to our collection, use and disclosure of your information as described in this privacy policy.</p>
<p>The Services allow users to store, manage, and retrieve container repositories.</p>
<p>We may change this privacy policy from time-to-time. If we make changes, we will notify you by revising the date at the top of the policy and, in some cases, we will provide you with additional notice (such as adding a statement to our homepage or sending you an email notification). We encourage you to review the privacy policy periodically to stay informed about our practices and the ways you can help protect your privacy.</p>
<dl>
<dt>What information do we collect?</dt>
<dt class="section">Collection of Information</dt>
<dt>Information You Provide to Us</dt>
<dd>
We collect information from you when you register on our site or subscribe to the service.
When ordering or registering on our site, as appropriate, you may be asked to enter your: e-mail address, mailing address or credit card information. You may, however, visit the public portion of our site anonymously.
We collect information you directly give us. For example, we collect information about you when you sign up for one of our Services, participate in any interactive features of the Services, fill out a form, give feedback, ideas or submissions about any of the Services, communicate with us via third party social media sites, request customer support or otherwise communicate with us. The types of information we may collect include your email address, username, your credit/debit card information and any other information you choose to provide. For information as to how to restrict the collection of contact information, please see the “<a href="#your-choices">Your Choices</a>” section below. If you choose not to provide certain information, we may not be able to provide certain of our Services to you or certain features of our Services may be unavailable or work differently.
</dd>
<dt>What do we use your information for?</dt>
<dd>Any of the information we collect from you may be used in one of the following ways:
<ul>
<li>To personalize your experience (your information helps us to better respond to your individual needs)</li>
<li>
To improve our website<br>
(we continually strive to improve our website offerings based on the information and feedback we receive from you)</li>
<li>
To improve customer service<br>
(your information helps us to more effectively respond to your customer service requests and support needs)
</li>
<li>
To process transactions<br>
Your information, whether public or private, will not be sold, exchanged, transferred, or given to any other company for any reason whatsoever, without your consent, other than for the express purpose of delivering the purchased product or service requested.
</li>
<li>
To send periodic emails<br>
The email address you provide for order processing may be used to send you information and updates pertaining to your order, in addition to receiving occasional company news, updates, related product or service information, etc.<br>
Note: If at any time you would like to unsubscribe from receiving future emails, we include detailed unsubscribe instructions at the bottom of each email.
</li>
</ul>
</dd>
<dt>Information We Collect Automatically When You Use the Services</dt>
<dd>
When you access or use our Services (or certain portions of the Services), we automatically collect certain information about you. This information includes:
<ul>
<li><strong>Log Information:</strong> We log information about your use of the Services, including the type of device you use, access times, IP address, pages viewed, and the page you visited before navigating to one of our Services. We use this information for analytic and product improvement purposes.</li>
<li><strong>Device Information:</strong> We collect information about the computer you use to access our Services, including the hardware model, operating system and version and unique device identifiers.</li>
<li><strong>Information Collected by Cookies and Other Tracking Technologies:</strong> We use various technologies to collect information, and this may include cookies and web beacons. Cookies are small data files stored on your hard drive or in device memory. Web beacons (also known as “tracking pixels”) are non-visible electronic images. These technologies are used for analytic and product improvement purposes, such as seeing which areas and features of our Services are popular and determining whether an email has been opened and acted upon. For more information about cookies, and how to disable them, please see “<a href="#your-choices">Your Choices</a>” below.</li>
</ul>
</dd>
<dt>How do we protect your information?</dt>
<dd>
We implement a variety of security measures to maintain the safety of your personal information when you place an order or enter, submit, or access your personal information.
We offer the use of a secure server. All supplied sensitive/credit information is transmitted via Secure Socket Layer (SSL) technology and then encrypted into our Payment gateway providers database only to be accessible by those authorized with special access rights to such systems, and are required to keep the information confidential.
After a transaction, your private information (credit cards, social security numbers, financials, etc.) will be kept on file for more than 60 days in order to continue subscription billing.
</dd>
<dt>Do we use cookies?</dt>
<dd>
Yes (cookies are small files that a site or its service provider transfers to your computer's hard drive through your Web browser (if you allow) that enable the site's or service provider's systems to recognize your browser and capture and remember certain information).
We use cookies to understand and save your preferences for future visits and compile aggregate data about site traffic and site interaction so that we can offer better site experiences and tools in the future. We may contract with third-party service providers to assist us in better understanding our site visitors. These service providers are not permitted to use the information collected on our behalf except to help us conduct and improve our business.
</dd>
<dt>Do we disclose any information to outside parties?</dt>
<dd>
We do not sell, trade, or otherwise transfer to outside parties your personally identifiable information. This does not include trusted third parties who assist us in operating our website, conducting our business, or servicing you, so long as those parties agree to keep this information confidential. We may also release your information when we believe release is appropriate to comply with the law, enforce our site policies, or protect our or others' rights, property, or safety. However, non-personally identifiable visitor information may be provided to other parties for marketing, advertising, or other uses.
</dd>
<dt>Third party links</dt>
<dd>
Occasionally, at our discretion, we may include or offer third party products or services on our website. These third party sites have separate and independent privacy policies. We therefore have no responsibility or liability for the content and activities of these linked sites. Nonetheless, we seek to protect the integrity of our site and welcome any feedback about these sites.
</dd>
<dt>California Online Privacy Protection Act Compliance</dt>
<dd>
Because we value your privacy, we have taken the necessary precautions to be in compliance with the California Online Privacy Protection Act. We therefore will not distribute your personal information to outside parties without your consent.
As part of the California Online Privacy Protection Act, all users of our site may make any changes to their information at any time by logging into the service and modifying their Account Settings and Payment Information.
</dd>
<dt>Children's Online Privacy Protection Act Compliance</dt>
<dd>
We are in compliance with the requirements of COPPA (Children's Online Privacy Protection Act); we do not collect any information from anyone under 13 years of age. Our website, products and services are all directed to people who are at least 13 years old.
</dd>
<dt>Terms and Conditions</dt>
<dd>
Please also visit our Terms and Conditions section establishing the use, disclaimers, and limitations of liability governing the use of our website at https://quay.io/tos.
</dd>
<dt>Your Consent</dt>
<dd>
By using our site, you consent to our privacy policy.
</dd>
<dt>Changes to our Privacy Policy</dt>
<dd>
If we decide to change our privacy policy, we will post those changes on this page.
If you have any questions or concerns about our privacy policy, please direct them to the following email address:
<a href="mailto:support@quay.io">support@quay.io</a>
</dd>
<dt>Information We Collect From Other Sources</dt>
<dd>
We may also obtain information from other sources and combine that with information we collect through our Services. For example, if you create or log into your account through a site like Google.com or GitHub.com, we will have access to certain information from that site, such as your name, account information and friends lists, in accordance with the authorization procedures determined by these sites.
</dd>
<dt class="section">Use of Information</dt>
<dd>We may use information about you for various purposes, including to:
<ul>
<li>Provide, deliver, maintain, test and improve our Services;</li>
<li>Send you technical notices, updates, confirmations, security alerts and support and administrative messages;</li>
<li>Respond to your comments, questions and requests and provide customer service;</li>
<li>Communicate with you about products, services, offers, promotions, rewards and events offered by CoreOS and others, and provide news and information we think will be of interest to you;</li>
<li>Monitor and analyze trends, usage and activities in connection with our Services and improve our Services;</li>
<li>Detect, investigate and prevent any suspected breaches of the terms applicable to the use of our Services (including, our Sites); and</li>
<li>Link or combine with information we get from others to help understand your needs and provide you with better service.</li>
</ul>
CoreOS is based in the United States, and the information we collect is governed by U.S. law. By accessing or using any of our Services or otherwise providing information to us, you consent to the processing and transfer of information in and to the U.S. and other countries.
</dd>
<dt class="section">Sharing of Information</dt>
<dd>
We may share information about you as follows or as otherwise described in this Privacy Policy:
<ul>
<li>With vendors, consultants and other service providers who need access to such information to carry out work on our behalf;</li>
<li>In response to a request for information if we believe disclosure is in accordance with any applicable law, regulation or legal process, or as otherwise required by any applicable law, rule or regulation;</li>
<li>If we believe your actions are inconsistent with the spirit or language of our user agreements or policies, or to protect the rights, property and safety of CoreOS or others;</li>
<li>In connection with, or during negotiations of, any financing with respect to CoreOS;</li>
<li>In connection with, or during negotiations of, any merger, sale of CoreOS assets or acquisition of all or a portion of our business to another company; and</li>
<li>With your consent or at your direction, including if we notify you through any of the Services that the information you provide will be shared in a particular manner and you provide such information.</li>
</ul>
We may also share aggregated or anonymized information that does not directly identify you.
</dd>
<dt class="section">Security</dt>
<dd>
We take reasonable measures to help protect information about you from loss, theft, misuse and unauthorized access, disclosure, alteration and destruction.
</dd>
<dt class="section">Analytics Services</dt>
<dd>
We may allow others to provide analytics services in connection with the Services (or portions of the Services). These entities may use cookies, web beacons and other technologies to collect information about your use of the Services and other websites, including your IP address, web browser, pages viewed, time spent on pages, links clicked and conversion information. We and others may use this information to, among other things, analyze and track data, determine the popularity of certain content, personalize the user experience, and better understand your activity.
</dd>
<dt class="section"><a id="your-choices"></a>Your Choices</dt>
<dt>Account Information</dt>
<dd>
If you wish to delete your account, please contact support at <a href="mailto:support@quay.io">support@quay.io</a>. Note that we may retain certain information as required by law or for legitimate business purposes as may be necessary to fulfill the purposes identified in the privacy policy. We may also retain cached or archived copies of information (including, location information) about you for a certain period of time.
</dd>
<dt>Cookies</dt>
<dd>
Most web browsers are set to accept cookies by default. If you prefer, you can usually choose to set your browser to remove or reject browser cookies. Please note that if you choose to remove or reject cookies, this could affect the availability and functionality of certain of the Services.
</dd>
<dt>Promotional Communications</dt>
<dd>
You may opt out of receiving promotional communications from CoreOS by following the instructions in those communications. If you opt out, we may still send you non-promotional communications, such as those about your account or our ongoing business relations.
</dd>
<dt>Contact Us</dt>
<dd>
If you have any questions or concerns about this privacy policy or any privacy issues, please email us at <a href="mailto:partners@coreos.com">partners@coreos.com</a>.
</dd>
</dl>
</div>
{% endblock %}

View file

@ -8,94 +8,164 @@
<meta name="description" content="Terms of service for Quay - Hosted private docker repository">
{% endblock %}
{% block added_stylesheets %}
<style>
ol {
padding-left: 20px;
}
table {
border-width: 0px;
margin-bottom: 20px;
margin-top: 20px;
}
dt {
text-decoration: underline;
font-weight: normal;
}
</style>
{% endblock %}
{% block body_content %}
<div class="tos container">
<h2>Terms of Service</h2>
<p>The following terms and conditions govern all use of the Quay.io website and all content, services and products available at or through the website. The Website is owned and operated by DevTable, LLC. (“DevTable”). The Website is offered subject to your acceptance without modification of all of the terms and conditions contained herein and all other operating rules, policies (including, without limitation, Quay.io's Privacy Policy) and procedures that may be published from time to time on this Site by DevTable (collectively, the “Agreement”).</p>
<p>Please read this Agreement carefully before accessing or using the Website. By accessing or using any part of the web site, you agree to become bound by the terms and conditions of this agreement. If you do not agree to all the terms and conditions of this agreement, then you may not access the Website or use any services. If these terms and conditions are considered an offer by DevTable, acceptance is expressly limited to these terms. The Website is available only to individuals who are at least 13 years old.</p>
<h2>CoreOS Terms of Service</h2>
<h4>Last Revised: February 2, 2015</h4>
<p>These Quay.io Terms of Service (these “<strong>Terms</strong>”) apply to the features and functions provided by CoreOS, Inc. (“<strong>CoreOS</strong>,” “<strong>our</strong>,” or “<strong>we</strong>”) via quay.io (the “<strong>Site</strong>”) (collectively, the “<strong>Services</strong>”). By accessing or using the Services, you agree to be bound by these Terms. If you do not agree to these Terms, do not use any of the Services. The “<strong>Effective Date</strong>” of these Terms is the date you first access any of the Services.</p>
<p>If you are accessing the Services in your capacity as an employee, consultant or agent of a company (or other entity), you represent that you are an employee, consultant or agent of such company (or other entity) and you have the authority to agree (and be legally bound) on behalf of such company (or other entity) to all of the terms and conditions of these Terms.</p>
<p>For the purpose of these Terms, you and, if applicable, such company (or other entity) constitutes “<strong>Customer</strong>” or “<strong>you</strong>”.</p>
<p>CoreOS reserves the right to change or modify any of the terms and conditions contained in these Terms (or any policy or guideline of CoreOS) at any time and in its sole discretion by providing notice that these Terms have been modified. Such notice may be provided by sending an email, posting a notice on the Site, posting the revised Terms on the Site and revising the date at the top of these Terms or such other form of notice as determined by CoreOS. Any changes or modifications will be effective 30 days after providing notice that these Terms have been modified (the “<strong>Notice Period</strong>”). Your continued use of any of the Services following the Notice Period will constitute your acceptance of such changes or modifications. Therefore, you should review these Terms whenever you access the Services and at least every 30 days to make sure that you understand the terms and conditions that will apply to your use of the Services.</p>
<p>These terms form a binding agreement between you and CoreOS.</p>
<ol>
<li>
<strong>Your Quay.io Account.</strong> If you create an account on the Website, you are responsible for maintaining the security of your account, and you are fully responsible for all activities that occur under the account and any other actions taken in connection with the account. You must immediately notify DevTable of any unauthorized uses of your account or any other breaches of security. DevTable will not be liable for any acts or omissions by You, including any damages of any kind incurred as a result of such acts or omissions.
<strong>Privacy</strong>
<p>Please see CoreOS' privacy policy at <a href="/privacy">https://quay.io/privacy</a> for information about how CoreOS collects, uses and discloses information about users of the Site and the Services.</p>
</li>
<li>
<strong>Responsibility of Contributors.</strong> If you share your repository, publish images, code or content, or otherwise make (or allow any third party to make) material available by means of the Website (any such material, “Content”), You are entirely responsible for the content of, and any harm resulting from, that Content. That is the case regardless of whether the Content in question constitutes text, graphics, an audio file, or computer software. By making Content available, you represent and warrant that:
<strong>Registration</strong>
<p>In order to access the Services, you must complete the CoreOS registration form provided via the Site. During the registration process, you must select a CoreOS package which includes: (a) the monthly or annual period during which you can access the Services (the “<strong>Subscription Period</strong>”); and (b) the monthly or annual fee you must pay to CoreOS in exchange for your rights to the Services (the “<strong>Subscription Fees</strong>”). All such information is incorporated into these Terms by reference.</p>
<p>You agree to: (a) provide accurate, current and complete information about you as may be prompted by the registration forms via the Site (“<strong>Registration Data</strong>”); (b) maintain the security of your password; (c) maintain and promptly update the Registration Data, and any other information you provide to CoreOS, to keep it accurate, current and complete; and (d) accept all risks of unauthorized access to the Registration Data and any other information you provide to CoreOS.</p>
<p>You are responsible for safeguarding the password that you use to access the Services, and you agree to be fully responsible for activities or transactions that relate to your account and password.</p>
</li>
<li>
<strong>Services</strong>
<p>Subject to the terms and conditions of these Terms, CoreOS grants you a limited, non-transferable, non-exclusive and revocable right and license to access and use the Services.</p>
</li>
<li>
<strong>Restrictions</strong>
<p>Except as expressly authorized by these Terms, you may not (a) modify, disclose, alter, translate or create derivative works of the Services, (b) license, sublicense, resell, distribute, lease, rent, lend, transfer, assign or otherwise dispose of the Services (or any components thereof), (c) use the Services to store or transmit any viruses, software routines or other code designed to permit unauthorized access, to disable, erase or otherwise harm software, hardware or data, or to perform any other harmful actions, (d) build a competitive product or service, or copy any features or functions of the Services, (e) interfere with or disrupt the integrity or performance of the Services, (f) disclose to any third party any performance information or analysis relating to the Services, (g) remove, alter or obscure any proprietary notices in or on the Services, including copyright notices, or (h) cause or permit any third party to do any of the foregoing.</p>
</li>
<li>
<strong>Your Responsibilities</strong>
<p>If you share your repository, publish images, code or content, or otherwise make (or allow any third party to make) material available by means of the Site (“<strong>Content</strong>”), you are entirely responsible for the content of, and any harm resulting from, that Content. That is the case regardless of whether the Content in question constitutes text, graphics, an audio file, or computer software. By making Content available, you represent and warrant that:</p>
<ul>
<li>
the downloading, copying and use of the Content will not infringe the proprietary rights, including but not limited to the copyright, patent, trademark or trade secret rights, of any third party;
</li>
<li>
if your employer has rights to intellectual property you create, you have either (i) received permission from your employer to post or make available the Content, including but not limited to any software, or (ii) secured from your employer a waiver as to all rights in or to the Content;
</li>
<li>
you have fully complied with any third-party licenses relating to the Content, and have done all things necessary to successfully pass through to end users any required terms;
</li>
<li>
the Content does not contain or install any viruses, worms, malware, Trojan horses or other harmful or destructive content;
</li>
<li>
the Content is not spam, is not randomly-generated, and does not contain unethical or unwanted commercial content designed to drive traffic to third party sites or boost the search engine rankings of third party sites, or to further unlawful acts (such as phishing) or mislead recipients as to the source of the material (such as spoofing);
</li>
<li>
the Content does not contain threats or incite violence, and does not violate the privacy or publicity rights of any third party;
</li>
<li>
your Content is not getting advertised via unwanted electronic messages such as spam links on newsgroups, email lists, other blogs and web sites, and similar unsolicited promotional methods;
</li>
<li>
your Content is not named in a manner that misleads your readers into thinking that you are another person or company. For example, your Content's URL or name is not the name of a person other than yourself or company other than your own; and
</li>
<li>
you have, in the case of Content that includes computer code, accurately categorized and/or described the type, nature, uses and effects of the materials, whether requested to do so by DevTable or otherwise.
</li>
</ul>
<li>the downloading, copying and use of the Content will not infringe, violate or misappropriate any Intellectual Property Rights of any third party;</li>
<li>if your employer has rights to intellectual property you create, you have either (a) received permission from your employer to post or make available the Content, including but not limited to any software, or (b) secured from your employer a waiver as to all rights in or to the Content;</li>
<li>you have fully complied with any third-party licenses relating to the Content, and have done all things necessary to successfully pass through to end users any required terms;</li>
<li>the Content does not contain or install any viruses, worms, malware, Trojan horses or other harmful or destructive content;</li>
<li>the Content is not spam, is not randomly-generated, and does not contain unethical or unwanted commercial content designed to drive traffic to third party sites or boost the search engine rankings of third party sites, or to further unlawful acts (such as phishing) or mislead recipients as to the source of the material (such as spoofing);</li>
<li>the Content does not contain threats or incite violence, and does not violate the privacy or publicity rights of any third party;</li>
<li>your Content is not getting advertised via unwanted electronic messages such as spam links on newsgroups, email lists, other blogs and web sites, and similar unsolicited promotional methods;</li>
<li>your Content is not named in a manner that misleads your readers into thinking that you are another person or company. For example, your Content's URL or name is not the name of a person other than yourself or company other than your own; and</li>
<li>you have, in the case of Content that includes computer code, accurately categorized and/or described the type, nature, uses and effects of the materials, whether requested to do so by CoreOS or otherwise.</li>
</ul>
<p>By submitting Content or computer code to CoreOS for inclusion in your repositories, you grant CoreOS a world-wide, royalty-free, and non-exclusive license to reproduce, modify, adapt and publish the Content solely for the purpose of providing the services you request. If you delete Content, CoreOS will use reasonable efforts to remove it from the Services, but you acknowledge that caching or references to the Content may not be made immediately unavailable.</p>
<p>Without limiting any of those representations or warranties, CoreOS has the right (though not the obligation) to, in CoreOS' sole discretion (a) refuse or remove any content that, in CoreOS' reasonable opinion, violates any CoreOS policy or is in any way harmful or objectionable, or (b) terminate or deny access to and use of the Site to any individual or entity for any reason, in CoreOS' sole discretion. CoreOS will have no obligation to provide a refund of any amounts previously paid.</p>
</li>
<li>
By submitting Content or computer code to DevTable for inclusion in your Repositories, you grant DevTable a world-wide, royalty-free, and non-exclusive license to reproduce, modify, adapt and publish the Content solely for the purpose of providing the services you request. If you delete Content, DevTable will use reasonable efforts to remove it from the Service, but you acknowledge that caching or references to the Content may not be made immediately unavailable.
<strong>Fees and Payment Terms</strong>
<p>In exchange for your rights to the Services, you will pay to CoreOS the Subscription Fees. The Subscription Fees do not include taxes, and the Subscription Fees are payable in advance in accordance with your Quay.io Plan.</p>
<p>Unless CoreOS states otherwise, all payments must be made (a) in U.S. Dollars; and (b) by payment card via an authorized CoreOS payment processor. If you pay via a payment card, you hereby (i) authorize CoreOS (or its authorized payment processor) to make automatic recurring charges to your designated payment card number in the applicable amount of the Subscription Fees on an annual or monthly basis (as applicable) for the duration of the Subscription Period, (ii) represent and warrant that you are authorized to use and have fees charged to the payment card number you provide to CoreOS, and (iii) understand that you may withdraw this consent by emailing CoreOS at <a href="mailto:support@quay.io">support@quay.io</a>. <strong>Accounts can be canceled at any time in the Plan and Usage section of your Account Settings. No refunds will be issued (unless expressly stated otherwise).</strong></p>
<p>Notwithstanding any terms to the contrary in these Terms, CoreOS, at its sole discretion, may modify its pricing during any Subscription Period and such modifications will be effective as of the directly subsequent Subscription Period.</p>
<p>Interest on any late payments will accrue at the rate of 1.5% per month, or the highest rate permitted by law, whichever is lower, from the date such amount is due until the date such amount is paid in full. You will be responsible for, and will pay all sales and similar taxes on, all license fees and similar fees levied upon the provision of the Services provided under these Terms, excluding only taxes based solely on CoreOS' net income. You will indemnify and hold CoreOS harmless from and against any and all such taxes and related amounts levied upon the provision of the Services and any costs associated with the collection or withholding thereof, including penalties and interest.</p>
</li>
<li>
Without limiting any of those representations or warranties, DevTable has the right (though not the obligation) to, in DevTable's sole discretion (i) refuse or remove any content that, in DevTable's reasonable opinion, violates any DevTable policy or is in any way harmful or objectionable, or (ii) terminate or deny access to and use of the Website to any individual or entity for any reason, in DevTable's sole discretion. DevTable will have no obligation to provide a refund of any amounts previously paid.
<strong>Disclaimer</strong>
<p>COREOS DISCLAIMS ANY AND ALL REPRESENTATIONS OR WARRANTIES (EXPRESS OR IMPLIED, ORAL OR WRITTEN) WITH RESPECT TO THESE TERMS, SERVICES AND ANY OPEN SOURCE SOFTWARE (AS DEFINED BELOW), WHETHER ALLEGED TO ARISE BY OPERATION OF LAW, BY REASON OF CUSTOM OR USAGE IN THE TRADE, BY COURSE OF DEALING OR OTHERWISE. NOTWITHSTANDING ANY TERMS TO THE CONTRARY IN THESE TERMS, COMPANY ACKNOWLEDGES AND AGREES THAT COREOS MAY MODIFY THE FEATURES OF THE SERVICES FROM TIME-TO-TIME AT COREOS' SOLE DISCRETION.</p>
</li>
<li>
<strong>Payment and Renewal.</strong>
<dl>
<dt>General Terms.</dt>
<dd>Paid services beyond the initial trial are available on the Website (any such services, an “Account”). By maintaining an Account you agree to pay DevTable the monthly or annual subscription fees indicated for that service. Payments will be charged on a pre-pay basis on the day you sign up for a plan and will cover the use of that service for a monthly or annual subscription period as indicated. Account fees are not refundable.</dd>
<dt>Automatic Renewal.</dt>
<dd>Unless you notify DevTable before the end of the applicable subscription period that you want to cancel an Account, your Account subscription will automatically renew and you authorize us to collect the then-applicable annual or monthly subscription fee for such Account (as well as any taxes) using any credit card or other payment mechanism we have on record for you. Accounts can be canceled at any time in the Payment Information section of your User Settings.</dd>
</dl>
<strong>Indemnification Obligations</strong>
<p>You agree, at your sole expense, to defend, indemnify and hold CoreOS (and its directors, officers, employees, consultants and agents) harmless from and against any and all actual or threatened suits, actions, proceedings (at law or in equity), claims, damages, payments, deficiencies, fines, judgments, settlements, liabilities, losses, costs and expenses (including, but not limited to, reasonable attorneys' fees, costs, penalties, interest and disbursements) for any death, injury, property damage caused by, arising out of, resulting from, attributable to or in any way incidental to any of your Content or any actual or alleged breach of any of your obligations under these Terms (including, but not limited to, any actual or alleged breach of any of your representations or warranties as set forth in these Terms).</p>
</li>
<li>
<strong>Responsibility of Website Visitors.</strong> DevTable has not reviewed, and cannot review, all of the material, including computer software, submitted to the Service, and cannot therefore be responsible for that material's content, use or effects. By operating the Website, DevTable does not represent or imply that it endorses the material there posted, or that it believes such material to be accurate, useful or non-harmful. You are responsible for taking precautions as necessary to protect yourself and your computer systems from viruses, worms, Trojan horses, and other harmful or destructive content. The Website may contain content that is offensive, indecent, or otherwise objectionable, as well as content containing technical inaccuracies, typographical mistakes, and other errors. The Website may also contain material that violates the privacy or publicity rights, or infringes the intellectual property and other proprietary rights, of third parties, or the downloading, copying or use of which is subject to additional terms and conditions, stated or unstated. DevTable disclaims any responsibility for any harm resulting from the use by visitors of the Website, or from any downloading by those visitors of content there posted. </li>
<strong>Limitation of Liability</strong>
<p>IN NO EVENT WILL (A) COREOS' TOTAL LIABILITY ARISING OUT OF OR RELATED TO THESE TERMS EXCEED THE TOTAL AMOUNT PAID BY YOU TO COREOS UNDER THESE TERMS DURING THE SIX MONTHS IMMEDIATELY PRIOR TO THE ACCRUAL OF THE FIRST CLAIM, AND (B) COREOS BE LIABLE TO YOU OR ANY THIRD PARTY FOR ANY LOSS OF PROFITS, LOSS OF USE, LOSS OF REVENUE, LOSS OF GOODWILL, ANY INTERRUPTION OF BUSINESS, OR ANY INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, PUNITIVE OR CONSEQUENTIAL DAMAGES OF ANY KIND ARISING OUT OF, OR IN CONNECTION WITH THESE TERMS, WHETHER IN CONTRACT, TORT, STRICT LIABILITY OR OTHERWISE, EVEN IF SUCH PARTY HAS BEEN ADVISED OR IS OTHERWISE AWARE OF THE POSSIBILITY OF SUCH DAMAGES. MULTIPLE CLAIMS WILL NOT EXPAND THIS LIMITATION. THIS SECTION (LIMITATION OF LIABILITY) WILL BE GIVEN FULL EFFECT EVEN IF ANY REMEDY SPECIFIED IN THESE TERMS IS DEEMED TO HAVE FAILED OF ITS ESSENTIAL PURPOSE.</p>
</li>
<li>
<strong>Content Posted on Other Websites.</strong> We have not reviewed, and cannot review, all of the material, including computer software, made available through the websites and webpages to which Quay.io links, and that link to Quay.io. DevTable does not have any control over those non-DevTable websites and webpages, and is not responsible for their contents or their use. By linking to a non-DevTable website or webpage, DevTable does not represent or imply that it endorses such website or webpage. You are responsible for taking precautions as necessary to protect yourself and your computer systems from viruses, worms, Trojan horses, and other harmful or destructive content. DevTable disclaims any responsibility for any harm resulting from your use of non-DevTable websites and webpages. </li>
<li>
<strong>Ownership</strong>
<p>As between the parties and subject to Section 5 (Your Responsibilities), you own all right, title and interest in and to the Content and any and all Intellectual Property Rights (as defined below) embodied in or related to the foregoing. As between the parties and subject to Section 3 (Services), CoreOS owns all right, title and interest in and to the Services and any and all Intellectual Property Rights (as defined below) embodied in or related to the foregoing. CoreOS reserves all rights not expressly granted in these Terms, and no licenses are granted by CoreOS to you or any other party under these Terms, whether by implication, estoppel or otherwise, except as expressly set forth in these Terms. For the purpose of these Terms, “<strong>Intellectual Property Rights</strong>” means all patents, copyrights, moral rights, trademarks, trade secrets and any other form of intellectual property rights recognized in any jurisdiction, including applications and registrations for any of the foregoing.</p>
</li>
<li>
<strong>Copyright Infringement and DMCA Policy.</strong> As DevTable asks others to respect its intellectual property rights, it respects the intellectual property rights of others. If you believe that material located on or linked to by Quay.io violates your copyright, you are encouraged to notify DevTable in accordance with the provisions of the Digital Millennium Copyright Act (&ldquo;DMCA&rdquo;). DevTable will respond to all such notices, including, as required or appropriate, by removing the infringing material or disabling all links to the infringing material. DevTable will terminate a visitor's access to and use of the Website if, under appropriate circumstances, the visitor is determined to be a repeat infringer of the copyrights or other intellectual property rights of DevTable or others. In the case of such termination, DevTable will have no obligation to provide a refund of any amounts previously paid to DevTable. </li>
<li>
<strong>Term, Termination and Effect of Termination</strong>
<p>Unless earlier terminated as set forth in these Terms, the term of these Terms commences upon the Effective Date and continues for the Subscription Period, and thereafter the term of these Terms automatically renews for one or more additional Subscription Periods unless a party terminates these Terms with no less than 15 days' advance written notice prior to the close of the then-current term. Further, CoreOS may terminate or deny access to and use of the Services if CoreOS reasonably believes you have violated any of the terms or conditions of these Terms. Upon any termination of these Terms, your rights to the Services will immediately cease.</p>
</li>
<li>
<strong>Intellectual Property.</strong> This Agreement does not transfer from DevTable to you any DevTable or third party intellectual property, and all right, title and interest in and to such property will remain (as between the parties) solely with DevTable. DevTable, Quay.io, the Quay.io logo, and all other trademarks, service marks, graphics and logos used in connection with Quay.io, or the Website are trademarks or registered trademarks of DevTable or DevTable's licensors. Other trademarks, service marks, graphics and logos used in connection with the Website may be the trademarks of other third parties. Your use of the Website grants you no right or license to reproduce or otherwise use any DevTable or third-party trademarks. </li>
<li>
<strong>Copyright Policy</strong>
<p>CoreOS users may report to CoreOS content appearing on or via the Site or Services that they believe violates these Terms, and CoreOS may remove such content, suspend or terminate the account of the user who posted such content and/or take additional action to enforce these Terms against such user.</p>
<p>Also, in accordance with the Digital Millennium Copyright Act (DMCA) and other applicable law, CoreOS has adopted a policy of terminating, in appropriate circumstances and at our discretion, account holders who are deemed to be repeat infringers. CoreOS also may, at its discretion, limit access to the Services and terminate the accounts of any users who infringe any intellectual property rights of others, whether or not there is any repeat infringement.</p>
<p>If you think that anything on the Services infringes upon any copyright that you own or control, you may file a notification with CoreOS' Designated Agent as set forth below:</p>
<table border=0>
<tr><td>Designated Agent:</td><td>[insert name]</td></tr>
<tr><td>Address of Designated Agent:</td><td>[insert address]</td></tr>
<tr><td>Telephone Number of Designated Agent:</td><td>[insert telephone]</td></tr>
<tr><td>Fax Number of Designated Agent:</td><td>[insert telephone number]</td></tr>
<tr><td>Email Address of Designated Agent:</td><td>[insert email address]</td></tr>
</table>
<p>Please see <a href="http://www.copyright.gov/title17/92chap5.html#512">17 U.S.C. § 512(c)(3)</a> for the requirements of a proper notification. If you knowingly misrepresent that any material or activity is infringing, you may be liable for any damages, including costs and attorneys' fees, CoreOS or the alleged infringer incurs because we relied on the misrepresentation when removing or disabling access to the material or activity.</p>
</li>
<li>
<strong>Changes.</strong> DevTable reserves the right, at its sole discretion, to modify or replace any part of this Agreement. It is your responsibility to check this Agreement periodically for changes. Your continued use of or access to the Website following the posting of any changes to this Agreement constitutes acceptance of those changes. DevTable may also, in the future, offer new services and/or features through the Website (including the release of new tools and resources). Such new features and/or services shall be subject to the terms and conditions of this Agreement. </li>
<li>
<strong>Feedback</strong>
<p>Any suggestions, comments, or other feedback provided by you to CoreOS with respect to the Services or CoreOS (collectively, “<strong>Feedback</strong>”) will constitute confidential information of CoreOS. CoreOS will be free to use, disclose, reproduce, license, and otherwise distribute and exploit the Feedback provided to it as it sees fit, entirely without obligation or restriction of any kind, on account of intellectual property rights or otherwise.</p>
</li>
<li>
<strong>Termination.</strong> DevTable may terminate your access to all or any part of the Website at any time, with or without cause, with or without notice, effective immediately. If you wish to terminate this Agreement or your Quay.io account (if you have one), you may simply discontinue using the Website. All provisions of this Agreement which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability. </li>
<li>
<strong>Links</strong>
<p>You are granted a limited, non-exclusive right to create a text hyperlink to the Services for noncommercial purposes, provided such link does not portray CoreOS or any of its products and services in a false, misleading, derogatory, or defamatory manner and that the linking site does not contain any material that is offensive, illegal, harassing, or otherwise objectionable. This limited right may be revoked at any time. CoreOS makes no claim or representation regarding, and accepts no responsibility for, the quality, content, nature, or reliability of third-party sites accessible by link from the Services or Site. CoreOS provides these links to you only as a convenience, and the inclusion of any link does not imply affiliation, endorsement, or adoption by CoreOS of the corresponding site or any information contained in (or made available via) that site. When you leave the Site, CoreOS' terms and policies no longer govern. You should review the applicable terms and policies, including privacy and data-gathering practices, of any site to which you navigate from the Site.</p>
</li>
<li>
<strong>Disclaimer of Warranties.</strong> The Website is provided &ldquo;as is&rdquo;. DevTable and its suppliers and licensors hereby disclaim all warranties of any kind, express or implied, including, without limitation, the warranties of merchantability, fitness for a particular purpose and non-infringement. Neither DevTable nor its suppliers and licensors makes any warranty that the Website will be error free or that access thereto will be continuous or uninterrupted. You understand that you download from, or otherwise obtain content or services through, the Website at your own discretion and risk. </li>
<li>
<strong>Trademarks</strong>
<p>CoreOS' name, trademarks, logos, and any other CoreOS product, service name, or slogan included in the Site are property of CoreOS and may not be copied, imitated, or used (in whole or in part) without CoreOS' prior written consent. The look and feel of the Site, including all custom graphics, button icons, and scripts, constitute service marks, trademarks, or trade dress of CoreOS and may not be copied, imitated, or used (in whole or in part) without CoreOS' prior written consent. All other trademarks, registered trademarks, product names, and company names or logos mentioned in the Site (&ldquo;<strong>Third-Party Trademarks</strong>&rdquo;) are the property of their respective owners, and the use of such Third-Party Trademarks inures to the benefit of each owner. The use of such Third-Party Trademarks is intended to denote interoperability and does not constitute an affiliation of CoreOS and its licensors with such company or an endorsement or approval by such company of CoreOS or its licensors or their respective products or services.</p>
</li>
<li>
<strong>Limitation of Liability.</strong> In no event will DevTable, or its suppliers or licensors, be liable with respect to any subject matter of this agreement under any contract, negligence, strict liability or other legal or equitable theory for: (i) any special, incidental or consequential damages; (ii) the cost of procurement of substitute products or services; (iii) interruption of use or loss or corruption of data; or (iv) any amounts that exceed the fees paid by you to DevTable under this agreement during the twelve (12) month period prior to the cause of action. DevTable shall have no liability for any failure or delay due to matters beyond its reasonable control. The foregoing shall not apply to the extent prohibited by applicable law.
</li>
<li>
<strong>General Representation and Warranty.</strong> You represent and warrant that (i) your use of the Website will be in strict accordance with the Quay.io Privacy Policy, with this Agreement and with all applicable laws and regulations (including without limitation any local laws or regulations in your country, state, city, or other governmental area, regarding online conduct and acceptable content, and including all applicable laws regarding the transmission of technical data exported from the United States or the country in which you reside) and (ii) your use of the Website will not infringe or misappropriate the intellectual property rights of any third party.
</li>
<li>
<strong>Indemnification.</strong> You agree to indemnify and hold harmless DevTable, its contractors, and its licensors, and their respective directors, officers, employees and agents from and against any and all claims and expenses, including attorneys' fees, arising out of your use of the Website, including but not limited to your violation of this Agreement.
</li>
<li>
<strong>Miscellaneous.</strong> This Agreement constitutes the entire agreement between DevTable and you concerning the subject matter hereof, and it may only be modified by a written amendment signed by an authorized executive of DevTable, or by the posting by DevTable of a revised version. Except to the extent applicable law, if any, provides otherwise, this Agreement, and any access to or use of the Website, will be governed by the laws of the state of New York, U.S.A., excluding its conflict of law provisions, and the proper venue for any disputes arising out of or relating to any of the same will be the state and federal courts located in New York County, New York. The prevailing party in any action or proceeding to enforce this Agreement shall be entitled to costs and attorneys' fees. If any part of this Agreement is held invalid or unenforceable, that part will be construed to reflect the parties' original intent, and the remaining portions will remain in full force and effect. A waiver by either party of any term or condition of this Agreement or any breach thereof, in any one instance, will not waive such term or condition or any subsequent breach thereof. You may assign your rights under this Agreement to any party that consents to, and agrees to be bound by, its terms and conditions; DevTable may assign its rights under this Agreement without condition. This Agreement will be binding upon and will inure to the benefit of the parties, their successors and permitted assigns. </li>
<li>
<strong>General Provisions</strong>
<p/>
<dl>
<dt>Entire Agreement</dt>
<dd>
These Terms (together with all terms incorporated by reference) are the complete and exclusive statement of the mutual understanding of the parties and supersede and cancel all previous written and oral agreements and communications relating to the subject matter of these Terms.
</dd>
<dt>Governing Law and Venue</dt>
<dd>
These Terms will be governed by and construed in accordance with the laws of the State of California applicable to agreements made and to be entirely performed within the State of California, without resort to its conflict of law provisions. The federal courts in San Mateo County, California will have jurisdiction over any suits filed relating to these Terms. Prior to the filing or initiation of any action or proceeding relating to these Terms, the parties must participate in good faith mediation in San Mateo County, California. If a party initiates any proceeding regarding these Terms, the prevailing party to such proceeding is entitled to reasonable attorneys' fees and costs for claims arising out of these Terms.
</dd>
<dt>Publicity</dt>
<dd>
You consent to CoreOS' use of your name and/or logo on the CoreOS website, identifying you as a customer of CoreOS and describing your use of the Services, notwithstanding any terms to the contrary in these Terms. You agree that CoreOS may issue a press release identifying you as a customer of CoreOS.
</dd>
<dt>Assignment</dt>
<dd>
Neither these Terms nor any right or duty under these Terms may be transferred, assigned or delegated by you, by operation of law or otherwise, without the prior written consent of CoreOS, and any attempted transfer, assignment or delegation without such consent will be void and without effect. CoreOS may freely transfer, assign or delegate these Terms or its rights and duties under these Terms. Subject to the foregoing, these Terms will be binding upon and will inure to the benefit of the parties and their respective representatives, heirs, administrators, successors and permitted assigns.
</dd>
<dt>Amendments and Waivers</dt>
<dd>
Unless expressly stated otherwise in your standard service terms, no modification, addition or deletion, or waiver of any rights under these Terms will be binding on a party unless clearly understood by the parties to be a modification or waiver and signed by a duly authorized representative of each party. No failure or delay (in whole or in part) on the part of a party to exercise any right or remedy hereunder will operate as a waiver thereof or affect any other right or remedy. All rights and remedies hereunder are cumulative and are not exclusive of any other rights or remedies provided hereunder or by law. The waiver of one breach or default or any delay in exercising any rights will not constitute a waiver of any subsequent breach or default.
</dd>
<dt>Electronic Communications</dt>
<dd>
CoreOS may choose to electronically deliver all communications with you, which may include email to the email address you provide to CoreOS. CoreOS' electronic communications to you may transmit or convey information about action taken on your request, portions of your request that may be incomplete or require additional explanation, any notices required under applicable law and any other notices. You agree to do business electronically with CoreOS and to receive electronically all current and future notices, disclosures, communications and information, and you agree that the aforementioned electronic communications satisfy any legal requirement that such communications be in writing. An electronic notice will be deemed to have been received on the day of receipt as evidenced by such email.
</dd>
<dt>Severability</dt>
<dd>
If any provision of these Terms is invalid, illegal, or incapable of being enforced by any rule of law or public policy, all other provisions of these Terms will nonetheless remain in full force and effect so long as the economic and legal substance of the transactions contemplated by these Terms is not affected in any manner adverse to any party. Upon such determination that any provision is invalid, illegal, or incapable of being enforced, the parties will negotiate in good faith to modify these Terms so as to effect the original intent of the parties as closely as possible in an acceptable manner to the end that the transactions contemplated hereby are fulfilled.
</dd>
<dt>Force Majeure</dt>
<dd>
Except for payments due under these Terms, neither party will be responsible for any failure to perform or delay attributable in whole or in part to any cause beyond its reasonable control, including, but not limited to, acts of God (fire, storm, floods, earthquakes, etc.), civil disturbances, disruption of telecommunications, disruption of power or other essential services, interruption or termination of service by any service providers being used by CoreOS to host the Services or to link its servers to the Internet, labor disturbances, vandalism, cable cut, computer viruses or other similar occurrences, or any malicious or unlawful acts of any third party.
</dd>
<dt>Notice for California Users</dt>
<dd>
If you are a California resident, you may have these Terms mailed to you electronically by sending a letter to the foregoing address with your electronic mail address and a request for these Terms. Under California Civil Code Section 1789.3, California Website users are entitled to the following specific consumer rights notice: The Complaint Assistance Unit of the Division of Consumer Services of the California Department of Consumer Affairs may be contacted in writing at 1625 N. Market Blvd., Suite S-202, Sacramento, California 95834, or by telephone at (800) 952-5210.
</dd>
</dl>
</li>
</ol>
</div>

test/test_buildman.py (new file)

@@ -0,0 +1,235 @@
import unittest
import etcd
import os.path
import time
import json

from trollius import coroutine, get_event_loop, From, Future, sleep, Return
from mock import Mock
from threading import Event
from urllib3.exceptions import ReadTimeoutError

from buildman.manager.executor import BuilderExecutor
from buildman.manager.ephemeral import EphemeralBuilderManager, EtcdAction
from buildman.server import BuildJobResult
from buildman.component.buildcomponent import BuildComponent

BUILD_UUID = 'deadbeef-dead-beef-dead-deadbeefdead'
REALM_ID = '1234-realm'


def async_test(f):
  def wrapper(*args, **kwargs):
    coro = coroutine(f)
    future = coro(*args, **kwargs)
    loop = get_event_loop()
    loop.run_until_complete(future)
  return wrapper
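
# Editor's note (not in the original commit): async_test bridges trollius (the
# Python 2 backport of asyncio used here) and unittest by wrapping a
# generator-based test so it runs to completion on the default event loop.
# A minimal usage sketch, assuming the imports above:
#
#   class ExampleTest(unittest.TestCase):
#     @async_test
#     def test_waits(self):
#       yield From(sleep(0.01))  # any trollius coroutine can be yielded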
class TestEphemeral(unittest.TestCase):
  def __init__(self, *args, **kwargs):
    self.etcd_client_mock = None
    self.etcd_wait_event = Event()
    self.test_executor = None
    super(TestEphemeral, self).__init__(*args, **kwargs)

  def _create_mock_etcd_client(self, *args, **kwargs):
    def hang_until_event(*args, **kwargs):
      time.sleep(.01)  # 10ms to simulate network latency
      self.etcd_wait_event.wait()

    self.etcd_client_mock = Mock(spec=etcd.Client, name='etcd.Client')
    self.etcd_client_mock.watch = Mock(side_effect=hang_until_event)
    return self.etcd_client_mock

  def _create_completed_future(self, result=None):
    def inner(*args, **kwargs):
      new_future = Future()
      new_future.set_result(result)
      return new_future
    return inner

  def _create_mock_executor(self, *args, **kwargs):
    self.test_executor = Mock(spec=BuilderExecutor)
    self.test_executor.start_builder = Mock(side_effect=self._create_completed_future('123'))
    self.test_executor.stop_builder = Mock(side_effect=self._create_completed_future())
    return self.test_executor

  def _create_build_job(self):
    mock_job = Mock()
    mock_job.job_details = {
      'build_uuid': BUILD_UUID,
    }
    mock_job.job_item = {
      'body': json.dumps(mock_job.job_details),
      'id': 1,
    }
    return mock_job

  def setUp(self):
    EphemeralBuilderManager._executors['test'] = self._create_mock_executor

    self.old_etcd_client_klass = EphemeralBuilderManager._etcd_client_klass
    EphemeralBuilderManager._etcd_client_klass = self._create_mock_etcd_client
    self.etcd_wait_event.clear()

    self.register_component_callback = Mock()
    self.unregister_component_callback = Mock()
    self.job_heartbeat_callback = Mock()
    self.job_complete_callback = Mock()

    self.manager = EphemeralBuilderManager(
      self.register_component_callback,
      self.unregister_component_callback,
      self.job_heartbeat_callback,
      self.job_complete_callback,
      '127.0.0.1',
      30,
    )
    self.manager.initialize({'EXECUTOR': 'test'})

    self.mock_job = self._create_build_job()
    self.mock_job_key = os.path.join('building/', BUILD_UUID)

  def tearDown(self):
    self.etcd_wait_event.set()
    self.manager.shutdown()

    del EphemeralBuilderManager._executors['test']
    EphemeralBuilderManager._etcd_client_klass = self.old_etcd_client_klass

  @coroutine
  def _setup_job_for_managers(self):
    # Test that we are watching the realm location before anything else happens
    self.etcd_client_mock.watch.assert_any_call('realm/', recursive=True, timeout=0)

    self.etcd_client_mock.read = Mock(side_effect=KeyError)
    test_component = Mock(spec=BuildComponent)
    test_component.builder_realm = REALM_ID
    test_component.start_build = Mock(side_effect=self._create_completed_future())
    self.register_component_callback.return_value = test_component

    # Ask for a builder to be scheduled
    is_scheduled = yield From(self.manager.schedule(self.mock_job))
    self.assertTrue(is_scheduled)

    self.etcd_client_mock.read.assert_called_once_with('building/', recursive=True)
    self.assertEqual(self.test_executor.start_builder.call_count, 1)
    self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)
    self.assertEqual(self.etcd_client_mock.write.call_args_list[1][0][0], self.mock_job_key)

    # Right now the job is not registered with any managers because etcd has not accepted the job
    self.assertEqual(self.register_component_callback.call_count, 0)

    realm_created = Mock(spec=etcd.EtcdResult)
    realm_created.action = EtcdAction.CREATE
    realm_created.key = os.path.join('realm/', REALM_ID)
    realm_created.value = json.dumps({
      'realm': REALM_ID,
      'token': 'beef',
      'builder_id': '123',
      'job_queue_item': self.mock_job.job_item,
    })

    self.manager._handle_realm_change(realm_created)
    self.assertEqual(self.register_component_callback.call_count, 1)

    raise Return(test_component)

  @async_test
  def test_schedule_and_complete(self):
    # Test that a job is properly registered with all of the managers
    test_component = yield From(self._setup_job_for_managers())

    # Take the job ourselves
    yield From(self.manager.build_component_ready(test_component))
    self.etcd_client_mock.delete.assert_called_once_with(os.path.join('realm/', REALM_ID))
    self.etcd_client_mock.delete.reset_mock()

    # Finish the job
    yield From(self.manager.job_completed(self.mock_job, BuildJobResult.COMPLETE, test_component))
    self.assertEqual(self.test_executor.stop_builder.call_count, 1)
    self.etcd_client_mock.delete.assert_called_once_with(self.mock_job_key)

  @async_test
  def test_another_manager_takes_job(self):
    # Prepare a job to be taken by another manager
    test_component = yield From(self._setup_job_for_managers())

    realm_deleted = Mock(spec=etcd.EtcdResult)
    realm_deleted.action = EtcdAction.DELETE
    realm_deleted.key = os.path.join('realm/', REALM_ID)
    realm_deleted._prev_node = Mock(spec=etcd.EtcdResult)
    realm_deleted._prev_node.value = json.dumps({
      'realm': REALM_ID,
      'token': 'beef',
      'builder_id': '123',
      'job_queue_item': self.mock_job.job_item,
    })

    self.manager._handle_realm_change(realm_deleted)
    self.unregister_component_callback.assert_called_once_with(test_component)

  @async_test
  def test_expiring_worker(self):
    # Test that we are watching before anything else happens
    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True, timeout=0)

    # Send a signal to the callback that a worker has expired
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.EXPIRE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({'builder_id': '1234'})

    self.manager._handle_builder_expiration(expired_result)
    yield From(sleep(.01))

    self.test_executor.stop_builder.assert_called_once_with('1234')
    self.assertEqual(self.test_executor.stop_builder.call_count, 1)

  @async_test
  def test_change_worker(self):
    # Send a signal to the callback that a worker key has been changed
    set_result = Mock(spec=etcd.EtcdResult)
    set_result.action = 'set'
    set_result.key = self.mock_job_key

    self.manager._handle_builder_expiration(set_result)
    yield From(sleep(.01))

    self.assertEqual(self.test_executor.stop_builder.call_count, 0)

  @async_test
  def test_heartbeat_response(self):
    expiration_timestamp = time.time() + 60
    builder_result = Mock(spec=etcd.EtcdResult)
    builder_result.value = json.dumps({
      'builder_id': '123',
      'expiration': expiration_timestamp,
      'max_expiration': expiration_timestamp,
    })
    self.etcd_client_mock.read = Mock(return_value=builder_result)

    yield From(self.manager.job_heartbeat(self.mock_job))

    # Wait for threads to complete
    yield From(sleep(.01))

    self.job_heartbeat_callback.assert_called_once_with(self.mock_job)
    self.assertEqual(self.etcd_client_mock.write.call_count, 1)
    self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)


if __name__ == '__main__':
  unittest.main()
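
# Editor's note: with the __main__ hook above, the module can be run directly,
# e.g. `python test/test_buildman.py` from the repository root (assuming the
# buildman package and its dependencies are importable).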

@@ -162,3 +162,8 @@ class TestQueue(QueueTestCase):
    one = self.queue.get()
    self.assertNotEqual(None, one)
    self.assertEqual(self.TEST_MESSAGE_1, one.body)


if __name__ == '__main__':
  unittest.main()

@@ -1,6 +1,7 @@
import os
from datetime import datetime, timedelta
from tempfile import NamedTemporaryFile
from config import DefaultConfig
@@ -13,11 +14,14 @@ class FakeTransaction(object):
  pass


TEST_DB_FILE = NamedTemporaryFile(delete=True)


class TestConfig(DefaultConfig):
  TESTING = True
  SECRET_KEY = 'a36c9d7d-25a9-4d3f-a586-3d2f8dc40a83'
  DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///:memory:')
  DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///{0}'.format(TEST_DB_FILE.name))
  DB_CONNECTION_ARGS = {
    'threadlocals': True,
    'autorollback': True
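
The switch from 'sqlite:///:memory:' to a NamedTemporaryFile-backed URI matters
because every new sqlite ':memory:' connection gets its own empty database;
with threadlocal connections, other test threads would see no tables. A minimal
sketch of the difference (editor's illustration, not part of the diff; assumes
a POSIX filesystem):

import sqlite3
from tempfile import NamedTemporaryFile

mem_a = sqlite3.connect(':memory:')
mem_a.execute('CREATE TABLE t (x INTEGER)')
mem_b = sqlite3.connect(':memory:')    # a second, separate in-memory database
# mem_b.execute('SELECT * FROM t')     # would raise: no such table

db_file = NamedTemporaryFile(delete=True)
file_a = sqlite3.connect(db_file.name)
file_a.execute('CREATE TABLE t (x INTEGER)')
file_a.commit()
file_b = sqlite3.connect(db_file.name)
file_b.execute('SELECT * FROM t')      # succeeds: same on-disk database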

tools/sendresetemail.py (new file)
@@ -0,0 +1,27 @@
from app import app
from util.useremails import send_recovery_email
from data import model

import argparse

from flask import Flask, current_app
from flask_mail import Mail


def sendReset(username):
  user = model.get_user(username)
  if not user:
    print 'No user found'
    return

  with app.app_context():
    code = model.create_reset_password_email_code(user.email)
    send_recovery_email(user.email, code.code)
    print 'Email sent to %s' % (user.email)


parser = argparse.ArgumentParser(description='Sends a reset email')
parser.add_argument('username', help='The username')
args = parser.parse_args()

sendReset(args.username)
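
# Editor's note: invoked as a one-off CLI from the repository root, e.g.
#   python tools/sendresetemail.py someusername
# (assumes a configured app/database so that `from app import app` succeeds).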

@@ -1,704 +0,0 @@
import logging.config

if __name__ == "__main__":
  logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)

import logging
import argparse
import os
import requests
import re
import json
import shutil
import tarfile

from docker import Client
from docker.utils import kwargs_from_env
from docker.errors import APIError
from tempfile import TemporaryFile, mkdtemp
from zipfile import ZipFile
from functools import partial
from datetime import datetime, timedelta
from threading import Event
from uuid import uuid4
from collections import defaultdict
from requests.exceptions import ConnectionError

from data import model
from data.database import BUILD_PHASE
from workers.worker import Worker, WorkerUnhealthyException, JobException
from app import userfiles as user_files, build_logs, sentry, dockerfile_build_queue
from endpoints.notificationhelper import spawn_notification
from util.safetar import safe_extractall
from util.dockerfileparse import parse_dockerfile, ParsedDockerfile, serialize_dockerfile

logger = logging.getLogger(__name__)

TIMEOUT_PERIOD_MINUTES = 20
CACHE_EXPIRATION_PERIOD_HOURS = 24
NO_TAGS = ['<none>:<none>']
RESERVATION_TIME = (TIMEOUT_PERIOD_MINUTES + 5) * 60


def build_docker_args():
  args = kwargs_from_env()
  if 'tls' in args and os.environ.get('IGNORE_TLS_ISSUES', False):
    args['tls'].verify = False
  return args


def matches_system_error(status_str):
  """ Returns true if the given status string matches a known system error in the
      Docker builder.
  """
  KNOWN_MATCHES = ['lxc-start: invalid', 'lxc-start: failed to', 'lxc-start: Permission denied']

  for match in KNOWN_MATCHES:
    # Allow the match to start up to 10 characters in, because we might have a
    # Unix control code at the start.
    found = status_str.find(match)
    if found >= 0 and found <= 10:
      return True

  return False


class StatusWrapper(object):
  def __init__(self, build_uuid):
    self._uuid = build_uuid
    self._status = {
      'total_commands': None,
      'current_command': None,
      'push_completion': 0.0,
      'pull_completion': 0.0,
    }

    self.__exit__(None, None, None)

  def __enter__(self):
    return self._status

  def __exit__(self, exc_type, value, traceback):
    build_logs.set_status(self._uuid, self._status)


class _IncompleteJsonError(Exception):
  def __init__(self, start_from):
    self.start_from = start_from


class _StreamingJSONDecoder(json.JSONDecoder):
  FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
  WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)

  def decode(self, s, _w=WHITESPACE.match):
    """Return the Python representation of ``s`` (a ``str`` or ``unicode``
    instance containing a JSON document)
    """
    start_from = 0
    while start_from < len(s):
      try:
        obj, end = self.raw_decode(s[start_from:], idx=_w(s[start_from:], 0).end())
      except ValueError:
        raise _IncompleteJsonError(start_from)
      end = _w(s[start_from:], end).end()
      start_from += end
      yield obj


class StreamingDockerClient(Client):
  def _stream_helper(self, response):
    """Generator for data coming from a chunked-encoded HTTP response."""
    content_buf = ''
    for content in response.iter_content(chunk_size=256):
      content_buf += content
      try:
        for val in json.loads(content_buf, cls=_StreamingJSONDecoder):
          yield val
        content_buf = ''
      except _IncompleteJsonError as exc:
        content_buf = content_buf[exc.start_from:]
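
# Editor's note on the streaming-decode contract above (illustration only):
# _StreamingJSONDecoder.decode is a generator that yields each complete
# top-level JSON document in the buffer and raises _IncompleteJsonError with
# the offset at which to resume once it hits a truncated tail:
#
#   gen = json.loads('{"a": 1}{"b": 2}{"c"', cls=_StreamingJSONDecoder)
#   # next(gen) -> {'a': 1}; next(gen) -> {'b': 2}; the third next() raises
#   # _IncompleteJsonError(start_from=16), so _stream_helper keeps buf[16:].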
class DockerfileBuildContext(object):
  def __init__(self, build_context_dir, dockerfile_subdir, repo, tag_names,
               push_token, build_uuid, cache_size_gb, pull_credentials=None):
    self._build_dir = build_context_dir
    self._dockerfile_subdir = dockerfile_subdir
    self._repo = repo
    self._tag_names = tag_names
    self._push_token = push_token
    self._status = StatusWrapper(build_uuid)
    self._build_logger = partial(build_logs.append_log_message, build_uuid)
    self._pull_credentials = pull_credentials
    self._cache_size_gb = cache_size_gb

    # Note: We have two different clients here because we (potentially) login
    # with both, but with different credentials that we do not want shared between
    # the build and push operations.
    self._push_cl = StreamingDockerClient(timeout=1200, **build_docker_args())
    self._build_cl = StreamingDockerClient(timeout=1200, **build_docker_args())

    dockerfile_path = os.path.join(self._build_dir, dockerfile_subdir,
                                   'Dockerfile')
    if not os.path.exists(dockerfile_path):
      raise RuntimeError('Build job did not contain a Dockerfile.')

    # Compute the number of steps
    with open(dockerfile_path, 'r') as dockerfileobj:
      self._parsed_dockerfile = parse_dockerfile(dockerfileobj.read())

    self.__inject_quay_repo_env(self._parsed_dockerfile, repo)
    self._num_steps = len(self._parsed_dockerfile.commands)

    with open(dockerfile_path, 'w') as dockerfileobj:
      dockerfileobj.write(serialize_dockerfile(self._parsed_dockerfile))

    logger.debug('Will build and push to repo %s with tags named: %s', self._repo,
                 self._tag_names)

  def __enter__(self):
    try:
      self.__cleanup_containers()
      self.__cleanup_images()
      self.__prune_cache()
    except APIError:
      sentry.client.captureException()
      message = 'Docker installation is no longer healthy.'
      logger.exception(message)
      raise WorkerUnhealthyException(message)

    return self

  def __exit__(self, exc_type, value, traceback):
    shutil.rmtree(self._build_dir)

    try:
      self.__cleanup_containers()
    except APIError:
      sentry.client.captureException()
      message = 'Docker installation is no longer healthy.'
      logger.exception(message)
      raise WorkerUnhealthyException(message)

  @staticmethod
  def __inject_quay_repo_env(parsed_dockerfile, quay_reponame):
    env_command = {
      'command': 'ENV',
      'parameters': 'QUAY_REPOSITORY %s' % quay_reponame
    }
    for index, command in reversed(list(enumerate(parsed_dockerfile.commands))):
      if command['command'] == 'FROM':
        new_command_index = index + 1
        logger.debug('Injecting env command at dockerfile index: %s', new_command_index)
        parsed_dockerfile.commands.insert(new_command_index, env_command)
        break

  @staticmethod
  def __total_completion(statuses, total_images):
    percentage_with_sizes = float(len(statuses.values()))/total_images
    sent_bytes = sum([status['current'] for status in statuses.values()])
    total_bytes = sum([status['total'] for status in statuses.values()])
    return float(sent_bytes)/total_bytes*percentage_with_sizes
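
  # Editor's note, a worked example of __total_completion (illustration only):
  # with num_images=4 and two layers each reporting
  # {'current': 50, 'total': 100}, percentage_with_sizes = 2/4 = 0.5 and
  # sent/total = 100/200 = 0.5, so the reported completion is 0.25.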
  @staticmethod
  def __monitor_completion(status_stream, required_message, status_updater, status_completion_key,
                           num_images=0):
    images = {}
    for status in status_stream:
      logger.debug('%s: %s', status_completion_key, status)
      if 'status' in status:
        status_msg = status['status']

        if status_msg == required_message:
          if 'progressDetail' in status and 'id' in status:
            image_id = status['id']
            detail = status['progressDetail']

            if 'current' in detail and 'total' in detail:
              images[image_id] = detail
              with status_updater as status_update:
                status_update[status_completion_key] = \
                  DockerfileBuildContext.__total_completion(images, max(len(images), num_images))

      elif 'errorDetail' in status:
        message = 'Error pushing image.'
        if 'message' in status['errorDetail']:
          message = str(status['errorDetail']['message'])

        raise RuntimeError(message)

  def pull(self):
    image_and_tag_tuple = self._parsed_dockerfile.get_image_and_tag()
    if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
      self._build_logger('Missing FROM command in Dockerfile', build_logs.ERROR)
      raise JobException('Missing FROM command in Dockerfile')

    image_and_tag = ':'.join(image_and_tag_tuple)

    # Login with the specified credentials (if any).
    if self._pull_credentials:
      logger.debug('Logging in with pull credentials: %s@%s',
                   self._pull_credentials['username'], self._pull_credentials['registry'])

      self._build_logger('Pulling base image: %s' % image_and_tag, log_data={
        'phasestep': 'login',
        'username': self._pull_credentials['username'],
        'registry': self._pull_credentials['registry']
      })

      self._build_cl.login(self._pull_credentials['username'], self._pull_credentials['password'],
                           registry=self._pull_credentials['registry'], reauth=True)
    else:
      self._build_logger('Pulling base image: %s' % image_and_tag, log_data={
        'phasestep': 'pull',
        'repo_url': image_and_tag
      })

    pull_status = self._build_cl.pull(image_and_tag, stream=True)

    self.__monitor_completion(pull_status, 'Downloading', self._status, 'pull_completion')

  def build(self, reservation_extension_method):
    # Start the build itself.
    logger.debug('Starting build.')

    with self._status as status:
      status['total_commands'] = self._num_steps

    logger.debug('Building to tags named: %s', self._tag_names)
    context_path = os.path.join(self._build_dir, self._dockerfile_subdir)

    logger.debug('Final context path: %s exists: %s', context_path,
                 os.path.exists(context_path))

    build_status = self._build_cl.build(path=context_path, stream=True)

    current_step = 0
    built_image = None
    for status in build_status:
      fully_unwrapped = ""
      if isinstance(status, dict):
        keys_to_extract = ['error', 'status', 'stream']
        for key in keys_to_extract:
          if key in status:
            fully_unwrapped = status[key]
            break

        if not fully_unwrapped:
          logger.debug('Status dict did not have any extractable keys and was: %s', status)
      elif isinstance(status, basestring):
        fully_unwrapped = status

      status_str = str(fully_unwrapped.encode('utf-8'))

      # Check for system errors when building.
      # DISABLED: LXC is super flaky, but this is causing build nodes to spasm.
      #if matches_system_error(status_str):
      #  raise WorkerUnhealthyException(status_str)

      logger.debug('Status: %s', status_str)
      step_increment = re.search(r'Step ([0-9]+) :', status_str)
      if step_increment:
        self._build_logger(status_str, build_logs.COMMAND)
        current_step = int(step_increment.group(1))
        logger.debug('Step now: %s/%s', current_step, self._num_steps)
        with self._status as status_update:
          status_update['current_command'] = current_step

        # Tell the queue that we're making progress every time we advance a step
        reservation_extension_method(RESERVATION_TIME)
        continue
      else:
        self._build_logger(status_str)

      complete = re.match(r'Successfully built ([a-z0-9]+)$', status_str)
      if complete:
        built_image = complete.group(1)
        logger.debug('Final image ID is: %s', built_image)
        continue

    # Get the image count
    if not built_image:
      return

    return built_image

  def push(self, built_image):
    # Login to the registry
    host = re.match(r'([a-z0-9.:]+)/.+/.+$', self._repo)
    if not host:
      raise RuntimeError('Invalid repo name: %s' % self._repo)

    for protocol in ['https', 'http']:
      registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
      logger.debug('Attempting login to registry: %s', registry_endpoint)

      try:
        self._push_cl.login('$token', self._push_token, registry=registry_endpoint)
        break
      except APIError:
        pass  # Probably the wrong protocol

    for tag in self._tag_names:
      logger.debug('Tagging image %s as %s:%s', built_image, self._repo, tag)
      self._push_cl.tag(built_image, self._repo, tag)

    history = self._push_cl.history(built_image)
    num_images = len(history)

    logger.debug('Pushing to repo %s', self._repo)
    resp = self._push_cl.push(self._repo, stream=True)
    self.__monitor_completion(resp, 'Pushing', self._status, 'push_completion', num_images)

  def __cleanup_containers(self):
    # First clean up any containers that might be holding the images
    for running in self._build_cl.containers(quiet=True):
      logger.debug('Killing container: %s', running['Id'])
      self._build_cl.kill(running['Id'])

    # Next, remove all of the containers (which should all now be killed)
    for container in self._build_cl.containers(all=True, quiet=True):
      logger.debug('Removing container: %s', container['Id'])
      self._build_cl.remove_container(container['Id'])

  def __cleanup_images(self):
    """ Remove tags on internal nodes, and remove images older than the expiration time. """
    ids_to_images, ids_to_children = self.__compute_image_graph()

    # Untag all internal nodes, which are usually the base images
    for internal_id in ids_to_children.keys():
      internal = ids_to_images[internal_id]
      if internal['RepoTags'] != NO_TAGS:
        for tag_name in internal['RepoTags']:
          self._build_cl.remove_image(tag_name)

    # Make sure all of the leaves have gibberish tags, and remove those older than our expiration
    leaves = set(ids_to_images.keys()) - set(ids_to_children.keys())
    now = datetime.now()
    for leaf_id in leaves:
      leaf = ids_to_images[leaf_id]

      created = datetime.fromtimestamp(leaf['Created'])
      expiration = created + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS)
      if expiration > now:
        # Assign a new tag as a uuid to preserve this image
        new_tag = str(uuid4())
        self._build_cl.tag(leaf['Id'], new_tag)

      # Remove all of the existing tags
      if leaf['RepoTags'] != NO_TAGS:
        for tag_name in leaf['RepoTags']:
          self._build_cl.remove_image(tag_name)

  def __prune_cache(self):
    """ Remove the oldest leaf image until the cache size is the desired size. """
    logger.debug('Pruning cache to size(gb): %s', self._cache_size_gb)
    while self.__compute_cache_size_gb() > self._cache_size_gb:
      logger.debug('Locating the oldest image in the cache to prune.')

      # Find the oldest tagged image and remove it
      oldest_creation_time = datetime.max
      oldest_image = None
      for image in self._build_cl.images():
        created = datetime.fromtimestamp(image['Created'])
        if created < oldest_creation_time:
          oldest_creation_time = created
          oldest_image = image

      logger.debug('Removing oldest image from cache: %s', oldest_image['Id'])

      # Remove all tags on the oldest image
      if oldest_image['RepoTags'] == NO_TAGS:
        # Remove the image id directly since there are no tags
        self._build_cl.remove_image(oldest_image['Id'])
      else:
        # Remove all tags
        for tag_name in oldest_image['RepoTags']:
          self._build_cl.remove_image(tag_name)

  def __compute_cache_size_gb(self):
    all_images = self._build_cl.images(all=True)
    size_in_bytes = sum([img['Size'] for img in all_images])
    size_in_gb = float(size_in_bytes)/1024/1024/1024
    logger.debug('Computed cache size(gb) of: %s', size_in_gb)
    return size_in_gb

  def __compute_image_graph(self):
    all_images = self._build_cl.images(all=True)

    ids_to_images = {}
    ids_to_children = defaultdict(list)
    for image in all_images:
      if image['ParentId'] != '':
        ids_to_children[image['ParentId']].append(image)

      ids_to_images[image['Id']] = image

    return (ids_to_images, ids_to_children)
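
# Editor's note, a small example of the graph __compute_image_graph returns
# (illustration only): for a chain base -> mid -> leaf, ids_to_children maps
# base and mid to their child image dicts, so in __cleanup_images the leaves
# are set(all ids) - set(parent ids) == {leaf}.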
class DockerfileBuildWorker(Worker):
  def __init__(self, cache_size_gb, *vargs, **kwargs):
    super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)
    self._mime_processors = {
      'application/zip': DockerfileBuildWorker.__prepare_zip,
      'application/x-zip-compressed': DockerfileBuildWorker.__prepare_zip,
      'text/plain': DockerfileBuildWorker.__prepare_dockerfile,
      'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
      'application/x-tar': DockerfileBuildWorker.__prepare_tarball,
      'application/gzip': DockerfileBuildWorker.__prepare_tarball,
      'application/x-gzip': DockerfileBuildWorker.__prepare_tarball,
    }

    self._timeout = Event()
    self._cache_size_gb = cache_size_gb

  @staticmethod
  def __prepare_zip(request_file):
    build_dir = mkdtemp(prefix='docker-build-')

    # Save the zip file to temp somewhere
    with TemporaryFile() as zip_file:
      zip_file.write(request_file.content)
      to_extract = ZipFile(zip_file)
      to_extract.extractall(build_dir)

    return build_dir

  @staticmethod
  def __prepare_dockerfile(request_file):
    build_dir = mkdtemp(prefix='docker-build-')
    dockerfile_path = os.path.join(build_dir, "Dockerfile")
    with open(dockerfile_path, 'w') as dockerfile:
      dockerfile.write(request_file.content)

    return build_dir

  @staticmethod
  def __prepare_tarball(request_file):
    build_dir = mkdtemp(prefix='docker-build-')

    # Extract the tarball stream safely into the build directory
    with tarfile.open(mode='r|*', fileobj=request_file.raw) as tar_stream:
      safe_extractall(tar_stream, build_dir)

    return build_dir

  def watchdog(self):
    logger.debug('Running build watchdog code.')
    try:
      docker_cl = Client(**build_docker_args())

      # Iterate the running containers and kill ones that have been running more than 20 minutes
      for container in docker_cl.containers():
        start_time = datetime.fromtimestamp(container['Created'])
        running_time = datetime.now() - start_time
        if running_time > timedelta(minutes=TIMEOUT_PERIOD_MINUTES):
          logger.warning('Container has been running too long: %s with command: %s',
                         container['Id'], container['Command'])
          docker_cl.kill(container['Id'])
          self._timeout.set()

    except ConnectionError as exc:
      logger.exception('Watchdog exception')
      raise WorkerUnhealthyException(exc.message)

  def process_queue_item(self, job_details):
    self._timeout.clear()

    # Make sure we have more information for debugging problems
    sentry.client.user_context(job_details)

    repository_build = model.get_repository_build(job_details['build_uuid'])

    pull_credentials = job_details.get('pull_credentials', None)

    job_config = json.loads(repository_build.job_config)

    resource_url = user_files.get_file_url(repository_build.resource_key, requires_cors=False)
    tag_names = job_config['docker_tags']
    build_subdir = job_config['build_subdir']

    # TODO remove the top branch when there are no more jobs with a repository config
    if 'repository' in job_config:
      repo = job_config['repository']
    else:
      repo = '%s/%s/%s' % (job_config['registry'],
                           repository_build.repository.namespace_user.username,
                           repository_build.repository.name)

    access_token = repository_build.access_token.code

    log_appender = partial(build_logs.append_log_message, repository_build.uuid)

    # Lookup and save the version of docker being used.
    try:
      docker_cl = Client(**build_docker_args())
      docker_version = docker_cl.version().get('Version', '')
    except ConnectionError as exc:
      logger.exception('Initial connection exception')
      raise WorkerUnhealthyException(exc.message)

    dash = docker_version.find('-')

    # Strip any -tutum or whatever off of the version.
    if dash > 0:
      docker_version = docker_version[:dash]

    log_appender('initializing', build_logs.PHASE, log_data={
      'docker_version': docker_version
    })

    log_appender('Docker version: %s' % docker_version)

    start_msg = ('Starting job with resource url: %s repo: %s' % (resource_url, repo))
    logger.debug(start_msg)

    docker_resource = requests.get(resource_url, stream=True)
    c_type = docker_resource.headers['content-type']

    if ';' in c_type:
      c_type = c_type.split(';')[0]

    filetype_msg = ('Request to build type: %s with repo: %s and tags: %s' %
                    (c_type, repo, tag_names))
    logger.info(filetype_msg)
    log_appender(filetype_msg)

    # Spawn a notification that the build has started.
    event_data = {
      'build_id': repository_build.uuid,
      'build_name': repository_build.display_name,
      'docker_tags': tag_names,
      'trigger_id': repository_build.trigger.uuid,
      'trigger_kind': repository_build.trigger.service.name
    }

    spawn_notification(repository_build.repository, 'build_start', event_data,
                       subpage='build?current=%s' % repository_build.uuid,
                       pathargs=['build', repository_build.uuid])

    # Setup a handler for spawning failure messages.
    def spawn_failure(message, event_data):
      event_data['error_message'] = message
      spawn_notification(repository_build.repository, 'build_failure', event_data,
                         subpage='build?current=%s' % repository_build.uuid,
                         pathargs=['build', repository_build.uuid])

    if c_type not in self._mime_processors:
      log_appender('error', build_logs.PHASE)
      repository_build.phase = BUILD_PHASE.ERROR
      repository_build.save()

      message = 'Unknown mime-type: %s' % c_type
      log_appender(message, build_logs.ERROR)
      spawn_failure(message, event_data)
      raise JobException(message)

    # Try to build the build directory package from the buildpack.
    log_appender('unpacking', build_logs.PHASE)
    repository_build.phase = BUILD_PHASE.UNPACKING
    repository_build.save()

    build_dir = None
    try:
      build_dir = self._mime_processors[c_type](docker_resource)
    except Exception as ex:
      cur_message = ex.message or 'Error while unpacking build package'
      log_appender(cur_message, build_logs.ERROR)
      spawn_failure(cur_message, event_data)
      raise JobException(cur_message)

    # Start the build process.
    try:
      with DockerfileBuildContext(build_dir, build_subdir, repo, tag_names, access_token,
                                  repository_build.uuid, self._cache_size_gb,
                                  pull_credentials) as build_ctxt:
        log_appender('pulling', build_logs.PHASE)
        repository_build.phase = BUILD_PHASE.PULLING
        repository_build.save()
        build_ctxt.pull()

        self.extend_processing(RESERVATION_TIME)

        log_appender('building', build_logs.PHASE)
        repository_build.phase = BUILD_PHASE.BUILDING
        repository_build.save()
        built_image = build_ctxt.build(self.extend_processing)

        if not built_image:
          log_appender('error', build_logs.PHASE)
          repository_build.phase = BUILD_PHASE.ERROR
          repository_build.save()

          message = 'Unable to build dockerfile.'
          if self._timeout.is_set():
            message = 'Build step was terminated after %s minutes.' % TIMEOUT_PERIOD_MINUTES

          log_appender(message, build_logs.ERROR)
          raise JobException(message)

        self.extend_processing(RESERVATION_TIME)

        log_appender('pushing', build_logs.PHASE)
        repository_build.phase = BUILD_PHASE.PUSHING
        repository_build.save()
        build_ctxt.push(built_image)

        log_appender('complete', build_logs.PHASE)
        repository_build.phase = BUILD_PHASE.COMPLETE
        repository_build.save()

        # Spawn a notification that the build has completed.
        spawn_notification(repository_build.repository, 'build_success', event_data,
                           subpage='build?current=%s' % repository_build.uuid,
                           pathargs=['build', repository_build.uuid])

    except WorkerUnhealthyException as exc:
      # Spawn a notification that the build has failed.
      log_appender('Worker has become unhealthy. Will retry shortly.', build_logs.ERROR)
      spawn_failure(exc.message, event_data)

      # Raise the exception to the queue.
      raise exc

    except JobException as exc:
      # Spawn a notification that the build has failed.
      spawn_failure(exc.message, event_data)

      # Raise the exception to the queue.
      raise exc

    except ConnectionError as exc:
      # A connection exception means the worker has become unhealthy (Docker is down)
      # so we re-raise as that exception.
      logger.exception('Build connection exception')
      log_appender('Docker daemon has gone away. Will retry shortly.', build_logs.ERROR)
      raise WorkerUnhealthyException(exc.message)

    except Exception as exc:
      # Spawn a notification that the build has failed.
      spawn_failure(exc.message, event_data)

      # Write the error to the logs.
      sentry.client.captureException()
      log_appender('error', build_logs.PHASE)
      logger.exception('Exception when processing request.')
      repository_build.phase = BUILD_PHASE.ERROR
      repository_build.save()
      log_appender(str(exc), build_logs.ERROR)

      # Raise the exception to the queue.
      raise JobException(str(exc))


if __name__ == "__main__":
  desc = 'Worker daemon to monitor dockerfile build'
  parser = argparse.ArgumentParser(description=desc)
  parser.add_argument('--cachegb', default=20, type=float,
                      help='Maximum cache size in gigabytes.')
  args = parser.parse_args()

  worker = DockerfileBuildWorker(args.cachegb, dockerfile_build_queue,
                                 reservation_seconds=RESERVATION_TIME)
  worker.start(start_status_server_port=8000)

@@ -98,7 +98,7 @@ class Worker(object):
  def extend_processing(self, seconds_from_now):
    with self._current_item_lock:
      if self.current_queue_item is not None:
        WorkQueue.extend_processing(self.current_queue_item, seconds_from_now)
        self._queue.extend_processing(self.current_queue_item, seconds_from_now)

  def run_watchdog(self):
    logger.debug('Running watchdog.')