Merge branch 'master' into star

Jimmy Zelinskie 2015-02-18 17:36:58 -05:00
commit 917dd6b674
229 changed files with 10807 additions and 3003 deletions

View file

@@ -9,14 +9,8 @@ version = 1
 [[container]]
 name = "quay"
-Dockerfile = "Dockerfile.web"
+Dockerfile = "Dockerfile"
 project = "quay"
 tags = ["git:short"]
-
-[[container]]
-name = "builder"
-Dockerfile = "Dockerfile.buildworker"
-project = "builder"
-tags = ["git:short"]
 
 # vim:ft=toml

View file

@@ -1,27 +1,21 @@
 # vim:ft=dockerfile
-FROM phusion/baseimage:0.9.15
+FROM phusion/baseimage:0.9.16
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV HOME /root
 
 # Install the dependencies.
-RUN apt-get update # 10SEP2014
+RUN apt-get update # 29JAN2015
 
 # New ubuntu packages should be added as their own apt-get install lines below the existing install commands
-RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev
+RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev
 
 # Build the python dependencies
 ADD requirements.txt requirements.txt
 RUN virtualenv --distribute venv
 RUN venv/bin/pip install -r requirements.txt
-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev
-
-### End common section ###
-
-# Remove SSH.
-RUN rm -rf /etc/service/sshd /etc/my_init.d/00_regen_ssh_host_keys.sh
 
 # Install the binary dependencies
 ADD binary_dependencies binary_dependencies
 RUN gdebi --n binary_dependencies/*.deb
@@ -34,6 +28,10 @@ RUN npm install -g grunt-cli
 ADD grunt grunt
 RUN cd grunt && npm install
 
+RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev
+RUN apt-get autoremove -y
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
 # Add all of the files!
 ADD . .
@@ -58,14 +56,9 @@ ADD conf/init/buildmanager /etc/service/buildmanager
 RUN mkdir static/fonts static/ldn
 RUN venv/bin/python -m external_libraries
 
-RUN apt-get autoremove -y
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
 # Run the tests
 RUN TEST=true venv/bin/python -m unittest discover
 
-VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp"]
+VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]
 
-EXPOSE 443 80
+EXPOSE 443 8443 80
 
-CMD ["/sbin/my_init"]

View file

@@ -1,42 +0,0 @@
-# vim:ft=dockerfile
-FROM phusion/baseimage:0.9.15
-
-ENV DEBIAN_FRONTEND noninteractive
-ENV HOME /root
-
-# Install the dependencies.
-RUN apt-get update # 20NOV2014
-
-# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
-RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev
-
-# Build the python dependencies
-ADD requirements.txt requirements.txt
-RUN virtualenv --distribute venv
-RUN venv/bin/pip install -r requirements.txt
-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev
-
-### End common section ###
-
-RUN apt-get install -y lxc aufs-tools
-
-RUN usermod -v 100000-200000 -w 100000-200000 root
-
-ADD binary_dependencies/builder binary_dependencies/builder
-RUN gdebi --n binary_dependencies/builder/*.deb
-
-ADD . .
-
-ADD conf/init/svlogd_config /svlogd_config
-ADD conf/init/preplogsdir.sh /etc/my_init.d/
-ADD conf/init/tutumdocker /etc/service/tutumdocker
-ADD conf/init/dockerfilebuild /etc/service/dockerfilebuild
-
-RUN apt-get remove -y --auto-remove nodejs npm git phantomjs
-
-RUN apt-get autoremove -y
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-VOLUME ["/var/lib/docker", "/var/lib/lxc", "/conf/stack", "/var/log"]
-
-CMD ["/sbin/my_init"]

View file

@@ -3,7 +3,7 @@ to build and upload quay to quay:
 ```
 curl -s https://get.docker.io/ubuntu/ | sudo sh
 sudo apt-get update && sudo apt-get install -y git
-git clone https://bitbucket.org/yackob03/quay.git
+git clone https://github.com/coreos-inc/quay.git
 cd quay
 rm Dockerfile
 ln -s Dockerfile.web Dockerfile
@@ -33,7 +33,7 @@ start the quay processes:
 ```
 cd ~
-git clone https://bitbucket.org/yackob03/quayconfig.git
+git clone https://github.com/coreos-inc/quay.git
 sudo docker pull staging.quay.io/quay/quay
 cd ~/gantryd
 sudo venv/bin/python gantry.py ../quayconfig/production/gantry.json update quay
@@ -44,7 +44,7 @@ to build and upload the builder to quay
 ```
 curl -s https://get.docker.io/ubuntu/ | sudo sh
 sudo apt-get update && sudo apt-get install -y git
-git clone git clone https://bitbucket.org/yackob03/quay.git
+git clone git clone https://github.com/coreos-inc/quay.git
 cd quay
 rm Dockerfile
 ln -s Dockerfile.buildworker Dockerfile
@@ -74,7 +74,7 @@ start the worker
 ```
 cd ~
-git clone https://bitbucket.org/yackob03/quayconfig.git
+git clone https://github.com/coreos-inc/quay.git
 sudo docker pull quay.io/quay/builder
 cd ~/gantryd
 sudo venv/bin/python gantry.py ../quayconfig/production/gantry.json update builder

app.py (137 changed lines)
View file

@@ -1,71 +1,55 @@
 import logging
 import os
 import json
-import yaml
 
-from flask import Flask as BaseFlask, Config as BaseConfig, request, Request
+from flask import Flask, Config, request, Request, _request_ctx_stack
 from flask.ext.principal import Principal
-from flask.ext.login import LoginManager
+from flask.ext.login import LoginManager, UserMixin
 from flask.ext.mail import Mail
 
 import features
 
-from avatars.avatars import Avatar
 from storage import Storage
+from avatars.avatars import Avatar
 from data import model
 from data import database
 from data.userfiles import Userfiles
 from data.users import UserAuthentication
-from util.analytics import Analytics
-from util.exceptionlog import Sentry
-from util.queuemetrics import QueueMetrics
-from util.names import urn_generator
-from util.oauth import GoogleOAuthConfig, GithubOAuthConfig
 from data.billing import Billing
 from data.buildlogs import BuildLogs
 from data.archivedlogs import LogArchive
-from data.queue import WorkQueue
 from data.userevent import UserEventsBuilderModule
-from avatars.avatars import Avatar
+from data.queue import WorkQueue
+from util.analytics import Analytics
+from util.exceptionlog import Sentry
+from util.names import urn_generator
+from util.oauth import GoogleOAuthConfig, GithubOAuthConfig
+from util.signing import Signer
+from util.queuemetrics import QueueMetrics
+from util.config.provider import FileConfigProvider, TestConfigProvider
+from util.config.configutil import generate_secret_key
+from util.config.superusermanager import SuperUserManager
+from buildman.jobutil.buildreporter import BuildMetrics
 
-
-class Config(BaseConfig):
-  """ Flask config enhanced with a `from_yamlfile` method """
-
-  def from_yamlfile(self, config_file):
-    with open(config_file) as f:
-      c = yaml.load(f)
-      if not c:
-        logger.debug('Empty YAML config file')
-        return
-
-      if isinstance(c, str):
-        raise Exception('Invalid YAML config file: ' + str(c))
-
-      for key in c.iterkeys():
-        if key.isupper():
-          self[key] = c[key]
-
-
-class Flask(BaseFlask):
-  """ Extends the Flask class to implement our custom Config class. """
-
-  def make_config(self, instance_relative=False):
-    root_path = self.instance_path if instance_relative else self.root_path
-    return Config(root_path, self.default_config)
-
-
+OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/'
 OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml'
 OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py'
 
 OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
 LICENSE_FILENAME = 'conf/stack/license.enc'
 
+CONFIG_PROVIDER = FileConfigProvider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py')
 
 app = Flask(__name__)
 logger = logging.getLogger(__name__)
-profile = logging.getLogger('profile')
 
+# Instantiate the default configuration (for test or for normal operation).
 if 'TEST' in os.environ:
+  CONFIG_PROVIDER = TestConfigProvider()
   from test.testconfig import TestConfig
   logger.debug('Loading test config.')
  app.config.from_object(TestConfig())
@@ -73,20 +57,17 @@ else:
   from config import DefaultConfig
   logger.debug('Loading default config.')
   app.config.from_object(DefaultConfig())
-
-  if os.path.exists(OVERRIDE_CONFIG_PY_FILENAME):
-    logger.debug('Applying config file: %s', OVERRIDE_CONFIG_PY_FILENAME)
-    app.config.from_pyfile(OVERRIDE_CONFIG_PY_FILENAME)
-
-  if os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME):
-    logger.debug('Applying config file: %s', OVERRIDE_CONFIG_YAML_FILENAME)
-    app.config.from_yamlfile(OVERRIDE_CONFIG_YAML_FILENAME)
-
-  environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}'))
-  app.config.update(environ_config)
-
   app.teardown_request(database.close_db_filter)
 
+# Load the override config via the provider.
+CONFIG_PROVIDER.update_app_config(app.config)
+
+# Update any configuration found in the override environment variable.
+OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
+
+environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}'))
+app.config.update(environ_config)
+
 
 class RequestWithId(Request):
   request_gen = staticmethod(urn_generator(['request']))
@@ -98,21 +79,24 @@ class RequestWithId(Request):
 
 @app.before_request
 def _request_start():
-  profile.debug('Starting request: %s', request.path)
+  logger.debug('Starting request: %s', request.path)
 
 
 @app.after_request
 def _request_end(r):
-  profile.debug('Ending request: %s', request.path)
+  logger.debug('Ending request: %s', request.path)
   return r
 
 
 class InjectingFilter(logging.Filter):
   def filter(self, record):
-    record.msg = '[%s] %s' % (request.request_id, record.msg)
+    if _request_ctx_stack.top is not None:
+      record.msg = '[%s] %s' % (request.request_id, record.msg)
     return True
 
-profile.addFilter(InjectingFilter())
+# Add the request id filter to all handlers of the root logger
+for handler in logging.getLogger().handlers:
+  handler.addFilter(InjectingFilter())
 
 app.request_class = RequestWithId
@@ -130,16 +114,20 @@ analytics = Analytics(app)
 billing = Billing(app)
 sentry = Sentry(app)
 build_logs = BuildLogs(app)
-queue_metrics = QueueMetrics(app)
 authentication = UserAuthentication(app)
 userevents = UserEventsBuilderModule(app)
+superusers = SuperUserManager(app)
+signer = Signer(app, OVERRIDE_CONFIG_DIRECTORY)
+queue_metrics = QueueMetrics(app)
+build_metrics = BuildMetrics(app)
 
-github_login = GithubOAuthConfig(app, 'GITHUB_LOGIN_CONFIG')
-github_trigger = GithubOAuthConfig(app, 'GITHUB_TRIGGER_CONFIG')
-google_login = GoogleOAuthConfig(app, 'GOOGLE_LOGIN_CONFIG')
-oauth_apps = [github_login, github_trigger, google_login]
-
 tf = app.config['DB_TRANSACTION_FACTORY']
 
+github_login = GithubOAuthConfig(app.config, 'GITHUB_LOGIN_CONFIG')
+github_trigger = GithubOAuthConfig(app.config, 'GITHUB_TRIGGER_CONFIG')
+google_login = GoogleOAuthConfig(app.config, 'GOOGLE_LOGIN_CONFIG')
+oauth_apps = [github_login, github_trigger, google_login]
+
 image_diff_queue = WorkQueue(app.config['DIFFS_QUEUE_NAME'], tf)
 dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf,
                                    reporter=queue_metrics.report)
@@ -149,5 +137,34 @@ database.configure(app.config)
 model.config.app_config = app.config
 model.config.store = storage
 
+
+# Generate a secret key if none was specified.
+if app.config['SECRET_KEY'] is None:
+  logger.debug('Generating in-memory secret key')
+  app.config['SECRET_KEY'] = generate_secret_key()
+
+
+@login_manager.user_loader
+def load_user(user_uuid):
+  logger.debug('User loader loading deferred user with uuid: %s' % user_uuid)
+  return LoginWrappedDBUser(user_uuid)
+
+
+class LoginWrappedDBUser(UserMixin):
+  def __init__(self, user_uuid, db_user=None):
+    self._uuid = user_uuid
+    self._db_user = db_user
+
+  def db_user(self):
+    if not self._db_user:
+      self._db_user = model.get_user_by_uuid(self._uuid)
+    return self._db_user
+
+  def is_authenticated(self):
+    return self.db_user() is not None
+
+  def is_active(self):
+    return self.db_user().verified
+
+  def get_id(self):
+    return unicode(self._uuid)
+
+
 def get_app_url():
   return '%s://%s' % (app.config['PREFERRED_URL_SCHEME'], app.config['SERVER_HOSTNAME'])
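
For context, a minimal sketch of how flask-login consumes the pieces above (the login view and `authenticate` helper are hypothetical, not part of this commit): `get_id()` stores the user's uuid in the session, and the registered `user_loader` rebuilds the wrapper on each request, deferring the database lookup until `db_user()` is first called.

```python
from flask.ext.login import login_user

def handle_login(username, password):
  db_user = authenticate(username, password)  # hypothetical helper returning a DB row or None
  if db_user is None:
    return False

  # get_id() puts the uuid into the session; later requests hit load_user(),
  # which defers the actual database fetch until db_user() is called.
  login_user(LoginWrappedDBUser(db_user.uuid, db_user))
  return True
```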

View file

@@ -11,5 +11,5 @@ import registry
 
 if __name__ == '__main__':
-  logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
+  logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False)
   application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')

View file

@@ -7,7 +7,7 @@ from functools import partial
 
 import scopes
 
 from data import model
-from app import app
+from app import app, superusers
 
 logger = logging.getLogger(__name__)
 
@@ -89,10 +89,14 @@ class QuayDeferredPermissionUser(Identity):
     if not self._permissions_loaded:
       logger.debug('Loading user permissions after deferring.')
       user_object = model.get_user_by_uuid(self.id)
+      if user_object is None:
+        return super(QuayDeferredPermissionUser, self).can(permission)
+
+      if user_object is None:
+        return super(QuayDeferredPermissionUser, self).can(permission)
 
       # Add the superuser need, if applicable.
-      if (user_object.username is not None and
-          user_object.username in app.config.get('SUPER_USERS', [])):
+      if superusers.is_superuser(user_object.username):
         self.provides.add(_SuperUserNeed())
 
       # Add the user specific permissions, only for non-oauth permission

Binary file not shown.

build.sh (new executable file, 2 changed lines)
View file

@@ -0,0 +1,2 @@
+docker build -t quay.io/quay/quay:`git rev-parse --short HEAD` .
+echo quay.io/quay/quay:`git rev-parse --short HEAD`

buildman/asyncutil.py (new file, 27 changed lines)
View file

@@ -0,0 +1,27 @@
+from functools import partial, wraps
+from trollius import get_event_loop
+
+
+class AsyncWrapper(object):
+  """ Wrapper class which will transform a syncronous library to one that can be used with
+      trollius coroutines.
+  """
+  def __init__(self, delegate, loop=None, executor=None):
+    self._loop = loop if loop is not None else get_event_loop()
+    self._delegate = delegate
+    self._executor = executor
+
+  def __getattr__(self, attrib):
+    delegate_attr = getattr(self._delegate, attrib)
+
+    if not callable(delegate_attr):
+      return delegate_attr
+
+    def wrapper(*args, **kwargs):
+      """ Wraps the delegate_attr with primitives that will transform sync calls to ones shelled
+          out to a thread pool.
+      """
+      callable_delegate_attr = partial(delegate_attr, *args, **kwargs)
+      return self._loop.run_in_executor(self._executor, callable_delegate_attr)
+
+    return wrapper

View file

@@ -6,6 +6,7 @@ import time
 
 from app import app, userfiles as user_files, build_logs, dockerfile_build_queue
 from buildman.manager.enterprise import EnterpriseManager
+from buildman.manager.ephemeral import EphemeralBuilderManager
 from buildman.server import BuilderServer
 
 from trollius import SSLContext
@@ -13,14 +14,22 @@ from trollius import SSLContext
 logger = logging.getLogger(__name__)
 
 BUILD_MANAGERS = {
-  'enterprise': EnterpriseManager
+  'enterprise': EnterpriseManager,
+  'ephemeral': EphemeralBuilderManager,
 }
 
 EXTERNALLY_MANAGED = 'external'
 
+DEFAULT_WEBSOCKET_PORT = 8787
+DEFAULT_CONTROLLER_PORT = 8686
+
+LOG_FORMAT = "%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s"
+
 def run_build_manager():
   if not features.BUILD_SUPPORT:
     logger.debug('Building is disabled. Please enable the feature flag')
+    while True:
+      time.sleep(1000)
     return
 
   build_manager_config = app.config.get('BUILD_MANAGER')
@@ -39,18 +48,32 @@ def run_build_manager():
   if manager_klass is None:
     return
 
+  manager_hostname = os.environ.get('BUILDMAN_HOSTNAME',
+                                    app.config.get('BUILDMAN_HOSTNAME',
+                                                   app.config['SERVER_HOSTNAME']))
+  websocket_port = int(os.environ.get('BUILDMAN_WEBSOCKET_PORT',
+                                      app.config.get('BUILDMAN_WEBSOCKET_PORT',
+                                                     DEFAULT_WEBSOCKET_PORT)))
+  controller_port = int(os.environ.get('BUILDMAN_CONTROLLER_PORT',
+                                       app.config.get('BUILDMAN_CONTROLLER_PORT',
+                                                      DEFAULT_CONTROLLER_PORT)))
+
+  logger.debug('Will pass buildman hostname %s to builders for websocket connection',
+               manager_hostname)
+
   logger.debug('Starting build manager with lifecycle "%s"', build_manager_config[0])
   ssl_context = None
   if os.environ.get('SSL_CONFIG'):
     logger.debug('Loading SSL cert and key')
     ssl_context = SSLContext()
-    ssl_context.load_cert_chain(os.environ.get('SSL_CONFIG') + '/ssl.cert',
-                                os.environ.get('SSL_CONFIG') + '/ssl.key')
+    ssl_context.load_cert_chain(os.path.join(os.environ.get('SSL_CONFIG'), 'ssl.cert'),
+                                os.path.join(os.environ.get('SSL_CONFIG'), 'ssl.key'))
 
   server = BuilderServer(app.config['SERVER_HOSTNAME'], dockerfile_build_queue, build_logs,
-                         user_files, manager_klass)
-  server.run('0.0.0.0', ssl=ssl_context)
+                         user_files, manager_klass, build_manager_config[1], manager_hostname)
+  server.run('0.0.0.0', websocket_port, controller_port, ssl=ssl_context)
 
 if __name__ == '__main__':
-  logging.basicConfig(level=logging.DEBUG)
+  logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
+  logging.getLogger('peewee').setLevel(logging.WARN)
   run_build_manager()

View file

@@ -8,3 +8,6 @@ class BaseComponent(ApplicationSession):
     self.parent_manager = None
     self.build_logs = None
     self.user_files = None
+
+  def kind(self):
+    raise NotImplementedError

View file

@@ -6,11 +6,10 @@ import trollius
 import re
 
 from autobahn.wamp.exception import ApplicationError
-from trollius.coroutines import From
 
 from buildman.server import BuildJobResult
 from buildman.component.basecomponent import BaseComponent
-from buildman.jobutil.buildpack import BuildPackage, BuildPackageException
+from buildman.jobutil.buildjob import BuildJobLoadException
 from buildman.jobutil.buildstatus import StatusHandler
 from buildman.jobutil.workererror import WorkerError
@@ -20,7 +19,7 @@ HEARTBEAT_DELTA = datetime.timedelta(seconds=30)
 HEARTBEAT_TIMEOUT = 10
 INITIAL_TIMEOUT = 25
 
-SUPPORTED_WORKER_VERSIONS = ['0.1-beta']
+SUPPORTED_WORKER_VERSIONS = ['0.3']
 
 logger = logging.getLogger(__name__)
@@ -39,84 +38,71 @@ class BuildComponent(BaseComponent):
     self.builder_realm = realm
 
     self.parent_manager = None
-    self.server_hostname = None
+    self.registry_hostname = None
 
     self._component_status = ComponentStatus.JOINING
     self._last_heartbeat = None
     self._current_job = None
     self._build_status = None
     self._image_info = None
+    self._worker_version = None
 
     BaseComponent.__init__(self, config, **kwargs)
 
+  def kind(self):
+    return 'builder'
+
   def onConnect(self):
     self.join(self.builder_realm)
 
   def onJoin(self, details):
     logger.debug('Registering methods and listeners for component %s', self.builder_realm)
-    yield From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
-    yield From(self.register(self._ping, u'io.quay.buildworker.ping'))
-    yield From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
-    yield From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
-
-    self._set_status(ComponentStatus.WAITING)
+    yield trollius.From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
+    yield trollius.From(self.register(self._determine_cache_tag,
+                                      u'io.quay.buildworker.determinecachetag'))
+    yield trollius.From(self.register(self._ping, u'io.quay.buildworker.ping'))
+
+    yield trollius.From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
+    yield trollius.From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
+
+    yield trollius.From(self._set_status(ComponentStatus.WAITING))
 
   def is_ready(self):
     """ Determines whether a build component is ready to begin a build. """
     return self._component_status == ComponentStatus.RUNNING
 
+  @trollius.coroutine
   def start_build(self, build_job):
     """ Starts a build. """
+    logger.debug('Starting build for component %s (worker version: %s)',
+                 self.builder_realm, self._worker_version)
+
     self._current_job = build_job
-    self._build_status = StatusHandler(self.build_logs, build_job.repo_build())
+    self._build_status = StatusHandler(self.build_logs, build_job.repo_build.uuid)
     self._image_info = {}
 
-    self._set_status(ComponentStatus.BUILDING)
+    yield trollius.From(self._set_status(ComponentStatus.BUILDING))
 
-    # Retrieve the job's buildpack.
-    buildpack_url = self.user_files.get_file_url(build_job.repo_build().resource_key,
-                                                 requires_cors=False)
-
-    logger.debug('Retreiving build package: %s', buildpack_url)
-    buildpack = None
-    try:
-      buildpack = BuildPackage.from_url(buildpack_url)
-    except BuildPackageException as bpe:
-      self._build_failure('Could not retrieve build package', bpe)
-      return
-
-    # Extract the base image information from the Dockerfile.
-    parsed_dockerfile = None
-    logger.debug('Parsing dockerfile')
-
-    build_config = build_job.build_config()
-    try:
-      parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir'))
-    except BuildPackageException as bpe:
-      self._build_failure('Could not find Dockerfile in build package', bpe)
-      return
-
-    image_and_tag_tuple = parsed_dockerfile.get_image_and_tag()
-    if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
-      self._build_failure('Missing FROM line in Dockerfile')
-      return
-
-    base_image_information = {
-      'repository': image_and_tag_tuple[0],
-      'tag': image_and_tag_tuple[1]
-    }
-
-    # Extract the number of steps from the Dockerfile.
-    with self._build_status as status_dict:
-      status_dict['total_commands'] = len(parsed_dockerfile.commands)
+    # Send the notification that the build has started.
+    build_job.send_notification('build_start')
+
+    # Parse the build configuration.
+    try:
+      build_config = build_job.build_config
+    except BuildJobLoadException as irbe:
+      self._build_failure('Could not load build job information', irbe)
+
+    base_image_information = {}
+    buildpack_url = self.user_files.get_file_url(build_job.repo_build.resource_key,
+                                                 requires_cors=False)
 
     # Add the pull robot information, if any.
-    if build_config.get('pull_credentials') is not None:
-      base_image_information['username'] = build_config['pull_credentials'].get('username', '')
-      base_image_information['password'] = build_config['pull_credentials'].get('password', '')
+    if build_job.pull_credentials:
+      base_image_information['username'] = build_job.pull_credentials.get('username', '')
+      base_image_information['password'] = build_job.pull_credentials.get('password', '')
 
     # Retrieve the repository's fully qualified name.
-    repo = build_job.repo_build().repository
+    repo = build_job.repo_build.repository
     repository_name = repo.namespace_user.username + '/' + repo.name
 
     # Parse the build queue item into build arguments.
@@ -128,29 +114,26 @@ class BuildComponent(BaseComponent):
     #   push_token: The token to use to push the built image.
     #   tag_names: The name(s) of the tag(s) for the newly built image.
     #   base_image: The image name and credentials to use to conduct the base image pull.
-    #     repository: The repository to pull.
-    #     tag: The tag to pull.
+    #     repository: The repository to pull (DEPRECATED 0.2)
+    #     tag: The tag to pull (DEPRECATED in 0.2)
     #     username: The username for pulling the base image (if any).
     #     password: The password for pulling the base image (if any).
     build_arguments = {
       'build_package': buildpack_url,
       'sub_directory': build_config.get('build_subdir', ''),
       'repository': repository_name,
-      'registry': self.server_hostname,
-      'pull_token': build_job.repo_build().access_token.code,
-      'push_token': build_job.repo_build().access_token.code,
+      'registry': self.registry_hostname,
+      'pull_token': build_job.repo_build.access_token.code,
+      'push_token': build_job.repo_build.access_token.code,
       'tag_names': build_config.get('docker_tags', ['latest']),
-      'base_image': base_image_information,
-      'cached_tag': build_job.determine_cached_tag() or ''
+      'base_image': base_image_information
     }
 
     # Invoke the build.
     logger.debug('Invoking build: %s', self.builder_realm)
     logger.debug('With Arguments: %s', build_arguments)
 
-    return (self
-            .call("io.quay.builder.build", **build_arguments)
-            .add_done_callback(self._build_complete))
+    self.call("io.quay.builder.build", **build_arguments).add_done_callback(self._build_complete)
 
   @staticmethod
   def _total_completion(statuses, total_images):
@@ -237,18 +220,28 @@ class BuildComponent(BaseComponent):
     elif phase == BUILD_PHASE.BUILDING:
       self._build_status.append_log(current_status_string)
 
+  @trollius.coroutine
+  def _determine_cache_tag(self, command_comments, base_image_name, base_image_tag, base_image_id):
+    with self._build_status as status_dict:
+      status_dict['total_commands'] = len(command_comments) + 1
+
+    logger.debug('Checking cache on realm %s. Base image: %s:%s (%s)', self.builder_realm,
+                 base_image_name, base_image_tag, base_image_id)
+
+    tag_found = self._current_job.determine_cached_tag(base_image_id, command_comments)
+    raise trollius.Return(tag_found or '')
+
   def _build_failure(self, error_message, exception=None):
     """ Handles and logs a failed build. """
     self._build_status.set_error(error_message, {
-      'internal_error': exception.message if exception else None
+      'internal_error': str(exception) if exception else None
     })
 
-    build_id = self._current_job.repo_build().uuid
+    build_id = self._current_job.repo_build.uuid
     logger.warning('Build %s failed with message: %s', build_id, error_message)
 
     # Mark that the build has finished (in an error state)
-    self._build_finished(BuildJobResult.ERROR)
+    trollius.async(self._build_finished(BuildJobResult.ERROR))
 
   def _build_complete(self, result):
     """ Wraps up a completed build. Handles any errors and calls self._build_finished. """
@@ -256,60 +249,78 @@ class BuildComponent(BaseComponent):
       # Retrieve the result. This will raise an ApplicationError on any error that occurred.
       result.result()
       self._build_status.set_phase(BUILD_PHASE.COMPLETE)
-      self._build_finished(BuildJobResult.COMPLETE)
+      trollius.async(self._build_finished(BuildJobResult.COMPLETE))
+
+      # Send the notification that the build has completed successfully.
+      self._current_job.send_notification('build_success')
     except ApplicationError as aex:
       worker_error = WorkerError(aex.error, aex.kwargs.get('base_error'))
 
       # Write the error to the log.
       self._build_status.set_error(worker_error.public_message(), worker_error.extra_data(),
-                                   internal_error=worker_error.is_internal_error())
+                                   internal_error=worker_error.is_internal_error(),
+                                   requeued=self._current_job.has_retries_remaining())
+
+      # Send the notification that the build has failed.
+      self._current_job.send_notification('build_failure',
+                                          error_message=worker_error.public_message())
 
       # Mark the build as completed.
       if worker_error.is_internal_error():
-        self._build_finished(BuildJobResult.INCOMPLETE)
+        trollius.async(self._build_finished(BuildJobResult.INCOMPLETE))
       else:
-        self._build_finished(BuildJobResult.ERROR)
+        trollius.async(self._build_finished(BuildJobResult.ERROR))
 
+  @trollius.coroutine
   def _build_finished(self, job_status):
     """ Alerts the parent that a build has completed and sets the status back to running. """
-    self.parent_manager.job_completed(self._current_job, job_status, self)
+    yield trollius.From(self.parent_manager.job_completed(self._current_job, job_status, self))
     self._current_job = None
 
     # Set the component back to a running state.
-    self._set_status(ComponentStatus.RUNNING)
+    yield trollius.From(self._set_status(ComponentStatus.RUNNING))
 
   @staticmethod
   def _ping():
     """ Ping pong. """
     return 'pong'
 
+  @trollius.coroutine
   def _on_ready(self, token, version):
-    if not version in SUPPORTED_WORKER_VERSIONS:
-      logger.warning('Build component (token "%s") is running an out-of-date version: %s', version)
-      return False
-
-    if self._component_status != 'waiting':
+    self._worker_version = version
+
+    if not version in SUPPORTED_WORKER_VERSIONS:
+      logger.warning('Build component (token "%s") is running an out-of-date version: %s', token,
+                     version)
+      raise trollius.Return(False)
+
+    if self._component_status != ComponentStatus.WAITING:
       logger.warning('Build component (token "%s") is already connected', self.expected_token)
-      return False
+      raise trollius.Return(False)
 
     if token != self.expected_token:
-      logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token, token)
-      return False
+      logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token,
+                     token)
+      raise trollius.Return(False)
 
-    self._set_status(ComponentStatus.RUNNING)
+    yield trollius.From(self._set_status(ComponentStatus.RUNNING))
 
     # Start the heartbeat check and updating loop.
     loop = trollius.get_event_loop()
     loop.create_task(self._heartbeat())
     logger.debug('Build worker %s is connected and ready', self.builder_realm)
-    return True
+    raise trollius.Return(True)
 
+  @trollius.coroutine
   def _set_status(self, phase):
+    if phase == ComponentStatus.RUNNING:
+      yield trollius.From(self.parent_manager.build_component_ready(self))
+
     self._component_status = phase
 
   def _on_heartbeat(self):
     """ Updates the last known heartbeat. """
-    self._last_heartbeat = datetime.datetime.now()
+    self._last_heartbeat = datetime.datetime.utcnow()
 
   @trollius.coroutine
   def _heartbeat(self):
@@ -317,13 +328,13 @@ class BuildComponent(BaseComponent):
         and updating the heartbeat in the build status dictionary (if applicable). This allows
         the build system to catch crashes from either end.
     """
-    yield From(trollius.sleep(INITIAL_TIMEOUT))
+    yield trollius.From(trollius.sleep(INITIAL_TIMEOUT))
 
     while True:
       # If the component is no longer running or actively building, nothing more to do.
       if (self._component_status != ComponentStatus.RUNNING and
           self._component_status != ComponentStatus.BUILDING):
-        return
+        raise trollius.Return()
 
       # If there is an active build, write the heartbeat to its status.
      build_status = self._build_status
@@ -331,35 +342,37 @@ class BuildComponent(BaseComponent):
         with build_status as status_dict:
           status_dict['heartbeat'] = int(time.time())
 
      # Mark the build item.
       current_job = self._current_job
       if current_job is not None:
-        self.parent_manager.job_heartbeat(current_job)
+        yield trollius.From(self.parent_manager.job_heartbeat(current_job))
 
       # Check the heartbeat from the worker.
       logger.debug('Checking heartbeat on realm %s', self.builder_realm)
-      if self._last_heartbeat and self._last_heartbeat < datetime.datetime.now() - HEARTBEAT_DELTA:
-        self._timeout()
-        return
+      if (self._last_heartbeat and
+          self._last_heartbeat < datetime.datetime.utcnow() - HEARTBEAT_DELTA):
+        yield trollius.From(self._timeout())
+        raise trollius.Return()
 
-      yield From(trollius.sleep(HEARTBEAT_TIMEOUT))
+      yield trollius.From(trollius.sleep(HEARTBEAT_TIMEOUT))
 
+  @trollius.coroutine
   def _timeout(self):
-    self._set_status(ComponentStatus.TIMED_OUT)
-    logger.warning('Build component with realm %s has timed out', self.builder_realm)
-    self._dispose(timed_out=True)
+    if self._component_status == ComponentStatus.TIMED_OUT:
+      raise trollius.Return()
+
+    yield trollius.From(self._set_status(ComponentStatus.TIMED_OUT))
+    logger.warning('Build component with realm %s has timed out', self.builder_realm)
 
-  def _dispose(self, timed_out=False):
     # If we still have a running job, then it has not completed and we need to tell the parent
     # manager.
     if self._current_job is not None:
-      if timed_out:
-        self._build_status.set_error('Build worker timed out', internal_error=True)
+      self._build_status.set_error('Build worker timed out', internal_error=True,
+                                   requeued=self._current_job.has_retries_remaining())
 
       self.parent_manager.job_completed(self._current_job, BuildJobResult.INCOMPLETE, self)
       self._build_status = None
       self._current_job = None
 
     # Unregister the current component so that it cannot be invoked again.
-    self.parent_manager.build_component_disposed(self, timed_out)
+    self.parent_manager.build_component_disposed(self, True)

buildman/enums.py (new file, 12 changed lines)
View file

@@ -0,0 +1,12 @@
+class BuildJobResult(object):
+  """ Build job result enum """
+  INCOMPLETE = 'incomplete'
+  COMPLETE = 'complete'
+  ERROR = 'error'
+
+
+class BuildServerStatus(object):
+  """ Build server status enum """
+  STARTING = 'starting'
+  RUNNING = 'running'
+  SHUTDOWN = 'shutting_down'

View file

@@ -1,6 +1,13 @@
-from data import model
-
 import json
+import logging
+
+from cachetools import lru_cache
+from endpoints.notificationhelper import spawn_notification
+from data import model
+from util.imagetree import ImageTree
+
+logger = logging.getLogger(__name__)
 
 class BuildJobLoadException(Exception):
   """ Exception raised if a build job could not be instantiated for some reason. """
@@ -9,52 +16,123 @@ class BuildJobLoadException(Exception):
 class BuildJob(object):
   """ Represents a single in-progress build job. """
   def __init__(self, job_item):
-    self._job_item = job_item
+    self.job_item = job_item
 
     try:
-      self._job_details = json.loads(job_item.body)
+      self.job_details = json.loads(job_item.body)
     except ValueError:
       raise BuildJobLoadException(
-        'Could not parse build queue item config with ID %s' % self._job_details['build_uuid']
+        'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
       )
 
+  def has_retries_remaining(self):
+    return self.job_item.retries_remaining > 0
+
+  def send_notification(self, kind, error_message=None):
+    tags = self.build_config.get('docker_tags', ['latest'])
+    event_data = {
+      'build_id': self.repo_build.uuid,
+      'build_name': self.repo_build.display_name,
+      'docker_tags': tags,
+      'trigger_id': self.repo_build.trigger.uuid,
+      'trigger_kind': self.repo_build.trigger.service.name
+    }
+
+    if error_message is not None:
+      event_data['error_message'] = error_message
+
+    spawn_notification(self.repo_build.repository, kind, event_data,
+                       subpage='build?current=%s' % self.repo_build.uuid,
+                       pathargs=['build', self.repo_build.uuid])
+
+  @lru_cache(maxsize=1)
+  def _load_repo_build(self):
     try:
-      self._repo_build = model.get_repository_build(self._job_details['namespace'],
-                                                    self._job_details['repository'],
-                                                    self._job_details['build_uuid'])
+      return model.get_repository_build(self.job_details['build_uuid'])
     except model.InvalidRepositoryBuildException:
       raise BuildJobLoadException(
-        'Could not load repository build with ID %s' % self._job_details['build_uuid'])
+        'Could not load repository build with ID %s' % self.job_details['build_uuid'])
 
+  @property
+  def repo_build(self):
+    return self._load_repo_build()
+
+  @property
+  def pull_credentials(self):
+    """ Returns the pull credentials for this job, or None if none. """
+    return self.job_details.get('pull_credentials')
+
+  @property
+  def build_config(self):
     try:
-      self._build_config = json.loads(self._repo_build.job_config)
+      return json.loads(self.repo_build.job_config)
     except ValueError:
       raise BuildJobLoadException(
-        'Could not parse repository build job config with ID %s' % self._job_details['build_uuid']
+        'Could not parse repository build job config with ID %s' % self.job_details['build_uuid']
       )
-  def determine_cached_tag(self):
+  def determine_cached_tag(self, base_image_id=None, cache_comments=None):
     """ Returns the tag to pull to prime the cache or None if none. """
-    # TODO(jschorr): Change this to use the more complicated caching rules, once we have caching
-    # be a pull of things besides the constructed tags.
-    tags = self._build_config.get('docker_tags', ['latest'])
-    existing_tags = model.list_repository_tags(self._job_details['namespace'],
-                                               self._job_details['repository'])
+    cached_tag = None
+    if base_image_id and cache_comments:
+      cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments)
+
+    if not cached_tag:
+      cached_tag = self._determine_cached_tag_by_tag()
+
+    logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments)
+
+    return cached_tag
+
+  def _determine_cached_tag_by_comments(self, base_image_id, cache_commands):
+    """ Determines the tag to use for priming the cache for this build job, by matching commands
+        starting at the given base_image_id. This mimics the Docker cache checking, so it should,
+        in theory, provide "perfect" caching.
+    """
+    # Lookup the base image in the repository. If it doesn't exist, nothing more to do.
+    repo_build = self.repo_build
+    repo_namespace = repo_build.repository.namespace_user.username
+    repo_name = repo_build.repository.name
+
+    base_image = model.get_image(repo_build.repository, base_image_id)
+    if base_image is None:
+      return None
+
+    # Build an in-memory tree of the full heirarchy of images in the repository.
+    all_images = model.get_repository_images(repo_namespace, repo_name)
+    all_tags = model.list_repository_tags(repo_namespace, repo_name)
+    tree = ImageTree(all_images, all_tags, base_filter=base_image.id)
+
+    # Find a path in the tree, starting at the base image, that matches the cache comments
+    # or some subset thereof.
+    def checker(step, image):
+      if step >= len(cache_commands):
+        return False
+
+      full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step]
+      logger.debug('Checking step #%s: %s, %s == %s', step, image.id,
+                   image.storage.command, full_command)
+
+      return image.storage.command == full_command
+
+    path = tree.find_longest_path(base_image.id, checker)
+    if not path:
+      return None
+
+    # Find any tag associated with the last image in the path.
+    return tree.tag_containing_image(path[-1])
+
+  def _determine_cached_tag_by_tag(self):
+    """ Determines the cached tag by looking for one of the tags being built, and seeing if it
+        exists in the repository. This is a fallback for when no comment information is available.
+    """
+    tags = self.build_config.get('docker_tags', ['latest'])
+    repository = self.repo_build.repository
+    existing_tags = model.list_repository_tags(repository.namespace_user.username, repository.name)
 
     cached_tags = set(tags) & set([tag.name for tag in existing_tags])
     if cached_tags:
       return list(cached_tags)[0]
 
     return None
-
-  def job_item(self):
-    """ Returns the job's queue item. """
-    return self._job_item
-
-  def repo_build(self):
-    """ Returns the repository build DB row for the job. """
-    return self._repo_build
-
-  def build_config(self):
-    """ Returns the parsed repository build config for the job. """
-    return self._build_config
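
A compact sketch of the comment-matching idea above, using plain dicts in place of the `ImageTree`/model layer (the function and data names are illustrative, not from this commit); the command format string mirrors the `full_command` check in `_determine_cached_tag_by_comments`:

```python
def longest_cached_image(children, cache_commands, base_id):
  """ children: dict mapping image id -> list of (child_id, stored_docker_command).
      Walks from base_id, matching one cache command per step, and returns the
      deepest image id reached; a tag on that image can prime the Docker cache. """
  current = base_id
  for command in cache_commands:
    expected = '["/bin/sh", "-c", "%s"]' % command
    match = next((child for child, stored in children.get(current, [])
                  if stored == expected), None)
    if match is None:
      break  # the cache diverges here; everything below must be rebuilt
    current = match
  return current
```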

View file

@@ -1,88 +0,0 @@
-import tarfile
-import requests
-import os
-
-from tempfile import TemporaryFile, mkdtemp
-from zipfile import ZipFile
-from util.dockerfileparse import parse_dockerfile
-from util.safetar import safe_extractall
-
-
-class BuildPackageException(Exception):
-  """ Exception raised when retrieving or parsing a build package. """
-  pass
-
-
-class BuildPackage(object):
-  """ Helper class for easy reading and updating of a Dockerfile build pack. """
-
-  def __init__(self, requests_file):
-    self._mime_processors = {
-      'application/zip': BuildPackage._prepare_zip,
-      'application/x-zip-compressed': BuildPackage._prepare_zip,
-      'text/plain': BuildPackage._prepare_dockerfile,
-      'application/octet-stream': BuildPackage._prepare_dockerfile,
-      'application/x-tar': BuildPackage._prepare_tarball,
-      'application/gzip': BuildPackage._prepare_tarball,
-      'application/x-gzip': BuildPackage._prepare_tarball,
-    }
-
-    c_type = requests_file.headers['content-type']
-    c_type = c_type.split(';')[0] if ';' in c_type else c_type
-
-    if c_type not in self._mime_processors:
-      raise BuildPackageException('Unknown build package mime type: %s' % c_type)
-
-    self._package_directory = None
-    try:
-      self._package_directory = self._mime_processors[c_type](requests_file)
-    except Exception as ex:
-      raise BuildPackageException(ex.message)
-
-  def parse_dockerfile(self, subdirectory):
-    dockerfile_path = os.path.join(self._package_directory, subdirectory, 'Dockerfile')
-    if not os.path.exists(dockerfile_path):
-      if subdirectory:
-        message = 'Build package did not contain a Dockerfile at sub directory %s.' % subdirectory
-      else:
-        message = 'Build package did not contain a Dockerfile at the root directory.'
-
-      raise BuildPackageException(message)
-
-    with open(dockerfile_path, 'r') as dockerfileobj:
-      return parse_dockerfile(dockerfileobj.read())
-
-  @staticmethod
-  def from_url(url):
-    buildpack_resource = requests.get(url, stream=True)
-    return BuildPackage(buildpack_resource)
-
-  @staticmethod
-  def _prepare_zip(request_file):
-    build_dir = mkdtemp(prefix='docker-build-')
-
-    # Save the zip file to temp somewhere
-    with TemporaryFile() as zip_file:
-      zip_file.write(request_file.content)
-      to_extract = ZipFile(zip_file)
-      to_extract.extractall(build_dir)
-
-    return build_dir
-
-  @staticmethod
-  def _prepare_dockerfile(request_file):
-    build_dir = mkdtemp(prefix='docker-build-')
-    dockerfile_path = os.path.join(build_dir, "Dockerfile")
-    with open(dockerfile_path, 'w') as dockerfile:
-      dockerfile.write(request_file.content)
-
-    return build_dir
-
-  @staticmethod
-  def _prepare_tarball(request_file):
-    build_dir = mkdtemp(prefix='docker-build-')
-
-    # Save the zip file to temp somewhere
-    with tarfile.open(mode='r|*', fileobj=request_file.raw) as tar_stream:
-      safe_extractall(tar_stream, build_dir)
-
-    return build_dir

View file

@@ -0,0 +1,72 @@
+from trollius import From
+
+from buildman.enums import BuildJobResult
+from util.cloudwatch import get_queue
+
+
+class BuildReporter(object):
+  """
+  Base class for reporting build statuses to a metrics service.
+  """
+  def report_completion_status(self, status):
+    """
+    Method to invoke the recording of build's completion status to a metric service.
+    """
+    raise NotImplementedError
+
+
+class NullReporter(BuildReporter):
+  """
+  The /dev/null of BuildReporters.
+  """
+  def report_completion_status(self, *args):
+    pass
+
+
+class CloudWatchBuildReporter(BuildReporter):
+  """
+  Implements a BuildReporter for Amazon's CloudWatch.
+  """
+  def __init__(self, queue, namespace_name, completed_name, failed_name, incompleted_name):
+    self._queue = queue
+    self._namespace_name = namespace_name
+    self._completed_name = completed_name
+    self._failed_name = failed_name
+    self._incompleted_name = incompleted_name
+
+  def _send_to_queue(self, *args, **kwargs):
+    self._queue.put((args, kwargs))
+
+  def report_completion_status(self, status):
+    if status == BuildJobResult.COMPLETE:
+      status_name = self._completed_name
+    elif status == BuildJobResult.ERROR:
+      status_name = self._failed_name
+    elif status == BuildJobResult.INCOMPLETE:
+      status_name = self._incompleted_name
+    else:
+      return
+
+    self._send_to_queue(self._namespace_name, status_name, 1, unit='Count')
+
+
+class BuildMetrics(object):
+  """
+  BuildMetrics initializes a reporter for recording the status of build completions.
+  """
+  def __init__(self, app=None):
+    self._app = app
+    self._reporter = NullReporter()
+    if app is not None:
+      reporter_type = app.config.get('BUILD_METRICS_TYPE', 'Null')
+      if reporter_type == 'CloudWatch':
+        namespace = app.config['BUILD_METRICS_NAMESPACE']
+        completed_name = app.config['BUILD_METRICS_COMPLETED_NAME']
+        failed_name = app.config['BUILD_METRICS_FAILED_NAME']
+        incompleted_name = app.config['BUILD_METRICS_INCOMPLETED_NAME']
+        request_queue = get_queue(app)
+        self._reporter = CloudWatchBuildReporter(request_queue, namespace, completed_name,
+                                                 failed_name, incompleted_name)
+
+  def __getattr__(self, name):
+    return getattr(self._reporter, name, None)
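
The reporter is chosen purely from app config. A sketch of the stack-config entries that would enable the CloudWatch reporter (the key names come from the code above; the values are illustrative):

```python
# conf/stack/config.py (illustrative values)
BUILD_METRICS_TYPE = 'CloudWatch'
BUILD_METRICS_NAMESPACE = 'quay/builds'
BUILD_METRICS_COMPLETED_NAME = 'BuildsCompleted'
BUILD_METRICS_FAILED_NAME = 'BuildsFailed'
BUILD_METRICS_INCOMPLETED_NAME = 'BuildsIncompleted'
```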

View file

@@ -1,16 +1,18 @@
 from data.database import BUILD_PHASE
+from data import model
+
+import datetime
 
 
 class StatusHandler(object):
   """ Context wrapper for writing status to build logs. """
 
-  def __init__(self, build_logs, repository_build):
+  def __init__(self, build_logs, repository_build_uuid):
     self._current_phase = None
-    self._repository_build = repository_build
-    self._uuid = repository_build.uuid
+    self._current_command = None
+    self._uuid = repository_build_uuid
     self._build_logs = build_logs
 
     self._status = {
-      'total_commands': None,
+      'total_commands': 0,
       'current_command': None,
       'push_completion': 0.0,
       'pull_completion': 0.0,
@@ -20,16 +22,25 @@ class StatusHandler(object):
     self.__exit__(None, None, None)
 
   def _append_log_message(self, log_message, log_type=None, log_data=None):
+    log_data = log_data or {}
+    log_data['datetime'] = str(datetime.datetime.now())
     self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data)
 
   def append_log(self, log_message, extra_data=None):
+    if log_message is None:
+      return
+
     self._append_log_message(log_message, log_data=extra_data)
 
   def set_command(self, command, extra_data=None):
+    if self._current_command == command:
+      return
+
+    self._current_command = command
     self._append_log_message(command, self._build_logs.COMMAND, extra_data)
 
-  def set_error(self, error_message, extra_data=None, internal_error=False):
-    self.set_phase(BUILD_PHASE.INTERNAL_ERROR if internal_error else BUILD_PHASE.ERROR)
+  def set_error(self, error_message, extra_data=None, internal_error=False, requeued=False):
+    self.set_phase(BUILD_PHASE.INTERNAL_ERROR if internal_error and requeued else BUILD_PHASE.ERROR)
 
     extra_data = extra_data or {}
     extra_data['internal_error'] = internal_error
@@ -41,8 +52,12 @@ class StatusHandler(object):
     self._current_phase = phase
     self._append_log_message(phase, self._build_logs.PHASE, extra_data)
-    self._repository_build.phase = phase
-    self._repository_build.save()
+
+    # Update the repository build with the new phase
+    repo_build = model.get_repository_build(self._uuid)
+    repo_build.phase = phase
+    repo_build.save()
+
     return True
 
   def __enter__(self):

View file

@@ -19,13 +19,19 @@ class WorkerError(object):
         'is_internal': True
       },
 
+      'io.quay.builder.dockerfileissue': {
+        'message': 'Could not find or parse Dockerfile',
+        'show_base_error': True
+      },
+
       'io.quay.builder.cannotpullbaseimage': {
         'message': 'Could not pull base image',
         'show_base_error': True
       },
 
       'io.quay.builder.internalerror': {
-        'message': 'An internal error occurred while building. Please submit a ticket.'
+        'message': 'An internal error occurred while building. Please submit a ticket.',
+        'is_internal': True
       },
 
       'io.quay.builder.buildrunerror': {
@@ -57,6 +63,11 @@ class WorkerError(object):
       'io.quay.builder.missingorinvalidargument': {
         'message': 'Missing required arguments for builder',
         'is_internal': True
+      },
+
+      'io.quay.builder.cachelookupissue': {
+        'message': 'Error checking for a cached tag',
+        'is_internal': True
       }
     }

View file

@@ -1,12 +1,17 @@
+from trollius import coroutine
+
+
 class BaseManager(object):
   """ Base for all worker managers. """
   def __init__(self, register_component, unregister_component, job_heartbeat_callback,
-               job_complete_callback):
+               job_complete_callback, manager_hostname, heartbeat_period_sec):
     self.register_component = register_component
     self.unregister_component = unregister_component
     self.job_heartbeat_callback = job_heartbeat_callback
     self.job_complete_callback = job_complete_callback
+    self.manager_hostname = manager_hostname
+    self.heartbeat_period_sec = heartbeat_period_sec
 
+  @coroutine
   def job_heartbeat(self, build_job):
     """ Method invoked to tell the manager that a job is still running. This method will be called
         every few minutes. """
@ -25,25 +30,41 @@ class BaseManager(object):
""" """
raise NotImplementedError raise NotImplementedError
def schedule(self, build_job, loop): @coroutine
def schedule(self, build_job):
""" Schedules a queue item to be built. Returns True if the item was properly scheduled """ Schedules a queue item to be built. Returns True if the item was properly scheduled
and False if all workers are busy. and False if all workers are busy.
""" """
raise NotImplementedError raise NotImplementedError
def initialize(self): def initialize(self, manager_config):
""" Runs any initialization code for the manager. Called once the server is in a ready state. """ Runs any initialization code for the manager. Called once the server is in a ready state.
""" """
raise NotImplementedError raise NotImplementedError
@coroutine
def build_component_ready(self, build_component):
""" Method invoked whenever a build component announces itself as ready.
"""
raise NotImplementedError
def build_component_disposed(self, build_component, timed_out): def build_component_disposed(self, build_component, timed_out):
""" Method invoked whenever a build component has been disposed. The timed_out boolean indicates """ Method invoked whenever a build component has been disposed. The timed_out boolean indicates
whether the component's heartbeat timed out. whether the component's heartbeat timed out.
""" """
raise NotImplementedError raise NotImplementedError
@coroutine
def job_completed(self, build_job, job_status, build_component): def job_completed(self, build_job, job_status, build_component):
""" Method invoked once a job_item has completed, in some manner. The job_status will be """ Method invoked once a job_item has completed, in some manner. The job_status will be
one of: incomplete, error, complete. If incomplete, the job should be requeued. one of: incomplete, error, complete. Implementations of this method should call
self.job_complete_callback with a status of Incomplete if they wish for the job to be
automatically requeued.
"""
raise NotImplementedError
def num_workers(self):
""" Returns the number of active build workers currently registered. This includes those
that are currently busy and awaiting more work.
""" """
raise NotImplementedError raise NotImplementedError
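A minimal sketch of a manager implementing this coroutine-based interface, with a single local worker slot; the class and its internals are illustrative, only the BaseManager hooks come from the diff:

    from trollius import coroutine, Return

    class SingleSlotManager(BaseManager):
      """ Illustrative manager that accepts one job at a time. """
      def initialize(self, manager_config):
        self._busy = False

      @coroutine
      def schedule(self, build_job):
        if self._busy:
          raise Return(False)  # all workers busy; the server requeues the item
        self._busy = True
        # ... hand build_job to a real worker here ...
        raise Return(True)

      @coroutine
      def job_completed(self, build_job, job_status, build_component):
        self._busy = False
        self.job_complete_callback(build_job, job_status)

      def num_workers(self):
        return 1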


@@ -5,7 +5,7 @@ from buildman.component.basecomponent import BaseComponent
 from buildman.component.buildcomponent import BuildComponent
 from buildman.manager.basemanager import BaseManager

-from trollius.coroutines import From
+from trollius import From, Return, coroutine

 REGISTRATION_REALM = 'registration'

 logger = logging.getLogger(__name__)
@@ -25,13 +25,21 @@ class DynamicRegistrationComponent(BaseComponent):
     logger.debug('Registering new build component+worker with realm %s', realm)
     return realm

+  def kind(self):
+    return 'registration'
+

 class EnterpriseManager(BaseManager):
   """ Build manager implementation for the Enterprise Registry. """
-  build_components = []
-  shutting_down = False

-  def initialize(self):
+  def __init__(self, *args, **kwargs):
+    self.ready_components = set()
+    self.all_components = set()
+    self.shutting_down = False
+
+    super(EnterpriseManager, self).__init__(*args, **kwargs)
+
+  def initialize(self, manager_config):
     # Add a component which is used by build workers for dynamic registration. Unlike
     # production, build workers in enterprise are long-lived and register dynamically.
     self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent)
@@ -45,28 +53,39 @@ class EnterpriseManager(BaseManager):
     """ Adds a new build component for an Enterprise Registry. """
     # Generate a new unique realm ID for the build worker.
     realm = str(uuid.uuid4())
-    component = self.register_component(realm, BuildComponent, token="")
-    self.build_components.append(component)
+    new_component = self.register_component(realm, BuildComponent, token="")
+    self.all_components.add(new_component)
     return realm

-  def schedule(self, build_job, loop):
+  @coroutine
+  def schedule(self, build_job):
     """ Schedules a build for an Enterprise Registry. """
-    if self.shutting_down:
-      return False
-    for component in self.build_components:
-      if component.is_ready():
-        loop.call_soon(component.start_build, build_job)
-        return True
-    return False
+    if self.shutting_down or not self.ready_components:
+      raise Return(False)
+
+    component = self.ready_components.pop()
+    yield From(component.start_build(build_job))
+    raise Return(True)
+
+  @coroutine
+  def build_component_ready(self, build_component):
+    self.ready_components.add(build_component)

   def shutdown(self):
     self.shutting_down = True

+  @coroutine
   def job_completed(self, build_job, job_status, build_component):
     self.job_complete_callback(build_job, job_status)

   def build_component_disposed(self, build_component, timed_out):
-    self.build_components.remove(build_component)
+    self.all_components.remove(build_component)
+    if build_component in self.ready_components:
+      self.ready_components.remove(build_component)
+
+    self.unregister_component(build_component)
+
+  def num_workers(self):
+    return len(self.all_components)


@@ -0,0 +1,326 @@
import logging
import etcd
import uuid
import calendar
import os.path
import json
from datetime import datetime, timedelta
from trollius import From, coroutine, Return, async
from concurrent.futures import ThreadPoolExecutor
from urllib3.exceptions import ReadTimeoutError, ProtocolError
from buildman.manager.basemanager import BaseManager
from buildman.manager.executor import PopenExecutor, EC2Executor
from buildman.component.buildcomponent import BuildComponent
from buildman.jobutil.buildjob import BuildJob
from buildman.asyncutil import AsyncWrapper
from util.morecollections import AttrDict
logger = logging.getLogger(__name__)
ETCD_DISABLE_TIMEOUT = 0
class EtcdAction(object):
GET = 'get'
SET = 'set'
EXPIRE = 'expire'
UPDATE = 'update'
DELETE = 'delete'
CREATE = 'create'
COMPARE_AND_SWAP = 'compareAndSwap'
COMPARE_AND_DELETE = 'compareAndDelete'
class EphemeralBuilderManager(BaseManager):
""" Build manager implementation for the Enterprise Registry. """
_executors = {
'popen': PopenExecutor,
'ec2': EC2Executor,
}
_etcd_client_klass = etcd.Client
def __init__(self, *args, **kwargs):
self._shutting_down = False
self._manager_config = None
self._async_thread_executor = None
self._etcd_client = None
self._etcd_realm_prefix = None
self._etcd_builder_prefix = None
self._component_to_job = {}
self._job_uuid_to_component = {}
self._component_to_builder = {}
self._executor = None
# Map of etcd keys being watched to the tasks watching them
self._watch_tasks = {}
super(EphemeralBuilderManager, self).__init__(*args, **kwargs)
def _watch_etcd(self, etcd_key, change_callback, recursive=True):
watch_task_key = (etcd_key, recursive)
def callback_wrapper(changed_key_future):
if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
self._watch_etcd(etcd_key, change_callback)
if changed_key_future.cancelled():
# Due to lack of interest, tomorrow has been cancelled
return
try:
etcd_result = changed_key_future.result()
except (ReadTimeoutError, ProtocolError):
return
change_callback(etcd_result)
if not self._shutting_down:
watch_future = self._etcd_client.watch(etcd_key, recursive=recursive,
timeout=ETCD_DISABLE_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)
logger.debug('Scheduling watch of key: %s%s', etcd_key, '/*' if recursive else '')
self._watch_tasks[watch_task_key] = async(watch_future)
def _handle_builder_expiration(self, etcd_result):
if etcd_result.action == EtcdAction.EXPIRE:
# Handle the expiration
logger.debug('Builder expired, clean up the old build node')
job_metadata = json.loads(etcd_result._prev_node.value)
if 'builder_id' in job_metadata:
logger.info('Terminating expired build node.')
async(self._executor.stop_builder(job_metadata['builder_id']))
def _handle_realm_change(self, etcd_result):
if etcd_result.action == EtcdAction.CREATE:
# We must listen on the realm created by ourselves or another worker
realm_spec = json.loads(etcd_result.value)
self._register_realm(realm_spec)
elif etcd_result.action == EtcdAction.DELETE or etcd_result.action == EtcdAction.EXPIRE:
# We must stop listening for new connections on the specified realm, if we did not get the
# connection
realm_spec = json.loads(etcd_result._prev_node.value)
build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
component = self._job_uuid_to_component.pop(build_job.job_details['build_uuid'], None)
if component is not None:
# We were not the manager which the worker connected to, remove the bookkeeping for it
logger.debug('Unregistering unused component on realm: %s', realm_spec['realm'])
del self._component_to_job[component]
del self._component_to_builder[component]
self.unregister_component(component)
else:
logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)
def _register_realm(self, realm_spec):
logger.debug('Registering realm with manager: %s', realm_spec['realm'])
component = self.register_component(realm_spec['realm'], BuildComponent,
token=realm_spec['token'])
build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
self._component_to_job[component] = build_job
self._component_to_builder[component] = realm_spec['builder_id']
self._job_uuid_to_component[build_job.job_details['build_uuid']] = component
@coroutine
def _register_existing_realms(self):
try:
all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))
for realm in all_realms.children:
if not realm.dir:
self._register_realm(json.loads(realm.value))
except KeyError:
# no realms have been registered yet
pass
def initialize(self, manager_config):
logger.debug('Calling initialize')
self._manager_config = manager_config
executor_klass = self._executors.get(manager_config.get('EXECUTOR', ''), PopenExecutor)
self._executor = executor_klass(manager_config.get('EXECUTOR_CONFIG', {}),
self.manager_hostname)
etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
etcd_port = self._manager_config.get('ETCD_PORT', 2379)
etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)
worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
self._async_thread_executor = ThreadPoolExecutor(worker_threads)
self._etcd_client = AsyncWrapper(self._etcd_client_klass(host=etcd_host, port=etcd_port,
cert=etcd_auth, ca_cert=etcd_ca_cert,
protocol=etcd_protocol),
executor=self._async_thread_executor)
self._etcd_builder_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
self._watch_etcd(self._etcd_builder_prefix, self._handle_builder_expiration)
self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change)
# Load components for all realms currently known to the cluster
async(self._register_existing_realms())
def setup_time(self):
setup_time = self._manager_config.get('MACHINE_SETUP_TIME', 300)
return setup_time
def shutdown(self):
logger.debug('Shutting down worker.')
self._shutting_down = True
for (etcd_key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', etcd_key)
task.cancel()
if self._async_thread_executor is not None:
logger.debug('Shutting down thread pool executor.')
self._async_thread_executor.shutdown()
@coroutine
def schedule(self, build_job):
build_uuid = build_job.job_details['build_uuid']
logger.debug('Calling schedule with job: %s', build_uuid)
# Check if there are worker slots available by checking the number of jobs in etcd
allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
try:
building = yield From(self._etcd_client.read(self._etcd_builder_prefix, recursive=True))
workers_alive = sum(1 for child in building.children if not child.dir)
except KeyError:
workers_alive = 0
logger.debug('Total jobs: %s', workers_alive)
if workers_alive >= allowed_worker_count:
logger.info('Too many workers alive, unable to start new worker. %s >= %s', workers_alive,
allowed_worker_count)
raise Return(False)
job_key = self._etcd_job_key(build_job)
# First try to take a lock for this job, meaning we will be responsible for its lifeline
realm = str(uuid.uuid4())
token = str(uuid.uuid4())
ttl = self.setup_time()
expiration = datetime.utcnow() + timedelta(seconds=ttl)
machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200)
max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration)
payload = {
'expiration': calendar.timegm(expiration.timetuple()),
'max_expiration': calendar.timegm(max_expiration.timetuple()),
}
try:
yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=False, ttl=ttl))
except KeyError:
# The job was already taken by someone else, we are probably a retry
logger.error('Job already exists in etcd, are timeouts misconfigured or is the queue broken?')
raise Return(False)
logger.debug('Starting builder with executor: %s', self._executor)
builder_id = yield From(self._executor.start_builder(realm, token, build_uuid))
# Store the builder in etcd associated with the job id
payload['builder_id'] = builder_id
yield From(self._etcd_client.write(job_key, json.dumps(payload), prevExist=True, ttl=ttl))
# Store the realm spec which will allow any manager to accept this builder when it connects
realm_spec = json.dumps({
'realm': realm,
'token': token,
'builder_id': builder_id,
'job_queue_item': build_job.job_item,
})
try:
yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
ttl=ttl))
except KeyError:
logger.error('Realm already exists in etcd. UUID collision or something is very very wrong.')
raise Return(False)
raise Return(True)
@coroutine
def build_component_ready(self, build_component):
try:
# Clean up the bookkeeping for allowing any manager to take the job
job = self._component_to_job.pop(build_component)
del self._job_uuid_to_component[job.job_details['build_uuid']]
yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))
logger.debug('Sending build %s to newly ready component on realm %s',
job.job_details['build_uuid'], build_component.builder_realm)
yield From(build_component.start_build(job))
except KeyError:
logger.debug('Builder is asking for more work, but work already completed')
def build_component_disposed(self, build_component, timed_out):
logger.debug('Calling build_component_disposed.')
self.unregister_component(build_component)
@coroutine
def job_completed(self, build_job, job_status, build_component):
logger.debug('Calling job_completed with status: %s', job_status)
# Kill the ephemeral builder
yield From(self._executor.stop_builder(self._component_to_builder.pop(build_component)))
# Release the lock in etcd
job_key = self._etcd_job_key(build_job)
yield From(self._etcd_client.delete(job_key))
self.job_complete_callback(build_job, job_status)
@coroutine
def job_heartbeat(self, build_job):
# Extend the deadline in etcd
job_key = self._etcd_job_key(build_job)
build_job_metadata_response = yield From(self._etcd_client.read(job_key))
build_job_metadata = json.loads(build_job_metadata_response.value)
max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
max_expiration_remaining = max_expiration - datetime.utcnow()
max_expiration_sec = max(0, int(max_expiration_remaining.total_seconds()))
ttl = min(self.heartbeat_period_sec * 2, max_expiration_sec)
new_expiration = datetime.utcnow() + timedelta(seconds=ttl)
payload = {
'expiration': calendar.timegm(new_expiration.timetuple()),
'builder_id': build_job_metadata['builder_id'],
'max_expiration': build_job_metadata['max_expiration'],
}
yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=ttl))
self.job_heartbeat_callback(build_job)
def _etcd_job_key(self, build_job):
""" Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_builder_prefix, build_job.job_details['build_uuid'])
def _etcd_realm_key(self, realm):
""" Create a key which is used to track an incoming connection on a realm.
"""
return os.path.join(self._etcd_realm_prefix, realm)
def num_workers(self):
""" Return the number of workers we're managing locally.
"""
return len(self._component_to_builder)
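Liveness of a job is maintained purely through etcd key TTLs: each heartbeat re-writes the job key with a lease of at most twice the heartbeat period, clamped by the machine's absolute deadline, so a manager that misses two consecutive heartbeats loses the key and the expiration watch tears the builder down. A small worked example of that TTL arithmetic (the 45-second deadline is illustrative):

    from datetime import datetime, timedelta

    heartbeat_period_sec = 30
    # Suppose the machine's hard deadline is only 45 seconds away.
    max_expiration = datetime.utcnow() + timedelta(seconds=45)
    remaining = max(0, int((max_expiration - datetime.utcnow()).total_seconds()))
    ttl = min(heartbeat_period_sec * 2, remaining)  # 45, not 60: the hard cap wins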


@@ -0,0 +1,238 @@
import logging
import os
import uuid
import threading
import boto.ec2
import requests
import cachetools
from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop
from functools import partial
from buildman.asyncutil import AsyncWrapper
from container_cloud_config import CloudConfigContext
logger = logging.getLogger(__name__)
ONE_HOUR = 60*60
ENV = Environment(loader=FileSystemLoader('buildman/templates'))
TEMPLATE = ENV.get_template('cloudconfig.yaml')
CloudConfigContext().populate_jinja_environment(ENV)
class ExecutorException(Exception):
""" Exception raised when there is a problem starting or stopping a builder.
"""
pass
class BuilderExecutor(object):
  """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for
      starting and stopping builders.
  """
  def __init__(self, executor_config, manager_hostname):
    self.executor_config = executor_config
    self.manager_hostname = manager_hostname
@coroutine
def start_builder(self, realm, token, build_uuid):
""" Create a builder with the specified config. Returns a unique id which can be used to manage
the builder.
"""
raise NotImplementedError
@coroutine
def stop_builder(self, builder_id):
""" Stop a builder which is currently running.
"""
raise NotImplementedError
def get_manager_websocket_url(self):
return 'ws://{0}:'
def generate_cloud_config(self, realm, token, coreos_channel, manager_hostname,
quay_username=None, quay_password=None):
if quay_username is None:
quay_username = self.executor_config['QUAY_USERNAME']
if quay_password is None:
quay_password = self.executor_config['QUAY_PASSWORD']
return TEMPLATE.render(
realm=realm,
token=token,
quay_username=quay_username,
quay_password=quay_password,
manager_hostname=manager_hostname,
coreos_channel=coreos_channel,
worker_tag=self.executor_config['WORKER_TAG'],
)
class EC2Executor(BuilderExecutor):
""" Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud
providers.
"""
COREOS_STACK_URL = 'http://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_hvm.txt'
def __init__(self, *args, **kwargs):
self._loop = get_event_loop()
super(EC2Executor, self).__init__(*args, **kwargs)
def _get_conn(self):
""" Creates an ec2 connection which can be used to manage instances.
"""
return AsyncWrapper(boto.ec2.connect_to_region(
self.executor_config['EC2_REGION'],
aws_access_key_id=self.executor_config['AWS_ACCESS_KEY'],
aws_secret_access_key=self.executor_config['AWS_SECRET_KEY'],
))
@classmethod
@cachetools.ttl_cache(ttl=ONE_HOUR)
def _get_coreos_ami(cls, ec2_region, coreos_channel):
""" Retrieve the CoreOS AMI id from the canonical listing.
"""
stack_list_string = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).text
stack_amis = dict([stack.split('=') for stack in stack_list_string.split('|')])
return stack_amis[ec2_region]
@coroutine
def start_builder(self, realm, token, build_uuid):
region = self.executor_config['EC2_REGION']
channel = self.executor_config.get('COREOS_CHANNEL', 'stable')
get_ami_callable = partial(self._get_coreos_ami, region, channel)
coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable))
user_data = self.generate_cloud_config(realm, token, channel, self.manager_hostname)
logger.debug('Generated cloud config: %s', user_data)
ec2_conn = self._get_conn()
ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType(
size=32,
volume_type='gp2',
delete_on_termination=True,
)
block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping()
block_devices['/dev/xvda'] = ssd_root_ebs
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'],
groups=self.executor_config['EC2_SECURITY_GROUP_IDS'],
associate_public_ip_address=True,
)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
reservation = yield From(ec2_conn.run_instances(
coreos_ami,
instance_type=self.executor_config['EC2_INSTANCE_TYPE'],
key_name=self.executor_config.get('EC2_KEY_NAME', None),
user_data=user_data,
instance_initiated_shutdown_behavior='terminate',
block_device_map=block_devices,
network_interfaces=interfaces,
))
if not reservation.instances:
raise ExecutorException('Unable to spawn builder instance.')
elif len(reservation.instances) != 1:
raise ExecutorException('EC2 started wrong number of instances!')
launched = AsyncWrapper(reservation.instances[0])
yield From(launched.add_tags({
'Name': 'Quay Ephemeral Builder',
'Realm': realm,
'Token': token,
'BuildUUID': build_uuid,
}))
raise Return(launched.id)
@coroutine
def stop_builder(self, builder_id):
ec2_conn = self._get_conn()
terminated_instances = yield From(ec2_conn.terminate_instances([builder_id]))
if builder_id not in [si.id for si in terminated_instances]:
raise ExecutorException('Unable to terminate instance: %s' % builder_id)
class PopenExecutor(BuilderExecutor):
  """ Implementation of BuilderExecutor which uses Popen to fork a quay-builder process.
  """
  def __init__(self, executor_config, manager_hostname):
    self._jobs = {}

    super(PopenExecutor, self).__init__(executor_config, manager_hostname)
@coroutine
def start_builder(self, realm, token, build_uuid):
# Now start a machine for this job, adding the machine id to the etcd information
logger.debug('Forking process for build')
import subprocess
builder_env = {
'TOKEN': token,
'REALM': realm,
'ENDPOINT': 'ws://localhost:8787',
'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''),
'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''),
'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''),
}
logpipe = LogPipe(logging.INFO)
spawned = subprocess.Popen('/Users/jake/bin/quay-builder', stdout=logpipe, stderr=logpipe,
env=builder_env)
builder_id = str(uuid.uuid4())
self._jobs[builder_id] = (spawned, logpipe)
logger.debug('Builder spawned with id: %s', builder_id)
raise Return(builder_id)
@coroutine
def stop_builder(self, builder_id):
if builder_id not in self._jobs:
raise ExecutorException('Builder id not being tracked by executor.')
logger.debug('Killing builder with id: %s', builder_id)
spawned, logpipe = self._jobs[builder_id]
if spawned.poll() is None:
spawned.kill()
logpipe.close()
class LogPipe(threading.Thread):
""" Adapted from http://codereview.stackexchange.com/a/17959
"""
def __init__(self, level):
"""Setup the object with a logger and a loglevel
and start the thread
"""
threading.Thread.__init__(self)
self.daemon = False
self.level = level
self.fd_read, self.fd_write = os.pipe()
self.pipe_reader = os.fdopen(self.fd_read)
self.start()
def fileno(self):
"""Return the write file descriptor of the pipe
"""
return self.fd_write
def run(self):
"""Run the thread, logging everything.
"""
for line in iter(self.pipe_reader.readline, ''):
logging.log(self.level, line.strip('\n'))
self.pipe_reader.close()
def close(self):
"""Close the write end of the pipe.
"""
os.close(self.fd_write)
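New execution strategies plug in by subclassing BuilderExecutor and adding the class to EphemeralBuilderManager._executors, which initialize() consults through the EXECUTOR config key. A minimal sketch; the 'noop' name and class are hypothetical, not part of the diff:

    from trollius import coroutine, Return

    class NoopExecutor(BuilderExecutor):
      """ Illustrative executor that pretends to start a builder. """
      @coroutine
      def start_builder(self, realm, token, build_uuid):
        raise Return('noop-' + build_uuid)  # id handed back to stop_builder later

      @coroutine
      def stop_builder(self, builder_id):
        pass

    # Hypothetical registration, making EXECUTOR = 'noop' resolvable:
    EphemeralBuilderManager._executors['noop'] = NoopExecutor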


@@ -1,5 +1,6 @@
 import logging
 import trollius
+import json

 from autobahn.asyncio.wamp import RouterFactory, RouterSessionFactory
 from autobahn.asyncio.websocket import WampWebSocketServerFactory
@@ -8,11 +9,15 @@ from autobahn.wamp import types
 from aiowsgi import create_server as create_wsgi_server
 from flask import Flask
 from threading import Event
+from trollius.tasks import Task
 from trollius.coroutines import From
-from datetime import datetime, timedelta
+from datetime import timedelta

+from buildman.enums import BuildJobResult, BuildServerStatus
+from buildman.jobutil.buildstatus import StatusHandler
 from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
-from data.queue import WorkQueue
+from data import database
+from app import app, build_metrics

 logger = logging.getLogger(__name__)
@@ -21,27 +26,21 @@ TIMEOUT_PERIOD_MINUTES = 20
 JOB_TIMEOUT_SECONDS = 300
 MINIMUM_JOB_EXTENSION = timedelta(minutes=2)

-WEBSOCKET_PORT = 8787
-CONTROLLER_PORT = 8686
-
-class BuildJobResult(object):
-  """ Build job result enum """
-  INCOMPLETE = 'incomplete'
-  COMPLETE = 'complete'
-  ERROR = 'error'
+HEARTBEAT_PERIOD_SEC = 30

 class BuilderServer(object):
   """ Server which handles both HTTP and WAMP requests, managing the full state of the build
       controller.
   """
-  def __init__(self, server_hostname, queue, build_logs, user_files, lifecycle_manager_klass):
+  def __init__(self, registry_hostname, queue, build_logs, user_files, lifecycle_manager_klass,
+               lifecycle_manager_config, manager_hostname):
     self._loop = None
-    self._current_status = 'starting'
+    self._current_status = BuildServerStatus.STARTING
     self._current_components = []
     self._job_count = 0

     self._session_factory = RouterSessionFactory(RouterFactory())
-    self._server_hostname = server_hostname
+    self._registry_hostname = registry_hostname
     self._queue = queue
     self._build_logs = build_logs
     self._user_files = user_files
@@ -49,11 +48,14 @@ class BuilderServer(object):
       self._register_component,
       self._unregister_component,
       self._job_heartbeat,
-      self._job_complete
+      self._job_complete,
+      manager_hostname,
+      HEARTBEAT_PERIOD_SEC,
     )
+    self._lifecycle_manager_config = lifecycle_manager_config

     self._shutdown_event = Event()
-    self._current_status = 'running'
+    self._current_status = BuildServerStatus.RUNNING

     self._register_controller()
@@ -63,22 +65,41 @@ class BuilderServer(object):
     @controller_app.route('/status')
     def status():
-      return server._current_status
+      metrics = server._queue.get_metrics(require_transaction=False)
+      (running_count, available_not_running_count, available_count) = metrics
+
+      workers = [component for component in server._current_components
+                 if component.kind() == 'builder']
+
+      data = {
+        'status': server._current_status,
+        'running_local': server._job_count,
+        'running_total': running_count,
+        'workers': len(workers),
+        'job_total': available_count + running_count
+      }
+
+      return json.dumps(data)

     self._controller_app = controller_app

-  def run(self, host, ssl=None):
+  def run(self, host, websocket_port, controller_port, ssl=None):
     logger.debug('Initializing the lifecycle manager')
-    self._lifecycle_manager.initialize()
+    self._lifecycle_manager.initialize(self._lifecycle_manager_config)

     logger.debug('Initializing all members of the event loop')
     loop = trollius.get_event_loop()
-    trollius.Task(self._initialize(loop, host, ssl))

-    logger.debug('Starting server on port %s, with controller on port %s', WEBSOCKET_PORT,
-                 CONTROLLER_PORT)
+    logger.debug('Starting server on port %s, with controller on port %s', websocket_port,
+                 controller_port)
+
+    TASKS = [
+      Task(self._initialize(loop, host, websocket_port, controller_port, ssl)),
+      Task(self._queue_metrics_updater()),
+    ]
+
     try:
-      loop.run_forever()
+      loop.run_until_complete(trollius.wait(TASKS))
     except KeyboardInterrupt:
       pass
     finally:
@@ -86,7 +107,7 @@ class BuilderServer(object):
   def close(self):
     logger.debug('Requested server shutdown')
-    self._current_status = 'shutting_down'
+    self._current_status = BuildServerStatus.SHUTDOWN
     self._lifecycle_manager.shutdown()
     self._shutdown_event.wait()
     logger.debug('Shutting down server')
@@ -102,7 +123,7 @@ class BuilderServer(object):
     component.parent_manager = self._lifecycle_manager
     component.build_logs = self._build_logs
     component.user_files = self._user_files
-    component.server_hostname = self._server_hostname
+    component.registry_hostname = self._registry_hostname

     self._current_components.append(component)
     self._session_factory.add(component)
@@ -116,32 +137,34 @@ class BuilderServer(object):
     self._session_factory.remove(component)

   def _job_heartbeat(self, build_job):
-    WorkQueue.extend_processing(build_job.job_item(), seconds_from_now=JOB_TIMEOUT_SECONDS,
-                                retry_count=1, minimum_extension=MINIMUM_JOB_EXTENSION)
+    self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS,
+                                  minimum_extension=MINIMUM_JOB_EXTENSION)

   def _job_complete(self, build_job, job_status):
     if job_status == BuildJobResult.INCOMPLETE:
-      self._queue.incomplete(build_job.job_item(), restore_retry=True, retry_after=30)
-    elif job_status == BuildJobResult.ERROR:
-      self._queue.incomplete(build_job.job_item(), restore_retry=False)
+      self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
     else:
-      self._queue.complete(build_job.job_item())
+      self._queue.complete(build_job.job_item)

     self._job_count = self._job_count - 1

-    if self._current_status == 'shutting_down' and not self._job_count:
+    if self._current_status == BuildServerStatus.SHUTDOWN and not self._job_count:
       self._shutdown_event.set()

-    # TODO(jschorr): check for work here?
+    build_metrics.report_completion_status(job_status)

   @trollius.coroutine
   def _work_checker(self):
-    while self._current_status == 'running':
-      logger.debug('Checking for more work')
+    while self._current_status == BuildServerStatus.RUNNING:
+      with database.CloseForLongOperation(app.config):
+        yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
+
+      logger.debug('Checking for more work for %d active workers',
+                   self._lifecycle_manager.num_workers())
+
       job_item = self._queue.get(processing_time=self._lifecycle_manager.setup_time())
       if job_item is None:
         logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT)
-        yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
         continue

       try:
@@ -149,20 +172,28 @@ class BuilderServer(object):
       except BuildJobLoadException as irbe:
         logger.exception(irbe)
         self._queue.incomplete(job_item, restore_retry=False)
+        continue

       logger.debug('Build job found. Checking for an available worker.')
-      if self._lifecycle_manager.schedule(build_job, self._loop):
+      scheduled = yield From(self._lifecycle_manager.schedule(build_job))
+      if scheduled:
+        status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid)
+        status_handler.set_phase('build-scheduled')
+
         self._job_count = self._job_count + 1
         logger.debug('Build job scheduled. Running: %s', self._job_count)
       else:
         logger.debug('All workers are busy. Requeuing.')
         self._queue.incomplete(job_item, restore_retry=True, retry_after=0)

-      yield From(trollius.sleep(WORK_CHECK_TIMEOUT))
+  @trollius.coroutine
+  def _queue_metrics_updater(self):
+    while self._current_status == BuildServerStatus.RUNNING:
+      yield From(trollius.sleep(30))
+      self._queue.update_metrics()

   @trollius.coroutine
-  def _initialize(self, loop, host, ssl=None):
+  def _initialize(self, loop, host, websocket_port, controller_port, ssl=None):
     self._loop = loop

     # Create the WAMP server.
@@ -170,8 +201,8 @@ class BuilderServer(object):
     transport_factory.setProtocolOptions(failByDrop=True)

     # Initialize the controller server and the WAMP server
-    create_wsgi_server(self._controller_app, loop=loop, host=host, port=CONTROLLER_PORT, ssl=ssl)
-    yield From(loop.create_server(transport_factory, host, WEBSOCKET_PORT, ssl=ssl))
+    create_wsgi_server(self._controller_app, loop=loop, host=host, port=controller_port, ssl=ssl)
+    yield From(loop.create_server(transport_factory, host, websocket_port, ssl=ssl))

     # Initialize the work queue checker.
     yield From(self._work_checker())
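A sketch of how an entry point might wire up the reworked constructor and run() signature; the hostnames and queue name are illustrative, while 8787 and 8686 are the values the diff removes as hard-coded constants:

    server = BuilderServer('registry.example.com', dockerfile_build_queue, build_logs,
                           user_files, EnterpriseManager,
                           {},                      # lifecycle_manager_config
                           'manager.example.com')   # manager_hostname
    server.run('0.0.0.0', websocket_port=8787, controller_port=8686, ssl=None)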


@@ -0,0 +1,31 @@
#cloud-config
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz Joey's Mac
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 jimmyzelinskie
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNvw8qo9m8np7yQ/Smv/oklM8bo8VyNRZriGYBDuolWDL/mZpYCQnZJXphQo7RFdNABYistikjJlBuuwUohLf2uSq0iKoFa2TgwI43wViWzvuzU4nA02/ITD5BZdmWAFNyIoqeB50Ol4qUgDwLAZ+7Kv7uCi6chcgr9gTi99jY3GHyZjrMiXMHGVGi+FExFuzhVC2drKjbz5q6oRfQeLtNfG4psl5GU3MQU6FkX4fgoCx0r9R48/b7l4+TT7pWblJQiRfeldixu6308vyoTUEHasdkU3/X0OTaGz/h5XqTKnGQc6stvvoED3w+L3QFp0H5Z8sZ9stSsitmCBrmbcKZ jakemoshenko
write_files:
- path: /root/overrides.list
  permission: '0644'
  content: |
    REALM={{ realm }}
    TOKEN={{ token }}
    SERVER=wss://{{ manager_hostname }}

coreos:
  update:
    reboot-strategy: off
    group: {{ coreos_channel }}

  units:
    {{ dockersystemd('quay-builder',
                     'quay.io/coreos/registry-build-worker',
                     quay_username,
                     quay_password,
                     worker_tag,
                     extra_args='--net=host --privileged --env-file /root/overrides.list -v /var/run/docker.sock:/var/run/docker.sock -v /usr/share/ca-certificates:/etc/ssl/certs',
                     exec_stop_post=['/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff"'],
                     flattened=True,
                     restart_policy='no'
                     ) | indent(4) }}
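The template is rendered through the Jinja environment the executor sets up, with dockersystemd() contributed by CloudConfigContext; a minimal sketch of rendering it standalone, with all values illustrative:

    from jinja2 import Environment, FileSystemLoader
    from container_cloud_config import CloudConfigContext

    env = Environment(loader=FileSystemLoader('buildman/templates'))
    CloudConfigContext().populate_jinja_environment(env)  # registers dockersystemd()

    user_data = env.get_template('cloudconfig.yaml').render(
      realm='some-realm',
      token='some-token',
      quay_username='user',
      quay_password='pass',
      manager_hostname='manager.example.com',
      coreos_channel='stable',
      worker_tag='latest',
    )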


@@ -1,7 +1,7 @@
 bind = '0.0.0.0:5000'
 workers = 2
 worker_class = 'gevent'
-timeout = 2000
 daemon = False
-logconfig = 'conf/logging.conf'
+logconfig = 'conf/logging_debug.conf'
 pythonpath = '.'
+preload_app = True


@@ -1,7 +1,6 @@
 bind = 'unix:/tmp/gunicorn_registry.sock'
 workers = 8
 worker_class = 'gevent'
-timeout = 2000
 logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True


@@ -1,6 +1,5 @@
 bind = 'unix:/tmp/gunicorn_verbs.sock'
 workers = 4
-timeout = 2000
 logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True


@@ -1,7 +1,6 @@
 bind = 'unix:/tmp/gunicorn_web.sock'
 workers = 2
 worker_class = 'gevent'
-timeout = 30
 logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True


@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 server {
   listen 80 default_server;
   server_name _;


@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 types_hash_max_size 2048;
 include /usr/local/nginx/conf/mime.types.default;


@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/dockerfilebuild/


@@ -1,6 +0,0 @@
-#! /bin/bash
-
-sv start tutumdocker || exit 1
-
-cd /
-venv/bin/python -m workers.dockerfilebuild


@@ -3,6 +3,6 @@
 echo 'Starting gunicorn'

 cd /
-venv/bin/gunicorn -c conf/gunicorn_registry.py registry:application
+nice -n 10 venv/bin/gunicorn -c conf/gunicorn_registry.py registry:application

 echo 'Gunicorn exited'


@@ -3,6 +3,6 @@
 echo 'Starting gunicorn'

 cd /
-nice -10 venv/bin/gunicorn -c conf/gunicorn_verbs.py verbs:application
+nice -n 10 venv/bin/gunicorn -c conf/gunicorn_verbs.py verbs:application

 echo 'Gunicorn exited'


@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/tutumdocker/


@@ -1,96 +0,0 @@
#!/bin/bash
# First, make sure that cgroups are mounted correctly.
CGROUP=/sys/fs/cgroup
[ -d $CGROUP ] ||
mkdir $CGROUP
mountpoint -q $CGROUP ||
mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
echo "Could not make a tmpfs mount. Did you use -privileged?"
exit 1
}
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
then
mount -t securityfs none /sys/kernel/security || {
echo "Could not mount /sys/kernel/security."
echo "AppArmor detection and -privileged mode might break."
}
fi
# Mount the cgroup hierarchies exactly as they are in the parent system.
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
do
[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
mountpoint -q $CGROUP/$SUBSYS ||
mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
# The two following sections address a bug which manifests itself
# by a cryptic "lxc-start: no ns_cgroup option specified" when
# trying to start containers within a container.
# The bug seems to appear when the cgroup hierarchies are not
# mounted on the exact same directories in the host, and in the
# container.
# Named, control-less cgroups are mounted with "-o name=foo"
# (and appear as such under /proc/<pid>/cgroup) but are usually
# mounted on a directory named "foo" (without the "name=" prefix).
# Systemd and OpenRC (and possibly others) both create such a
# cgroup. To avoid the aforementioned bug, we symlink "foo" to
# "name=foo". This shouldn't have any adverse effect.
echo $SUBSYS | grep -q ^name= && {
NAME=$(echo $SUBSYS | sed s/^name=//)
ln -s $SUBSYS $CGROUP/$NAME
}
# Likewise, on at least one system, it has been reported that
# systemd would mount the CPU and CPU accounting controllers
# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
# but on a directory called "cpu,cpuacct" (note the inversion
# in the order of the groups). This tries to work around it.
[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
done
# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
grep -q :devices: /proc/1/cgroup ||
echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
grep -qw devices /proc/1/cgroup ||
echo "WARNING: it looks like the 'devices' cgroup is not mounted."
# Now, close extraneous file descriptors.
pushd /proc/self/fd >/dev/null
for FD in *
do
case "$FD" in
# Keep stdin/stdout/stderr
[012])
;;
# Nuke everything else
*)
eval exec "$FD>&-"
;;
esac
done
popd >/dev/null
# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start.
rm -rf /var/run/docker.pid
chmod 777 /var/lib/lxc
chmod 777 /var/lib/docker
# If we were given a PORT environment variable, start as a simple daemon;
# otherwise, spawn a shell as well
if [ "$PORT" ]
then
exec docker -d -H 0.0.0.0:$PORT
else
docker -d -D -e lxc 2>&1
fi


@@ -1,5 +1,5 @@
 [loggers]
-keys=root, gunicorn.error, gunicorn.access, application.profiler, boto, werkzeug
+keys=root

 [handlers]
 keys=console
@@ -7,39 +7,9 @@ keys=console
 [formatters]
 keys=generic

-[logger_application.profiler]
-level=DEBUG
-handlers=console
-propagate=0
-qualname=application.profiler
-
 [logger_root]
-level=DEBUG
-handlers=console
-
-[logger_boto]
 level=INFO
 handlers=console
-propagate=0
-qualname=boto
-
-[logger_werkzeug]
-level=DEBUG
-handlers=console
-propagate=0
-qualname=werkzeug
-
-[logger_gunicorn.error]
-level=INFO
-handlers=console
-propagate=1
-qualname=gunicorn.error
-
-[logger_gunicorn.access]
-level=INFO
-handlers=console
-propagate=0
-qualname=gunicorn.access

 [handler_console]
 class=StreamHandler

conf/logging_debug.conf (new file)

@@ -0,0 +1,21 @@
[loggers]
keys=root
[handlers]
keys=console
[formatters]
keys=generic
[logger_root]
level=DEBUG
handlers=console
[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )
[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
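This debug variant is what gunicorn_local.py now points at through its logconfig setting; the same INI file can also be loaded directly with the standard library, as in this sketch:

    import logging.config

    # The root logger then emits DEBUG records to stdout via the 'generic' formatter.
    logging.config.fileConfig('conf/logging_debug.conf')
    logging.getLogger(__name__).debug('debug logging is live')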


@@ -1,14 +1,12 @@
+# vim: ft=nginx
+
 include root-base.conf;

-worker_processes 2;
-user root nogroup;
-daemon off;
-
 http {
   include http-base.conf;
+  include rate-limiting.conf;

   server {
     include server-base.conf;


@@ -1,16 +1,14 @@
+# vim: ft=nginx
+
 include root-base.conf;

-worker_processes 2;
-user root nogroup;
-daemon off;
-
 http {
   include http-base.conf;

   include hosted-http-base.conf;
+  include rate-limiting.conf;

   server {
     include server-base.conf;
@@ -24,4 +22,20 @@ http {
     ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
     ssl_prefer_server_ciphers on;
   }
+
+  server {
+    include proxy-protocol.conf;
+    include proxy-server-base.conf;
+
+    listen 8443 default proxy_protocol;
+
+    ssl on;
+    ssl_certificate ./stack/ssl.cert;
+    ssl_certificate_key ./stack/ssl.key;
+    ssl_session_timeout 5m;
+    ssl_protocols SSLv3 TLSv1;
+    ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
+    ssl_prefer_server_ciphers on;
+  }
 }

conf/proxy-protocol.conf (new file)

@@ -0,0 +1,8 @@
# vim: ft=nginx

set_real_ip_from 0.0.0.0/0;
real_ip_header proxy_protocol;

log_format elb_pp '$proxy_protocol_addr - $remote_user [$time_local] '
                  '"$request" $status $body_bytes_sent '
                  '"$http_referer" "$http_user_agent"';
access_log /var/log/nginx/nginx.access.log elb_pp;


@@ -0,0 +1,87 @@
# vim: ft=nginx

client_body_temp_path /var/log/nginx/client_body 1 2;
server_name _;

keepalive_timeout 5;

if ($args ~ "_escaped_fragment_") {
  rewrite ^ /snapshot$uri;
}

proxy_set_header X-Forwarded-For $proxy_protocol_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_redirect off;

proxy_set_header Transfer-Encoding $http_transfer_encoding;

location / {
  proxy_pass http://web_app_server;

  limit_req zone=webapp burst=25 nodelay;
}

location /realtime {
  proxy_pass http://web_app_server;
  proxy_buffering off;
  proxy_request_buffering off;
}

location /v1/repositories/ {
  proxy_buffering off;
  proxy_request_buffering off;

  proxy_pass http://registry_app_server;
  proxy_temp_path /var/log/nginx/proxy_temp 1 2;

  client_max_body_size 20G;

  limit_req zone=repositories burst=5 nodelay;
}

location /v1/ {
  proxy_buffering off;
  proxy_request_buffering off;

  proxy_pass http://registry_app_server;
  proxy_temp_path /var/log/nginx/proxy_temp 1 2;

  client_max_body_size 20G;
}

location /c1/ {
  proxy_buffering off;
  proxy_request_buffering off;

  proxy_pass http://verbs_app_server;
  proxy_temp_path /var/log/nginx/proxy_temp 1 2;

  limit_req zone=api burst=5 nodelay;
}

location /static/ {
  # checks for static file, if not found proxy to app
  alias /static/;
}

location /v1/_ping {
  add_header Content-Type text/plain;
  add_header X-Docker-Registry-Version 0.6.0;
  add_header X-Docker-Registry-Standalone 0;
  return 200 'true';
}

location ~ ^/b1/controller(/?)(.*) {
  proxy_pass http://build_manager_controller_server/$2;
}

location ~ ^/b1/socket(/?)(.*) {
  proxy_pass http://build_manager_websocket_server/$2;
  proxy_http_version 1.1;
  proxy_set_header Upgrade $http_upgrade;
  proxy_set_header Connection "upgrade";
}

conf/rate-limiting.conf (new file)

@@ -0,0 +1,7 @@
# vim: ft=nginx
limit_req_zone $proxy_protocol_addr zone=webapp:10m rate=25r/s;
limit_req_zone $proxy_protocol_addr zone=repositories:10m rate=1r/s;
limit_req_zone $proxy_protocol_addr zone=api:10m rate=1r/s;
limit_req_status 429;
limit_req_log_level warn;
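Because the zones key on $proxy_protocol_addr, the limits apply per client IP as reported over the ELB proxy protocol. A quick probe of the 429 behavior against a hypothetical deployment (the URL is illustrative; /c1/ is governed by the 'api' zone with burst=5 in proxy-server-base.conf):

    import requests

    for i in range(10):
      resp = requests.get('https://registry.example.com/c1/some/endpoint')
      print(i, resp.status_code)  # expect 429 once the burst of 5 is exhausted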


@@ -1,7 +1,17 @@
+# vim: ft=nginx
+
 pid /tmp/nginx.pid;
 error_log /var/log/nginx/nginx.error.log;

+worker_processes 2;
+worker_priority -10;
+worker_rlimit_nofile 10240;
+
+user root nogroup;
+daemon off;
+
 events {
-  worker_connections 1024;
+  worker_connections 10240;
   accept_mutex off;
 }


@@ -1,3 +1,5 @@
+# vim: ft=nginx
+
 client_body_temp_path /var/log/nginx/client_body 1 2;
 server_name _;

@@ -33,7 +35,6 @@ location /v1/ {
   proxy_request_buffering off;

   proxy_pass http://registry_app_server;
-  proxy_read_timeout 2000;
   proxy_temp_path /var/log/nginx/proxy_temp 1 2;

   client_max_body_size 20G;
@@ -45,7 +46,6 @@ location /c1/ {
   proxy_request_buffering off;

   proxy_pass http://verbs_app_server;
-  proxy_read_timeout 2000;
   proxy_temp_path /var/log/nginx/proxy_temp 1 2;
 }

@@ -63,7 +63,6 @@ location /v1/_ping {

 location ~ ^/b1/controller(/?)(.*) {
   proxy_pass http://build_manager_controller_server/$2;
-  proxy_read_timeout 2000;
 }

 location ~ ^/b1/socket(/?)(.*) {


@@ -36,7 +36,6 @@ def getFrontendVisibleConfig(config_dict):

 class DefaultConfig(object):
   # Flask config
-  SECRET_KEY = 'a36c9d7d-25a9-4d3f-a586-3d2f8dc40a83'
   JSONIFY_PRETTYPRINT_REGULAR = False
   SESSION_COOKIE_SECURE = False
@@ -48,8 +47,9 @@ class DefaultConfig(object):
   AVATAR_KIND = 'local'

-  REGISTRY_TITLE = 'Quay.io'
-  REGISTRY_TITLE_SHORT = 'Quay.io'
+  REGISTRY_TITLE = 'CoreOS Enterprise Registry'
+  REGISTRY_TITLE_SHORT = 'Enterprise Registry'
+
   CONTACT_INFO = [
     'mailto:support@quay.io',
     'irc://chat.freenode.net:6665/quayio',
@@ -132,6 +132,9 @@ class DefaultConfig(object):
   # Super user config. Note: This MUST BE an empty list for the default config.
   SUPER_USERS = []

+  # Feature Flag: Whether super users are supported.
+  FEATURE_SUPER_USERS = True
+
   # Feature Flag: Whether billing is required.
   FEATURE_BILLING = False
@@ -147,9 +150,6 @@ class DefaultConfig(object):
   # Feature flag, whether to enable olark chat
   FEATURE_OLARK_CHAT = False

-  # Feature Flag: Whether super users are supported.
-  FEATURE_SUPER_USERS = False
-
   # Feature Flag: Whether to support GitHub build triggers.
   FEATURE_GITHUB_BUILD = False
@@ -187,3 +187,14 @@ class DefaultConfig(object):

   # For enterprise:
   MAXIMUM_REPOSITORY_USAGE = 20
+
+  # System logs.
+  SYSTEM_LOGS_PATH = "/var/log/"
+  SYSTEM_SERVICE_LOGS_PATH = "/var/log/%s/current"
+  SYSTEM_SERVICES_PATH = "conf/init/"
+
+  # Services that should not be shown in the logs view.
+  SYSTEM_SERVICE_BLACKLIST = []
+
+  # Temporary tag expiration in seconds, this may actually be longer based on GC policy
+  PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60
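Deployments typically layer their own config class or stack file over DefaultConfig; a minimal sketch of exercising the settings added here, with all values illustrative:

    class SiteConfig(DefaultConfig):
      SUPER_USERS = ['admin']               # meaningful now that FEATURE_SUPER_USERS is True
      SYSTEM_SERVICE_BLACKLIST = ['nginx']  # hide a service from the logs view
      PUSH_TEMP_TAG_EXPIRATION_SEC = 30 * 60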


@@ -1,14 +1,15 @@
 import string
 import logging
 import uuid
+import time

 from random import SystemRandom
 from datetime import datetime
-from peewee import (Proxy, MySQLDatabase, SqliteDatabase, PostgresqlDatabase, fn, CharField,
-                    BooleanField, IntegerField, DateTimeField, ForeignKeyField, TextField,
-                    BigIntegerField)
+from peewee import *
 from data.read_slave import ReadSlaveModel
 from sqlalchemy.engine.url import make_url
+from data.read_slave import ReadSlaveModel
 from util.names import urn_generator
@@ -31,6 +32,16 @@ SCHEME_RANDOM_FUNCTION = {
   'postgresql+psycopg2': fn.Random,
 }

+def real_for_update(query):
+  return query.for_update()
+
+def null_for_update(query):
+  return query
+
+SCHEME_SPECIALIZED_FOR_UPDATE = {
+  'sqlite': null_for_update,
+}
+
 class CallableProxy(Proxy):
   def __call__(self, *args, **kwargs):
     if self.obj is None:
@@ -70,6 +81,15 @@ class UseThenDisconnect(object):
 db = Proxy()
 read_slave = Proxy()
 db_random_func = CallableProxy()
+db_for_update = CallableProxy()
+
+
+def validate_database_url(url, connect_timeout=5):
+  driver = _db_from_url(url, {
+    'connect_timeout': connect_timeout
+  })
+  driver.connect()
+  driver.close()


 def _db_from_url(url, db_kwargs):
@@ -84,6 +104,10 @@ def _db_from_url(url, db_kwargs):
   if parsed_url.password:
     db_kwargs['password'] = parsed_url.password

+  # Note: sqlite does not support connect_timeout.
+  if parsed_url.drivername == 'sqlite' and 'connect_timeout' in db_kwargs:
+    del db_kwargs['connect_timeout']
+
   return SCHEME_DRIVERS[parsed_url.drivername](parsed_url.database, **db_kwargs)
@@ -95,6 +119,8 @@ def configure(config_object):
   parsed_write_uri = make_url(write_db_uri)
   db_random_func.initialize(SCHEME_RANDOM_FUNCTION[parsed_write_uri.drivername])
+  db_for_update.initialize(SCHEME_SPECIALIZED_FOR_UPDATE.get(parsed_write_uri.drivername,
+                                                             real_for_update))

   read_slave_uri = config_object.get('DB_READ_SLAVE_URI', None)
   if read_slave_uri is not None:
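The db_for_update proxy lets callers request SELECT ... FOR UPDATE row locking where the backend supports it, while degrading to a plain query on SQLite per the table above; a usage sketch in which the model and uuid are illustrative:

    with db.transaction():
      query = RepositoryBuild.select().where(RepositoryBuild.uuid == some_uuid)
      # FOR UPDATE on MySQL/Postgres; a no-op under sqlite.
      build = db_for_update(query).get()
      build.phase = BUILD_PHASE.BUILDING
      build.save()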
@ -113,6 +139,9 @@ def uuid_generator():
return str(uuid.uuid4()) return str(uuid.uuid4())
_get_epoch_timestamp = lambda: int(time.time())
def close_db_filter(_): def close_db_filter(_):
if not db.is_closed(): if not db.is_closed():
logger.debug('Disconnecting from database.') logger.debug('Disconnecting from database.')
@ -124,8 +153,9 @@ def close_db_filter(_):
class QuayUserField(ForeignKeyField): class QuayUserField(ForeignKeyField):
def __init__(self, allows_robots=False, *args, **kwargs): def __init__(self, allows_robots=False, robot_null_delete=False, *args, **kwargs):
self.allows_robots = allows_robots self.allows_robots = allows_robots
self.robot_null_delete = robot_null_delete
if not 'rel_model' in kwargs: if not 'rel_model' in kwargs:
kwargs['rel_model'] = User kwargs['rel_model'] = User
@ -151,6 +181,7 @@ class User(BaseModel):
invoice_email = BooleanField(default=False) invoice_email = BooleanField(default=False)
invalid_login_attempts = IntegerField(default=0) invalid_login_attempts = IntegerField(default=0)
last_invalid_login = DateTimeField(default=datetime.utcnow) last_invalid_login = DateTimeField(default=datetime.utcnow)
removed_tag_expiration_s = IntegerField(default=1209600) # Two weeks
def delete_instance(self, recursive=False, delete_nullable=False): def delete_instance(self, recursive=False, delete_nullable=False):
# If we are deleting a robot account, only execute the subset of queries necessary. # If we are deleting a robot account, only execute the subset of queries necessary.
@ -159,7 +190,11 @@ class User(BaseModel):
for query, fk in self.dependencies(search_nullable=True): for query, fk in self.dependencies(search_nullable=True):
if isinstance(fk, QuayUserField) and fk.allows_robots: if isinstance(fk, QuayUserField) and fk.allows_robots:
model = fk.model_class model = fk.model_class
model.delete().where(query).execute()
if fk.robot_null_delete:
model.update(**{fk.name: None}).where(query).execute()
else:
model.delete().where(query).execute()
# Delete the instance itself. # Delete the instance itself.
super(User, self).delete_instance(recursive=False, delete_nullable=False) super(User, self).delete_instance(recursive=False, delete_nullable=False)
@ -319,6 +354,10 @@ class PermissionPrototype(BaseModel):
) )
class AccessTokenKind(BaseModel):
name = CharField(unique=True, index=True)
class AccessToken(BaseModel): class AccessToken(BaseModel):
friendly_name = CharField(null=True) friendly_name = CharField(null=True)
code = CharField(default=random_string_generator(length=64), unique=True, code = CharField(default=random_string_generator(length=64), unique=True,
@ -327,6 +366,7 @@ class AccessToken(BaseModel):
created = DateTimeField(default=datetime.now) created = DateTimeField(default=datetime.now)
role = ForeignKeyField(Role) role = ForeignKeyField(Role)
temporary = BooleanField(default=True) temporary = BooleanField(default=True)
kind = ForeignKeyField(AccessTokenKind, null=True)
class BuildTriggerService(BaseModel): class BuildTriggerService(BaseModel):
@ -368,6 +408,24 @@ class ImageStorageTransformation(BaseModel):
name = CharField(index=True, unique=True) name = CharField(index=True, unique=True)
class ImageStorageSignatureKind(BaseModel):
name = CharField(index=True, unique=True)
class ImageStorageSignature(BaseModel):
storage = ForeignKeyField(ImageStorage, index=True)
kind = ForeignKeyField(ImageStorageSignatureKind)
signature = TextField(null=True)
uploading = BooleanField(default=True, null=True)
class Meta:
database = db
read_slaves = (read_slave,)
indexes = (
(('kind', 'storage'), True),
)
class DerivedImageStorage(BaseModel): class DerivedImageStorage(BaseModel):
source = ForeignKeyField(ImageStorage, null=True, related_name='source') source = ForeignKeyField(ImageStorage, null=True, related_name='source')
derivative = ForeignKeyField(ImageStorage, related_name='derivative') derivative = ForeignKeyField(ImageStorage, related_name='derivative')
@ -424,12 +482,15 @@ class RepositoryTag(BaseModel):
name = CharField() name = CharField()
image = ForeignKeyField(Image) image = ForeignKeyField(Image)
repository = ForeignKeyField(Repository) repository = ForeignKeyField(Repository)
lifetime_start_ts = IntegerField(default=_get_epoch_timestamp)
lifetime_end_ts = IntegerField(null=True, index=True)
hidden = BooleanField(default=False)
class Meta: class Meta:
database = db database = db
read_slaves = (read_slave,) read_slaves = (read_slave,)
indexes = ( indexes = (
(('repository', 'name'), True), (('repository', 'name'), False),
) )
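These lifetime columns replace hard tag deletion with a timeline: closing or moving a tag sets lifetime_end_ts rather than dropping the row, which is also why the (repository, name) index above loses its uniqueness, since one live row can now coexist with any number of closed ones. A minimal restatement of the liveness rule in plain Python (the actual peewee predicate, _tag_alive, appears in data/model further down this diff):

import time

def is_alive(lifetime_end_ts):
    # A tag row is live while its lifetime has no recorded end, or while
    # the recorded end (epoch seconds) is still in the future.
    return lifetime_end_ts is None or lifetime_end_ts > int(time.time())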
@ -441,23 +502,10 @@ class BUILD_PHASE(object):
PULLING = 'pulling' PULLING = 'pulling'
BUILDING = 'building' BUILDING = 'building'
PUSHING = 'pushing' PUSHING = 'pushing'
WAITING = 'waiting'
COMPLETE = 'complete' COMPLETE = 'complete'
class RepositoryBuild(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
repository = ForeignKeyField(Repository, index=True)
access_token = ForeignKeyField(AccessToken)
resource_key = CharField(index=True)
job_config = TextField()
phase = CharField(default='waiting')
started = DateTimeField(default=datetime.now)
display_name = CharField()
trigger = ForeignKeyField(RepositoryBuildTrigger, null=True, index=True)
pull_robot = QuayUserField(null=True, related_name='buildpullrobot')
logs_archived = BooleanField(default=False)
class QueueItem(BaseModel): class QueueItem(BaseModel):
queue_name = CharField(index=True, max_length=1024) queue_name = CharField(index=True, max_length=1024)
body = TextField() body = TextField()
@ -467,6 +515,21 @@ class QueueItem(BaseModel):
retries_remaining = IntegerField(default=5) retries_remaining = IntegerField(default=5)
class RepositoryBuild(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
repository = ForeignKeyField(Repository, index=True)
access_token = ForeignKeyField(AccessToken)
resource_key = CharField(index=True)
job_config = TextField()
phase = CharField(default=BUILD_PHASE.WAITING)
started = DateTimeField(default=datetime.now)
display_name = CharField()
trigger = ForeignKeyField(RepositoryBuildTrigger, null=True, index=True)
pull_robot = QuayUserField(null=True, related_name='buildpullrobot')
logs_archived = BooleanField(default=False)
queue_item = ForeignKeyField(QueueItem, null=True, index=True)
class LogEntryKind(BaseModel): class LogEntryKind(BaseModel):
name = CharField(index=True, unique=True) name = CharField(index=True, unique=True)
@ -475,7 +538,7 @@ class LogEntry(BaseModel):
kind = ForeignKeyField(LogEntryKind, index=True) kind = ForeignKeyField(LogEntryKind, index=True)
account = QuayUserField(index=True, related_name='account') account = QuayUserField(index=True, related_name='account')
performer = QuayUserField(allows_robots=True, index=True, null=True, performer = QuayUserField(allows_robots=True, index=True, null=True,
related_name='performer') related_name='performer', robot_null_delete=True)
repository = ForeignKeyField(Repository, index=True, null=True) repository = ForeignKeyField(Repository, index=True, null=True)
datetime = DateTimeField(default=datetime.now, index=True) datetime = DateTimeField(default=datetime.now, index=True)
ip = CharField(null=True) ip = CharField(null=True)
@ -566,4 +629,5 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
Notification, ImageStorageLocation, ImageStoragePlacement, Notification, ImageStorageLocation, ImageStoragePlacement,
ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification, ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification,
RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage, RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
TeamMemberInvite, Star] TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind,
AccessTokenKind, Star]

View file

@ -18,7 +18,8 @@ config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI']))
# Interpret the config file for Python logging. # Interpret the config file for Python logging.
# This line sets up loggers basically. # This line sets up loggers basically.
fileConfig(config.config_file_name) if config.config_file_name:
fileConfig(config.config_file_name)
# add your model's MetaData object here # add your model's MetaData object here
# for 'autogenerate' support # for 'autogenerate' support

View file

@ -2,13 +2,14 @@ set -e
DOCKER_IP=`echo $DOCKER_HOST | sed 's/tcp:\/\///' | sed 's/:.*//'` DOCKER_IP=`echo $DOCKER_HOST | sed 's/tcp:\/\///' | sed 's/:.*//'`
MYSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}" MYSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}"
PERCONA_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root@$DOCKER_IP/genschema\"}"
PGSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"postgresql://postgres@$DOCKER_IP/genschema\"}" PGSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"postgresql://postgres@$DOCKER_IP/genschema\"}"
up_mysql() { up_mysql() {
# Run a SQL database on port 3306 inside of Docker. # Run a SQL database on port 3306 inside of Docker.
docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql
# Sleep for 5s to let MySQL get started. # Sleep for 10s to let MySQL get started.
echo 'Sleeping for 10...' echo 'Sleeping for 10...'
sleep 10 sleep 10
@ -21,6 +22,40 @@ down_mysql() {
docker rm mysql docker rm mysql
} }
up_mariadb() {
# Run a SQL database on port 3306 inside of Docker.
docker run --name mariadb -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mariadb
# Sleep for 10s to let MariaDB get started.
echo 'Sleeping for 10...'
sleep 10
# Add the database to mariadb.
docker run --rm --link mariadb:mariadb mariadb sh -c 'echo "create database genschema" | mysql -h"$MARIADB_PORT_3306_TCP_ADDR" -P"$MARIADB_PORT_3306_TCP_PORT" -uroot -ppassword'
}
down_mariadb() {
docker kill mariadb
docker rm mariadb
}
up_percona() {
# Run a SQL database on port 3306 inside of Docker.
docker run --name percona -p 3306:3306 -d dockerfile/percona
# Sleep for 10s
echo 'Sleeping for 10...'
sleep 10
# Add the database to mysql.
docker run --rm --link percona:percona dockerfile/percona sh -c 'echo "create database genschema" | mysql -h $PERCONA_PORT_3306_TCP_ADDR'
}
down_percona() {
docker kill percona
docker rm percona
}
up_postgres() { up_postgres() {
# Run a SQL database on port 5432 inside of Docker. # Run a SQL database on port 5432 inside of Docker.
docker run --name postgres -p 5432:5432 -d postgres docker run --name postgres -p 5432:5432 -d postgres
@ -73,6 +108,26 @@ test_migrate $MYSQL_CONFIG_OVERRIDE
set -e set -e
down_mysql down_mysql
# Test via MariaDB.
echo '> Starting MariaDB'
up_mariadb
echo '> Testing Migration (mariadb)'
set +e
test_migrate $MYSQL_CONFIG_OVERRIDE
set -e
down_mariadb
# Test via Percona.
echo '> Starting Percona'
up_percona
echo '> Testing Migration (percona)'
set +e
test_migrate $PERCONA_CONFIG_OVERRIDE
set -e
down_percona
# Test via Postgres. # Test via Postgres.
echo '> Starting Postgres' echo '> Starting Postgres'
up_postgres up_postgres

View file

@ -0,0 +1,30 @@
"""Add build queue item reference to the repositorybuild table
Revision ID: 14fe12ade3df
Revises: 5ad999136045
Create Date: 2015-02-12 16:11:57.814645
"""
# revision identifiers, used by Alembic.
revision = '14fe12ade3df'
down_revision = '5ad999136045'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('repositorybuild', sa.Column('queue_item_id', sa.Integer(), nullable=True))
op.create_index('repositorybuild_queue_item_id', 'repositorybuild', ['queue_item_id'], unique=False)
op.create_foreign_key(op.f('fk_repositorybuild_queue_item_id_queueitem'), 'repositorybuild', 'queueitem', ['queue_item_id'], ['id'])
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(op.f('fk_repositorybuild_queue_item_id_queueitem'), 'repositorybuild', type_='foreignkey')
op.drop_index('repositorybuild_queue_item_id', table_name='repositorybuild')
op.drop_column('repositorybuild', 'queue_item_id')
### end Alembic commands ###

View file

@ -0,0 +1,37 @@
"""Actually remove the column access_token_id
Revision ID: 1d2d86d09fcd
Revises: 14fe12ade3df
Create Date: 2015-02-12 16:27:30.260797
"""
# revision identifiers, used by Alembic.
revision = '1d2d86d09fcd'
down_revision = '14fe12ade3df'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.exc import InternalError
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
try:
op.drop_constraint(u'fk_logentry_access_token_id_accesstoken', 'logentry', type_='foreignkey')
op.drop_index('logentry_access_token_id', table_name='logentry')
op.drop_column('logentry', 'access_token_id')
except InternalError:
pass
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
try:
op.add_column('logentry', sa.Column('access_token_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
op.create_foreign_key(u'fk_logentry_access_token_id_accesstoken', 'logentry', 'accesstoken', ['access_token_id'], ['id'])
op.create_index('logentry_access_token_id', 'logentry', ['access_token_id'], unique=False)
except InternalError:
pass
### end Alembic commands ###

View file

@ -0,0 +1,25 @@
"""mysql max index lengths
Revision ID: 228d1af6af1c
Revises: 5b84373e5db
Create Date: 2015-01-06 14:35:24.651424
"""
# revision identifiers, used by Alembic.
revision = '228d1af6af1c'
down_revision = '5b84373e5db'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(tables):
op.drop_index('queueitem_queue_name', table_name='queueitem')
op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False, mysql_length=767)
op.drop_index('image_ancestors', table_name='image')
op.create_index('image_ancestors', 'image', ['ancestors'], unique=False, mysql_length=767)
def downgrade(tables):
pass
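Both indexes recreated here cover columns that can exceed InnoDB's 767-byte key-prefix limit (the default cap before DYNAMIC row formats became standard), so the migration passes the MySQL-specific mysql_length option; other backends simply ignore the dialect keyword. A sketch of the same option against a hypothetical table:

import sqlalchemy as sa

# Hypothetical table for illustration; 'path' is too long to index in
# full under MySQL's 767-byte prefix limit.
metadata = sa.MetaData()
example = sa.Table('example', metadata,
                   sa.Column('id', sa.Integer, primary_key=True),
                   sa.Column('path', sa.Text))

# Index only the first 767 bytes on MySQL; a no-op keyword elsewhere.
sa.Index('example_path', example.c.path, mysql_length=767)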

View file

@ -0,0 +1,44 @@
"""Add access token kinds type
Revision ID: 3e2d38b52a75
Revises: 1d2d86d09fcd
Create Date: 2015-02-17 12:03:26.422485
"""
# revision identifiers, used by Alembic.
revision = '3e2d38b52a75'
down_revision = '1d2d86d09fcd'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.create_table('accesstokenkind',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_accesstokenkind'))
)
op.create_index('accesstokenkind_name', 'accesstokenkind', ['name'], unique=True)
op.add_column(u'accesstoken', sa.Column('kind_id', sa.Integer(), nullable=True))
op.create_index('accesstoken_kind_id', 'accesstoken', ['kind_id'], unique=False)
op.create_foreign_key(op.f('fk_accesstoken_kind_id_accesstokenkind'), 'accesstoken', 'accesstokenkind', ['kind_id'], ['id'])
### end Alembic commands ###
op.bulk_insert(tables.accesstokenkind,
[
{'id': 1, 'name':'build-worker'},
{'id': 2, 'name':'pushpull-token'},
])
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(op.f('fk_accesstoken_kind_id_accesstokenkind'), 'accesstoken', type_='foreignkey')
op.drop_index('accesstoken_kind_id', table_name='accesstoken')
op.drop_column(u'accesstoken', 'kind_id')
op.drop_index('accesstokenkind_name', table_name='accesstokenkind')
op.drop_table('accesstokenkind')
### end Alembic commands ###

View file

@ -0,0 +1,26 @@
"""Allow tags to be marked as hidden.
Revision ID: 4ef04c61fcf9
Revises: 509d2857566f
Create Date: 2015-02-18 16:34:16.586129
"""
# revision identifiers, used by Alembic.
revision = '4ef04c61fcf9'
down_revision = '509d2857566f'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('repositorytag', sa.Column('hidden', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_column('repositorytag', 'hidden')
### end Alembic commands ###

View file

@ -0,0 +1,36 @@
"""Track the lifetime start and end for tags to allow the state of a repository to be rewound.
Revision ID: 509d2857566f
Revises: 3e2d38b52a75
Create Date: 2015-02-13 14:35:38.939049
"""
# revision identifiers, used by Alembic.
revision = '509d2857566f'
down_revision = '3e2d38b52a75'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.add_column('repositorytag', sa.Column('lifetime_end_ts', sa.Integer(), nullable=True))
op.add_column('repositorytag', sa.Column('lifetime_start_ts', sa.Integer(), nullable=False, server_default="0"))
op.create_index('repositorytag_lifetime_end_ts', 'repositorytag', ['lifetime_end_ts'], unique=False)
op.drop_index('repositorytag_repository_id_name', table_name='repositorytag')
op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=False)
op.add_column('user', sa.Column('removed_tag_expiration_s', sa.Integer(), nullable=False, server_default="1209600"))
### end Alembic commands ###
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'removed_tag_expiration_s')
op.drop_index('repositorytag_repository_id_name', table_name='repositorytag')
op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=True)
op.drop_index('repositorytag_lifetime_end_ts', table_name='repositorytag')
op.drop_column('repositorytag', 'lifetime_start_ts')
op.drop_column('repositorytag', 'lifetime_end_ts')
### end Alembic commands ###

View file

@ -53,7 +53,7 @@ def upgrade(tables):
op.create_index('queueitem_available', 'queueitem', ['available'], unique=False) op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False) op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False) op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False) op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False, mysql_length=767)
op.create_table('role', op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False), sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False), sa.Column('name', sa.String(length=255), nullable=False),
@ -376,7 +376,7 @@ def upgrade(tables):
sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], ), sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], ),
sa.PrimaryKeyConstraint('id') sa.PrimaryKeyConstraint('id')
) )
op.create_index('image_ancestors', 'image', ['ancestors'], unique=False) op.create_index('image_ancestors', 'image', ['ancestors'], unique=False, mysql_length=767)
op.create_index('image_repository_id', 'image', ['repository_id'], unique=False) op.create_index('image_repository_id', 'image', ['repository_id'], unique=False)
op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=True) op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=True)
op.create_index('image_storage_id', 'image', ['storage_id'], unique=False) op.create_index('image_storage_id', 'image', ['storage_id'], unique=False)

View file

@ -0,0 +1,55 @@
"""Add signature storage
Revision ID: 5ad999136045
Revises: 228d1af6af1c
Create Date: 2015-02-05 15:01:54.989573
"""
# revision identifiers, used by Alembic.
revision = '5ad999136045'
down_revision = '228d1af6af1c'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.create_table('imagestoragesignaturekind',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignaturekind'))
)
op.create_index('imagestoragesignaturekind_name', 'imagestoragesignaturekind', ['name'], unique=True)
op.create_table('imagestoragesignature',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('storage_id', sa.Integer(), nullable=False),
sa.Column('kind_id', sa.Integer(), nullable=False),
sa.Column('signature', sa.Text(), nullable=True),
sa.Column('uploading', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['kind_id'], ['imagestoragesignaturekind.id'], name=op.f('fk_imagestoragesignature_kind_id_imagestoragesignaturekind')),
sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestoragesignature_storage_id_imagestorage')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignature'))
)
op.create_index('imagestoragesignature_kind_id', 'imagestoragesignature', ['kind_id'], unique=False)
op.create_index('imagestoragesignature_kind_id_storage_id', 'imagestoragesignature', ['kind_id', 'storage_id'], unique=True)
op.create_index('imagestoragesignature_storage_id', 'imagestoragesignature', ['storage_id'], unique=False)
### end Alembic commands ###
op.bulk_insert(tables.imagestoragetransformation,
[
{'id': 2, 'name':'aci'},
])
op.bulk_insert(tables.imagestoragesignaturekind,
[
{'id': 1, 'name':'gpg2'},
])
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_table('imagestoragesignature')
op.drop_table('imagestoragesignaturekind')
### end Alembic commands ###

View file

@ -0,0 +1,24 @@
"""Convert slack webhook data
Revision ID: 5b84373e5db
Revises: 1c5b738283a5
Create Date: 2014-12-16 12:02:55.167744
"""
# revision identifiers, used by Alembic.
revision = '5b84373e5db'
down_revision = '1c5b738283a5'
from alembic import op
import sqlalchemy as sa
from util.migrateslackwebhook import run_slackwebhook_migration
def upgrade(tables):
run_slackwebhook_migration()
def downgrade(tables):
pass

View file

@ -2,8 +2,10 @@ import bcrypt
import logging import logging
import dateutil.parser import dateutil.parser
import json import json
import time
from datetime import datetime, timedelta, date from datetime import datetime, timedelta, date
from uuid import uuid4
from data.database import (User, Repository, Image, AccessToken, Role, RepositoryPermission, from data.database import (User, Repository, Image, AccessToken, Role, RepositoryPermission,
Visibility, RepositoryTag, EmailConfirmation, FederatedLogin, Visibility, RepositoryTag, EmailConfirmation, FederatedLogin,
@ -14,7 +16,9 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
ExternalNotificationEvent, ExternalNotificationMethod, ExternalNotificationEvent, ExternalNotificationMethod,
RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite, RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
DerivedImageStorage, ImageStorageTransformation, random_string_generator, DerivedImageStorage, ImageStorageTransformation, random_string_generator,
db, BUILD_PHASE, QuayUserField, Star) db, BUILD_PHASE, QuayUserField, ImageStorageSignature, QueueItem,
ImageStorageSignatureKind, validate_database_url, db_for_update,
AccessTokenKind, Star)
from peewee import JOIN_LEFT_OUTER, fn from peewee import JOIN_LEFT_OUTER, fn
from util.validation import (validate_username, validate_email, validate_password, from util.validation import (validate_username, validate_email, validate_password,
INVALID_PASSWORD_MESSAGE) INVALID_PASSWORD_MESSAGE)
@ -105,12 +109,15 @@ class TooManyLoginAttemptsException(Exception):
self.retry_after = retry_after self.retry_after = retry_after
def _get_repository(namespace_name, repository_name): def _get_repository(namespace_name, repository_name, for_update=False):
return (Repository query = (Repository
.select(Repository, Namespace) .select(Repository, Namespace)
.join(Namespace, on=(Repository.namespace_user == Namespace.id)) .join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace_name, Repository.name == repository_name) .where(Namespace.username == namespace_name, Repository.name == repository_name))
.get()) if for_update:
query = db_for_update(query)
return query.get()
def hash_password(password, salt=None): def hash_password(password, salt=None):
@ -164,8 +171,7 @@ def _create_user(username, email):
pass pass
try: try:
new_user = User.create(username=username, email=email) return User.create(username=username, email=email)
return new_user
except Exception as ex: except Exception as ex:
raise DataModelException(ex.message) raise DataModelException(ex.message)
@ -295,6 +301,9 @@ def delete_robot(robot_username):
def _list_entity_robots(entity_name): def _list_entity_robots(entity_name):
""" Return the list of robots for the specified entity. This MUST return a query, not a
materialized list so that callers can use db_for_update.
"""
return (User return (User
.select() .select()
.join(FederatedLogin) .join(FederatedLogin)
@ -901,14 +910,17 @@ def change_password(user, new_password):
delete_notifications_by_kind(user, 'password_required') delete_notifications_by_kind(user, 'password_required')
def change_username(user, new_username): def change_username(user_id, new_username):
(username_valid, username_issue) = validate_username(new_username) (username_valid, username_issue) = validate_username(new_username)
if not username_valid: if not username_valid:
raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue)) raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))
with config.app_config['DB_TRANSACTION_FACTORY'](db): with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Reload the user for update
user = db_for_update(User.select().where(User.id == user_id)).get()
# Rename the robots # Rename the robots
for robot in _list_entity_robots(user.username): for robot in db_for_update(_list_entity_robots(user.username)):
_, robot_shortname = parse_robot_username(robot.username) _, robot_shortname = parse_robot_username(robot.username)
new_robot_name = format_robot_username(new_username, robot_shortname) new_robot_name = format_robot_username(new_username, robot_shortname)
robot.username = new_robot_name robot.username = new_robot_name
@ -924,6 +936,11 @@ def change_invoice_email(user, invoice_email):
user.save() user.save()
def change_user_tag_expiration(user, tag_expiration_s):
user.removed_tag_expiration_s = tag_expiration_s
user.save()
def update_email(user, new_email, auto_verify=False): def update_email(user, new_email, auto_verify=False):
user.email = new_email user.email = new_email
user.verified = auto_verify user.verified = auto_verify
@ -1087,6 +1104,26 @@ def get_repository(namespace_name, repository_name):
return None return None
def get_image(repo, dockerfile_id):
try:
return Image.get(Image.docker_image_id == dockerfile_id, Image.repository == repo)
except Image.DoesNotExist:
return None
def find_child_image(repo, parent_image, command):
try:
return (Image.select()
.join(ImageStorage)
.switch(Image)
.where(Image.ancestors % '%/' + parent_image.id + '/%',
ImageStorage.command == command)
.order_by(ImageStorage.created.desc())
.get())
except Image.DoesNotExist:
return None
def get_repo_image(namespace_name, repository_name, docker_image_id): def get_repo_image(namespace_name, repository_name, docker_image_id):
def limit_to_image_id(query): def limit_to_image_id(query):
return query.where(Image.docker_image_id == docker_image_id).limit(1) return query.where(Image.docker_image_id == docker_image_id).limit(1)
@ -1249,9 +1286,9 @@ def _find_or_link_image(existing_image, repository, username, translations, pref
storage.locations = {placement.location.name storage.locations = {placement.location.name
for placement in storage.imagestorageplacement_set} for placement in storage.imagestorageplacement_set}
new_image = Image.create(docker_image_id=existing_image.docker_image_id, new_image = Image.create(docker_image_id=existing_image.docker_image_id,
repository=repository, storage=storage, repository=repository, storage=storage,
ancestors=new_image_ancestry) ancestors=new_image_ancestry)
logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id) logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
translations[existing_image.id] = new_image.id translations[existing_image.id] = new_image.id
@ -1315,7 +1352,28 @@ def find_create_or_link_image(docker_image_id, repository, username, translation
ancestors='/') ancestors='/')
def find_or_create_derived_storage(source, transformation_name, preferred_location): def find_or_create_storage_signature(storage, signature_kind):
found = lookup_storage_signature(storage, signature_kind)
if found is None:
kind = ImageStorageSignatureKind.get(name=signature_kind)
found = ImageStorageSignature.create(storage=storage, kind=kind)
return found
def lookup_storage_signature(storage, signature_kind):
kind = ImageStorageSignatureKind.get(name=signature_kind)
try:
return (ImageStorageSignature
.select()
.where(ImageStorageSignature.storage == storage,
ImageStorageSignature.kind == kind)
.get())
except ImageStorageSignature.DoesNotExist:
return None
def find_derived_storage(source, transformation_name):
try: try:
found = (ImageStorage found = (ImageStorage
.select(ImageStorage, DerivedImageStorage) .select(ImageStorage, DerivedImageStorage)
@ -1328,11 +1386,19 @@ def find_or_create_derived_storage(source, transformation_name, preferred_locati
found.locations = {placement.location.name for placement in found.imagestorageplacement_set} found.locations = {placement.location.name for placement in found.imagestorageplacement_set}
return found return found
except ImageStorage.DoesNotExist: except ImageStorage.DoesNotExist:
logger.debug('Creating storage derived from source: %s', source.uuid) return None
trans = ImageStorageTransformation.get(name=transformation_name)
new_storage = _create_storage(preferred_location)
DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans) def find_or_create_derived_storage(source, transformation_name, preferred_location):
return new_storage existing = find_derived_storage(source, transformation_name)
if existing is not None:
return existing
logger.debug('Creating storage derived from source: %s', source.uuid)
trans = ImageStorageTransformation.get(name=transformation_name)
new_storage = _create_storage(preferred_location)
DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
return new_storage
def delete_derived_storage_by_uuid(storage_uuid): def delete_derived_storage_by_uuid(storage_uuid):
@ -1401,7 +1467,7 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
Image.docker_image_id == docker_image_id)) Image.docker_image_id == docker_image_id))
try: try:
fetched = query.get() fetched = db_for_update(query).get()
except Image.DoesNotExist: except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository') raise DataModelException('No image with specified id and repository')
@ -1489,19 +1555,48 @@ def get_repository_images(namespace_name, repository_name):
return _get_repository_images_base(namespace_name, repository_name, lambda q: q) return _get_repository_images_base(namespace_name, repository_name, lambda q: q)
def list_repository_tags(namespace_name, repository_name): def _tag_alive(query):
return (RepositoryTag return query.where((RepositoryTag.lifetime_end_ts >> None) |
.select(RepositoryTag, Image) (RepositoryTag.lifetime_end_ts > int(time.time())))
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryTag) def list_repository_tags(namespace_name, repository_name, include_hidden=False):
.join(Image) query = _tag_alive(RepositoryTag
.where(Repository.name == repository_name, Namespace.username == namespace_name)) .select(RepositoryTag, Image)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryTag)
.join(Image)
.where(Repository.name == repository_name,
Namespace.username == namespace_name))
if not include_hidden:
query = query.where(RepositoryTag.hidden == False)
return query
def _garbage_collect_tags(namespace_name, repository_name):
to_delete = (RepositoryTag
.select(RepositoryTag.id)
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name, Namespace.username == namespace_name,
~(RepositoryTag.lifetime_end_ts >> None),
(RepositoryTag.lifetime_end_ts + Namespace.removed_tag_expiration_s) <=
int(time.time())))
(RepositoryTag
.delete()
.where(RepositoryTag.id << to_delete)
.execute())
def garbage_collect_repository(namespace_name, repository_name): def garbage_collect_repository(namespace_name, repository_name):
storage_id_whitelist = {} storage_id_whitelist = {}
_garbage_collect_tags(namespace_name, repository_name)
with config.app_config['DB_TRANSACTION_FACTORY'](db): with config.app_config['DB_TRANSACTION_FACTORY'](db):
# TODO (jake): We could probably select this and all the images in a single query using # TODO (jake): We could probably select this and all the images in a single query using
# a different kind of join. # a different kind of join.
@ -1535,12 +1630,10 @@ def garbage_collect_repository(namespace_name, repository_name):
if len(to_remove) > 0: if len(to_remove) > 0:
logger.info('Garbage collecting storage for images: %s', to_remove) logger.info('Garbage collecting storage for images: %s', to_remove)
garbage_collect_storage(storage_id_whitelist) _garbage_collect_storage(storage_id_whitelist)
return len(to_remove)
def garbage_collect_storage(storage_id_whitelist): def _garbage_collect_storage(storage_id_whitelist):
if len(storage_id_whitelist) == 0: if len(storage_id_whitelist) == 0:
return return
@ -1632,10 +1725,10 @@ def garbage_collect_storage(storage_id_whitelist):
def get_tag_image(namespace_name, repository_name, tag_name): def get_tag_image(namespace_name, repository_name, tag_name):
def limit_to_tag(query): def limit_to_tag(query):
return (query return _tag_alive(query
.switch(Image) .switch(Image)
.join(RepositoryTag) .join(RepositoryTag)
.where(RepositoryTag.name == tag_name)) .where(RepositoryTag.name == tag_name))
images = _get_repository_images_base(namespace_name, repository_name, limit_to_tag) images = _get_repository_images_base(namespace_name, repository_name, limit_to_tag)
if not images: if not images:
@ -1643,7 +1736,6 @@ def get_tag_image(namespace_name, repository_name, tag_name):
else: else:
return images[0] return images[0]
def get_image_by_id(namespace_name, repository_name, docker_image_id): def get_image_by_id(namespace_name, repository_name, docker_image_id):
image = get_repo_image_extended(namespace_name, repository_name, docker_image_id) image = get_repo_image_extended(namespace_name, repository_name, docker_image_id)
if not image: if not image:
@ -1672,45 +1764,69 @@ def get_parent_images(namespace_name, repository_name, image_obj):
def create_or_update_tag(namespace_name, repository_name, tag_name, def create_or_update_tag(namespace_name, repository_name, tag_name,
tag_docker_image_id): tag_docker_image_id):
try:
repo = _get_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
try: with config.app_config['DB_TRANSACTION_FACTORY'](db):
image = Image.get(Image.docker_image_id == tag_docker_image_id, Image.repository == repo) try:
except Image.DoesNotExist: repo = _get_repository(namespace_name, repository_name)
raise DataModelException('Invalid image with id: %s' % tag_docker_image_id) except Repository.DoesNotExist:
raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
try: try:
tag = RepositoryTag.get(RepositoryTag.repository == repo, RepositoryTag.name == tag_name) image = Image.get(Image.docker_image_id == tag_docker_image_id, Image.repository == repo)
tag.image = image except Image.DoesNotExist:
tag.save() raise DataModelException('Invalid image with id: %s' % tag_docker_image_id)
except RepositoryTag.DoesNotExist:
tag = RepositoryTag.create(repository=repo, image=image, name=tag_name)
return tag now_ts = int(time.time())
try:
# When we move a tag, we really end the timeline of the old one and create a new one
query = _tag_alive(RepositoryTag
.select()
.where(RepositoryTag.repository == repo, RepositoryTag.name == tag_name))
tag = query.get()
tag.lifetime_end_ts = now_ts
tag.save()
except RepositoryTag.DoesNotExist:
# No tag that needs to be ended
pass
return RepositoryTag.create(repository=repo, image=image, name=tag_name,
lifetime_start_ts=now_ts)
def delete_tag(namespace_name, repository_name, tag_name): def delete_tag(namespace_name, repository_name, tag_name):
try: with config.app_config['DB_TRANSACTION_FACTORY'](db):
found = (RepositoryTag try:
.select() query = _tag_alive(RepositoryTag
.join(Repository) .select(RepositoryTag, Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id)) .join(Repository)
.where(Repository.name == repository_name, Namespace.username == namespace_name, .join(Namespace, on=(Repository.namespace_user == Namespace.id))
RepositoryTag.name == tag_name) .where(Repository.name == repository_name,
.get()) Namespace.username == namespace_name,
RepositoryTag.name == tag_name))
found = db_for_update(query).get()
except RepositoryTag.DoesNotExist:
msg = ('Invalid repository tag \'%s\' on repository \'%s/%s\'' %
(tag_name, namespace_name, repository_name))
raise DataModelException(msg)
except RepositoryTag.DoesNotExist: found.lifetime_end_ts = int(time.time())
msg = ('Invalid repository tag \'%s\' on repository \'%s/%s\'' % found.save()
(tag_name, namespace_name, repository_name))
raise DataModelException(msg)
found.delete_instance()
def delete_all_repository_tags(namespace_name, repository_name): def create_temporary_hidden_tag(repo, image, expiration_s):
""" Create a tag with a defined timeline, that will not appear in the UI or CLI. Returns the name
of the temporary tag. """
now_ts = int(time.time())
expire_ts = now_ts + expiration_s
tag_name = str(uuid4())
RepositoryTag.create(repository=repo, image=image, name=tag_name, lifetime_start_ts=now_ts,
lifetime_end_ts=expire_ts, hidden=True)
return tag_name
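A hypothetical call site for the helper above: pin an image for an hour while some long-running operation (an ACI conversion, say) still needs it, without the tag ever surfacing to users:

temp_name = create_temporary_hidden_tag(repo, image, expiration_s=3600)
# No cleanup call is needed: the tag's lifetime ends on its own, after
# which the normal garbage-collection path may reclaim the image.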
def purge_all_repository_tags(namespace_name, repository_name):
""" Immediately purge all repository tags without respecting the lifeline procedure """
try: try:
repo = _get_repository(namespace_name, repository_name) repo = _get_repository(namespace_name, repository_name)
except Repository.DoesNotExist: except Repository.DoesNotExist:
@ -1825,7 +1941,7 @@ def set_team_repo_permission(team_name, namespace_name, repository_name,
def purge_repository(namespace_name, repository_name): def purge_repository(namespace_name, repository_name):
# Delete all tags to allow gc to reclaim storage # Delete all tags to allow gc to reclaim storage
delete_all_repository_tags(namespace_name, repository_name) purge_all_repository_tags(namespace_name, repository_name)
# Gc to remove the images and storage # Gc to remove the images and storage
garbage_collect_repository(namespace_name, repository_name) garbage_collect_repository(namespace_name, repository_name)
@ -1845,10 +1961,14 @@ def get_private_repo_count(username):
.count()) .count())
def create_access_token(repository, role): def create_access_token(repository, role, kind=None, friendly_name=None):
role = Role.get(Role.name == role) role = Role.get(Role.name == role)
kind_ref = None
if kind is not None:
kind_ref = AccessTokenKind.get(AccessTokenKind.name == kind)
new_token = AccessToken.create(repository=repository, temporary=True, new_token = AccessToken.create(repository=repository, temporary=True,
role=role) role=role, kind=kind_ref, friendly_name=friendly_name)
return new_token return new_token
@ -1967,10 +2087,10 @@ def create_repository_build(repo, access_token, job_config_obj, dockerfile_id,
pull_robot = lookup_robot(pull_robot_name) pull_robot = lookup_robot(pull_robot_name)
return RepositoryBuild.create(repository=repo, access_token=access_token, return RepositoryBuild.create(repository=repo, access_token=access_token,
job_config=json.dumps(job_config_obj), job_config=json.dumps(job_config_obj),
display_name=display_name, trigger=trigger, display_name=display_name, trigger=trigger,
resource_key=dockerfile_id, resource_key=dockerfile_id,
pull_robot=pull_robot) pull_robot=pull_robot)
def get_pull_robot_name(trigger): def get_pull_robot_name(trigger):
@ -2255,11 +2375,20 @@ def delete_user(user):
# TODO: also delete any repository data associated # TODO: also delete any repository data associated
def check_health(): def check_health(app_config):
# Attempt to connect to the database first. If the DB is not responding,
# using the validate_database_url will timeout quickly, as opposed to
# making a normal connect which will just hang (thus breaking the health
# check).
try:
validate_database_url(app_config['DB_URI'], connect_timeout=3)
except Exception:
logger.exception('Could not connect to the database')
return False
# We will connect to the db, check that it contains some log entry kinds # We will connect to the db, check that it contains some log entry kinds
try: try:
found_count = LogEntryKind.select().count() return bool(list(LogEntryKind.select().limit(1)))
return found_count > 0
except: except:
return False return False
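Besides the fail-fast connect, the health check above swaps the old count() for a limit(1) probe: "does any row exist" can be answered by fetching at most one row instead of counting them all. The same idiom, as a hypothetical reusable helper:

def table_has_rows(model_cls):
    # bool([]) is False, bool([row]) is True; at most one row is fetched.
    return bool(list(model_cls.select().limit(1)))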
@ -2365,6 +2494,32 @@ def confirm_team_invite(code, user):
found.delete_instance() found.delete_instance()
return (team, inviter) return (team, inviter)
def cancel_repository_build(build):
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Reload the build for update.
try:
build = db_for_update(RepositoryBuild.select().where(RepositoryBuild.id == build.id)).get()
except RepositoryBuild.DoesNotExist:
return False
if build.phase != BUILD_PHASE.WAITING or not build.queue_item:
return False
# Load the build queue item for update.
try:
queue_item = db_for_update(QueueItem.select()
.where(QueueItem.id == build.queue_item.id)).get()
except QueueItem.DoesNotExist:
return False
# Check the queue item.
if not queue_item.available or queue_item.retries_remaining == 0:
return False
# Delete the queue item and build.
queue_item.delete_instance(recursive=True)
build.delete_instance()
return True
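The cancel path above re-reads both the build and its queue item with db_for_update inside one transaction, then re-checks its preconditions before deleting anything; a build worker that claims the item first will have flipped those fields, and the cancel bails out. The general shape, with transaction_factory, fetch_query, and precondition as placeholders:

with transaction_factory(db):                 # placeholder factory
    row = db_for_update(fetch_query).get()    # SELECT ... FOR UPDATE
    if not precondition(row):
        cancelled = False                     # a racer got there first
    else:
        row.delete_instance()
        cancelled = True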
def get_repository_usage(): def get_repository_usage():
one_month_ago = date.today() - timedelta(weeks=4) one_month_ago = date.today() - timedelta(weeks=4)

View file

@ -1,11 +1,17 @@
from datetime import datetime, timedelta from datetime import datetime, timedelta
from data.database import QueueItem, db from data.database import QueueItem, db, db_for_update
from util.morecollections import AttrDict from util.morecollections import AttrDict
MINIMUM_EXTENSION = timedelta(seconds=20) MINIMUM_EXTENSION = timedelta(seconds=20)
class NoopWith:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
class WorkQueue(object): class WorkQueue(object):
def __init__(self, queue_name, transaction_factory, def __init__(self, queue_name, transaction_factory,
@ -31,31 +37,50 @@ class WorkQueue(object):
QueueItem.processing_expires > now, QueueItem.processing_expires > now,
QueueItem.queue_name ** name_match_query)) QueueItem.queue_name ** name_match_query))
def _available_jobs(self, now, name_match_query, running_query): def _available_jobs(self, now, name_match_query):
return (QueueItem return (QueueItem
.select() .select()
.where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now, .where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now,
((QueueItem.available == True) | (QueueItem.processing_expires <= now)), ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
QueueItem.retries_remaining > 0, ~(QueueItem.queue_name << running_query))) QueueItem.retries_remaining > 0))
def _available_jobs_not_running(self, now, name_match_query, running_query):
return (self
._available_jobs(now, name_match_query)
.where(~(QueueItem.queue_name << running_query)))
def _name_match_query(self): def _name_match_query(self):
return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list) return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)
def update_metrics(self): def _item_by_id_for_update(self, queue_id):
if self._reporter is None: return db_for_update(QueueItem.select().where(QueueItem.id == queue_id)).get()
return
with self._transaction_factory(db): def get_metrics(self, require_transaction=True):
guard = self._transaction_factory(db) if require_transaction else NoopWith()
with guard:
now = datetime.utcnow() now = datetime.utcnow()
name_match_query = self._name_match_query() name_match_query = self._name_match_query()
running_query = self._running_jobs(now, name_match_query) running_query = self._running_jobs(now, name_match_query)
running_count = running_query.distinct().count() running_count = running_query.distinct().count()
avialable_query = self._available_jobs(now, name_match_query, running_query) available_query = self._available_jobs(now, name_match_query)
available_count = avialable_query.select(QueueItem.queue_name).distinct().count() available_count = available_query.select(QueueItem.queue_name).distinct().count()
self._reporter(self._currently_processing, running_count, running_count + available_count) available_not_running_query = self._available_jobs_not_running(now, name_match_query,
running_query)
available_not_running_count = (available_not_running_query.select(QueueItem.queue_name)
.distinct().count())
return (running_count, available_not_running_count, available_count)
def update_metrics(self):
if self._reporter is None:
return
(running_count, available_not_running_count, available_count) = self.get_metrics()
self._reporter(self._currently_processing, running_count,
running_count + available_not_running_count)
def put(self, canonical_name_list, message, available_after=0, retries_remaining=5): def put(self, canonical_name_list, message, available_after=0, retries_remaining=5):
""" """
@ -73,24 +98,31 @@ class WorkQueue(object):
params['available_after'] = available_date params['available_after'] = available_date
with self._transaction_factory(db): with self._transaction_factory(db):
QueueItem.create(**params) return QueueItem.create(**params)
def get(self, processing_time=300): def get(self, processing_time=300):
""" """
Get an available item and mark it as unavailable for the default of five Get an available item and mark it as unavailable for the default of five
minutes. minutes. The result of this method must always be composed of simple
Python objects which are JSON-serializable for network portability reasons.
""" """
now = datetime.utcnow() now = datetime.utcnow()
name_match_query = self._name_match_query() name_match_query = self._name_match_query()
with self._transaction_factory(db): running = self._running_jobs(now, name_match_query)
running = self._running_jobs(now, name_match_query) avail = self._available_jobs_not_running(now, name_match_query, running)
avail = self._available_jobs(now, name_match_query, running)
item = None item = None
try: try:
db_item = avail.order_by(QueueItem.id).get() db_item_candidate = avail.order_by(QueueItem.id).get()
with self._transaction_factory(db):
still_available_query = (db_for_update(self
._available_jobs(now, name_match_query)
.where(QueueItem.id == db_item_candidate.id)))
db_item = still_available_query.get()
db_item.available = False db_item.available = False
db_item.processing_expires = now + timedelta(seconds=processing_time) db_item.processing_expires = now + timedelta(seconds=processing_time)
db_item.retries_remaining -= 1 db_item.retries_remaining -= 1
@ -99,25 +131,26 @@ class WorkQueue(object):
item = AttrDict({ item = AttrDict({
'id': db_item.id, 'id': db_item.id,
'body': db_item.body, 'body': db_item.body,
'retries_remaining': db_item.retries_remaining
}) })
self._currently_processing = True self._currently_processing = True
except QueueItem.DoesNotExist: except QueueItem.DoesNotExist:
self._currently_processing = False self._currently_processing = False
# Return a view of the queue item rather than an active db object # Return a view of the queue item rather than an active db object
return item return item
def complete(self, completed_item): def complete(self, completed_item):
with self._transaction_factory(db): with self._transaction_factory(db):
completed_item_obj = QueueItem.get(QueueItem.id == completed_item.id) completed_item_obj = self._item_by_id_for_update(completed_item.id)
completed_item_obj.delete_instance() completed_item_obj.delete_instance()
self._currently_processing = False self._currently_processing = False
def incomplete(self, incomplete_item, retry_after=300, restore_retry=False): def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
with self._transaction_factory(db): with self._transaction_factory(db):
retry_date = datetime.utcnow() + timedelta(seconds=retry_after) retry_date = datetime.utcnow() + timedelta(seconds=retry_after)
incomplete_item_obj = QueueItem.get(QueueItem.id == incomplete_item.id) incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
incomplete_item_obj.available_after = retry_date incomplete_item_obj.available_after = retry_date
incomplete_item_obj.available = True incomplete_item_obj.available = True
@ -126,16 +159,14 @@ class WorkQueue(object):
incomplete_item_obj.save() incomplete_item_obj.save()
self._currently_processing = False self._currently_processing = False
return incomplete_item_obj.retries_remaining > 0
@staticmethod def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION):
def extend_processing(queue_item, seconds_from_now, retry_count=None, with self._transaction_factory(db):
minimum_extension=MINIMUM_EXTENSION): queue_item = self._item_by_id_for_update(item.id)
new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now) new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
# Only actually write the new expiration to the db if it moves the expiration some minimum # Only actually write the new expiration to the db if it moves the expiration some minimum
if new_expiration - queue_item.processing_expires > minimum_extension: if new_expiration - queue_item.processing_expires > minimum_extension:
if retry_count is not None: queue_item.processing_expires = new_expiration
queue_item.retries_remaining = retry_count queue_item.save()
queue_item.processing_expires = new_expiration
queue_item.save()
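The reworked get() above claims work in two steps: a cheap, lock-free scan picks a candidate, and a short transaction re-reads that single row with db_for_update before marking it taken, so two workers racing on the same item cannot both win. In outline, with available, transaction_factory, now, and processing_time standing in for the values built above:

candidate = available.order_by(QueueItem.id).get()
with transaction_factory(db):
    # Re-check availability under a row lock; this raises DoesNotExist
    # if a competing worker claimed the item after the scan.
    claimed = db_for_update(
        available.where(QueueItem.id == candidate.id)).get()
    claimed.available = False
    claimed.processing_expires = now + timedelta(seconds=processing_time)
    claimed.retries_remaining -= 1
    claimed.save()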

data/runmigration.py Normal file
View file

@ -0,0 +1,20 @@
import logging
from alembic.config import Config
from alembic.script import ScriptDirectory
from alembic.environment import EnvironmentContext
from alembic.migration import __name__ as migration_name
def run_alembic_migration(log_handler=None):
if log_handler:
logging.getLogger(migration_name).addHandler(log_handler)
config = Config()
config.set_main_option("script_location", "data:migrations")
script = ScriptDirectory.from_config(config)
def fn(rev, context):
return script._upgrade_revs('head', rev)
with EnvironmentContext(config, script, fn=fn, destination_rev='head'):
script.run_env()
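This helper drives the same upgrade-to-head that the alembic CLI would, but in-process, and can mirror Alembic's log records into a caller-supplied handler; the setup API added below passes an _AlembicLogHandler so migration progress can be reported over HTTP. A minimal caller:

import logging

handler = logging.StreamHandler()   # any logging.Handler will do
run_alembic_migration(log_handler=handler)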

View file

@ -4,6 +4,12 @@
<meta name="viewport" content="width=device-width" /> <meta name="viewport" content="width=device-width" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>{{ subject }}</title> <title>{{ subject }}</title>
{% if action_metadata %}
<script type="application/ld+json">
{{ action_metadata }}
</script>
{% endif %}
</head> </head>
<body bgcolor="#FFFFFF" style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; width: 100% !important; height: 100%; margin: 0; padding: 0;"><style type="text/css"> <body bgcolor="#FFFFFF" style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; width: 100% !important; height: 100%; margin: 0; padding: 0;"><style type="text/css">
@media only screen and (max-width: 600px) { @media only screen and (max-width: 600px) {

View file

@ -280,6 +280,23 @@ require_user_read = require_user_permission(UserReadPermission, scopes.READ_USER
require_user_admin = require_user_permission(UserAdminPermission, None) require_user_admin = require_user_permission(UserAdminPermission, None)
require_fresh_user_admin = require_user_permission(UserAdminPermission, None) require_fresh_user_admin = require_user_permission(UserAdminPermission, None)
def verify_not_prod(func):
@add_method_metadata('enterprise_only', True)
@wraps(func)
def wrapped(*args, **kwargs):
# Verify that we are not running on a production (i.e. hosted) stack. If so, we fail.
# This should never happen (because of the feature-flag on SUPER_USERS), but we want to be
# absolutely sure.
if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0:
logger.error('!!! Super user method called IN PRODUCTION !!!')
raise NotFound()
return func(*args, **kwargs)
return wrapped
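verify_not_prod stacks with the other endpoint decorators; the new suconfig module below applies it per-method underneath the class-level @resource/@internal_only/@show_if guards. The shape, with a hypothetical route and nickname:

@resource('/v1/superuser/example')        # hypothetical route
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserExample(ApiResource):
    @verify_not_prod
    @nickname('scExample')
    def get(self):
        return {'status': 'ok'}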
def require_fresh_login(func): def require_fresh_login(func):
@add_method_metadata('requires_fresh_login', True) @add_method_metadata('requires_fresh_login', True)
@wraps(func) @wraps(func)
@ -317,7 +334,11 @@ def validate_json_request(schema_name):
def wrapped(self, *args, **kwargs): def wrapped(self, *args, **kwargs):
schema = self.schemas[schema_name] schema = self.schemas[schema_name]
try: try:
validate(request.get_json(), schema) json_data = request.get_json()
if json_data is None:
raise InvalidRequest('Missing JSON body')
validate(json_data, schema)
return func(self, *args, **kwargs) return func(self, *args, **kwargs)
except ValidationError as ex: except ValidationError as ex:
raise InvalidRequest(ex.message) raise InvalidRequest(ex.message)
@ -385,8 +406,10 @@ import endpoints.api.repoemail
import endpoints.api.repotoken import endpoints.api.repotoken
import endpoints.api.robot import endpoints.api.robot
import endpoints.api.search import endpoints.api.search
import endpoints.api.suconfig
import endpoints.api.superuser import endpoints.api.superuser
import endpoints.api.tag import endpoints.api.tag
import endpoints.api.team import endpoints.api.team
import endpoints.api.trigger import endpoints.api.trigger
import endpoints.api.user import endpoints.api.user

View file

@ -9,7 +9,7 @@ from app import app, userfiles as user_files, build_logs, log_archive
from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource, from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
require_repo_read, require_repo_write, validate_json_request, require_repo_read, require_repo_write, validate_json_request,
ApiResource, internal_only, format_date, api, Unauthorized, NotFound, ApiResource, internal_only, format_date, api, Unauthorized, NotFound,
path_param) path_param, InvalidRequest, require_repo_admin)
from endpoints.common import start_build from endpoints.common import start_build
from endpoints.trigger import BuildTrigger from endpoints.trigger import BuildTrigger
from data import model, database from data import model, database
@ -70,10 +70,17 @@ def build_status_view(build_obj, can_write=False):
# If the status contains a heartbeat, then check to see if has been written in the last few # If the status contains a heartbeat, then check to see if has been written in the last few
# minutes. If not, then the build timed out. # minutes. If not, then the build timed out.
if status is not None and 'heartbeat' in status and status['heartbeat']: if phase != database.BUILD_PHASE.COMPLETE and phase != database.BUILD_PHASE.ERROR:
heartbeat = datetime.datetime.fromtimestamp(status['heartbeat']) if status is not None and 'heartbeat' in status and status['heartbeat']:
if datetime.datetime.now() - heartbeat > datetime.timedelta(minutes=1): heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
phase = database.BUILD_PHASE.INTERNAL_ERROR if datetime.datetime.utcnow() - heartbeat > datetime.timedelta(minutes=1):
phase = database.BUILD_PHASE.INTERNAL_ERROR
# If the phase is internal error, return 'error' instead when the number of retries
# remaining on the queue item is 0.
if phase == database.BUILD_PHASE.INTERNAL_ERROR:
if build_obj.queue_item is None or build_obj.queue_item.retries_remaining == 0:
phase = database.BUILD_PHASE.ERROR
logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config) logger.debug('Can write: %s job_config: %s', can_write, build_obj.job_config)
resp = { resp = {
@ -86,7 +93,7 @@ def build_status_view(build_obj, can_write=False):
'is_writer': can_write, 'is_writer': can_write,
'trigger': trigger_view(build_obj.trigger), 'trigger': trigger_view(build_obj.trigger),
'resource_key': build_obj.resource_key, 'resource_key': build_obj.resource_key,
'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None, 'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None
} }
if can_write: if can_write:
@ -200,6 +207,31 @@ class RepositoryBuildList(RepositoryParamResource):
return resp, 201, headers return resp, 201, headers
@resource('/v1/repository/<repopath:repository>/build/<build_uuid>')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@path_param('build_uuid', 'The UUID of the build')
class RepositoryBuildResource(RepositoryParamResource):
""" Resource for dealing with repository builds. """
@require_repo_admin
@nickname('cancelRepoBuild')
def delete(self, namespace, repository, build_uuid):
""" Cancels a repository build if it has not yet been picked up by a build worker. """
try:
build = model.get_repository_build(build_uuid)
except model.InvalidRepositoryBuildException:
raise NotFound()
if build.repository.name != repository or build.repository.namespace_user.username != namespace:
raise NotFound()
if model.cancel_repository_build(build):
return 'Okay', 201
else:
raise InvalidRequest('Build is currently running or has finished')
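A hedged client-side sketch of the new cancel endpoint; the host, token, and /api prefix are assumptions about a typical deployment rather than anything this change defines:

import requests

build_uuid = 'some-build-uuid'   # placeholder
resp = requests.delete(
    'https://quay.example.com/api/v1/repository/myorg/myrepo/build/' + build_uuid,
    headers={'Authorization': 'Bearer MY_TOKEN'})
# 201 means the still-waiting build was cancelled; an invalid-request
# error means a worker already picked it up or it has finished.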
@resource('/v1/repository/<repopath:repository>/build/<build_uuid>/status') @resource('/v1/repository/<repopath:repository>/build/<build_uuid>/status')
@path_param('repository', 'The full path of the repository. e.g. namespace/name') @path_param('repository', 'The full path of the repository. e.g. namespace/name')
@path_param('build_uuid', 'The UUID of the build') @path_param('build_uuid', 'The UUID of the build')

View file

@@ -116,6 +116,11 @@ class Organization(ApiResource):
        'type': 'boolean',
        'description': 'Whether the organization desires to receive emails for invoices',
      },
+     'tag_expiration': {
+       'type': 'integer',
+       'maximum': 2592000,
+       'minimum': 0,
+     },
    },
  },
}

@@ -161,6 +166,10 @@ class Organization(ApiResource):
      logger.debug('Changing email address for organization: %s', org.username)
      model.update_email(org, new_email)

+     if 'tag_expiration' in org_data:
+       logger.debug('Changing organization tag expiration to: %ss', org_data['tag_expiration'])
+       model.change_user_tag_expiration(org, org_data['tag_expiration'])

      teams = model.get_teams_within_org(org)
      return org_view(org, teams)
    raise Unauthorized()
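As a rough usage sketch of the new field: tag_expiration is given in seconds and is bounded by the schema above to 0..2592000 (30 days). The endpoint prefix and session handling below are assumptions:

# Hypothetical sketch: set an organization's removed-tag expiration to 14 days.
import requests

TWO_WEEKS = 14 * 24 * 60 * 60  # 1209600 seconds, within the schema's 2592000 max

def set_org_tag_expiration(session, base_url, orgname, seconds=TWO_WEEKS):
  url = '%s/api/v1/organization/%s' % (base_url, orgname)
  return session.put(url, json={'tag_expiration': seconds})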

endpoints/api/suconfig.py Normal file
View file

@@ -0,0 +1,362 @@
import logging
import os
import json
import signal
from flask import abort, Response
from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if,
require_fresh_login, request, validate_json_request, verify_not_prod)
from endpoints.common import common_login
from app import app, CONFIG_PROVIDER, superusers
from data import model
from data.database import configure
from auth.permissions import SuperUserPermission
from auth.auth_context import get_authenticated_user
from data.database import User
from util.config.configutil import add_enterprise_config_defaults
from util.config.validator import validate_service_for_config, SSL_FILENAMES
from data.runmigration import run_alembic_migration
import features
logger = logging.getLogger(__name__)
def database_is_valid():
""" Returns whether the database, as configured, is valid. """
if app.config['TESTING']:
return False
try:
list(User.select().limit(1))
return True
except:
return False
def database_has_users():
""" Returns whether the database has any users defined. """
return bool(list(User.select().limit(1)))
@resource('/v1/superuser/registrystatus')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserRegistryStatus(ApiResource):
""" Resource for determining the status of the registry, such as if config exists,
if a database is configured, and if it has any defined users.
"""
@nickname('scRegistryStatus')
@verify_not_prod
def get(self):
""" Returns the status of the registry. """
# If there is no conf/stack volume, then report that status.
if not CONFIG_PROVIDER.volume_exists():
return {
'status': 'missing-config-dir'
}
# If there is no config file, we need to setup the database.
if not CONFIG_PROVIDER.yaml_exists():
return {
'status': 'config-db'
}
# If the database isn't yet valid, then we need to set it up.
if not database_is_valid():
return {
'status': 'setup-db'
}
# If we have SETUP_COMPLETE, then we're ready to go!
if app.config.get('SETUP_COMPLETE', False):
return {
'requires_restart': CONFIG_PROVIDER.requires_restart(app.config),
'status': 'ready'
}
return {
'status': 'create-superuser' if not database_has_users() else 'config'
}
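The status values above form a small setup state machine: missing-config-dir, config-db, setup-db, create-superuser, config, then ready. A setup tool could poll for a target state roughly as follows (the URL prefix and session handling are assumptions, not part of this change):

# Hypothetical polling sketch against the registry status endpoint above.
import time
import requests

def wait_for_status(session, base_url, wanted, interval=2):
  while True:
    resp = session.get('%s/api/v1/superuser/registrystatus' % base_url)
    if resp.json()['status'] == wanted:
      return
    time.sleep(interval)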
class _AlembicLogHandler(logging.Handler):
def __init__(self):
super(_AlembicLogHandler, self).__init__()
self.records = []
def emit(self, record):
self.records.append({
'level': record.levelname,
'message': record.getMessage()
})
@resource('/v1/superuser/setupdb')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserSetupDatabase(ApiResource):
""" Resource for invoking alembic to setup the database. """
@verify_not_prod
@nickname('scSetupDatabase')
def get(self):
""" Invokes the alembic upgrade process. """
# Note: This method is called after the database configuration is saved, but before the
# database has any tables. Therefore, we only allow it to be run in that unique case.
if CONFIG_PROVIDER.yaml_exists() and not database_is_valid():
# Note: We need to reconfigure the database here as the config has changed.
combined = dict(**app.config)
combined.update(CONFIG_PROVIDER.get_yaml())
configure(combined)
app.config['DB_URI'] = combined['DB_URI']
log_handler = _AlembicLogHandler()
try:
run_alembic_migration(log_handler)
except Exception as ex:
return {
'error': str(ex)
}
return {
'logs': log_handler.records
}
abort(403)
@resource('/v1/superuser/shutdown')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserShutdown(ApiResource):
""" Resource for sending a shutdown signal to the container. """
@verify_not_prod
@nickname('scShutdownContainer')
def post(self):
""" Sends a signal to the phusion init system to shut down the container. """
# Note: This method is called to set the database configuration before any super users exist,
# so we also allow it to be called if there is no valid registry configuration set up.
if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can():
# Note: We skip if debugging locally.
if app.config.get('DEBUGGING') == True:
return {}
os.kill(1, signal.SIGINT)
return {}
abort(403)
@resource('/v1/superuser/config')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserConfig(ApiResource):
""" Resource for fetching and updating the current configuration, if any. """
schemas = {
'UpdateConfig': {
'id': 'UpdateConfig',
'type': 'object',
'description': 'Updates the YAML config file',
'required': [
'config',
'hostname'
],
'properties': {
'config': {
'type': 'object'
},
'hostname': {
'type': 'string'
}
},
},
}
@require_fresh_login
@verify_not_prod
@nickname('scGetConfig')
def get(self):
""" Returns the currently defined configuration, if any. """
if SuperUserPermission().can():
config_object = CONFIG_PROVIDER.get_yaml()
return {
'config': config_object
}
abort(403)
@nickname('scUpdateConfig')
@verify_not_prod
@validate_json_request('UpdateConfig')
def put(self):
""" Updates the config.yaml file. """
# Note: This method is called to set the database configuration before any super users exist,
# so we also allow it to be called if there is no valid registry configuration set up.
if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can():
config_object = request.get_json()['config']
hostname = request.get_json()['hostname']
# Add any enterprise defaults missing from the config.
add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname)
# Write the configuration changes to the YAML file.
CONFIG_PROVIDER.save_yaml(config_object)
return {
'exists': True,
'config': config_object
}
abort(403)
@resource('/v1/superuser/config/file/<filename>')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserConfigFile(ApiResource):
""" Resource for fetching the status of config files and overriding them. """
@nickname('scConfigFileExists')
@verify_not_prod
def get(self, filename):
""" Returns whether the configuration file with the given name exists. """
if not filename in SSL_FILENAMES:
abort(404)
if SuperUserPermission().can():
return {
'exists': CONFIG_PROVIDER.volume_file_exists(filename)
}
abort(403)
@nickname('scUpdateConfigFile')
@verify_not_prod
def post(self, filename):
""" Updates the configuration file with the given name. """
if not filename in SSL_FILENAMES:
abort(404)
if SuperUserPermission().can():
uploaded_file = request.files['file']
if not uploaded_file:
abort(400)
CONFIG_PROVIDER.save_volume_file(filename, uploaded_file)
return {
'status': True
}
abort(403)
@resource('/v1/superuser/config/createsuperuser')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserCreateInitialSuperUser(ApiResource):
""" Resource for creating the initial super user. """
schemas = {
'CreateSuperUser': {
'id': 'CreateSuperUser',
'type': 'object',
'description': 'Information for creating the initial super user',
'required': [
'username',
'password',
'email'
],
'properties': {
'username': {
'type': 'string',
'description': 'The username for the superuser'
},
'password': {
'type': 'string',
'description': 'The password for the superuser'
},
'email': {
'type': 'string',
'description': 'The e-mail address for the superuser'
},
},
},
}
@nickname('scCreateInitialSuperuser')
@verify_not_prod
@validate_json_request('CreateSuperUser')
def post(self):
""" Creates the initial super user, updates the underlying configuration and
sets the current session to have that super user. """
# Special security check: This method is only accessible when:
# - There is a valid config YAML file.
# - There are currently no users in the database (clean install)
#
# We do this special security check because at the point this method is called, the database
# is clean but does not (yet) have any super users for our permissions code to check against.
if CONFIG_PROVIDER.yaml_exists() and not database_has_users():
data = request.get_json()
username = data['username']
password = data['password']
email = data['email']
# Create the user in the database.
superuser = model.create_user(username, password, email, auto_verify=True)
# Add the user to the config.
config_object = CONFIG_PROVIDER.get_yaml()
config_object['SUPER_USERS'] = [username]
CONFIG_PROVIDER.save_yaml(config_object)
# Update the in-memory config for the new superuser.
superusers.register_superuser(username)
# Conduct login with that user.
common_login(superuser)
return {
'status': True
}
abort(403)
@resource('/v1/superuser/config/validate/<service>')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserConfigValidate(ApiResource):
""" Resource for validating a block of configuration against an external service. """
schemas = {
'ValidateConfig': {
'id': 'ValidateConfig',
'type': 'object',
'description': 'Validates configuration',
'required': [
'config'
],
'properties': {
'config': {
'type': 'object'
}
},
},
}
@nickname('scValidateConfig')
@verify_not_prod
@validate_json_request('ValidateConfig')
def post(self, service):
""" Validates the given config for the given service. """
# Note: This method is called to validate the database configuration before any super users
# exist, so we also allow it to be called if there is no valid registry configuration set up. Note that
# this is also safe since this method does not access any information not given in the request.
if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can():
config = request.get_json()['config']
return validate_service_for_config(service, config)
abort(403)

View file

@@ -1,15 +1,16 @@
import string
import logging
import json
+import os

from random import SystemRandom
-from app import app
+from app import app, avatar, superusers
from flask import request

from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error,
                           log_action, internal_only, NotFound, require_user_admin, format_date,
                           InvalidToken, require_scope, format_date, hide_if, show_if, parse_args,
-                          query_param, abort, require_fresh_login, path_param)
+                          query_param, abort, require_fresh_login, path_param, verify_not_prod)

from endpoints.api.logs import get_logs
@@ -22,18 +23,76 @@ import features

logger = logging.getLogger(__name__)
def get_immediate_subdirectories(directory):
return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]
def get_services():
services = set(get_immediate_subdirectories(app.config['SYSTEM_SERVICES_PATH']))
services = services - set(app.config['SYSTEM_SERVICE_BLACKLIST'])
return services
@resource('/v1/superuser/systemlogs/<service>')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserGetLogsForService(ApiResource):
""" Resource for fetching the kinds of system logs in the system. """
@require_fresh_login
@verify_not_prod
@nickname('getSystemLogs')
def get(self, service):
""" Returns the logs for the specific service. """
if SuperUserPermission().can():
if not service in get_services():
abort(404)
try:
with open(app.config['SYSTEM_SERVICE_LOGS_PATH'] % service, 'r') as f:
logs = f.read()
except Exception as ex:
logger.exception('Cannot read logs')
abort(400)
return {
'logs': logs
}
abort(403)
@resource('/v1/superuser/systemlogs/')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserSystemLogServices(ApiResource):
""" Resource for fetching the kinds of system logs in the system. """
@require_fresh_login
@verify_not_prod
@nickname('listSystemLogServices')
def get(self):
""" List the system logs for the current system. """
if SuperUserPermission().can():
return {
'services': list(get_services())
}
abort(403)
@resource('/v1/superuser/logs')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserLogs(ApiResource):
  """ Resource for fetching all logs in the system. """
+ @require_fresh_login
+ @verify_not_prod
  @nickname('listAllLogs')
  @parse_args
  @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
  @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
  @query_param('performer', 'Username for which to filter logs.', type=str)
  def get(self, args):
-   """ List the logs for the current system. """
+   """ List the usage logs for the current system. """
    if SuperUserPermission().can():
      performer_name = args['performer']
      start_time = args['starttime']
@@ -49,7 +108,8 @@ def user_view(user):
    'username': user.username,
    'email': user.email,
    'verified': user.verified,
-   'super_user': user.username in app.config['SUPER_USERS']
+   'avatar': avatar.compute_hash(user.email, name=user.username),
+   'super_user': superusers.is_superuser(user.username)
  }
@resource('/v1/superuser/usage/')

@@ -58,6 +118,7 @@ def user_view(user):
class UsageInformation(ApiResource):
  """ Resource for returning the usage information for enterprise customers. """
  @require_fresh_login
+ @verify_not_prod
  @nickname('getSystemUsage')
  def get(self):
    """ Returns the number of repository handles currently held. """
@@ -96,6 +157,7 @@ class SuperUserList(ApiResource):
  }

  @require_fresh_login
+ @verify_not_prod
  @nickname('listAllUsers')
  def get(self):
    """ Returns a list of all users in the system. """

@@ -109,6 +171,7 @@ class SuperUserList(ApiResource):
  @require_fresh_login
+ @verify_not_prod
  @nickname('createInstallUser')
  @validate_json_request('CreateInstallUser')
  def post(self):
@@ -146,6 +209,7 @@ class SuperUserList(ApiResource):
class SuperUserSendRecoveryEmail(ApiResource):
  """ Resource for sending a recovery email on behalf of a user. """
  @require_fresh_login
+ @verify_not_prod
  @nickname('sendInstallUserRecoveryEmail')
  def post(self, username):
    if SuperUserPermission().can():

@@ -153,7 +217,7 @@ class SuperUserSendRecoveryEmail(ApiResource):
      if not user or user.organization or user.robot:
        abort(404)

-     if username in app.config['SUPER_USERS']:
+     if superusers.is_superuser(username):
        abort(403)

      code = model.create_reset_password_email_code(user.email)
@@ -190,6 +254,7 @@ class SuperUserManagement(ApiResource):
  }

  @require_fresh_login
+ @verify_not_prod
  @nickname('getInstallUser')
  def get(self, username):
    """ Returns information about the specified user. """

@@ -203,6 +268,7 @@ class SuperUserManagement(ApiResource):
    abort(403)

  @require_fresh_login
+ @verify_not_prod
  @nickname('deleteInstallUser')
  def delete(self, username):
    """ Deletes the specified user. """

@@ -211,7 +277,7 @@ class SuperUserManagement(ApiResource):
    if not user or user.organization or user.robot:
      abort(404)

-   if username in app.config['SUPER_USERS']:
+   if superusers.is_superuser(username):
      abort(403)

    model.delete_user(user)

@@ -220,6 +286,7 @@ class SuperUserManagement(ApiResource):
    abort(403)

  @require_fresh_login
+ @verify_not_prod
  @nickname('changeInstallUser')
  @validate_json_request('UpdateUser')
  def put(self, username):

@@ -229,7 +296,7 @@ class SuperUserManagement(ApiResource):
    if not user or user.organization or user.robot:
      abort(404)

-   if username in app.config['SUPER_USERS']:
+   if superusers.is_superuser(username):
      abort(403)

    user_data = request.get_json()
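The superusers object imported from app replaces direct app.config['SUPER_USERS'] lookups so that a superuser registered during setup (register_superuser in suconfig.py above) is honored without a restart. A minimal sketch of such a manager, assuming only the two methods this diff exercises:

# Minimal sketch; the real class in app may differ. Only is_superuser and
# register_superuser are assumed, since those are the calls made in this diff.
class SuperUserManager(object):
  def __init__(self, config_usernames):
    self._usernames = set(config_usernames or [])

  def is_superuser(self, username):
    return username in self._usernames

  def register_superuser(self, username):
    # Invoked when the initial superuser is created at setup time.
    self._usernames.add(username)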

View file

@@ -54,8 +54,8 @@ class RepositoryTag(RepositoryParamResource):
    username = get_authenticated_user().username
    log_action('move_tag' if original_image_id else 'create_tag', namespace,
-              { 'username': username, 'repo': repository, 'tag': tag,
-                'image': image_id, 'original_image': original_image_id },
+              {'username': username, 'repo': repository, 'tag': tag,
+               'image': image_id, 'original_image': original_image_id},
               repo=model.get_repository(namespace, repository))

    return 'Updated', 201

View file

@@ -415,13 +415,13 @@ class ActivateBuildTrigger(RepositoryParamResource):
    try:
      run_parameters = request.get_json()
      specs = handler.manual_start(trigger.auth_token, config_dict, run_parameters=run_parameters)
-     dockerfile_id, tags, name, subdir = specs
+     dockerfile_id, tags, name, subdir, metadata = specs

      repo = model.get_repository(namespace, repository)
      pull_robot_name = model.get_pull_robot_name(trigger)

-     build_request = start_build(repo, dockerfile_id, tags, name, subdir, True,
-                                 pull_robot_name=pull_robot_name)
+     build_request = start_build(repo, dockerfile_id, tags, name, subdir, True,
+                                 pull_robot_name=pull_robot_name, trigger_metadata=metadata)
    except TriggerStartException as tse:
      raise InvalidRequest(tse.message)

View file

@@ -73,6 +73,7 @@ def user_view(user):
      'can_create_repo': True,
      'invoice_email': user.invoice_email,
      'preferred_namespace': not (user.stripe_id is None),
+     'tag_expiration': user.removed_tag_expiration_s,
    })

  if features.SUPER_USERS:

@@ -144,6 +145,11 @@ class User(ApiResource):
        'type': 'string',
        'description': 'The user\'s email address',
      },
+     'tag_expiration': {
+       'type': 'integer',
+       'maximum': 2592000,
+       'minimum': 0,
+     },
      'username': {
        'type': 'string',
        'description': 'The user\'s username',

@@ -227,6 +233,10 @@ class User(ApiResource):
      logger.debug('Changing invoice_email for user: %s', user.username)
      model.change_invoice_email(user, user_data['invoice_email'])

+   if 'tag_expiration' in user_data:
+     logger.debug('Changing user tag expiration to: %ss', user_data['tag_expiration'])
+     model.change_user_tag_expiration(user, user_data['tag_expiration'])

    if 'email' in user_data and user_data['email'] != user.email:
      new_email = user_data['email']
      if model.find_user_by_email(new_email):

@@ -248,7 +258,8 @@ class User(ApiResource):
        # Username already used
        raise request_error(message='Username is already in use')

-       model.change_username(user, new_username)
+       model.change_username(user.id, new_username)

    except model.InvalidPasswordException, ex:
      raise request_error(exception=ex)

View file

@@ -3,15 +3,19 @@ import urlparse
import json
import string
import datetime
+import os
+
+# Register the various exceptions via decorators.
+import endpoints.decorated

from flask import make_response, render_template, request, abort, session
-from flask.ext.login import login_user, UserMixin
+from flask.ext.login import login_user
from flask.ext.principal import identity_changed
from random import SystemRandom

from data import model
from data.database import db
-from app import app, login_manager, dockerfile_build_queue, notification_queue, oauth_apps
+from app import app, oauth_apps, dockerfile_build_queue, LoginWrappedDBUser
from auth.permissions import QuayDeferredPermissionUser
from auth import scopes

@@ -21,15 +25,30 @@ from functools import wraps
from config import getFrontendVisibleConfig
from external_libraries import get_external_javascript, get_external_css
from endpoints.notificationhelper import spawn_notification
-from util.useremails import CannotSendEmailException

import features

logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

route_data = None
CACHE_BUSTERS_JSON = 'static/dist/cachebusters.json'
CACHE_BUSTERS = None
def get_cache_busters():
""" Retrieves the cache busters hashes. """
global CACHE_BUSTERS
if CACHE_BUSTERS is not None:
return CACHE_BUSTERS
if not os.path.exists(CACHE_BUSTERS_JSON):
return {}
with open(CACHE_BUSTERS_JSON, 'r') as f:
CACHE_BUSTERS = json.loads(f.read())
return CACHE_BUSTERS
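get_cache_busters() only reads a prebuilt static/dist/cachebusters.json that maps asset filenames to hashes. One plausible way to produce such a file at build time is sketched below; this is an illustration, not the project's actual grunt task, and the key format is assumed to match what add_cachebusters later looks up:

# Illustrative generator: map each built asset to a short content hash so
# asset URLs change whenever file contents change.
import hashlib
import json
import os

def build_cachebusters(root='static/dist', out='static/dist/cachebusters.json'):
  busters = {}
  for dirpath, _, files in os.walk(root):
    for name in files:
      path = os.path.join(dirpath, name)
      with open(path, 'rb') as f:
        busters[os.path.relpath(path, root)] = hashlib.md5(f.read()).hexdigest()[:8]
  with open(out, 'w') as f:
    json.dump(busters, f)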
class RepoPathConverter(BaseConverter):
  regex = '[\.a-zA-Z0-9_\-]+/[\.a-zA-Z0-9_\-]+'
  weight = 200

@@ -84,34 +103,8 @@ def param_required(param_name):
    return wrapper
-@login_manager.user_loader
-def load_user(user_uuid):
-  logger.debug('User loader loading deferred user with uuid: %s' % user_uuid)
-  return _LoginWrappedDBUser(user_uuid)
-
-class _LoginWrappedDBUser(UserMixin):
-  def __init__(self, user_uuid, db_user=None):
-    self._uuid = user_uuid
-    self._db_user = db_user
-
-  def db_user(self):
-    if not self._db_user:
-      self._db_user = model.get_user_by_uuid(self._uuid)
-    return self._db_user
-
-  def is_authenticated(self):
-    return self.db_user() is not None
-
-  def is_active(self):
-    return self.db_user().verified
-
-  def get_id(self):
-    return unicode(self._uuid)
def common_login(db_user):
- if login_user(_LoginWrappedDBUser(db_user.uuid, db_user)):
+ if login_user(LoginWrappedDBUser(db_user.uuid, db_user)):
    logger.debug('Successfully signed in as: %s (%s)' % (db_user.username, db_user.uuid))
    new_identity = QuayDeferredPermissionUser(db_user.uuid, 'user_uuid', {scopes.DIRECT_LOGIN})
    identity_changed.send(app, identity=new_identity)

@@ -121,17 +114,6 @@ def common_login(db_user):
    logger.debug('User could not be logged in, inactive?.')
    return False
-@app.errorhandler(model.DataModelException)
-def handle_dme(ex):
-  logger.exception(ex)
-  return make_response(json.dumps({'message': ex.message}), 400)
-
-@app.errorhandler(CannotSendEmailException)
-def handle_emailexception(ex):
-  message = 'Could not send email. Please contact an administrator and report this problem.'
-  return make_response(json.dumps({'message': message}), 400)
def random_string():
  random = SystemRandom()
  return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(8)])
@@ -148,17 +130,15 @@ def list_files(path, extension):
  filepath = 'static/' + path
  return [join_path(dp, f) for dp, dn, files in os.walk(filepath) for f in files if matches(f)]

-SAVED_CACHE_STRING = random_string()

def render_page_template(name, **kwargs):
- if app.config.get('DEBUGGING', False):
+ debugging = app.config.get('DEBUGGING', False)
+ if debugging:
    # If DEBUGGING is enabled, then we load the full set of individual JS and CSS files
    # from the file system.
    library_styles = list_files('lib', 'css')
    main_styles = list_files('css', 'css')
    library_scripts = list_files('lib', 'js')
    main_scripts = list_files('js', 'js')
-   cache_buster = 'debugging'

    file_lists = [library_styles, main_styles, library_scripts, main_scripts]
    for file_list in file_lists:

@@ -168,7 +148,6 @@ def render_page_template(name, **kwargs):
    main_styles = ['dist/quay-frontend.css']
    library_scripts = []
    main_scripts = ['dist/quay-frontend.min.js']
-   cache_buster = SAVED_CACHE_STRING

  use_cdn = app.config.get('USE_CDN', True)
  if request.args.get('use_cdn') is not None:

@@ -177,6 +156,12 @@ def render_page_template(name, **kwargs):
  external_styles = get_external_css(local=not use_cdn)
  external_scripts = get_external_javascript(local=not use_cdn)

+ def add_cachebusters(filenames):
+   cachebusters = get_cache_busters()
+
+   for filename in filenames:
+     cache_buster = cachebusters.get(filename, random_string()) if not debugging else 'debugging'
+     yield (filename, cache_buster)

  def get_oauth_config():
    oauth_config = {}
    for oauth_app in oauth_apps:

@@ -188,13 +173,14 @@ def render_page_template(name, **kwargs):
  if len(app.config.get('CONTACT_INFO', [])) == 1:
    contact_href = app.config['CONTACT_INFO'][0]

- resp = make_response(render_template(name, route_data=json.dumps(get_route_data()),
+ resp = make_response(render_template(name,
+                                      route_data=json.dumps(get_route_data()),
                                       external_styles=external_styles,
                                       external_scripts=external_scripts,
-                                      main_styles=main_styles,
-                                      library_styles=library_styles,
-                                      main_scripts=main_scripts,
-                                      library_scripts=library_scripts,
+                                      main_styles=add_cachebusters(main_styles),
+                                      library_styles=add_cachebusters(library_styles),
+                                      main_scripts=add_cachebusters(main_scripts),
+                                      library_scripts=add_cachebusters(library_scripts),
                                       feature_set=json.dumps(features.get_features()),
                                       config_set=json.dumps(getFrontendVisibleConfig(app.config)),
                                       oauth_set=json.dumps(get_oauth_config()),

@@ -204,9 +190,10 @@ def render_page_template(name, **kwargs):
                                       sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
                                       is_debug=str(app.config.get('DEBUGGING', False)).lower(),
                                       show_chat=features.OLARK_CHAT,
-                                      cache_buster=cache_buster,
                                       has_billing=features.BILLING,
                                       contact_href=contact_href,
+                                      hostname=app.config['SERVER_HOSTNAME'],
+                                      preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
                                       **kwargs))

  resp.headers['X-FRAME-OPTIONS'] = 'DENY'
@@ -224,18 +211,20 @@ def check_repository_usage(user_or_org, plan_found):

def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
-               trigger=None, pull_robot_name=None):
+               trigger=None, pull_robot_name=None, trigger_metadata=None):
  host = urlparse.urlparse(request.url).netloc
  repo_path = '%s/%s/%s' % (host, repository.namespace_user.username, repository.name)

- token = model.create_access_token(repository, 'write')
+ token = model.create_access_token(repository, 'write', kind='build-worker',
+                                   friendly_name='Repository Build Token')
  logger.debug('Creating build %s with repo %s tags %s and dockerfile_id %s',
               build_name, repo_path, tags, dockerfile_id)

  job_config = {
    'docker_tags': tags,
    'registry': host,
-   'build_subdir': subdir
+   'build_subdir': subdir,
+   'trigger_metadata': trigger_metadata or {}
  }

  with app.config['DB_TRANSACTION_FACTORY'](db):

@@ -243,10 +232,17 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
                                             dockerfile_id, build_name,
                                             trigger, pull_robot_name=pull_robot_name)

-   dockerfile_build_queue.put([repository.namespace_user.username, repository.name], json.dumps({
-     'build_uuid': build_request.uuid,
-     'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
-   }), retries_remaining=1)
+   json_data = json.dumps({
+     'build_uuid': build_request.uuid,
+     'pull_credentials': model.get_pull_credentials(pull_robot_name) if pull_robot_name else None
+   })
+
+   queue_item = dockerfile_build_queue.put([repository.namespace_user.username, repository.name],
+                                           json_data,
+                                           retries_remaining=3)
+
+   build_request.queue_item = queue_item
+   build_request.save()

  # Add the build to the repo's log.
  metadata = {

@@ -265,7 +261,7 @@ def start_build(repository, dockerfile_id, tags, build_name, subdir, manual,
                  metadata=metadata, repository=repository)

  # Add notifications for the build queue.
- profile.debug('Adding notifications for repository')
+ logger.debug('Adding notifications for repository')
  event_data = {
    'build_id': build_request.uuid,
    'build_name': build_name,

View file

@@ -19,19 +19,21 @@ def generate_csrf_token():
  return session['_csrf_token']

+def verify_csrf():
+  token = session.get('_csrf_token', None)
+  found_token = request.values.get('_csrf_token', None)
+
+  if not token or token != found_token:
+    msg = 'CSRF Failure. Session token was %s and request token was %s'
+    logger.error(msg, token, found_token)
+    abort(403, message='CSRF token was invalid or missing.')

def csrf_protect(func):
  @wraps(func)
  def wrapper(*args, **kwargs):
    oauth_token = get_validated_oauth_token()
    if oauth_token is None and request.method != "GET" and request.method != "HEAD":
-     token = session.get('_csrf_token', None)
-     found_token = request.values.get('_csrf_token', None)
-     if not token or token != found_token:
-       msg = 'CSRF Failure. Session token was %s and request token was %s'
-       logger.error(msg, token, found_token)
-       abort(403, message='CSRF token was invalid or missing.')
+     verify_csrf()

    return func(*args, **kwargs)
  return wrapper
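Factoring the check into verify_csrf() keeps csrf_protect behavior identical while letting other call sites validate the token directly. Usage is unchanged: wrap a state-changing view and have the client echo the session token back as a _csrf_token value. The view below is hypothetical:

# Hypothetical view guarded by the decorator above; the client must send the
# session's _csrf_token back as a form value or query argument.
@app.route('/some/action', methods=['POST'])
@csrf_protect
def some_action():
  return 'ok'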

endpoints/decorated.py Normal file
View file

@@ -0,0 +1,19 @@
import logging
import json
from flask import make_response
from app import app
from util.useremails import CannotSendEmailException
from data import model
logger = logging.getLogger(__name__)
@app.errorhandler(model.DataModelException)
def handle_dme(ex):
logger.exception(ex)
return make_response(json.dumps({'message': ex.message}), 400)
@app.errorhandler(CannotSendEmailException)
def handle_emailexception(ex):
message = 'Could not send email. Please contact an administrator and report this problem.'
return make_response(json.dumps({'message': message}), 400)

View file

@@ -23,7 +23,6 @@ from endpoints.notificationhelper import spawn_notification
import features

logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

index = Blueprint('index', __name__)

@@ -51,7 +50,7 @@ def generate_headers(role='read'):
    if has_token_request:
      repo = model.get_repository(namespace, repository)
      if repo:
-       token = model.create_access_token(repo, role)
+       token = model.create_access_token(repo, role, 'pushpull-token')
        token_str = 'signature=%s' % token.code
        response.headers['WWW-Authenticate'] = token_str
        response.headers['X-Docker-Token'] = token_str

@@ -120,7 +119,7 @@ def create_user():
  else:
    # New user case
-   profile.debug('Creating user')
+   logger.debug('Creating user')
    new_user = None

    try:

@@ -128,10 +127,10 @@ def create_user():
    except model.TooManyUsersException as ex:
      abort(402, 'Seat limit has been reached for this license', issue='seat-limit')

-   profile.debug('Creating email code for user')
+   logger.debug('Creating email code for user')
    code = model.create_confirm_email_code(new_user)

-   profile.debug('Sending email code to user')
+   logger.debug('Sending email code to user')
    send_confirmation_email(new_user.username, new_user.email, code.code)

    return make_response('Created', 201)
@@ -168,12 +167,12 @@ def update_user(username):
  update_request = request.get_json()

  if 'password' in update_request:
-   profile.debug('Updating user password')
+   logger.debug('Updating user password')
    model.change_password(get_authenticated_user(),
                          update_request['password'])

  if 'email' in update_request:
-   profile.debug('Updating user email')
+   logger.debug('Updating user email')
    model.update_email(get_authenticated_user(), update_request['email'])

  return jsonify({

@@ -189,13 +188,13 @@ def update_user(username):
@parse_repository_name
@generate_headers(role='write')
def create_repository(namespace, repository):
- profile.debug('Parsing image descriptions')
+ logger.debug('Parsing image descriptions')
  image_descriptions = json.loads(request.data.decode('utf8'))

- profile.debug('Looking up repository')
+ logger.debug('Looking up repository')
  repo = model.get_repository(namespace, repository)

- profile.debug('Repository looked up')
+ logger.debug('Repository looked up')
  if not repo and get_authenticated_user() is None:
    logger.debug('Attempt to create new repository without user auth.')
    abort(401,

@@ -219,36 +218,10 @@ def create_repository(namespace, repository):
          issue='no-create-permission',
          namespace=namespace)

- profile.debug('Creaing repository with owner: %s', get_authenticated_user().username)
+ logger.debug('Creating repository with owner: %s', get_authenticated_user().username)
  repo = model.create_repository(namespace, repository,
                                 get_authenticated_user())

- profile.debug('Determining already added images')
- added_images = OrderedDict([(desc['id'], desc) for desc in image_descriptions])
- new_repo_images = dict(added_images)
-
- # Optimization: Lookup any existing images in the repository with matching docker IDs and
- # remove them from the added dict, so we don't need to look them up one-by-one.
- def chunks(l, n):
-   for i in xrange(0, len(l), n):
-     yield l[i:i+n]
-
- # Note: We do this in chunks in an effort to not hit the SQL query size limit.
- for chunk in chunks(new_repo_images.keys(), 50):
-   existing_images = model.lookup_repository_images(namespace, repository, chunk)
-   for existing in existing_images:
-     added_images.pop(existing.docker_image_id)
-
- profile.debug('Creating/Linking necessary images')
- username = get_authenticated_user() and get_authenticated_user().username
- translations = {}
- for image_description in added_images.values():
-   model.find_create_or_link_image(image_description['id'], repo, username,
-                                   translations, storage.preferred_locations[0])
-
- profile.debug('Created images')
- track_and_log('push_repo', repo)
-
  return make_response('Created', 201)
@@ -260,14 +233,14 @@ def update_images(namespace, repository):
  permission = ModifyRepositoryPermission(namespace, repository)

  if permission.can():
-   profile.debug('Looking up repository')
+   logger.debug('Looking up repository')
    repo = model.get_repository(namespace, repository)
    if not repo:
      # Make sure the repo actually exists.
      abort(404, message='Unknown repository', issue='unknown-repo')

    if get_authenticated_user():
-     profile.debug('Publishing push event')
+     logger.debug('Publishing push event')
      username = get_authenticated_user().username

      # Mark that the user has pushed the repo.

@@ -280,17 +253,17 @@ def update_images(namespace, repository):
      event = userevents.get_event(username)
      event.publish_event_data('docker-cli', user_data)

-   profile.debug('GCing repository')
-   num_removed = model.garbage_collect_repository(namespace, repository)
+   logger.debug('GCing repository')
+   model.garbage_collect_repository(namespace, repository)

    # Generate a job for each notification that has been added to this repo
-   profile.debug('Adding notifications for repository')
+   logger.debug('Adding notifications for repository')

    updated_tags = session.get('pushed_tags', {})
    event_data = {
      'updated_tags': updated_tags,
-     'pruned_image_count': num_removed
    }

+   track_and_log('push_repo', repo)
    spawn_notification(repo, 'repo_push', event_data)

    return make_response('Updated', 204)
@@ -305,17 +278,15 @@ def get_repository_images(namespace, repository):
  permission = ReadRepositoryPermission(namespace, repository)

  # TODO invalidate token?
- profile.debug('Looking up public status of repository')
- is_public = model.repository_is_public(namespace, repository)
-
- if permission.can() or is_public:
+ if permission.can() or model.repository_is_public(namespace, repository):
    # We can't rely on permissions to tell us if a repo exists anymore
-   profile.debug('Looking up repository')
+   logger.debug('Looking up repository')
    repo = model.get_repository(namespace, repository)
    if not repo:
      abort(404, message='Unknown repository', issue='unknown-repo')

    all_images = []
-   profile.debug('Retrieving repository images')
+   logger.debug('Retrieving repository images')
    for image in model.get_repository_images(namespace, repository):
      new_image_view = {
        'id': image.docker_image_id,

@@ -323,7 +294,7 @@ def get_repository_images(namespace, repository):
      }
      all_images.append(new_image_view)

-   profile.debug('Building repository image response')
+   logger.debug('Building repository image response')
    resp = make_response(json.dumps(all_images), 200)
    resp.mimetype = 'application/json'

@@ -382,6 +353,11 @@ def get_search():
  resp.mimetype = 'application/json'
  return resp

+# Note: This is *not* part of the Docker index spec. This is here for our own health check,
+# since we have nginx handle the _ping below.
+@index.route('/_internal_ping')
+def internal_ping():
+  return make_response('true', 200)
@index.route('/_ping')
@index.route('/_ping')

View file

@@ -1,14 +1,10 @@
import logging
-import io
-import os.path
-import tarfile
-import base64
import json
import requests
import re

from flask.ext.mail import Message
-from app import mail, app, get_app_url
+from app import mail, app
from data import model
from workers.worker import JobException

@@ -363,11 +359,8 @@ class SlackMethod(NotificationMethod):
    return 'slack'

  def validate(self, repository, config_data):
-   if not config_data.get('token', ''):
-     raise CannotValidateNotificationMethodException('Missing Slack Token')
-   if not config_data.get('subdomain', '').isalnum():
-     raise CannotValidateNotificationMethodException('Missing Slack Subdomain Name')
+   if not config_data.get('url', ''):
+     raise CannotValidateNotificationMethodException('Missing Slack Callback URL')

  def format_for_slack(self, message):
    message = message.replace('\n', '')

@@ -378,10 +371,8 @@ class SlackMethod(NotificationMethod):
  def perform(self, notification, event_handler, notification_data):
    config_data = json.loads(notification.config_json)

-   token = config_data.get('token', '')
-   subdomain = config_data.get('subdomain', '')
-   if not token or not subdomain:
+   url = config_data.get('url', '')
+   if not url:
      return

    owner = model.get_user_or_org(notification.repository.namespace_user.username)

@@ -389,8 +380,6 @@ class SlackMethod(NotificationMethod):
      # Something went wrong.
      return

-   url = 'https://%s.slack.com/services/hooks/incoming-webhook?token=%s' % (subdomain, token)
-
    level = event_handler.get_level(notification_data['event_data'], notification_data)
    color = {
      'info': '#ffffff',

@@ -426,5 +415,5 @@ class SlackMethod(NotificationMethod):
      raise NotificationMethodPerformException(error_message)
    except requests.exceptions.RequestException as ex:
-     logger.exception('Slack method was unable to be sent: %s' % ex.message)
+     logger.exception('Slack method was unable to be sent: %s', ex.message)
      raise NotificationMethodPerformException(ex.message)
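The change above replaces the legacy subdomain+token Slack endpoint with a single incoming-webhook callback URL, so delivery reduces to one JSON POST. A rough standalone sketch (the payload fields mirror the attachment shape used above; the URL is a placeholder):

# Rough sketch of a Slack incoming-webhook POST, as the new config implies.
import json
import requests

def post_to_slack(url, text, color='#ffffff'):
  payload = {'attachments': [{'fallback': text, 'text': text, 'color': color}]}
  resp = requests.post(url, data=json.dumps(payload),
                       headers={'Content-Type': 'application/json'})
  resp.raise_for_status()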

View file

@@ -4,13 +4,62 @@ import json
from flask import request, Blueprint, abort, Response
from flask.ext.login import current_user
from auth.auth import require_session_login
-from app import userevents
+from endpoints.common import route_show_if
+from app import app, userevents
+from auth.permissions import SuperUserPermission
+
+import features
+import psutil
+import time

logger = logging.getLogger(__name__)

realtime = Blueprint('realtime', __name__)
@realtime.route("/ps")
@route_show_if(features.SUPER_USERS)
@require_session_login
def ps():
if not SuperUserPermission().can():
abort(403)
def generator():
while True:
build_status = {}
try:
builder_data = app.config['HTTPCLIENT'].get('http://localhost:8686/status', timeout=1)
if builder_data.status_code == 200:
build_status = json.loads(builder_data.text)
except:
pass
try:
data = {
'count': {
'cpu': psutil.cpu_percent(interval=1, percpu=True),
'virtual_mem': psutil.virtual_memory(),
'swap_mem': psutil.swap_memory(),
'connections': len(psutil.net_connections()),
'processes': len(psutil.pids()),
'network': psutil.net_io_counters()
},
'build': build_status
}
except psutil.AccessDenied:
data = {}
json_string = json.dumps(data)
yield 'data: %s\n\n' % json_string
time.sleep(1)
try:
return Response(generator(), mimetype="text/event-stream")
except:
pass
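The generator above emits one JSON snapshot per second in text/event-stream framing ('data: ...' followed by a blank line). A minimal consumer might look like this; the '/realtime' mount prefix and an authenticated superuser session are assumptions:

# Minimal server-sent-events consumer for the /ps stream above.
import json
import requests

def watch_ps(session, base_url):
  resp = session.get('%s/realtime/ps' % base_url, stream=True)
  for line in resp.iter_lines():
    if line and line.startswith('data: '):
      yield json.loads(line[len('data: '):])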
@realtime.route("/user/") @realtime.route("/user/")
@require_session_login @require_session_login
def index(): def index():

View file

@@ -9,6 +9,7 @@ from time import time
from app import storage as store, image_diff_queue, app
from auth.auth import process_auth, extract_namespace_repo_from_session
+from auth.auth_context import get_authenticated_user
from util import checksums, changes
from util.http import abort, exact_abort
from auth.permissions import (ReadRepositoryPermission,

@@ -20,7 +21,6 @@ from util import gzipstream
registry = Blueprint('registry', __name__)

logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')
class SocketReader(object):
  def __init__(self, fp):

@@ -100,12 +100,12 @@ def set_cache_headers(f):
def head_image_layer(namespace, repository, image_id, headers):
  permission = ReadRepositoryPermission(namespace, repository)

- profile.debug('Checking repo permissions')
+ logger.debug('Checking repo permissions')
  if permission.can() or model.repository_is_public(namespace, repository):
-   profile.debug('Looking up repo image')
+   logger.debug('Looking up repo image')
    repo_image = model.get_repo_image_extended(namespace, repository, image_id)
    if not repo_image:
-     profile.debug('Image not found')
+     logger.debug('Image not found')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
            image_id=image_id)

@@ -114,7 +114,7 @@ def head_image_layer(namespace, repository, image_id, headers):
    # Add the Accept-Ranges header if the storage engine supports resumable
    # downloads.
    if store.get_supports_resumable_downloads(repo_image.storage.locations):
-     profile.debug('Storage supports resumable downloads')
+     logger.debug('Storage supports resumable downloads')
      extra_headers['Accept-Ranges'] = 'bytes'

    resp = make_response('')
@@ -133,31 +133,35 @@ def head_image_layer(namespace, repository, image_id, headers):
def get_image_layer(namespace, repository, image_id, headers):
  permission = ReadRepositoryPermission(namespace, repository)

- profile.debug('Checking repo permissions')
+ logger.debug('Checking repo permissions')
  if permission.can() or model.repository_is_public(namespace, repository):
-   profile.debug('Looking up repo image')
+   logger.debug('Looking up repo image')
    repo_image = model.get_repo_image_extended(namespace, repository, image_id)
+   if not repo_image:
+     logger.debug('Image not found')
+     abort(404, 'Image %(image_id)s not found', issue='unknown-image',
+           image_id=image_id)

-   profile.debug('Looking up the layer path')
+   logger.debug('Looking up the layer path')
    try:
      path = store.image_layer_path(repo_image.storage.uuid)

-     profile.debug('Looking up the direct download URL')
+     logger.debug('Looking up the direct download URL')
      direct_download_url = store.get_direct_download_url(repo_image.storage.locations, path)
      if direct_download_url:
-       profile.debug('Returning direct download URL')
+       logger.debug('Returning direct download URL')
        resp = redirect(direct_download_url)
        return resp

-     profile.debug('Streaming layer data')
+     logger.debug('Streaming layer data')
      # Close the database handle here for this process before we send the long download.
      database.close_db_filter(None)
      return Response(store.stream_read(repo_image.storage.locations, path), headers=headers)
    except (IOError, AttributeError):
-     profile.debug('Image not found')
+     logger.exception('Image layer data not found')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
            image_id=image_id)
@@ -168,29 +172,30 @@ def get_image_layer(namespace, repository, image_id, headers):
@process_auth
@extract_namespace_repo_from_session
def put_image_layer(namespace, repository, image_id):
- profile.debug('Checking repo permissions')
+ logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

- profile.debug('Retrieving image')
+ logger.debug('Retrieving image')
  repo_image = model.get_repo_image_extended(namespace, repository, image_id)
  try:
-   profile.debug('Retrieving image data')
+   logger.debug('Retrieving image data')
    uuid = repo_image.storage.uuid
    json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
  except (IOError, AttributeError):
+   logger.exception('Exception when retrieving image data')
    abort(404, 'Image %(image_id)s not found', issue='unknown-image',
          image_id=image_id)

- profile.debug('Retrieving image path info')
+ logger.debug('Retrieving image path info')
  layer_path = store.image_layer_path(uuid)

  if (store.exists(repo_image.storage.locations, layer_path) and not
      image_is_uploading(repo_image)):
    exact_abort(409, 'Image already exists')

- profile.debug('Storing layer data')
+ logger.debug('Storing layer data')
  input_stream = request.stream

  if request.headers.get('transfer-encoding') == 'chunked':

@@ -257,7 +262,7 @@ def put_image_layer(namespace, repository, image_id):
  # The layer is ready for download, send a job to the work queue to
  # process it.
- profile.debug('Adding layer to diff queue')
+ logger.debug('Adding layer to diff queue')
  repo = model.get_repository(namespace, repository)
  image_diff_queue.put([repo.namespace_user.username, repository, image_id], json.dumps({
    'namespace_user_id': repo.namespace_user.id,

@@ -272,7 +277,7 @@ def put_image_layer(namespace, repository, image_id):
@process_auth
@extract_namespace_repo_from_session
def put_image_checksum(namespace, repository, image_id):
- profile.debug('Checking repo permissions')
+ logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)
@@ -298,23 +303,23 @@ def put_image_checksum(namespace, repository, image_id):
    abort(400, 'Checksum not found in Cookie for image %(image_id)s',
          issue='missing-checksum-cookie', image_id=image_id)

- profile.debug('Looking up repo image')
+ logger.debug('Looking up repo image')
  repo_image = model.get_repo_image_extended(namespace, repository, image_id)
  if not repo_image or not repo_image.storage:
    abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

  uuid = repo_image.storage.uuid

- profile.debug('Looking up repo layer data')
+ logger.debug('Looking up repo layer data')
  if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)):
    abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

- profile.debug('Marking image path')
+ logger.debug('Marking image path')
  if not image_is_uploading(repo_image):
    abort(409, 'Cannot set checksum for image %(image_id)s',
          issue='image-write-error', image_id=image_id)

- profile.debug('Storing image checksum')
+ logger.debug('Storing image checksum')
  err = store_checksum(repo_image.storage, checksum)
  if err:
    abort(400, err)

@@ -331,7 +336,7 @@ def put_image_checksum(namespace, repository, image_id):
  # The layer is ready for download, send a job to the work queue to
  # process it.
- profile.debug('Adding layer to diff queue')
+ logger.debug('Adding layer to diff queue')
  repo = model.get_repository(namespace, repository)
  image_diff_queue.put([repo.namespace_user.username, repository, image_id], json.dumps({
    'namespace_user_id': repo.namespace_user.id,

@@ -348,23 +353,23 @@ def put_image_checksum(namespace, repository, image_id):
@require_completion
@set_cache_headers
def get_image_json(namespace, repository, image_id, headers):
- profile.debug('Checking repo permissions')
+ logger.debug('Checking repo permissions')
  permission = ReadRepositoryPermission(namespace, repository)
  if not permission.can() and not model.repository_is_public(namespace,
                                                             repository):
    abort(403)

- profile.debug('Looking up repo image')
+ logger.debug('Looking up repo image')
  repo_image = model.get_repo_image_extended(namespace, repository, image_id)

- profile.debug('Looking up repo layer data')
+ logger.debug('Looking up repo layer data')
  try:
    uuid = repo_image.storage.uuid
    data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
except (IOError, AttributeError): except (IOError, AttributeError):
flask_abort(404) flask_abort(404)
profile.debug('Looking up repo layer size') logger.debug('Looking up repo layer size')
size = repo_image.storage.image_size size = repo_image.storage.image_size
headers['X-Docker-Size'] = str(size) headers['X-Docker-Size'] = str(size)
@ -379,16 +384,16 @@ def get_image_json(namespace, repository, image_id, headers):
@require_completion @require_completion
@set_cache_headers @set_cache_headers
def get_image_ancestry(namespace, repository, image_id, headers): def get_image_ancestry(namespace, repository, image_id, headers):
profile.debug('Checking repo permissions') logger.debug('Checking repo permissions')
permission = ReadRepositoryPermission(namespace, repository) permission = ReadRepositoryPermission(namespace, repository)
if not permission.can() and not model.repository_is_public(namespace, if not permission.can() and not model.repository_is_public(namespace,
repository): repository):
abort(403) abort(403)
profile.debug('Looking up repo image') logger.debug('Looking up repo image')
repo_image = model.get_repo_image_extended(namespace, repository, image_id) repo_image = model.get_repo_image_extended(namespace, repository, image_id)
profile.debug('Looking up image data') logger.debug('Looking up image data')
try: try:
uuid = repo_image.storage.uuid uuid = repo_image.storage.uuid
data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid)) data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
@ -396,11 +401,11 @@ def get_image_ancestry(namespace, repository, image_id, headers):
abort(404, 'Image %(image_id)s not found', issue='unknown-image', abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id) image_id=image_id)
profile.debug('Converting to <-> from JSON') logger.debug('Converting to <-> from JSON')
response = make_response(json.dumps(json.loads(data)), 200) response = make_response(json.dumps(json.loads(data)), 200)
response.headers.extend(headers) response.headers.extend(headers)
profile.debug('Done') logger.debug('Done')
return response return response
@ -430,12 +435,12 @@ def store_checksum(image_storage, checksum):
@process_auth @process_auth
@extract_namespace_repo_from_session @extract_namespace_repo_from_session
def put_image_json(namespace, repository, image_id): def put_image_json(namespace, repository, image_id):
profile.debug('Checking repo permissions') logger.debug('Checking repo permissions')
permission = ModifyRepositoryPermission(namespace, repository) permission = ModifyRepositoryPermission(namespace, repository)
if not permission.can(): if not permission.can():
abort(403) abort(403)
profile.debug('Parsing image JSON') logger.debug('Parsing image JSON')
try: try:
data = json.loads(request.data.decode('utf8')) data = json.loads(request.data.decode('utf8'))
except ValueError: except ValueError:
@ -449,12 +454,22 @@ def put_image_json(namespace, repository, image_id):
abort(400, 'Missing key `id` in JSON for image: %(image_id)s', abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
issue='invalid-request', image_id=image_id) issue='invalid-request', image_id=image_id)
profile.debug('Looking up repo image') logger.debug('Looking up repo image')
repo_image = model.get_repo_image_extended(namespace, repository, image_id) repo_image = model.get_repo_image_extended(namespace, repository, image_id)
if not repo_image: if not repo_image:
profile.debug('Image not found') logger.debug('Image not found, creating image')
abort(404, 'Image %(image_id)s not found', issue='unknown-image', repo = model.get_repository(namespace, repository)
image_id=image_id) if repo is None:
abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
namespace=namespace, repository=repository)
username = get_authenticated_user() and get_authenticated_user().username
repo_image = model.find_create_or_link_image(image_id, repo, username, {},
store.preferred_locations[0])
# Create a temporary tag to prevent this image from getting garbage collected while the push
# is in progress.
model.create_temporary_hidden_tag(repo, repo_image, app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
uuid = repo_image.storage.uuid uuid = repo_image.storage.uuid
@ -466,24 +481,24 @@ def put_image_json(namespace, repository, image_id):
parent_image = None parent_image = None
if parent_id: if parent_id:
profile.debug('Looking up parent image') logger.debug('Looking up parent image')
parent_image = model.get_repo_image_extended(namespace, repository, parent_id) parent_image = model.get_repo_image_extended(namespace, repository, parent_id)
parent_uuid = parent_image and parent_image.storage.uuid parent_uuid = parent_image and parent_image.storage.uuid
parent_locations = parent_image and parent_image.storage.locations parent_locations = parent_image and parent_image.storage.locations
if parent_id: if parent_id:
profile.debug('Looking up parent image data') logger.debug('Looking up parent image data')
if (parent_id and not if (parent_id and not
store.exists(parent_locations, store.image_json_path(parent_uuid))): store.exists(parent_locations, store.image_json_path(parent_uuid))):
abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s', abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
issue='invalid-request', image_id=image_id, parent_id=parent_id) issue='invalid-request', image_id=image_id, parent_id=parent_id)
profile.debug('Looking up image storage paths') logger.debug('Looking up image storage paths')
json_path = store.image_json_path(uuid) json_path = store.image_json_path(uuid)
profile.debug('Checking if image already exists') logger.debug('Checking if image already exists')
if (store.exists(repo_image.storage.locations, json_path) and not if (store.exists(repo_image.storage.locations, json_path) and not
image_is_uploading(repo_image)): image_is_uploading(repo_image)):
exact_abort(409, 'Image already exists') exact_abort(409, 'Image already exists')
@ -496,24 +511,24 @@ def put_image_json(namespace, repository, image_id):
command_list = data.get('container_config', {}).get('Cmd', None) command_list = data.get('container_config', {}).get('Cmd', None)
command = json.dumps(command_list) if command_list else None command = json.dumps(command_list) if command_list else None
profile.debug('Setting image metadata') logger.debug('Setting image metadata')
model.set_image_metadata(image_id, namespace, repository, model.set_image_metadata(image_id, namespace, repository,
data.get('created'), data.get('comment'), command, data.get('created'), data.get('comment'), command,
parent_image) parent_image)
profile.debug('Putting json path') logger.debug('Putting json path')
store.put_content(repo_image.storage.locations, json_path, request.data) store.put_content(repo_image.storage.locations, json_path, request.data)
profile.debug('Generating image ancestry') logger.debug('Generating image ancestry')
try: try:
generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid, generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid,
parent_locations) parent_locations)
except IOError as ioe: except IOError as ioe:
profile.debug('Error when generating ancestry: %s' % ioe.message) logger.debug('Error when generating ancestry: %s' % ioe.message)
abort(404) abort(404)
profile.debug('Done') logger.debug('Done')
return make_response('true', 200) return make_response('true', 200)
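
The fallback above is the interesting part of this hunk: a push may now reference an image the repository has never seen, so the registry creates (or links) it on demand and pins it against garbage collection for the duration of the push. Read in isolation, the sequence is roughly the following sketch (model, store, and app stand in for Quay's data layer and Flask app; the config key is taken from the diff):

  def ensure_image_for_push(namespace, repository, image_id, username):
    # Sketch only: mirrors the new put_image_json fallback above.
    repo = model.get_repository(namespace, repository)
    if repo is None:
      raise LookupError('no such repository')

    # Link a matching global image or create a fresh one in the preferred location.
    repo_image = model.find_create_or_link_image(image_id, repo, username, {},
                                                 store.preferred_locations[0])

    # The hidden tag expires on its own, so no cleanup is needed if the push dies midway.
    model.create_temporary_hidden_tag(repo, repo_image,
                                      app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
    return repo_image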

View file

@@ -6,7 +6,6 @@ from flask import request
 from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token

 logger = logging.getLogger(__name__)
-profile = logging.getLogger('application.profiler')

 def track_and_log(event_name, repo, **kwargs):
   repository = repo.name

@@ -19,20 +18,27 @@ def track_and_log(event_name, repo, **kwargs):
   analytics_id = 'anonymous'

-  profile.debug('Logging the %s to Mixpanel and the log system', event_name)
-  if get_validated_oauth_token():
-    oauth_token = get_validated_oauth_token()
-    metadata['oauth_token_id'] = oauth_token.id
-    metadata['oauth_token_application_id'] = oauth_token.application.client_id
-    metadata['oauth_token_application'] = oauth_token.application.name
-    analytics_id = 'oauth:' + oauth_token.id
-  elif get_authenticated_user():
-    metadata['username'] = get_authenticated_user().username
-    analytics_id = get_authenticated_user().username
-  elif get_validated_token():
-    metadata['token'] = get_validated_token().friendly_name
-    metadata['token_code'] = get_validated_token().code
-    analytics_id = 'token:' + get_validated_token().code
+  authenticated_oauth_token = get_validated_oauth_token()
+  authenticated_user = get_authenticated_user()
+  authenticated_token = get_validated_token() if not authenticated_user else None
+
+  logger.debug('Logging the %s to Mixpanel and the log system', event_name)
+  if authenticated_oauth_token:
+    metadata['oauth_token_id'] = authenticated_oauth_token.id
+    metadata['oauth_token_application_id'] = authenticated_oauth_token.application.client_id
+    metadata['oauth_token_application'] = authenticated_oauth_token.application.name
+    analytics_id = 'oauth:' + authenticated_oauth_token.id
+  elif authenticated_user:
+    metadata['username'] = authenticated_user.username
+    analytics_id = authenticated_user.username
+  elif authenticated_token:
+    metadata['token'] = authenticated_token.friendly_name
+    metadata['token_code'] = authenticated_token.code
+
+    if authenticated_token.kind:
+      metadata['token_type'] = authenticated_token.kind.name
+
+    analytics_id = 'token:' + authenticated_token.code
   else:
     metadata['public'] = True
     analytics_id = 'anonymous'

@@ -42,21 +48,27 @@ def track_and_log(event_name, repo, **kwargs):
   }

   # Publish the user event (if applicable)
-  if get_authenticated_user():
+  logger.debug('Checking publishing %s to the user events system', event_name)
+  if authenticated_user:
+    logger.debug('Publishing %s to the user events system', event_name)
     user_event_data = {
       'action': event_name,
       'repository': repository,
       'namespace': namespace
     }

-    event = userevents.get_event(get_authenticated_user().username)
+    event = userevents.get_event(authenticated_user.username)
     event.publish_event_data('docker-cli', user_event_data)

   # Save the action to mixpanel.
+  logger.debug('Logging the %s to Mixpanel', event_name)
   analytics.track(analytics_id, event_name, extra_params)

   # Log the action to the database.
+  logger.debug('Logging the %s to logs system', event_name)
   model.log_action(event_name, namespace,
-                   performer=get_authenticated_user(),
+                   performer=authenticated_user,
                    ip=request.remote_addr, metadata=metadata,
                    repository=repo)
+
+  logger.debug('Track and log of %s complete', event_name)
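
The net effect of this hunk: the authentication context is resolved once into locals instead of being re-queried at each branch, token-kind metadata is recorded when present, and each phase (user events, Mixpanel, database log) now emits its own debug line. A hypothetical call site, to make the flow concrete (the event name and kwarg are illustrative; extra kwargs land in the metadata dict):

  repo = model.get_repository(namespace, repository)
  track_and_log('pull_repo', repo, tag='latest')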

View file

@@ -226,7 +226,7 @@ class GithubBuildTrigger(BuildTrigger):
         'personal': False,
         'repos': repo_list,
         'info': {
-          'name': org.name,
+          'name': org.name or org.login,
           'avatar_url': org.avatar_url
         }
       })

@@ -345,8 +345,10 @@ class GithubBuildTrigger(BuildTrigger):
     # compute the tag(s)
     branch = ref.split('/')[-1]
     tags = {branch}

     if branch == repo.default_branch:
       tags.add('latest')

     logger.debug('Pushing to tags: %s' % tags)

     # compute the subdir

@@ -354,7 +356,14 @@ class GithubBuildTrigger(BuildTrigger):
     joined_subdir = os.path.join(tarball_subdir, repo_subdir)
     logger.debug('Final subdir: %s' % joined_subdir)

-    return dockerfile_id, list(tags), build_name, joined_subdir
+    # compute the metadata
+    metadata = {
+      'commit_sha': commit_sha,
+      'ref': ref,
+      'default_branch': repo.default_branch
+    }
+
+    return dockerfile_id, list(tags), build_name, joined_subdir, metadata

   @staticmethod
   def get_display_name(sha):
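
For reference, the widened return value is consumed by the webhook endpoint further down in this diff: the trigger handler now yields a 5-tuple whose final element is the metadata dict, which travels into the build as trigger_metadata. Sketch of the consumer side, taken from that hunk:

  dockerfile_id, tags, name, subdir, metadata = handler.handle_trigger_request(
      request, trigger.auth_token, config_dict)
  start_build(repo, dockerfile_id, tags, name, subdir, False, trigger,
              pull_robot_name=pull_robot_name, trigger_metadata=metadata)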

View file

@@ -2,11 +2,10 @@ import logging
 import json
 import hashlib

-from flask import redirect, Blueprint, abort, send_file, request
+from flask import redirect, Blueprint, abort, send_file, make_response

-from app import app
+from app import app, signer
 from auth.auth import process_auth
-from auth.auth_context import get_authenticated_user
 from auth.permissions import ReadRepositoryPermission
 from data import model
 from data import database

@@ -15,13 +14,16 @@ from storage import Storage
 from util.queuefile import QueueFile
 from util.queueprocess import QueueProcess
-from util.gzipwrap import GzipWrap
-from util.dockerloadformat import build_docker_load_stream
+from formats.squashed import SquashedDockerImage
+from formats.aci import ACIImage

+# pylint: disable=invalid-name
 verbs = Blueprint('verbs', __name__)
 logger = logging.getLogger(__name__)

-def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, image_id_list):
+def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json,
+                 image_id_list):
   store = Storage(app)

   # For performance reasons, we load the full image list here, cache it, then disconnect from

@@ -42,20 +44,43 @@ def _open_stream(...):
       current_image_path)
     current_image_id = current_image_entry.id

-    logger.debug('Returning image layer %s: %s' % (current_image_id, current_image_path))
+    logger.debug('Returning image layer %s: %s', current_image_id, current_image_path)
     yield current_image_stream

-  stream = build_docker_load_stream(namespace, repository, tag, synthetic_image_id, image_json,
-                                    get_next_image, get_next_layer)
+  stream = formatter.build_stream(namespace, repository, tag, synthetic_image_id, image_json,
+                                  get_next_image, get_next_layer)

   return stream.read

-def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, queue_file):
+def _sign_synthetic_image(verb, linked_storage_uuid, queue_file):
+  signature = None
+  try:
+    signature = signer.detached_sign(queue_file)
+  except:
+    logger.exception('Exception when signing %s image %s', verb, linked_storage_uuid)
+    return
+
+  # Setup the database (since this is a new process) and then disconnect immediately
+  # once the operation completes.
+  if not queue_file.raised_exception:
+    with database.UseThenDisconnect(app.config):
+      try:
+        derived = model.get_storage_by_uuid(linked_storage_uuid)
+      except model.InvalidImageException:
+        return
+
+      signature_entry = model.find_or_create_storage_signature(derived, signer.name)
+      signature_entry.signature = signature
+      signature_entry.uploading = False
+      signature_entry.save()
+
+def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_locations, queue_file):
   store = Storage(app)

   def handle_exception(ex):
-    logger.debug('Exception when building squashed image %s: %s', linked_storage_uuid, ex)
+    logger.debug('Exception when building %s image %s: %s', verb, linked_storage_uuid, ex)

     with database.UseThenDisconnect(app.config):
       model.delete_derived_storage_by_uuid(linked_storage_uuid)

@@ -67,86 +92,193 @@ def _write_synthetic_image_to_storage(...):
   queue_file.close()

   if not queue_file.raised_exception:
+    # Setup the database (since this is a new process) and then disconnect immediately
+    # once the operation completes.
     with database.UseThenDisconnect(app.config):
       done_uploading = model.get_storage_by_uuid(linked_storage_uuid)
       done_uploading.uploading = False
       done_uploading.save()

-@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
-@process_auth
-def get_squashed_tag(namespace, repository, tag):
-  permission = ReadRepositoryPermission(namespace, repository)
-  if permission.can() or model.repository_is_public(namespace, repository):
-    # Lookup the requested tag.
-    try:
-      tag_image = model.get_tag_image(namespace, repository, tag)
-    except model.DataModelException:
-      abort(404)
-
-    # Lookup the tag's image and storage.
-    repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
-    if not repo_image:
-      abort(404)
-
-    # Log the action.
-    track_and_log('repo_verb', repo_image.repository, tag=tag, verb='squash')
-
-    store = Storage(app)
-    derived = model.find_or_create_derived_storage(repo_image.storage, 'squash',
-                                                   store.preferred_locations[0])
-    if not derived.uploading:
-      logger.debug('Derived image %s exists in storage', derived.uuid)
-      derived_layer_path = store.image_layer_path(derived.uuid)
-      download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
-      if download_url:
-        logger.debug('Redirecting to download URL for derived image %s', derived.uuid)
-        return redirect(download_url)
-
-      # Close the database handle here for this process before we send the long download.
-      database.close_db_filter(None)
-
-      logger.debug('Sending cached derived image %s', derived.uuid)
-      return send_file(store.stream_read_file(derived.locations, derived_layer_path))
-
-    # Load the ancestry for the image.
-    logger.debug('Building and returning derived image %s', derived.uuid)
-    uuid = repo_image.storage.uuid
-    ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
-    full_image_list = json.loads(ancestry_data)
-
-    # Load the image's JSON layer.
-    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
-    image_json = json.loads(image_json_data)
-
-    # Calculate a synthetic image ID.
-    synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':squash').hexdigest()
-
-    # Create a queue process to generate the data. The queue files will read from the process
-    # and send the results to the client and storage.
-    def _cleanup():
-      # Close any existing DB connection once the process has exited.
-      database.close_db_filter(None)
-
-    args = (namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
-    queue_process = QueueProcess(_open_stream,
-                                 8 * 1024, 10 * 1024 * 1024,  # 8K/10M chunk/max
-                                 args, finished=_cleanup)
-
-    client_queue_file = QueueFile(queue_process.create_queue(), 'client')
-    storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
-
-    # Start building.
-    queue_process.run()
-
-    # Start the storage saving.
-    storage_args = (derived.uuid, derived.locations, storage_queue_file)
-    QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
-
-    # Close the database handle here for this process before we send the long download.
-    database.close_db_filter(None)
-
-    # Return the client's data.
-    return send_file(client_queue_file)
-
-  abort(403)
+# pylint: disable=too-many-locals
+def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
+  permission = ReadRepositoryPermission(namespace, repository)
+
+  # pylint: disable=no-member
+  if not permission.can() and not model.repository_is_public(namespace, repository):
+    abort(403)
+
+  # Lookup the requested tag.
+  try:
+    tag_image = model.get_tag_image(namespace, repository, tag)
+  except model.DataModelException:
+    abort(404)
+
+  # Lookup the tag's image and storage.
+  repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
+  if not repo_image:
+    abort(404)
+
+  # If there is a data checker, call it first.
+  uuid = repo_image.storage.uuid
+  image_json = None
+
+  if checker is not None:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)
+
+    if not checker(image_json):
+      logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
+      abort(404)
+
+  return (repo_image, tag_image, image_json)
+
+# pylint: disable=too-many-locals
+def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Lookup the derived image storage for the verb.
+  derived = model.find_derived_storage(repo_image.storage, verb)
+  if derived is None or derived.uploading:
+    abort(404)
+
+  # Check if we have a valid signer configured.
+  if not signer.name:
+    abort(404)
+
+  # Lookup the signature for the verb.
+  signature_entry = model.lookup_storage_signature(derived, signer.name)
+  if signature_entry is None:
+    abort(404)
+
+  # Return the signature.
+  return make_response(signature_entry.signature)
+
+# pylint: disable=too-many-locals
+def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Log the action.
+  track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)
+
+  # Lookup/create the derived image storage for the verb.
+  derived = model.find_or_create_derived_storage(repo_image.storage, verb,
+                                                 store.preferred_locations[0])
+
+  if not derived.uploading:
+    logger.debug('Derived %s image %s exists in storage', verb, derived.uuid)
+    derived_layer_path = store.image_layer_path(derived.uuid)
+    download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
+    if download_url:
+      logger.debug('Redirecting to download URL for derived %s image %s', verb, derived.uuid)
+      return redirect(download_url)
+
+    # Close the database handle here for this process before we send the long download.
+    database.close_db_filter(None)
+
+    logger.debug('Sending cached derived %s image %s', verb, derived.uuid)
+    return send_file(store.stream_read_file(derived.locations, derived_layer_path))
+
+  # Load the ancestry for the image.
+  uuid = repo_image.storage.uuid
+
+  logger.debug('Building and returning derived %s image %s', verb, derived.uuid)
+  ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
+  full_image_list = json.loads(ancestry_data)
+
+  # Load the image's JSON layer.
+  if not image_json:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)
+
+  # Calculate a synthetic image ID.
+  synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest()
+
+  def _cleanup():
+    # Close any existing DB connection once the process has exited.
+    database.close_db_filter(None)
+
+  # Create a queue process to generate the data. The queue files will read from the process
+  # and send the results to the client and storage.
+  args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
+  queue_process = QueueProcess(_open_stream,
+                               8 * 1024, 10 * 1024 * 1024,  # 8K/10M chunk/max
+                               args, finished=_cleanup)
+
+  client_queue_file = QueueFile(queue_process.create_queue(), 'client')
+  storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
+
+  # If signing is required, add a QueueFile for signing the image as we stream it out.
+  signing_queue_file = None
+  if sign and signer.name:
+    signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
+
+  # Start building.
+  queue_process.run()
+
+  # Start the storage saving.
+  storage_args = (verb, derived.uuid, derived.locations, storage_queue_file)
+  QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
+
+  if sign and signer.name:
+    signing_args = (verb, derived.uuid, signing_queue_file)
+    QueueProcess.run_process(_sign_synthetic_image, signing_args, finished=_cleanup)
+
+  # Close the database handle here for this process before we send the long download.
+  database.close_db_filter(None)
+
+  # Return the client's data.
+  return send_file(client_queue_file)
+
+def os_arch_checker(os, arch):
+  def checker(image_json):
+    # Verify the architecture and os.
+    operating_system = image_json.get('os', 'linux')
+    if operating_system != os:
+      return False
+
+    architecture = image_json.get('architecture', 'amd64')
+
+    # Note: Some older Docker images have 'x86_64' rather than 'amd64'.
+    # We allow the conversion here.
+    if architecture == 'x86_64' and operating_system == 'linux':
+      architecture = 'amd64'
+
+    if architecture != arch:
+      return False
+
+    return True
+
+  return checker
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_signature(server, namespace, repository, tag, os, arch):
+  return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
+                              os=os, arch=arch)
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_image(server, namespace, repository, tag, os, arch):
+  return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
+                    sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)
+
+@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
+@process_auth
+def get_squashed_tag(namespace, repository, tag):
+  return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImage())
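
Assuming the routes above, a client pulls a derived image and its detached signature with two plain GETs. A sketch with requests (the host, the repository, and the '/c1' verb prefix are placeholders, not confirmed by this diff):

  import requests

  base = 'https://quay.example.com/c1'
  repo, tag = 'myorg/myrepo', 'latest'

  # The ACI itself (built by ACIImage and signed while streaming).
  aci = requests.get('%s/aci/quay.example.com/%s/%s/aci/linux/amd64/' % (base, repo, tag))

  # The detached signature stored by _sign_synthetic_image.
  sig = requests.get('%s/aci/quay.example.com/%s/%s/sig/linux/amd64/' % (base, repo, tag))

Note the asymmetry baked into _repo_verb_signature: it only finds a signature for a derived image that has finished building, so a signature request can 404 until the first image fetch completes.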

View file

@@ -1,32 +1,38 @@
 import logging

 from flask import (abort, redirect, request, url_for, make_response, Response,
-                   Blueprint, send_from_directory, jsonify)
+                   Blueprint, send_from_directory, jsonify, send_file)

 from avatar_generator import Avatar
 from flask.ext.login import current_user
 from urlparse import urlparse
-from health.healthcheck import HealthCheck
+from health.healthcheck import get_healthchecker

 from data import model
 from data.model.oauth import DatabaseAuthorizationProvider
-from app import app, billing as stripe, build_logs, avatar
+from app import app, billing as stripe, build_logs, avatar, signer
 from auth.auth import require_session_login, process_oauth
-from auth.permissions import AdministerOrganizationPermission, ReadRepositoryPermission
+from auth.permissions import (AdministerOrganizationPermission, ReadRepositoryPermission,
+                              SuperUserPermission)
 from util.invoice import renderInvoiceToPdf
 from util.seo import render_snapshot
 from util.cache import no_cache
 from endpoints.common import common_login, render_page_template, route_show_if, param_required
-from endpoints.csrf import csrf_protect, generate_csrf_token
+from endpoints.csrf import csrf_protect, generate_csrf_token, verify_csrf
 from endpoints.registry import set_cache_headers
-from util.names import parse_repository_name
+from util.names import parse_repository_name, parse_repository_name_and_tag
 from util.useremails import send_email_changed
+from util.systemlogs import build_logs_archive
 from auth import scopes

 import features

 logger = logging.getLogger(__name__)

+# Capture the unverified SSL errors.
+logging.captureWarnings(True)
+
 web = Blueprint('web', __name__)

 STATUS_TAGS = app.config['STATUS_TAGS']

@@ -57,6 +63,14 @@ def snapshot(path = ''):
   abort(404)

+@web.route('/aci-signing-key')
+@no_cache
+def aci_signing_key():
+  if not signer.name:
+    abort(404)
+
+  return send_file(signer.public_key_path)
+
 @web.route('/plans/')
 @no_cache
 @route_show_if(features.BILLING)

@@ -95,6 +109,7 @@ def organizations():
 def user():
   return index('')

 @web.route('/superuser/')
 @no_cache
 @route_show_if(features.SUPER_USERS)

@@ -102,6 +117,13 @@ def superuser():
   return index('')

+@web.route('/setup/')
+@no_cache
+@route_show_if(features.SUPER_USERS)
+def setup():
+  return index('')
+
 @web.route('/signin/')
 @no_cache
 def signin(redirect=None):

@@ -158,33 +180,27 @@ def v1():
   return index('')

+# TODO(jschorr): Remove this mirrored endpoint once we migrate ELB.
 @web.route('/health', methods=['GET'])
+@web.route('/health/instance', methods=['GET'])
 @no_cache
-def health():
-  db_healthy = model.check_health()
-  buildlogs_healthy = build_logs.check_health()
-
-  check = HealthCheck.get_check(app.config['HEALTH_CHECKER'][0], app.config['HEALTH_CHECKER'][1])
-  (data, is_healthy) = check.conduct_healthcheck(db_healthy, buildlogs_healthy)
-
-  response = jsonify(dict(data = data, is_healthy = is_healthy))
-  response.status_code = 200 if is_healthy else 503
+def instance_health():
+  checker = get_healthchecker(app)
+  (data, status_code) = checker.check_instance()
+  response = jsonify(dict(data=data, status_code=status_code))
+  response.status_code = status_code
   return response

+# TODO(jschorr): Remove this mirrored endpoint once we migrate pingdom.
 @web.route('/status', methods=['GET'])
+@web.route('/health/endtoend', methods=['GET'])
 @no_cache
-def status():
-  db_healthy = model.check_health()
-  buildlogs_healthy = build_logs.check_health()
-
-  response = jsonify({
-    'db_healthy': db_healthy,
-    'buildlogs_healthy': buildlogs_healthy,
-    'is_testing': app.config['TESTING'],
-  })
-  response.status_code = 200 if db_healthy and buildlogs_healthy else 503
+def endtoend_health():
+  checker = get_healthchecker(app)
+  (data, status_code) = checker.check_endtoend()
+  response = jsonify(dict(data=data, status_code=status_code))
+  response.status_code = status_code
   return response
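
Both endpoints now share one response shape. A hypothetical healthy response for GET /health/instance, using the service names defined in health/services.py later in this diff:

  example = {
    'data': {
      'services': {'registry_gunicorn': True, 'database': True, 'redis': True},
      'notes': [],
      'is_testing': False,
    },
    'status_code': 200,
  }

The HTTP status mirrors data['status_code'], so load balancers can key off either.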
@@ -229,14 +245,14 @@ def robots():
 @web.route('/<path:repository>')
 @no_cache
 @process_oauth
-@parse_repository_name
-def redirect_to_repository(namespace, reponame):
+@parse_repository_name_and_tag
+def redirect_to_repository(namespace, reponame, tag):
   permission = ReadRepositoryPermission(namespace, reponame)
   is_public = model.repository_is_public(namespace, reponame)

   if permission.can() or is_public:
     repository_name = '/'.join([namespace, reponame])
-    return redirect(url_for('web.repository', path=repository_name))
+    return redirect(url_for('web.repository', path=repository_name, tag=tag))

   abort(404)

@@ -471,3 +487,21 @@ def exchange_code_for_token():
   provider = FlaskAuthorizationProvider()
   return provider.get_token(grant_type, client_id, client_secret, redirect_uri, code, scope=scope)

+@web.route('/systemlogsarchive', methods=['GET'])
+@process_oauth
+@route_show_if(features.SUPER_USERS)
+@no_cache
+def download_logs_archive():
+  # Note: We cannot use the decorator here because this is a GET method. That being said, this
+  # information is sensitive enough that we want the extra protection.
+  verify_csrf()
+
+  if SuperUserPermission().can():
+    archive_data = build_logs_archive(app)
+    return Response(archive_data,
+                    mimetype="application/octet-stream",
+                    headers={"Content-Disposition": "attachment;filename=erlogs.tar.gz"})
+
+  abort(403)

View file

@@ -91,7 +91,7 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
   try:
     specs = handler.handle_trigger_request(request, trigger.auth_token,
                                            config_dict)
-    dockerfile_id, tags, name, subdir = specs
+    dockerfile_id, tags, name, subdir, metadata = specs

   except ValidationRequestException:
     # This was just a validation request, we don't need to build anything

@@ -104,7 +104,7 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
   pull_robot_name = model.get_pull_robot_name(trigger)
   repo = model.get_repository(namespace, repository)
   start_build(repo, dockerfile_id, tags, name, subdir, False, trigger,
-              pull_robot_name=pull_robot_name)
+              pull_robot_name=pull_robot_name, trigger_metadata=metadata)

   return make_response('Okay')

View file

@@ -6,7 +6,7 @@ LOCAL_DIRECTORY = 'static/ldn/'

 EXTERNAL_JS = [
   'code.jquery.com/jquery.js',
-  'netdna.bootstrapcdn.com/bootstrap/3.0.0/js/bootstrap.min.js',
+  'netdna.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js',
   'ajax.googleapis.com/ajax/libs/angularjs/1.2.9/angular.min.js',
   'ajax.googleapis.com/ajax/libs/angularjs/1.2.9/angular-route.min.js',
   'ajax.googleapis.com/ajax/libs/angularjs/1.2.9/angular-sanitize.min.js',

@@ -18,15 +18,15 @@ EXTERNAL_JS = [
 ]

 EXTERNAL_CSS = [
-  'netdna.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.css',
-  'netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.no-icons.min.css',
-  'fonts.googleapis.com/css?family=Droid+Sans:400,700',
+  'netdna.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.css',
+  'netdna.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css',
+  'fonts.googleapis.com/css?family=Source+Sans+Pro:400,700',
 ]

 EXTERNAL_FONTS = [
-  'netdna.bootstrapcdn.com/font-awesome/4.0.3/fonts/fontawesome-webfont.woff?v=4.0.3',
-  'netdna.bootstrapcdn.com/font-awesome/4.0.3/fonts/fontawesome-webfont.ttf?v=4.0.3',
-  'netdna.bootstrapcdn.com/font-awesome/4.0.3/fonts/fontawesome-webfont.svg?v=4.0.3',
+  'netdna.bootstrapcdn.com/font-awesome/4.2.0/fonts/fontawesome-webfont.woff?v=4.2.0',
+  'netdna.bootstrapcdn.com/font-awesome/4.2.0/fonts/fontawesome-webfont.ttf?v=4.2.0',
+  'netdna.bootstrapcdn.com/font-awesome/4.2.0/fonts/fontawesome-webfont.svg?v=4.2.0',
 ]

0
formats/__init__.py Normal file
View file

196
formats/aci.py Normal file
View file

@@ -0,0 +1,196 @@
from app import app
from util.streamlayerformat import StreamLayerMerger
from formats.tarimageformatter import TarImageFormatter

import json
import re

# pylint: disable=bad-continuation

class ACIImage(TarImageFormatter):
  """ Image formatter which produces an ACI-compatible TAR.
  """

  # pylint: disable=too-many-arguments
  def stream_generator(self, namespace, repository, tag, synthetic_image_id,
                       layer_json, get_image_iterator, get_layer_iterator):
    # ACI Format (.tar):
    #   manifest - The JSON manifest
    #   rootfs - The root file system

    # Yield the manifest.
    yield self.tar_file('manifest', self._build_manifest(namespace, repository, tag, layer_json,
                                                         synthetic_image_id))

    # Yield the merged layer data.
    yield self.tar_folder('rootfs')

    layer_merger = StreamLayerMerger(get_layer_iterator, path_prefix='rootfs/')
    for entry in layer_merger.get_generator():
      yield entry

  @staticmethod
  def _build_isolators(docker_config):
    """ Builds ACI isolator config from the docker config. """

    def _isolate_memory(memory):
      return {
        "name": "memory/limit",
        "value": str(memory) + 'B'
      }

    def _isolate_swap(memory):
      return {
        "name": "memory/swap",
        "value": str(memory) + 'B'
      }

    def _isolate_cpu(cpu):
      return {
        "name": "cpu/shares",
        "value": str(cpu)
      }

    def _isolate_capabilities(capabilities_set_value):
      capabilities_set = re.split(r'[\s,]', capabilities_set_value)
      return {
        "name": "capabilities/bounding-set",
        "value": ' '.join(capabilities_set)
      }

    mappers = {
      'Memory': _isolate_memory,
      'MemorySwap': _isolate_swap,
      'CpuShares': _isolate_cpu,
      'Cpuset': _isolate_capabilities
    }

    isolators = []

    for config_key in mappers:
      value = docker_config.get(config_key)
      if value:
        isolators.append(mappers[config_key](value))

    return isolators

  @staticmethod
  def _build_ports(docker_config):
    """ Builds the ports definitions for the ACI. """
    ports = []

    for docker_port_definition in docker_config.get('ports', {}):
      # Formats:
      #   port/tcp
      #   port/udp
      #   port
      protocol = 'tcp'
      port_number = -1

      if '/' in docker_port_definition:
        (port_number, protocol) = docker_port_definition.split('/')
      else:
        port_number = docker_port_definition

      try:
        port_number = int(port_number)
        ports.append({
          "name": "port-%s" % port_number,
          "port": port_number,
          "protocol": protocol
        })
      except ValueError:
        pass

    return ports

  @staticmethod
  def _build_volumes(docker_config):
    """ Builds the volumes definitions for the ACI. """
    volumes = []
    names = set()

    def get_name(docker_volume_path):
      parts = docker_volume_path.split('/')
      name = ''

      while True:
        name = name + parts[-1]
        parts = parts[0:-1]
        # set.add() always returns None, so it cannot serve as the uniqueness test;
        # check membership explicitly and claim the name once it is unique.
        if name not in names:
          names.add(name)
          break
        name = '/' + name

      return name

    for docker_volume_path in docker_config.get('volumes', {}):
      volumes.append({
        "name": get_name(docker_volume_path),
        "path": docker_volume_path,
        "readOnly": False
      })
    return volumes

  @staticmethod
  def _build_manifest(namespace, repository, tag, docker_layer_data, synthetic_image_id):
    """ Builds an ACI manifest from the docker layer data. """

    config = docker_layer_data.get('config', {})
    source_url = "%s://%s/%s/%s:%s" % (app.config['PREFERRED_URL_SCHEME'],
                                       app.config['SERVER_HOSTNAME'],
                                       namespace, repository, tag)

    # ACI requires that the execution command be absolutely referenced. Therefore, if we find
    # a relative command, we give it as an argument to /bin/sh to resolve and execute for us.
    entrypoint = config.get('Entrypoint', []) or []
    exec_path = entrypoint + (config.get('Cmd', []) or [])
    if exec_path and not exec_path[0].startswith('/'):
      exec_path = ['/bin/sh', '-c', '""%s""' % ' '.join(exec_path)]

    # TODO(jschorr): ACI doesn't support : in the name, so remove any ports.
    hostname = app.config['SERVER_HOSTNAME']
    hostname = hostname.split(':', 1)[0]

    manifest = {
      "acKind": "ImageManifest",
      "acVersion": "0.2.0",
      "name": '%s/%s/%s/%s' % (hostname, namespace, repository, tag),
      "labels": [
        {
          "name": "version",
          "value": "1.0.0"
        },
        {
          "name": "arch",
          "value": docker_layer_data.get('architecture', 'amd64')
        },
        {
          "name": "os",
          "value": docker_layer_data.get('os', 'linux')
        }
      ],
      "app": {
        "exec": exec_path,
        # Below, `or 'root'` is required to replace empty string from Dockerfiles.
        "user": config.get('User', '') or 'root',
        "group": config.get('Group', '') or 'root',
        "eventHandlers": [],
        "workingDirectory": config.get('WorkingDir', '') or '/',
        # Guard against a missing or null Env, and split on the first '=' only so
        # values containing '=' survive intact.
        "environment": [{"name": key, "value": value}
                        for (key, value) in [e.split('=', 1) for e in config.get('Env') or []]],
        "isolators": ACIImage._build_isolators(config),
        "mountPoints": ACIImage._build_volumes(config),
        "ports": ACIImage._build_ports(config),
        "annotations": [
          {"name": "created", "value": docker_layer_data.get('created', '')},
          {"name": "homepage", "value": source_url},
          {"name": "quay.io/derived-image", "value": synthetic_image_id},
        ]
      },
    }

    return json.dumps(manifest)
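
A worked example of the port mapping above, under the parsing rules in _build_ports (dict iteration order is not guaranteed, so treat the output as a set):

  # {'ports': {'8080/tcp': {}, '53/udp': {}, '443': {}}}  becomes:
  #   {'name': 'port-8080', 'port': 8080, 'protocol': 'tcp'}
  #   {'name': 'port-53',   'port': 53,   'protocol': 'udp'}
  #   {'name': 'port-443',  'port': 443,  'protocol': 'tcp'}   # protocol defaults to tcp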

102
formats/squashed.py Normal file
View file

@@ -0,0 +1,102 @@
from app import app
from util.gzipwrap import GZIP_BUFFER_SIZE
from util.streamlayerformat import StreamLayerMerger
from formats.tarimageformatter import TarImageFormatter

import copy
import json

class FileEstimationException(Exception):
  """ Exception raised by SquashedDockerImage.stream_generator if the estimated size of the
      layer TAR was lower than the actual size. This means the sent TAR header is wrong, and
      we have to fail.
  """
  pass

class SquashedDockerImage(TarImageFormatter):
  """ Image formatter which produces a squashed image compatible with the `docker load`
      command.
  """

  # pylint: disable=too-many-arguments,too-many-locals
  def stream_generator(self, namespace, repository, tag, synthetic_image_id,
                       layer_json, get_image_iterator, get_layer_iterator):
    # Docker import V1 Format (.tar):
    #   repositories - JSON file containing a repo -> tag -> image map
    #   {image ID folder}:
    #     json - The layer JSON
    #     layer.tar - The TARed contents of the layer
    #     VERSION - The docker import version: '1.0'
    layer_merger = StreamLayerMerger(get_layer_iterator)

    # Yield the repositories file:
    synthetic_layer_info = {}
    synthetic_layer_info[tag + '.squash'] = synthetic_image_id

    hostname = app.config['SERVER_HOSTNAME']
    repositories = {}
    repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info

    yield self.tar_file('repositories', json.dumps(repositories))

    # Yield the image ID folder.
    yield self.tar_folder(synthetic_image_id)

    # Yield the JSON layer data.
    layer_json = SquashedDockerImage._build_layer_json(layer_json, synthetic_image_id)
    yield self.tar_file(synthetic_image_id + '/json', json.dumps(layer_json))

    # Yield the VERSION file.
    yield self.tar_file(synthetic_image_id + '/VERSION', '1.0')

    # Yield the merged layer data's header.
    estimated_file_size = 0
    for image in get_image_iterator():
      estimated_file_size += image.storage.uncompressed_size

    yield self.tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size)

    # Yield the contents of the merged layer.
    yielded_size = 0
    for entry in layer_merger.get_generator():
      yield entry
      yielded_size += len(entry)

    # If the yielded size is more than the estimated size (which is unlikely but possible), then
    # raise an exception since the tar header will be wrong.
    if yielded_size > estimated_file_size:
      raise FileEstimationException()

    # If the yielded size is less than the estimated size (which is likely), fill the rest with
    # zeros.
    if yielded_size < estimated_file_size:
      to_yield = estimated_file_size - yielded_size
      while to_yield > 0:
        yielded = min(to_yield, GZIP_BUFFER_SIZE)
        yield '\0' * yielded
        to_yield -= yielded

    # Yield any file padding to 512 bytes that is necessary.
    yield self.tar_file_padding(estimated_file_size)

    # Last two records are empty in TAR spec.
    yield '\0' * 512
    yield '\0' * 512

  @staticmethod
  def _build_layer_json(layer_json, synthetic_image_id):
    updated_json = copy.deepcopy(layer_json)
    updated_json['id'] = synthetic_image_id

    if 'parent' in updated_json:
      del updated_json['parent']

    if 'config' in updated_json and 'Image' in updated_json['config']:
      updated_json['config']['Image'] = synthetic_image_id

    if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
      updated_json['container_config']['Image'] = synthetic_image_id

    return updated_json
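
The size bookkeeping in stream_generator is the subtle part: the TAR header for layer.tar is emitted before the layer bytes exist, so the estimate fixes the file length in advance and the stream must then be padded (or aborted) to match. The same idea in miniature, assuming a chunk iterator and a precomputed estimate:

  def pad_to_estimate(chunks, estimated_size, buffer_size=65536):
    yielded = 0
    for chunk in chunks:
      yielded += len(chunk)
      yield chunk

    if yielded > estimated_size:
      # The header already promised a smaller file; the stream is unsalvageable.
      raise FileEstimationException()

    remaining = estimated_size - yielded
    while remaining > 0:
      step = min(remaining, buffer_size)
      yield '\0' * step
      remaining -= step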

View file

@@ -0,0 +1,46 @@
import tarfile

from util.gzipwrap import GzipWrap

class TarImageFormatter(object):
  """ Base class for classes which produce a TAR containing image and layer data. """

  def build_stream(self, namespace, repository, tag, synthetic_image_id, layer_json,
                   get_image_iterator, get_layer_iterator):
    """ Builds and streams a synthetic .tar.gz that represents the formatted TAR created by this
        class's implementation.
    """
    return GzipWrap(self.stream_generator(namespace, repository, tag,
                                          synthetic_image_id, layer_json,
                                          get_image_iterator, get_layer_iterator))

  def stream_generator(self, namespace, repository, tag, synthetic_image_id,
                       layer_json, get_image_iterator, get_layer_iterator):
    raise NotImplementedError

  def tar_file(self, name, contents):
    """ Returns the TAR binary representation for a file with the given name and file contents. """
    length = len(contents)
    tar_data = self.tar_file_header(name, length)
    tar_data += contents
    tar_data += self.tar_file_padding(length)
    return tar_data

  def tar_file_padding(self, length):
    """ Returns TAR file padding for file data of the given length. """
    if length % 512 != 0:
      return '\0' * (512 - (length % 512))

    return ''

  def tar_file_header(self, name, file_size):
    """ Returns TAR file header data for a file with the given name and size. """
    info = tarfile.TarInfo(name=name)
    info.type = tarfile.REGTYPE
    info.size = file_size
    return info.tobuf()

  def tar_folder(self, name):
    """ Returns TAR file header data for a folder with the given name. """
    info = tarfile.TarInfo(name=name)
    info.type = tarfile.DIRTYPE
    return info.tobuf()
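
Because every helper returns plain bytes, the class is easy to exercise outside the registry. A minimal sketch that writes a readable two-entry archive:

  formatter = TarImageFormatter()
  blob = formatter.tar_folder('rootfs')
  blob += formatter.tar_file('rootfs/hello.txt', 'hi\n')
  blob += '\0' * 1024  # two empty 512-byte records terminate a TAR

  with open('demo.tar', 'wb') as f:
    f.write(blob)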

View file

@@ -65,9 +65,22 @@ module.exports = function(grunt) {
       }
     },
     quay: {
-      src: ['../static/partials/*.html', '../static/directives/*.html'],
+      src: ['../static/partials/*.html', '../static/directives/*.html',
+            '../static/directives/config/*.html'],
       dest: '../static/dist/template-cache.js'
     }
+  },
+
+  cachebuster: {
+    build: {
+      options: {
+        format: 'json',
+        basedir: '../static/'
+      },
+      src: [ '../static/dist/template-cache.js', '../static/dist/<%= pkg.name %>.min.js',
+             '../static/dist/<%= pkg.name %>.css' ],
+      dest: '../static/dist/cachebusters.json'
+    }
   }
 });

@@ -75,7 +88,8 @@ module.exports = function(grunt) {
   grunt.loadNpmTasks('grunt-contrib-concat');
   grunt.loadNpmTasks('grunt-contrib-cssmin');
   grunt.loadNpmTasks('grunt-angular-templates');
+  grunt.loadNpmTasks('grunt-cachebuster');

   // Default task(s).
-  grunt.registerTask('default', ['ngtemplates', 'concat', 'cssmin', 'uglify']);
+  grunt.registerTask('default', ['ngtemplates', 'concat', 'cssmin', 'uglify', 'cachebuster']);
 };

View file

@@ -6,6 +6,7 @@
     "grunt-contrib-concat": "~0.4.0",
     "grunt-contrib-cssmin": "~0.9.0",
     "grunt-angular-templates": "~0.5.4",
-    "grunt-contrib-uglify": "~0.4.0"
+    "grunt-contrib-uglify": "~0.4.0",
+    "grunt-cachebuster": "~0.1.5"
   }
 }

View file

@ -1,47 +1,84 @@
import boto.rds2 import boto.rds2
import logging import logging
from health.services import check_all_services
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class HealthCheck(object): def get_healthchecker(app):
def __init__(self): """ Returns a HealthCheck instance for the given app. """
pass return HealthCheck.get_checker(app)
def conduct_healthcheck(self, db_healthy, buildlogs_healthy):
class HealthCheck(object):
def __init__(self, app):
self.app = app
def check_instance(self):
""" """
Conducts any custom healthcheck work, returning a dict representing the HealthCheck Conducts a check on this specific instance, returning a dict representing the HealthCheck
output and a boolean indicating whether the instance is healthy. output and a number indicating the health check response code.
""" """
raise NotImplementedError service_statuses = check_all_services(self.app)
return self.get_instance_health(service_statuses)
def check_endtoend(self):
"""
Conducts a check on all services, returning a dict representing the HealthCheck
output and a number indicating the health check response code.
"""
service_statuses = check_all_services(self.app)
return self.calculate_overall_health(service_statuses)
def get_instance_health(self, service_statuses):
"""
For the given service statuses, returns a dict representing the HealthCheck
output and a number indicating the health check response code. By default,
this simply ensures that all services are reporting as healthy.
"""
return self.calculate_overall_health(service_statuses)
def calculate_overall_health(self, service_statuses, skip=None, notes=None):
""" Returns true if and only if all the given service statuses report as healthy. """
is_healthy = True
notes = notes or []
for service_name in service_statuses:
if skip and service_name in skip:
notes.append('%s skipped in compute health' % service_name)
continue
is_healthy = is_healthy and service_statuses[service_name]
data = {
'services': service_statuses,
'notes': notes,
'is_testing': self.app.config['TESTING']
}
return (data, 200 if is_healthy else 503)
@classmethod @classmethod
def get_check(cls, name, parameters): def get_checker(cls, app):
name = app.config['HEALTH_CHECKER'][0]
     parameters = app.config['HEALTH_CHECKER'][1] or {}
     for subc in cls.__subclasses__():
       if subc.check_name() == name:
-        return subc(**parameters)
+        return subc(app, **parameters)
     raise Exception('Unknown health check with name %s' % name)
 
 class LocalHealthCheck(HealthCheck):
-  def __init__(self):
-    pass
-
   @classmethod
   def check_name(cls):
     return 'LocalHealthCheck'
 
-  def conduct_healthcheck(self, db_healthy, buildlogs_healthy):
-    data = {
-      'db_healthy': db_healthy,
-      'buildlogs_healthy': buildlogs_healthy
-    }
-
-    return (data, db_healthy and buildlogs_healthy)
 
 class ProductionHealthCheck(HealthCheck):
-  def __init__(self, access_key, secret_key):
+  def __init__(self, app, access_key, secret_key):
+    super(ProductionHealthCheck, self).__init__(app)
     self.access_key = access_key
     self.secret_key = secret_key

@@ -49,36 +86,38 @@ class ProductionHealthCheck(HealthCheck):
   @classmethod
   def check_name(cls):
     return 'ProductionHealthCheck'
 
-  def conduct_healthcheck(self, db_healthy, buildlogs_healthy):
-    data = {
-      'db_healthy': db_healthy,
-      'buildlogs_healthy': buildlogs_healthy
-    }
-
-    # Only report unhealthy if the machine cannot connect to the DB. Redis isn't required for
-    # mission critical/high availability operations.
+  def get_instance_health(self, service_statuses):
+    # Note: We skip the redis check because if redis is down, we don't want ELB taking the
+    # machines out of service. Redis is not considered a high availability-required service.
+    skip = ['redis']
+    notes = []
+
+    # If the database is marked as unhealthy, check the status of RDS directly. If RDS is
+    # reporting as available, then the problem is with this instance. Otherwise, the problem is
+    # with RDS, and so we skip the DB status so we can keep this machine as 'healthy'.
+    db_healthy = service_statuses['database']
     if not db_healthy:
-      # If the database is marked as unhealthy, check the status of RDS directly. If RDS is
-      # reporting as available, then the problem is with this instance. Otherwise, the problem is
-      # with RDS, and we can keep this machine as 'healthy'.
-      is_rds_working = False
-      try:
-        region = boto.rds2.connect_to_region('us-east-1',
-          aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key)
-        response = region.describe_db_instances()['DescribeDBInstancesResponse']
-        result = response['DescribeDBInstancesResult']
-        instances = result['DBInstances']
-        status = instances[0]['DBInstanceStatus']
-        is_rds_working = status == 'available'
-      except:
-        logger.exception("Exception while checking RDS status")
-        pass
-
-      data['db_available_checked'] = True
-      data['db_available_status'] = is_rds_working
-
-      # If RDS is down, then we still report the machine as healthy, so that it can handle
-      # requests once RDS comes back up.
-      return (data, not is_rds_working)
-
-    return (data, db_healthy)
+      rds_status = self._get_rds_status()
+      notes.append('DB reports unhealthy; RDS status: %s' % rds_status)
+
+      # If the RDS is in any state but available, then we skip the DB check since it will
+      # fail and bring down the instance.
+      if rds_status != 'available':
+        skip.append('database')
+
+    return self.calculate_overall_health(service_statuses, skip=skip, notes=notes)
+
+  def _get_rds_status(self):
+    """ Returns the status of the RDS instance as reported by AWS. """
+    try:
+      region = boto.rds2.connect_to_region('us-east-1',
+        aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key)
+      response = region.describe_db_instances()['DescribeDBInstancesResponse']
+      result = response['DescribeDBInstancesResult']
+      instances = result['DBInstances']
+      status = instances[0]['DBInstanceStatus']
+      return status
+    except:
+      logger.exception("Exception while checking RDS status")
+      return 'error'
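Neither `calculate_overall_health` nor the config wiring for these checkers appears in this diff. The following is only a sketch of both, assuming the aggregator simply requires every non-skipped service to be healthy (inferred from the call in `get_instance_health`, not confirmed by the source):

# Sketch only: calculate_overall_health is not included in this diff; this is
# an assumed implementation inferred from how get_instance_health calls it.
class HealthCheck(object):
  def __init__(self, app):
    self.app = app

  def calculate_overall_health(self, service_statuses, skip=None, notes=None):
    # Assumed behavior: the instance is healthy iff every service that was
    # not explicitly skipped reports healthy.
    skip = skip or []
    is_healthy = all(healthy for name, healthy in service_statuses.items()
                     if name not in skip)

    data = {
      'services': service_statuses,
      'services_skipped': skip,
      'notes': notes or [],
    }
    return (data, is_healthy)

# Hypothetical config wiring: HEALTH_CHECKER is a (name, parameters) tuple and
# the parameters dict is splatted into the chosen subclass's constructor.
# HEALTH_CHECKER = ('ProductionHealthCheck', {'access_key': '...', 'secret_key': '...'})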

health/services.py Normal file
View file

@@ -0,0 +1,46 @@
import logging

from data import model
from app import build_logs

logger = logging.getLogger(__name__)


def _check_registry_gunicorn(app):
  """ Returns the status of the registry gunicorn workers. """
  # Compute the URL for checking the registry endpoint. We append a port if and only if the
  # hostname contains one.
  client = app.config['HTTPCLIENT']
  hostname_parts = app.config['SERVER_HOSTNAME'].split(':')
  port = ''
  if len(hostname_parts) == 2:
    port = ':' + hostname_parts[1]

  registry_url = '%s://localhost%s/v1/_internal_ping' % (app.config['PREFERRED_URL_SCHEME'], port)
  try:
    return client.get(registry_url, verify=False, timeout=2).status_code == 200
  except Exception:
    logger.exception('Exception when checking registry health: %s', registry_url)
    return False


def _check_database(app):
  """ Returns the status of the database, as accessed from this instance. """
  return model.check_health(app.config)


def _check_redis(app):
  """ Returns the status of Redis, as accessed from this instance. """
  return build_logs.check_health()


_SERVICES = {
  'registry_gunicorn': _check_registry_gunicorn,
  'database': _check_database,
  'redis': _check_redis
}


def check_all_services(app):
  """ Returns a dictionary containing the status of all the services defined. """
  status = {}
  for name in _SERVICES:
    status[name] = _SERVICES[name](app)

  return status
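The endpoint that feeds these probe results into a checker is not part of this hunk; the following is a sketch of the likely call sequence under that assumption (route shape and names are hypothetical):

# Hypothetical glue code: the real health endpoint lives elsewhere in the tree
# and may differ in shape.
from flask import jsonify

from health.services import check_all_services

def instance_health(app, checker):
  # Probe every registered service from this instance.
  service_statuses = check_all_services(app)  # e.g. {'database': True, 'redis': False, ...}

  # Let the environment-specific checker interpret the raw statuses.
  data, is_healthy = checker.get_instance_health(service_statuses)

  # ELB and most load balancers treat any non-200 response as unhealthy.
  response = jsonify(data=data, is_healthy=is_healthy)
  response.status_code = 200 if is_healthy else 503
  return response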

View file

@@ -192,6 +192,9 @@ def initialize_database():
   BuildTriggerService.create(name='github')
 
+  AccessTokenKind.create(name='build-worker')
+  AccessTokenKind.create(name='pushpull-token')
+
   LogEntryKind.create(name='account_change_plan')
   LogEntryKind.create(name='account_change_cc')
   LogEntryKind.create(name='account_change_password')
@@ -255,6 +258,9 @@ def initialize_database():
   ImageStorageLocation.create(name='local_us')
 
   ImageStorageTransformation.create(name='squash')
+  ImageStorageTransformation.create(name='aci')
+
+  ImageStorageSignatureKind.create(name='gpg2')
 
   # NOTE: These MUST be copied over to NotificationKind, since every external
   # notification can also generate a Quay.io notification.
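The NOTE above encodes an invariant worth spelling out: every seeded external notification event needs a NotificationKind row of the same name. A hypothetical illustration (the event name here is an example; the real seed list sits further down in this file and is not shown in the diff):

# Hypothetical pairing per the NOTE: an external notification event and its
# matching Quay.io NotificationKind must share a name.
ExternalNotificationEvent.create(name='repo_push')
NotificationKind.create(name='repo_push')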
@@ -390,7 +396,7 @@ def populate_database():
                              'Empty repository which is building.',
                              False, [], (0, [], None))
 
-  token = model.create_access_token(building, 'write')
+  token = model.create_access_token(building, 'write', 'build-worker')
 
   trigger = model.create_build_trigger(building, 'github', '123authtoken',
                                        new_user_1, pull_robot=dtrobot[0])

local-setup-osx.sh Executable file
View file

@@ -0,0 +1,19 @@
#!/bin/bash
set -e

# Install Docker and C libraries on which Python libraries are dependent
brew update
brew install boot2docker docker libevent libmagic postgresql

# Some OSX installs don't have /usr/include, which is required for finding SASL headers for our LDAP library
if [ ! -e /usr/include ]; then
  sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include
fi

# Install Python dependencies
sudo pip install -r requirements.txt

# Put the local testing config in place
git clone git@github.com:coreos-inc/quay-config.git ../quay-config
ln -s ../../quay-config/local conf/stack

View file

@@ -1 +1 @@
-TEST=true python -m unittest discover
+TEST=true TROLLIUSDEBUG=1 python -m unittest discover

View file

@@ -7,7 +7,6 @@ from endpoints.index import index
 from endpoints.tags import tags
 from endpoints.registry import registry
 
 application.register_blueprint(index, url_prefix='/v1')
 application.register_blueprint(tags, url_prefix='/v1')
 application.register_blueprint(registry, url_prefix='/v1')
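`_check_registry_gunicorn` in health/services.py probes `/v1/_internal_ping` through this application. The handler itself is not included in the diff; under the registration above it would be a trivial route on one of the v1 blueprints, roughly like this (a hypothetical sketch, not the actual implementation):

# Hypothetical handler for the URL probed by _check_registry_gunicorn; the
# actual implementation is not shown in this diff.
from flask import Blueprint, make_response

index = Blueprint('index', __name__)

@index.route('/_internal_ping')
def internal_ping():
  # Merely reaching this handler proves a gunicorn worker is serving requests.
  return make_response('true', 200)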

View file

@@ -1,4 +1,4 @@
-autobahn
+autobahn==0.9.3-3
 aiowsgi
 trollius
 peewee
@@ -22,7 +22,6 @@ xhtml2pdf
 redis
 hiredis
 docker-py
-pygithub
 flask-restful==0.2.12
 jsonschema
 git+https://github.com/NateFerrero/oauth2lib.git
@@ -40,4 +39,12 @@ pyyaml
 git+https://github.com/DevTable/aniso8601-fake.git
 git+https://github.com/DevTable/anunidecode.git
 git+https://github.com/DevTable/avatar-generator.git
+git+https://github.com/DevTable/pygithub.git
+git+https://github.com/DevTable/container-cloud-config.git
+git+https://github.com/jplana/python-etcd.git
 gipc
+pyOpenSSL
+pygpgme
+cachetools
+mock
+psutil

View file

@@ -8,24 +8,22 @@ Jinja2==2.7.3
 LogentriesLogger==0.2.1
 Mako==1.0.0
 MarkupSafe==0.23
-Pillow==2.6.1
-PyGithub==1.25.2
-PyMySQL==0.6.2
-PyPDF2==1.23
+Pillow==2.7.0
+PyMySQL==0.6.3
+PyPDF2==1.24
 PyYAML==3.11
 SQLAlchemy==0.9.8
+WebOb==1.4
 Werkzeug==0.9.6
-alembic==0.7.0
-git+https://github.com/DevTable/aniso8601-fake.git
-git+https://github.com/DevTable/anunidecode.git
-git+https://github.com/DevTable/avatar-generator.git
 aiowsgi==0.3
+alembic==0.7.4
 autobahn==0.9.3-3
 backports.ssl-match-hostname==3.4.0.2
 beautifulsoup4==4.3.2
 blinker==1.3
-boto==2.34.0
-docker-py==0.6.0
+boto==2.35.1
+cachetools==1.0.0
+docker-py==0.7.1
 ecdsa==0.11
 futures==2.2.0
 gevent==1.0.1
@@ -36,26 +34,37 @@ hiredis==0.1.5
 html5lib==0.999
 itsdangerous==0.24
 jsonschema==2.4.0
-marisa-trie==0.6
-mixpanel-py==3.2.0
-git+https://github.com/NateFerrero/oauth2lib.git
-paramiko==1.15.1
-peewee==2.4.3
+marisa-trie==0.7
+mixpanel-py==3.2.1
+mock==1.0.1
+paramiko==1.15.2
+peewee==2.4.7
+psutil==2.2.1
 psycopg2==2.5.4
 py-bcrypt==0.4
 pycrypto==2.6.1
-python-dateutil==2.2
-python-ldap==2.4.18
+python-dateutil==2.4.0
+python-ldap==2.4.19
 python-magic==0.4.6
-pytz==2014.9
+pygpgme==0.3
+pytz==2014.10
+pyOpenSSL==0.14
 raven==5.1.1
 redis==2.10.3
 reportlab==2.7
-requests==2.4.3
-six==1.8.0
-stripe==1.19.1
-trollius==1.0.3
+requests==2.5.1
+six==1.9.0
+stripe==1.20.1
+trollius==1.0.4
 tzlocal==1.1.2
-websocket-client==0.21.0
+waitress==0.8.9
+websocket-client==0.23.0
 wsgiref==0.1.2
 xhtml2pdf==0.0.6
+git+https://github.com/DevTable/aniso8601-fake.git
+git+https://github.com/DevTable/anunidecode.git
+git+https://github.com/DevTable/avatar-generator.git
+git+https://github.com/DevTable/pygithub.git
+git+https://github.com/DevTable/container-cloud-config.git
+git+https://github.com/NateFerrero/oauth2lib.git
+git+https://github.com/jplana/python-etcd.git

Some files were not shown because too many files have changed in this diff.