Merge branch 'touchdown' of https://bitbucket.org/yackob03/quay into touchdown
This commit is contained in:
commit 92000eb11d
48 changed files with 772 additions and 386 deletions
2 app.py
@@ -11,6 +11,7 @@ import features
from storage import Storage
from data.userfiles import Userfiles
from util.analytics import Analytics
from util.exceptionlog import Sentry
from data.billing import Billing

@@ -44,3 +45,4 @@ storage = Storage(app)
userfiles = Userfiles(app)
analytics = Analytics(app)
billing = Billing(app)
sentry = Sentry(app)
@@ -1,9 +1,10 @@
import logging
import logging.config
import uuid

from app import app as application

# Initialize logging
application.config['LOGGING_CONFIG']()
from flask import request, Request
from util.names import urn_generator

from data.model import db as model_db

@@ -21,6 +22,7 @@ from endpoints.callbacks import callback


logger = logging.getLogger(__name__)
profile = logging.getLogger('application.profiler')

application.register_blueprint(web)
application.register_blueprint(callback, url_prefix='/oauth2')

@@ -31,6 +33,29 @@ application.register_blueprint(api_bp, url_prefix='/api')
application.register_blueprint(webhooks, url_prefix='/webhooks')
application.register_blueprint(realtime, url_prefix='/realtime')


class RequestWithId(Request):
  request_gen = staticmethod(urn_generator(['request']))

  def __init__(self, *args, **kwargs):
    super(RequestWithId, self).__init__(*args, **kwargs)
    self.request_id = self.request_gen()


@application.before_request
def _request_start():
  profile.debug('Starting request: %s', request.path)


@application.after_request
def _request_end(r):
  profile.debug('Ending request: %s', request.path)
  return r


class InjectingFilter(logging.Filter):
  def filter(self, record):
    record.msg = '[%s] %s' % (request.request_id, record.msg)
    return True


profile.addFilter(InjectingFilter())


def close_db(exc):
  db = model_db

@@ -39,6 +64,8 @@ def close_db(exc):
    db.close()

application.teardown_request(close_db)
application.request_class = RequestWithId

if __name__ == '__main__':
  logging.config.fileConfig('conf/logging_local.conf', disable_existing_loggers=False)
  application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')
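The RequestWithId and InjectingFilter additions above give every request a unique ID and stamp it into the profiler's log lines. A minimal self-contained sketch of the same pattern (the urn formatting below is illustrative; the commit itself uses util.names.urn_generator):

import logging
import uuid

from flask import Flask, Request, request

class RequestWithId(Request):
  def __init__(self, *args, **kwargs):
    super(RequestWithId, self).__init__(*args, **kwargs)
    self.request_id = 'urn:request:%s' % uuid.uuid4()

class InjectingFilter(logging.Filter):
  def filter(self, record):
    # Only meaningful inside a request context, which is where the
    # profiler logger is used.
    record.msg = '[%s] %s' % (request.request_id, record.msg)
    return True

app = Flask(__name__)
app.request_class = RequestWithId

profile = logging.getLogger('application.profiler')
profile.addFilter(InjectingFilter())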
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -3,7 +3,5 @@ workers = 8
worker_class = 'gevent'
timeout = 2000
pidfile = '/tmp/gunicorn.pid'
errorlog = '/mnt/logs/application.log'
loglevel = 'debug'
logger_class = 'util.glogger.LogstashLogger'
logconfig = 'conf/logging.conf'
pythonpath = '.'
@@ -3,7 +3,5 @@ workers = 2
worker_class = 'gevent'
timeout = 2000
daemon = False
errorlog = '-'
loglevel = 'debug'
logger_class = 'util.glogger.LogstashLogger'
logconfig = 'conf/logging_local.conf'
pythonpath = '.'
39 conf/logging.conf (Normal file)
@@ -0,0 +1,39 @@
[loggers]
keys=root, gunicorn.error, gunicorn.access

[handlers]
keys=error_file

[formatters]
keys=generic

[logger_application.profiler]
level=DEBUG
handlers=error_file
propagate=0
qualname=application.profiler

[logger_root]
level=DEBUG
handlers=error_file

[logger_gunicorn.error]
level=INFO
handlers=error_file
propagate=1
qualname=gunicorn.error

[logger_gunicorn.access]
level=INFO
handlers=error_file
propagate=0
qualname=gunicorn.access

[handler_error_file]
class=logging.FileHandler
formatter=generic
args=('/mnt/logs/application.log',)

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
39 conf/logging_local.conf (Normal file)
@@ -0,0 +1,39 @@
[loggers]
keys=root, gunicorn.error, gunicorn.access, application.profiler

[handlers]
keys=console

[formatters]
keys=generic

[logger_application.profiler]
level=DEBUG
handlers=console
propagate=0
qualname=application.profiler

[logger_root]
level=DEBUG
handlers=console

[logger_gunicorn.error]
level=INFO
handlers=console
propagate=1
qualname=gunicorn.error

[logger_gunicorn.access]
level=INFO
handlers=console
propagate=0
qualname=gunicorn.access

[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
23 config.py
@@ -1,5 +1,3 @@
import logging
import logstash_formatter
import requests
import os.path

@@ -16,23 +14,11 @@ def build_requests_session():
  return sess


def logs_init_builder(level=logging.DEBUG,
                      formatter=logstash_formatter.LogstashFormatter()):
  @staticmethod
  def init_logs():
    handler = logging.StreamHandler()
    root_logger = logging.getLogger('')
    root_logger.setLevel(level)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)

  return init_logs

# The set of configuration key names that will be accessible in the client. Since these
# values are set to the frontend, DO NOT PLACE ANY SECRETS OR KEYS in this list.
CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'GITHUB_CLIENT_ID',
                    'GITHUB_LOGIN_CLIENT_ID', 'MIXPANEL_KEY', 'STRIPE_PUBLISHABLE_KEY',
                    'ENTERPRISE_LOGO_URL']
                    'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN']


def getFrontendVisibleConfig(config_dict):

@@ -53,7 +39,7 @@ class DefaultConfig(object):
  JSONIFY_PRETTYPRINT_REGULAR = False
  SESSION_COOKIE_SECURE = False

  LOGGING_CONFIG = logs_init_builder(formatter=logging.Formatter())
  LOGGING_LEVEL = 'DEBUG'
  SEND_FILE_MAX_AGE_DEFAULT = 0
  POPULATE_DB_TEST_DATA = True
  PREFERRED_URL_SCHEME = 'http'

@@ -102,6 +88,11 @@ class DefaultConfig(object):
  # Analytics
  ANALYTICS_TYPE = "FakeAnalytics"

  # Exception logging
  EXCEPTION_LOG_TYPE = 'FakeSentry'
  SENTRY_DSN = None
  SENTRY_PUBLIC_DSN = None

  # Github Config
  GITHUB_TOKEN_URL = 'https://github.com/login/oauth/access_token'
  GITHUB_USER_URL = 'https://api.github.com/user'
@@ -133,6 +133,8 @@ class FakeStripe(object):
  'plan': FAKE_PLAN,
  'current_period_start': timegm(datetime.now().utctimetuple()),
  'current_period_end': timegm((datetime.now() + timedelta(days=30)).utctimetuple()),
  'trial_start': timegm(datetime.now().utctimetuple()),
  'trial_end': timegm((datetime.now() + timedelta(days=30)).utctimetuple()),
})

FAKE_CARD = AttrDict({
@@ -220,7 +220,8 @@ class ImageStorage(BaseModel):
  created = DateTimeField(null=True)
  comment = TextField(null=True)
  command = TextField(null=True)
  image_size = BigIntegerField(null=True)
  uploading = BooleanField(default=True, null=True)


class Image(BaseModel):
@@ -817,7 +817,7 @@ def get_repository(namespace_name, repository_name):

def get_repo_image(namespace_name, repository_name, image_id):
  query = (Image
           .select()
           .select(Image, ImageStorage)
           .join(Repository)
           .switch(Image)
           .join(ImageStorage, JOIN_LEFT_OUTER)
@@ -7,6 +7,9 @@ from app import app
transaction_factory = app.config['DB_TRANSACTION_FACTORY']


MINIMUM_EXTENSION = timedelta(seconds=20)


class WorkQueue(object):
  def __init__(self, queue_name, canonical_name_match_list=None):
    self.queue_name = queue_name

@@ -80,17 +83,24 @@ class WorkQueue(object):
    completed_item.delete_instance()

  @staticmethod
  def incomplete(incomplete_item, retry_after=300):
  def incomplete(incomplete_item, retry_after=300, restore_retry=False):
    retry_date = datetime.now() + timedelta(seconds=retry_after)
    incomplete_item.available_after = retry_date
    incomplete_item.available = True

    if restore_retry:
      incomplete_item.retries_remaining += 1

    incomplete_item.save()

  @staticmethod
  def extend_processing(queue_item, seconds_from_now):
    new_expiration = datetime.now() + timedelta(seconds=seconds_from_now)
    queue_item.processing_expires = new_expiration
    queue_item.save()

    # Only actually write the new expiration to the db if it moves the expiration some minimum
    if new_expiration - queue_item.processing_expires > MINIMUM_EXTENSION:
      queue_item.processing_expires = new_expiration
      queue_item.save()


image_diff_queue = WorkQueue(app.config['DIFFS_QUEUE_NAME'])
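The extend_processing change above throttles database writes: the new expiration is only persisted when it moves the deadline forward by more than MINIMUM_EXTENSION, instead of saving on every call. Condensed to its post-commit form, the logic is:

from datetime import datetime, timedelta

MINIMUM_EXTENSION = timedelta(seconds=20)

def extend_processing(queue_item, seconds_from_now):
  new_expiration = datetime.now() + timedelta(seconds=seconds_from_now)

  # Only write the new expiration to the db if it moves the current
  # expiration forward by at least the minimum amount.
  if new_expiration - queue_item.processing_expires > MINIMUM_EXTENSION:
    queue_item.processing_expires = new_expiration
    queue_item.save()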
@@ -155,6 +155,7 @@ def render_page_template(name, **kwargs):
  feature_set=json.dumps(features.get_features()),
  config_set=json.dumps(getFrontendVisibleConfig(app.config)),
  mixpanel_key=app.config.get('MIXPANEL_KEY', ''),
  sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
  is_debug=str(app.config.get('DEBUGGING', False)).lower(),
  show_chat=features.OLARK_CHAT,
  cache_buster=cache_buster,
@@ -21,6 +21,7 @@ from util.http import abort


logger = logging.getLogger(__name__)
profile = logging.getLogger('application.profiler')

index = Blueprint('index', __name__)

@@ -112,9 +113,15 @@ def create_user():

  else:
    # New user case
    profile.debug('Creating user')
    new_user = model.create_user(username, password, user_data['email'])

    profile.debug('Creating email code for user')
    code = model.create_confirm_email_code(new_user)

    profile.debug('Sending email code to user')
    send_confirmation_email(new_user.username, new_user.email, code.code)

    return make_response('Created', 201)


@@ -149,12 +156,12 @@ def update_user(username):
  update_request = request.get_json()

  if 'password' in update_request:
    logger.debug('Updating user password.')
    profile.debug('Updating user password')
    model.change_password(get_authenticated_user(),
                          update_request['password'])

  if 'email' in update_request:
    logger.debug('Updating user email')
    profile.debug('Updating user email')
    model.update_email(get_authenticated_user(), update_request['email'])

  return jsonify({

@@ -170,9 +177,13 @@ def update_user(username):
@parse_repository_name
@generate_headers(role='write')
def create_repository(namespace, repository):
  profile.debug('Parsing image descriptions')
  image_descriptions = json.loads(request.data)

  profile.debug('Looking up repository')
  repo = model.get_repository(namespace, repository)

  profile.debug('Repository looked up')
  if not repo and get_authenticated_user() is None:
    logger.debug('Attempt to create new repository without user auth.')
    abort(401,

@@ -196,11 +207,11 @@ def create_repository(namespace, repository):
          issue='no-create-permission',
          namespace=namespace)

  logger.debug('Creaing repository with owner: %s' %
               get_authenticated_user().username)
  profile.debug('Creaing repository with owner: %s', get_authenticated_user().username)
  repo = model.create_repository(namespace, repository,
                                 get_authenticated_user())

  profile.debug('Determining added images')
  added_images = OrderedDict([(desc['id'], desc)
                              for desc in image_descriptions])
  new_repo_images = dict(added_images)

@@ -209,12 +220,15 @@ def create_repository(namespace, repository):
    if existing.docker_image_id in new_repo_images:
      added_images.pop(existing.docker_image_id)

  profile.debug('Creating/Linking necessary images')
  username = get_authenticated_user() and get_authenticated_user().username
  translations = {}
  for image_description in added_images.values():
    model.find_create_or_link_image(image_description['id'], repo, username,
                                    translations)

  profile.debug('Created images')
  response = make_response('Created', 201)

  extra_params = {

@@ -268,21 +282,23 @@ def update_images(namespace, repository):
  permission = ModifyRepositoryPermission(namespace, repository)

  if permission.can():
    profile.debug('Looking up repository')
    repo = model.get_repository(namespace, repository)
    if not repo:
      # Make sure the repo actually exists.
      abort(404, message='Unknown repository', issue='unknown-repo')

    profile.debug('Parsing image data')
    image_with_checksums = json.loads(request.data)

    updated_tags = {}
    for image in image_with_checksums:
      logger.debug('Setting checksum for image id: %s to %s' %
                   (image['id'], image['checksum']))
      profile.debug('Setting checksum for image id: %s to %s', image['id'], image['checksum'])
      updated_tags[image['Tag']] = image['id']
      model.set_image_checksum(image['id'], repo, image['checksum'])

    if get_authenticated_user():
      profile.debug('Publishing push event')
      username = get_authenticated_user().username

      # Mark that the user has pushed the repo.

@@ -295,15 +311,18 @@ def update_images(namespace, repository):
      event = app.config['USER_EVENTS'].get_event(username)
      event.publish_event_data('docker-cli', user_data)

    profile.debug('GCing repository')
    num_removed = model.garbage_collect_repository(namespace, repository)

    # Generate a job for each webhook that has been added to this repo
    profile.debug('Adding webhooks for repository')

    webhooks = model.list_webhooks(namespace, repository)
    for webhook in webhooks:
      webhook_data = json.loads(webhook.parameters)
      repo_string = '%s/%s' % (namespace, repository)
      logger.debug('Creating webhook for repository \'%s\' for url \'%s\'' %
                   (repo_string, webhook_data['url']))
      profile.debug('Creating webhook for repository \'%s\' for url \'%s\'',
                    repo_string, webhook_data['url'])
      webhook_data['payload'] = {
        'repository': repo_string,
        'namespace': namespace,

@@ -330,14 +349,17 @@ def get_repository_images(namespace, repository):
  permission = ReadRepositoryPermission(namespace, repository)

  # TODO invalidate token?
  profile.debug('Looking up public status of repository')
  is_public = model.repository_is_public(namespace, repository)
  if permission.can() or is_public:
    # We can't rely on permissions to tell us if a repo exists anymore
    profile.debug('Looking up repository')
    repo = model.get_repository(namespace, repository)
    if not repo:
      abort(404, message='Unknown repository', issue='unknown-repo')

    all_images = []
    profile.debug('Retrieving repository images')
    for image in model.get_repository_images(namespace, repository):
      new_image_view = {
        'id': image.docker_image_id,

@@ -345,6 +367,7 @@ def get_repository_images(namespace, repository):
      }
      all_images.append(new_image_view)

    profile.debug('Building repository image response')
    resp = make_response(json.dumps(all_images), 200)
    resp.mimetype = 'application/json'

@@ -353,6 +376,7 @@ def get_repository_images(namespace, repository):
      'namespace': namespace,
    }

    profile.debug('Logging the pull to Mixpanel and the log system')
    if get_validated_oauth_token():
      oauth_token = get_validated_oauth_token()
      metadata['oauth_token_id'] = oauth_token.id

@@ -408,4 +432,5 @@ def get_search():
def ping():
  response = make_response('true', 200)
  response.headers['X-Docker-Registry-Version'] = '0.6.0'
  response.headers['X-Docker-Registry-Standalone'] = '0'
  return response
@@ -21,7 +21,7 @@ from data import model
registry = Blueprint('registry', __name__)

logger = logging.getLogger(__name__)

profile = logging.getLogger('application.profiler')

class SocketReader(object):
  def __init__(self, fp):

@@ -40,16 +40,35 @@ class SocketReader(object):
    return buf


def image_is_uploading(namespace, repository, image_id, repo_image):
  if repo_image and repo_image.storage and repo_image.storage.uploading is not None:
    return repo_image.storage.uploading

  logger.warning('Setting legacy upload flag')
  uuid = repo_image and repo_image.storage and repo_image.storage.uuid
  mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
  return store.exists(mark_path)


def mark_upload_complete(namespace, repository, image_id, repo_image):
  if repo_image and repo_image.storage and repo_image.storage.uploading is not None:
    repo_image.storage.uploading = False
    repo_image.storage.save()
  else:
    logger.warning('Removing legacy upload flag')
    uuid = repo_image and repo_image.storage and repo_image.storage.uuid
    mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
    if store.exists(mark_path):
      store.remove(mark_path)


def require_completion(f):
  """This make sure that the image push correctly finished."""
  @wraps(f)
  def wrapper(namespace, repository, *args, **kwargs):
    image_id = kwargs['image_id']
    repo_image = model.get_repo_image(namespace, repository, image_id)
    uuid = repo_image and repo_image.storage and repo_image.storage.uuid

    if store.exists(store.image_mark_path(namespace, repository, image_id,
                                          uuid)):
    if image_is_uploading(namespace, repository, image_id, repo_image):
      abort(400, 'Image %(image_id)s is being uploaded, retry later',
            issue='upload-in-progress', image_id=kwargs['image_id'])

@@ -88,17 +107,28 @@ def set_cache_headers(f):
@set_cache_headers
def get_image_layer(namespace, repository, image_id, headers):
  permission = ReadRepositoryPermission(namespace, repository)

  profile.debug('Checking repo permissions')
  if permission.can() or model.repository_is_public(namespace, repository):
    profile.debug('Looking up repo image')
    repo_image = model.get_repo_image(namespace, repository, image_id)

    uuid = repo_image and repo_image.storage and repo_image.storage.uuid

    profile.debug('Looking up the layer path')
    path = store.image_layer_path(namespace, repository, image_id, uuid)

    profile.debug('Looking up the direct download URL')
    direct_download_url = store.get_direct_download_url(path)

    if direct_download_url:
      profile.debug('Returning direct download URL')
      return redirect(direct_download_url)
    try:
      profile.debug('Streaming layer data')
      return Response(store.stream_read(path), headers=headers)
    except IOError:
      profile.debug('Image not found')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
            image_id=image_id)

@@ -109,25 +139,32 @@ def get_image_layer(namespace, repository, image_id, headers):
@process_auth
@extract_namespace_repo_from_session
def put_image_layer(namespace, repository, image_id):
  profile.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  profile.debug('Retrieving image')
  repo_image = model.get_repo_image(namespace, repository, image_id)

  uuid = repo_image and repo_image.storage and repo_image.storage.uuid
  try:
    profile.debug('Retrieving image data')
    json_data = store.get_content(store.image_json_path(namespace, repository,
                                                        image_id, uuid))
  except IOError:
    abort(404, 'Image %(image_id)s not found', issue='unknown-image',
          image_id=image_id)

  profile.debug('Retrieving image path info')
  layer_path = store.image_layer_path(namespace, repository, image_id, uuid)
  mark_path = store.image_mark_path(namespace, repository, image_id, uuid)

  if store.exists(layer_path) and not store.exists(mark_path):
  if (store.exists(layer_path) and not
      image_is_uploading(namespace, repository, image_id, repo_image)):
    abort(409, 'Image already exists', issue='image-exists', image_id=image_id)

  profile.debug('Storing layer data')

  input_stream = request.stream
  if request.headers.get('transfer-encoding') == 'chunked':
    # Careful, might work only with WSGI servers supporting chunked

@@ -174,11 +211,11 @@ def put_image_layer(namespace, repository, image_id):
          issue='checksum-mismatch', image_id=image_id)

  # Checksum is ok, we remove the marker
  store.remove(mark_path)
  mark_upload_complete(namespace, repository, image_id, repo_image)

  # The layer is ready for download, send a job to the work queue to
  # process it.
  logger.debug('Queing diffs job for image: %s' % image_id)
  profile.debug('Adding layer to diff queue')
  image_diff_queue.put([namespace, repository, image_id], json.dumps({
    'namespace': namespace,
    'repository': repository,

@@ -192,6 +229,7 @@ def put_image_layer(namespace, repository, image_id):
@process_auth
@extract_namespace_repo_from_session
def put_image_checksum(namespace, repository, image_id):
  profile.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

@@ -204,17 +242,22 @@ def put_image_checksum(namespace, repository, image_id):
    abort(400, 'Checksum not found in Cookie for image %(imaage_id)s',
          issue='missing-checksum-cookie', image_id=image_id)

  profile.debug('Looking up repo image')
  repo_image = model.get_repo_image(namespace, repository, image_id)

  uuid = repo_image and repo_image.storage and repo_image.storage.uuid

  profile.debug('Looking up repo layer data')
  if not store.exists(store.image_json_path(namespace, repository, image_id,
                                            uuid)):
    abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

  mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
  if not store.exists(mark_path):
  profile.debug('Marking image path')
  if not image_is_uploading(namespace, repository, image_id, repo_image):
    abort(409, 'Cannot set checksum for image %(image_id)s',
          issue='image-write-error', image_id=image_id)

  profile.debug('Storing image checksum')
  err = store_checksum(namespace, repository, image_id, uuid, checksum)
  if err:
    abort(400, err)

@@ -227,11 +270,11 @@ def put_image_checksum(namespace, repository, image_id):
          issue='checksum-mismatch', image_id=image_id)

  # Checksum is ok, we remove the marker
  store.remove(mark_path)
  mark_upload_complete(namespace, repository, image_id, repo_image)

  # The layer is ready for download, send a job to the work queue to
  # process it.
  logger.debug('Queing diffs job for image: %s' % image_id)
  profile.debug('Adding layer to diff queue')
  image_diff_queue.put([namespace, repository, image_id], json.dumps({
    'namespace': namespace,
    'repository': repository,

@@ -247,27 +290,31 @@ def put_image_checksum(namespace, repository, image_id):
@require_completion
@set_cache_headers
def get_image_json(namespace, repository, image_id, headers):
  profile.debug('Checking repo permissions')
  permission = ReadRepositoryPermission(namespace, repository)
  if not permission.can() and not model.repository_is_public(namespace,
                                                             repository):
    abort(403)

  profile.debug('Looking up repo image')
  repo_image = model.get_repo_image(namespace, repository, image_id)
  uuid = repo_image and repo_image.storage and repo_image.storage.uuid

  profile.debug('Looking up repo layer data')
  try:
    data = store.get_content(store.image_json_path(namespace, repository,
                                                   image_id, uuid))
  except IOError:
    flask_abort(404)

  profile.debug('Looking up repo layer size')
  try:
    size = store.get_size(store.image_layer_path(namespace, repository,
                                                 image_id, uuid))
    size = repo_image.image_size or repo_image.storage.image_size
    headers['X-Docker-Size'] = str(size)
  except OSError:
    pass

  profile.debug('Retrieving checksum')
  checksum_path = store.image_checksum_path(namespace, repository, image_id,
                                            uuid)
  if store.exists(checksum_path):

@@ -284,14 +331,17 @@ def get_image_json(namespace, repository, image_id, headers):
@require_completion
@set_cache_headers
def get_image_ancestry(namespace, repository, image_id, headers):
  profile.debug('Checking repo permissions')
  permission = ReadRepositoryPermission(namespace, repository)
  if not permission.can() and not model.repository_is_public(namespace,
                                                             repository):
    abort(403)

  profile.debug('Looking up repo image')
  repo_image = model.get_repo_image(namespace, repository, image_id)
  uuid = repo_image and repo_image.storage and repo_image.storage.uuid

  profile.debug('Looking up image data')
  try:
    data = store.get_content(store.image_ancestry_path(namespace, repository,
                                                       image_id, uuid))

@@ -299,8 +349,11 @@ def get_image_ancestry(namespace, repository, image_id, headers):
    abort(404, 'Image %(image_id)s not found', issue='unknown-image',
          image_id=image_id)

  profile.debug('Converting to <-> from JSON')
  response = make_response(json.dumps(json.loads(data)), 200)
  response.headers.extend(headers)

  profile.debug('Done')
  return response


@@ -335,10 +388,12 @@ def store_checksum(namespace, repository, image_id, uuid, checksum):
@process_auth
@extract_namespace_repo_from_session
def put_image_json(namespace, repository, image_id):
  profile.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  profile.debug('Parsing image JSON')
  try:
    data = json.loads(request.data)
  except json.JSONDecodeError:

@@ -351,6 +406,7 @@ def put_image_json(namespace, repository, image_id):
    abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
          issue='invalid-request', image_id=image_id)

  profile.debug('Looking up repo image')
  repo_image = model.get_repo_image(namespace, repository, image_id)
  uuid = repo_image and repo_image.storage and repo_image.storage.uuid

@@ -358,12 +414,14 @@ def put_image_json(namespace, repository, image_id):
  checksum = request.headers.get('X-Docker-Checksum')
  if checksum:
    # Storing the checksum is optional at this stage
    profile.debug('Storing image checksum')
    err = store_checksum(namespace, repository, image_id, uuid, checksum)
    if err:
      abort(400, err, issue='write-error')

  else:
    # We cleanup any old checksum in case it's a retry after a fail
    profile.debug('Cleanup old checksum')
    store.remove(store.image_checksum_path(namespace, repository, image_id,
                                           uuid))
  if image_id != data['id']:

@@ -374,19 +432,27 @@ def put_image_json(namespace, repository, image_id):

  parent_image = None
  if parent_id:
    profile.debug('Looking up parent image')
    parent_image = model.get_repo_image(namespace, repository, parent_id)

    parent_uuid = (parent_image and parent_image.storage and
                   parent_image.storage.uuid)

  if parent_id:
    profile.debug('Looking up parent image data')

  if (parent_id and not
      store.exists(store.image_json_path(namespace, repository, parent_id,
                                         parent_uuid))):
    abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
          issue='invalid-request', image_id=image_id, parent_id=parent_id)

  profile.debug('Looking up image storage paths')
  json_path = store.image_json_path(namespace, repository, image_id, uuid)
  mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
  if store.exists(json_path) and not store.exists(mark_path):

  profile.debug('Checking if image already exists')
  if (store.exists(json_path) and not
      image_is_uploading(namespace, repository, image_id, repo_image)):
    abort(409, 'Image already exists', issue='image-exists', image_id=image_id)

  # If we reach that point, it means that this is a new image or a retry

@@ -394,13 +460,20 @@ def put_image_json(namespace, repository, image_id):
  # save the metadata
  command_list = data.get('container_config', {}).get('Cmd', None)
  command = json.dumps(command_list) if command_list else None

  profile.debug('Setting image metadata')
  model.set_image_metadata(image_id, namespace, repository,
                           data.get('created'), data.get('comment'), command,
                           parent_image)
  store.put_content(mark_path, 'true')

  profile.debug('Putting json path')
  store.put_content(json_path, request.data)

  profile.debug('Generating image ancestry')
  generate_ancestry(namespace, repository, image_id, uuid, parent_id,
                    parent_uuid)

  profile.debug('Done')
  return make_response('true', 200)
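The new image_is_uploading and mark_upload_complete helpers above replace direct mark-file checks with the new ImageStorage.uploading column, while keeping a fallback for rows created before the column existed. The decision logic, reduced to a sketch (parameter names are illustrative):

def image_is_uploading(store, repo_image, mark_path):
  # Prefer the database flag when it has been populated.
  if repo_image and repo_image.storage and repo_image.storage.uploading is not None:
    return repo_image.storage.uploading

  # Legacy images predate the column; their state still lives in a mark file.
  return store.exists(mark_path)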
@@ -20,6 +20,10 @@ TARBALL_MIME = 'application/gzip'
CHUNK_SIZE = 512 * 1024


def should_skip_commit(message):
  return '[skip build]' in message or '[build skip]' in message


class BuildArchiveException(Exception):
  pass

@@ -35,6 +39,9 @@ class TriggerDeactivationException(Exception):
class ValidationRequestException(Exception):
  pass

class SkipRequestException(Exception):
  pass

class EmptyRepositoryException(Exception):
  pass

@@ -308,13 +315,20 @@ class GithubBuildTrigger(BuildTrigger):

  def handle_trigger_request(self, request, auth_token, config):
    payload = request.get_json()

    if not payload:
      raise SkipRequestException()

    if 'zen' in payload:
      raise ValidationRequestException()

    logger.debug('Payload %s', payload)
    ref = payload['ref']
    commit_sha = payload['head_commit']['id']
    commit_message = payload['head_commit'].get('message', '')

    if should_skip_commit(commit_message):
      raise SkipRequestException()

    short_sha = GithubBuildTrigger.get_display_name(commit_sha)

    gh_client = self._get_client(auth_token)
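should_skip_commit gives committers an escape hatch: either marker anywhere in the commit message suppresses the triggered build. A quick usage example:

def should_skip_commit(message):
  return '[skip build]' in message or '[build skip]' in message

assert should_skip_commit('WIP: broken, [skip build]')
assert not should_skip_commit('Fix webhook payload parsing')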
@@ -11,7 +11,7 @@ from util.invoice import renderInvoiceToHtml
from util.email import send_invoice_email, send_subscription_change, send_payment_failed
from util.names import parse_repository_name
from util.http import abort
from endpoints.trigger import BuildTrigger, ValidationRequestException
from endpoints.trigger import BuildTrigger, ValidationRequestException, SkipRequestException
from endpoints.common import start_build


@@ -30,7 +30,7 @@ def stripe_webhook():

  event_type = request_data['type'] if 'type' in request_data else None
  if event_type == 'charge.succeeded':
    invoice_id = ['data']['object']['invoice']
    invoice_id = request_data['data']['object']['invoice']

    if user and user.invoice_email:
      # Lookup the invoice.

@@ -94,6 +94,10 @@ def build_trigger_webhook(namespace, repository, trigger_uuid):
    # This was just a validation request, we don't need to build anything
    return make_response('Okay')

  except SkipRequestException:
    # The build was requested to be skipped
    return make_response('Okay')

  pull_robot_name = model.get_pull_robot_name(trigger)
  repo = model.get_repository(namespace, repository)
  start_build(repo, dockerfile_id, tags, name, subdir, False, trigger,
@@ -489,7 +489,8 @@ def populate_database():
                        'service': trigger.service.name})

if __name__ == '__main__':
  app.config['LOGGING_CONFIG']()
  log_level = getattr(logging, app.config['LOGGING_LEVEL'])
  logging.basicConfig(level=log_level)
  initialize_database()

  if app.config.get('POPULATE_DB_TEST_DATA', False):
@@ -18,7 +18,6 @@ python-daemon
paramiko
python-digitalocean
xhtml2pdf
logstash_formatter
redis
hiredis
git+https://github.com/DevTable/docker-py.git

@@ -30,3 +29,6 @@ git+https://github.com/NateFerrero/oauth2lib.git
alembic
sqlalchemy
python-magic
reportlab==2.7
blinker
raven
@@ -6,10 +6,11 @@ Flask-Principal==0.4.0
Flask-RESTful==0.2.12
Jinja2==2.7.2
Mako==0.9.1
MarkupSafe==0.19
MarkupSafe==0.21
Pillow==2.4.0
PyGithub==1.24.1
PyMySQL==0.6.1
PyMySQL==0.6.2
PyPDF2==1.21
SQLAlchemy==0.9.4
Werkzeug==0.9.4
alembic==0.6.4

@@ -20,30 +21,28 @@ blinker==1.3
boto==2.27.0
git+https://github.com/DevTable/docker-py.git
ecdsa==0.11
gevent==1.0
gevent==1.0.1
greenlet==0.4.2
gunicorn==18.0
hiredis==0.1.2
html5lib==1.0b3
hiredis==0.1.3
html5lib==0.999
itsdangerous==0.24
jsonschema==2.3.0
lockfile==0.9.1
logstash-formatter==0.5.8
loremipsum==1.0.2
marisa-trie==0.6
mixpanel-py==3.1.2
mock==1.0.1
git+https://github.com/NateFerrero/oauth2lib.git
paramiko==1.13.0
peewee==2.2.2
peewee==2.2.3
py-bcrypt==0.4
pyPdf==1.13
pycrypto==2.6.1
python-daemon==1.6
python-dateutil==2.2
python-digitalocean==0.7
python-magic==0.4.6
pytz==2014.2
raven==4.2.1
redis==2.9.1
reportlab==2.7
requests==2.2.1

@@ -51,4 +50,4 @@ six==1.6.1
stripe==1.14.0
websocket-client==0.11.0
wsgiref==0.1.2
xhtml2pdf==0.0.5
xhtml2pdf==0.0.6
@@ -698,6 +698,10 @@ i.toggle-icon:hover {
  background-color: #ddd;
}

.phase-icon.pulling {
  background-color: #cab442;
}

.phase-icon.building {
  background-color: #f0ad4e;
}
@@ -235,6 +235,26 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
    };

    dataFileService.tryAsTarGz_ = function(buf, success, failure) {
      var gunzip = new Zlib.Gunzip(buf);
      var plain = null;

      try {
        plain = gunzip.decompress();
      } catch (e) {
        failure();
        return;
      }

      dataFileService.arrayToString(plain, function(result) {
        if (result) {
          dataFileService.tryAsTarGzWithStringData_(result, success, failure);
        } else {
          failure();
        }
      });
    };

    dataFileService.tryAsTarGzWithStringData_ = function(strData, success, failure) {
      var collapsePath = function(originalPath) {
        // Tar files can contain entries of the form './', so we need to collapse
        // those paths down.

@@ -248,12 +268,9 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
        return parts.join('/');
      };

      var gunzip = new Zlib.Gunzip(buf);
      var plain = gunzip.decompress();

      var handler = new MultiFile();
      handler.files = [];
      handler.processTarChunks(dataFileService.arrayToString(plain), 0);
      handler.processTarChunks(strData, 0);
      if (!handler.files.length) {
        failure();
        return;

@@ -288,8 +305,19 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
      reader.readAsText(blob);
    };

    dataFileService.arrayToString = function(buf) {
      return String.fromCharCode.apply(null, new Uint16Array(buf));
    dataFileService.arrayToString = function(buf, callback) {
      var bb = new Blob([buf], {type: 'application/octet-binary'});
      var f = new FileReader();
      f.onload = function(e) {
        callback(e.target.result);
      };
      f.onerror = function(e) {
        callback(null);
      };
      f.onabort = function(e) {
        callback(null);
      };
      f.readAsText(bb);
    };

    dataFileService.readDataArrayAsPossibleArchive = function(buf, success, failure) {

@@ -394,7 +422,7 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
    builderService.getDescription = function(name, config) {
      switch (name) {
        case 'github':
          var source = $sanitize(UtilService.textToSafeHtml(config['build_source']));
          var source = UtilService.textToSafeHtml(config['build_source']);
          var desc = '<i class="fa fa-github fa-lg" style="margin-left: 2px; margin-right: 2px"></i> Push to Github Repository ';
          desc += '<a href="https://github.com/' + source + '" target="_blank">' + source + '</a>';
          desc += '<br>Dockerfile folder: //' + UtilService.textToSafeHtml(config['subdir']);

@@ -778,7 +806,18 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
          olark('api.chat.updateVisitorStatus', {snippet: 'username: ' + userResponse.username});
        }

        if (window.Raven !== undefined) {
          Raven.setUser({
            email: userResponse.email,
            id: userResponse.username
          });
        }

        CookieService.putPermanent('quay.loggedin', 'true');
      } else {
        if (window.Raven !== undefined) {
          Raven.setUser();
        }
      }

      if (opt_callback) {

@@ -1059,7 +1098,9 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
        planDict[data.plans[i].stripeId] = data.plans[i];
      }
      plans = data.plans;
      callback(plans);
      if (plans) {
        callback(plans);
      }
    }, function() { callback([]); });
  };

@@ -1155,7 +1196,7 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading

    planService.getCardInfo(orgname, function(cardInfo) {
      if (plan.price > 0 && (previousSubscribeFailure || !cardInfo.last4)) {
        var title = cardInfo.last4 ? 'Subscribe' : 'Start Free trial<span style="display:none">{{amount}}</span>';
        var title = cardInfo.last4 ? 'Subscribe' : 'Start Trial ({{amount}} plan)';
        planService.showSubscribeDialog($scope, orgname, planId, callbacks, title);
        return;
      }

@@ -1374,6 +1415,17 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
  }]);
}

if (window.__config && window.__config.SENTRY_PUBLIC_DSN) {
  quayApp.config(function($provide) {
    $provide.decorator("$exceptionHandler", function($delegate) {
      return function(ex, cause) {
        $delegate(ex, cause);
        Raven.captureException(ex, {extra: {cause: cause}});
      };
    });
  });
}


function buildConditionalLinker($animate, name, evaluator) {
  // Based off of a solution found here: http://stackoverflow.com/questions/20325480/angularjs-whats-the-best-practice-to-add-ngif-to-a-directive-programmatically

@@ -1552,7 +1604,7 @@ quayApp.directive('entityReference', function () {
      'entity': '=entity',
      'namespace': '=namespace'
    },
    controller: function($scope, $element, UserService, $sanitize) {
    controller: function($scope, $element, UserService, UtilService) {
      $scope.getIsAdmin = function(namespace) {
        return UserService.isNamespaceAdmin(namespace);
      };

@@ -1570,10 +1622,10 @@ quayApp.directive('entityReference', function () {
        var org = UserService.getOrganization(namespace);
        if (!org) {
          // This robot is owned by the user.
          return '/user/?tab=robots&showRobot=' + $sanitize(name);
          return '/user/?tab=robots&showRobot=' + UtilService.textToSafeHtml(name);
        }

        return '/organization/' + org['name'] + '/admin?tab=robots&showRobot=' + $sanitize(name);
        return '/organization/' + org['name'] + '/admin?tab=robots&showRobot=' + UtilService.textToSafeHtml(name);
      };

      $scope.getPrefix = function(name) {

@@ -4100,7 +4152,7 @@ quayApp.directive('dockerfileCommand', function () {
    scope: {
      'command': '=command'
    },
    controller: function($scope, $element, $sanitize, Config) {
    controller: function($scope, $element, UtilService, Config) {
      var registryHandlers = {
        'quay.io': function(pieces) {
          var rnamespace = pieces[pieces.length - 2];

@@ -4137,11 +4189,11 @@ quayApp.directive('dockerfileCommand', function () {
      $scope.getCommandTitleHtml = function(title) {
        var space = title.indexOf(' ');
        if (space <= 0) {
          return $sanitize(title);
          return UtilService.textToSafeHtml(title);
        }

        var kind = $scope.getCommandKind(title);
        var sanitized = $sanitize(title.substring(space + 1));
        var sanitized = UtilService.textToSafeHtml(title.substring(space + 1));

        var handler = kindHandlers[kind || ''];
        if (handler) {

@@ -4166,7 +4218,7 @@ quayApp.directive('dockerfileView', function () {
    scope: {
      'contents': '=contents'
    },
    controller: function($scope, $element, $sanitize) {
    controller: function($scope, $element, UtilService) {
      $scope.$watch('contents', function(contents) {
        $scope.lines = [];

@@ -4181,7 +4233,7 @@ quayApp.directive('dockerfileView', function () {
        }

        var lineInfo = {
          'text': $sanitize(line),
          'text': UtilService.textToSafeHtml(line),
          'kind': kind
        };
        $scope.lines.push(lineInfo);

@@ -4232,6 +4284,9 @@ quayApp.directive('buildMessage', function () {

      case 'waiting':
        return 'Waiting for available build worker';

      case 'pulling':
        return 'Pulling base image';

      case 'building':
        return 'Building image from Dockerfile';

@@ -4265,10 +4320,14 @@ quayApp.directive('buildProgress', function () {
    controller: function($scope, $element) {
      $scope.getPercentage = function(buildInfo) {
        switch (buildInfo.phase) {
          case 'pulling':
            return buildInfo.status.pull_completion * 100;
            break;

          case 'building':
            return (buildInfo.status.current_command / buildInfo.status.total_commands) * 100;
            break;

          case 'pushing':
            return buildInfo.status.push_completion * 100;
            break;

@@ -4810,6 +4869,9 @@ quayApp.run(['$location', '$rootScope', 'Restangular', 'UserService', 'PlanServi

  $rootScope.$on('$routeChangeSuccess', function (event, current, previous) {
    $rootScope.pageClass = '';
    $rootScope.current = current.$$route;

    if (!current.$$route) { return; }

    if (current.$$route.title) {
      $rootScope.title = current.$$route.title;

@@ -4826,7 +4888,6 @@ quayApp.run(['$location', '$rootScope', 'Restangular', 'UserService', 'PlanServi
    }

    $rootScope.fixFooter = !!current.$$route.fixFooter;
    $rootScope.current = current.$$route;
  });

  $rootScope.$on('$viewContentLoaded', function(event, current) {
@@ -782,33 +782,34 @@ function RepoCtrl($scope, $sanitize, Restangular, ImageMetadataService, ApiServi
    }

    // Create the new tree.
    $scope.tree = new ImageHistoryTree(namespace, name, resp.images,
    var tree = new ImageHistoryTree(namespace, name, resp.images,
        getFirstTextLine, $scope.getTimeSince, ImageMetadataService.getEscapedFormattedCommand);

    $scope.tree.draw('image-history-container');
    $scope.tree = tree.draw('image-history-container');
    if ($scope.tree) {
      // If we already have a tag, use it
      if ($scope.currentTag) {
        $scope.tree.setTag($scope.currentTag.name);
      }

    // If we already have a tag, use it
    if ($scope.currentTag) {
      $scope.tree.setTag($scope.currentTag.name);
      // Listen for changes to the selected tag and image in the tree.
      $($scope.tree).bind('tagChanged', function(e) {
        $scope.$apply(function() { $scope.setTag(e.tag, true); });
      });

      $($scope.tree).bind('imageChanged', function(e) {
        $scope.$apply(function() { $scope.setImage(e.image.id, true); });
      });

      $($scope.tree).bind('showTagMenu', function(e) {
        $scope.$apply(function() { $scope.showTagMenu(e.tag, e.clientX, e.clientY); });
      });

      $($scope.tree).bind('hideTagMenu', function(e) {
        $scope.$apply(function() { $scope.hideTagMenu(); });
      });
    }

    // Listen for changes to the selected tag and image in the tree.
    $($scope.tree).bind('tagChanged', function(e) {
      $scope.$apply(function() { $scope.setTag(e.tag, true); });
    });

    $($scope.tree).bind('imageChanged', function(e) {
      $scope.$apply(function() { $scope.setImage(e.image.id, true); });
    });

    $($scope.tree).bind('showTagMenu', function(e) {
      $scope.$apply(function() { $scope.showTagMenu(e.tag, e.clientX, e.clientY); });
    });

    $($scope.tree).bind('hideTagMenu', function(e) {
      $scope.$apply(function() { $scope.hideTagMenu(); });
    });

    if ($routeParams.image) {
      $scope.setImage($routeParams.image);
    }

@@ -892,7 +893,7 @@ function BuildPackageCtrl($scope, Restangular, ApiService, DataFileService, $rou
    if (dockerfile && dockerfile.canRead) {
      DataFileService.blobToString(dockerfile.toBlob(), function(result) {
        $scope.$apply(function() {
          $scope.dockerFilePath = dockerfilePath;
          $scope.dockerFilePath = dockerfilePath || 'Dockerfile';
          $scope.dockerFileContents = result;
        });
      });

@@ -902,8 +903,11 @@ function BuildPackageCtrl($scope, Restangular, ApiService, DataFileService, $rou
  };

  var notarchive = function() {
    $scope.dockerFileContents = DataFileService.arrayToString(uint8array);
    $scope.loaded = true;
    DataFileService.arrayToString(uint8array, function(r) {
      $scope.dockerFilePath = 'Dockerfile';
      $scope.dockerFileContents = r;
      $scope.loaded = true;
    });
  };

  DataFileService.readDataArrayAsPossibleArchive(uint8array, archiveread, notarchive);

@@ -2386,10 +2390,10 @@ function NewOrgCtrl($scope, $routeParams, $timeout, $location, UserService, Plan
  // Load the list of plans.
  PlanService.getPlans(function(plans) {
    $scope.plans = plans;
    $scope.currentPlan = null;
    $scope.holder.currentPlan = null;
    if (requested) {
      PlanService.getPlan(requested, function(plan) {
        $scope.currentPlan = plan;
        $scope.holder.currentPlan = plan;
      });
    }
  });

@@ -2410,7 +2414,7 @@ function NewOrgCtrl($scope, $routeParams, $timeout, $location, UserService, Plan
  };

  $scope.setPlan = function(plan) {
    $scope.currentPlan = plan;
    $scope.holder.currentPlan = plan;
  };

  $scope.createNewOrg = function() {

@@ -2438,7 +2442,7 @@ function NewOrgCtrl($scope, $routeParams, $timeout, $location, UserService, Plan
  };

  // If the selected plan is free, simply move to the org page.
  if (!Features.BILLING || $scope.currentPlan.price == 0) {
  if (!Features.BILLING || $scope.holder.currentPlan.price == 0) {
    showOrg();
    return;
  }

@@ -2452,7 +2456,7 @@ function NewOrgCtrl($scope, $routeParams, $timeout, $location, UserService, Plan
    'failure': showOrg
  };

  PlanService.changePlan($scope, org.name, $scope.currentPlan.stripeId, callbacks);
  PlanService.changePlan($scope, org.name, $scope.holder.currentPlan.stripeId, callbacks);
}, function(result) {
  $scope.creating = false;
  $scope.createError = result.data.message || result.data;
@@ -186,6 +186,11 @@ ImageHistoryTree.prototype.draw = function(container) {
  // Save the container.
  this.container_ = container;

  if (!$('#' + container)[0]) {
    this.container_ = null;
    return;
  }

  // Create the tree and all its components.
  var tree = d3.layout.tree()
    .separation(function() { return 2; });

@@ -193,11 +198,10 @@ ImageHistoryTree.prototype.draw = function(container) {
  var diagonal = d3.svg.diagonal()
    .projection(function(d) { return [d.x, d.y]; });

  var rootSvg = d3.select("#" + container).append("svg:svg")
  var rootSvg = d3.select("#" + container).append("svg:svg")
    .attr("class", "image-tree");

  var vis = rootSvg.append("svg:g");

  var formatComment = this.formatComment_;
  var formatTime = this.formatTime_;
  var formatCommand = this.formatCommand_;

@@ -262,6 +266,8 @@ ImageHistoryTree.prototype.draw = function(container) {

  this.setTag_(this.currentTag_);
  this.setupOverscroll_();

  return this;
};

@@ -1129,7 +1135,12 @@ FileTreeBase.prototype.update_ = function(source) {
  };

  // Update the height of the container and the SVG.
  document.getElementById(this.container_).style.height = this.getContainerHeight_() + 'px';
  var containerElm = document.getElementById(this.container_);
  if (!containerElm) {
    return;
  }

  containerElm.style.height = this.getContainerHeight_() + 'px';
  svg.attr('height', this.getContainerHeight_());

  // Compute the flattened node list.

@@ -1691,7 +1702,12 @@ LogUsageChart.prototype.handleStateChange_ = function(e) {
 */
LogUsageChart.prototype.draw = function(container, logData, startDate, endDate) {
  // Reset the container's contents.
  document.getElementById(container).innerHTML = '<svg></svg>';
  var containerElm = document.getElementById(container);
  if (!containerElm) {
    return;
  }

  containerElm.innerHTML = '<svg></svg>';

  // Returns a date offset from the given date by "days" Days.
  var offsetDate = function(d, days) {
@@ -230,3 +230,4 @@ var saveAs = saveAs
// with an attribute `content` that corresponds to the window

if (typeof module !== 'undefined') module.exports = saveAs;
window.saveAs = saveAs;
@@ -66,7 +66,7 @@
<i ng-class="{'added': 'fa fa-plus-square', 'removed': 'fa fa-minus-square', 'changed': 'fa fa-pencil-square'}[change.kind]"></i>
<span data-title="{{change.file}}">
  <span style="color: #888;">
    <span ng-repeat="folder in getFolders(change.file)"><a href="javascript:void(0)" ng-click="setFolderFilter(getFolder(change.file), $index)">{{folder}}</a>/</span></span><span>{{getFilename(change.file)}}</span>
    <span ng-repeat="folder in getFolders(change.file) track by $index"><a href="javascript:void(0)" ng-click="setFolderFilter(getFolder(change.file), $index)">{{folder}}</a>/</span></span><span>{{getFilename(change.file)}}</span>
</span>
</div>
</div>
@@ -329,7 +329,7 @@
<form name="addTagForm" ng-submit="createOrMoveTag(toTagImage, tagToCreate, addTagForm.$invalid); addTagForm.$setPristine(); tagToCreate=''">
  <div class="modal-body">
    <input type="text" class="form-control" id="tagName" placeholder="Enter tag name"
           ng-model="tagToCreate" ng-pattern="/^([a-z0-9_]){3,30}$/" required
           ng-model="tagToCreate" ng-pattern="/^([a-z0-9_\.-]){3,30}$/" required
           ng-disabled="creatingTag">
    <div style="margin: 10px; margin-top: 20px;" ng-show="isOwnedTag(toTagImage, tagToCreate)">
      Note: <span class="label tag label-default">{{ tagToCreate }}</span> is already applied to this image.
@@ -92,6 +92,3 @@ class BaseStorage(object):

  def remove(self, path):
    raise NotImplementedError

  def get_size(self, path):
    raise NotImplementedError
@@ -80,7 +80,3 @@ class LocalStorage(BaseStorage):
      os.remove(path)
    except OSError:
      pass

  def get_size(self, path):
    path = self._init_path(path)
    return os.path.getsize(path)
@@ -171,12 +171,3 @@ class S3Storage(BaseStorage):
      path += '/'
    for key in self._s3_bucket.list(prefix=path):
      key.delete()

  def get_size(self, path):
    self._initialize_s3()
    path = self._init_path(path)
    # Lookup does a HEAD HTTP Request on the object
    key = self._s3_bucket.lookup(path)
    if not key:
      raise OSError('No such key: \'{0}\''.format(path))
    return key.size
@ -58,6 +58,7 @@
|
|||
<script src="//cdnjs.cloudflare.com/ajax/libs/bootstrap-datepicker/1.2.0/js/bootstrap-datepicker.min.js"></script>
|
||||
|
||||
<script src="//cdn.jsdelivr.net/g/bootbox@4.1.0,underscorejs@1.5.2,restangular@1.2.0,d3js@3.3.3,momentjs"></script>
|
||||
<script src="//cdn.ravenjs.com/1.1.14/jquery,native/raven.min.js"></script>
|
||||
|
||||
<script src="https://checkout.stripe.com/checkout.js"></script>
|
||||
|
||||
|
@ -73,6 +74,12 @@
<script src="/static/{{ script_path }}?v={{ cache_buster }}"></script>
{% endfor %}

+{% if sentry_public_dsn %}
+<script type="text/javascript">
+  Raven.config('{{ sentry_public_dsn }}').install();
+</script>
+{% endif %}

{% if mixpanel_key %}
<!-- start Mixpanel --><script type="text/javascript">
(function(e,b){if(!b.__SV){var a,f,i,g;window.mixpanel=b;a=e.createElement("script");a.type="text/javascript";a.async=!0;a.src=("https:"===e.location.protocol?"https:":"http:")+'//cdn.mxpnl.com/libs/mixpanel-2.2.min.js';f=e.getElementsByTagName("script")[0];f.parentNode.insertBefore(a,f);b._i=[];b.init=function(a,e,d){function f(b,h){var a=h.split(".");2==a.length&&(b=b[a[0]],h=a[1]);b[h]=function(){b.push([h].concat(Array.prototype.slice.call(arguments,0)))}}var c=b;"undefined"!==
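With the Raven script loaded above, the base template installs the browser-side client whenever `sentry_public_dsn` is present in the template context. How that variable gets populated is not shown in this hunk; one plausible wiring is a Flask context processor keyed off an assumed `SENTRY_PUBLIC_DSN` config value:

```python
# Hypothetical wiring, not part of this diff
@app.context_processor
def inject_sentry_dsn():
  return dict(sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''))
```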
Binary file not shown.
@ -39,7 +39,7 @@ class TestBuildLogs(BuildLogs):
      'total_commands': None,
      'current_command': None,
      'push_completion': 0.0,
      'image_completion': {},
      'pull_completion': 0.0,
    }

  def __init__(self, redis_host, namespace, repository, test_build_id):
@ -168,8 +168,6 @@ class TestBuildLogs(BuildLogs):
    for i in range(one_mb, image_size, one_mb):
      image_completion[image_id]['current'] = i
      new_status = deepcopy(push_status_template)
      new_status['image_completion'] = deepcopy(image_completion)

      completion = TestBuildLogs._compute_total_completion(image_completion,
                                                           num_images)
      new_status['push_completion'] = completion
@ -96,6 +96,16 @@ def resolve_or_create(repo, docker_image_id, new_ancestry):
    raise RuntimeError()


+def all_ancestors_exist(ancestors):
+  if not ancestors:
+    return True
+
+  found_count = len(list(Image
+                         .select()
+                         .where(Image.id << ancestors)))
+  return found_count == len(ancestors)
+
+
cant_fix = []
for img in query:
  try:

@ -111,7 +121,7 @@ for img in query:
    ancestor_dbids = [int(anc_id)
                      for anc_id in img.ancestors.split('/')[1:-1]]

-    if len(full_ancestry) != len(ancestor_dbids):
+    if len(full_ancestry) != len(ancestor_dbids) or not all_ancestors_exist(ancestor_dbids):
      logger.error('Image has incomplete ancestry: %s, %s, %s, %s' %
                   (img.id, img.docker_image_id, full_ancestry,
                    ancestor_dbids))
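In peewee, `field << values` compiles to a SQL `IN` clause, so `all_ancestors_exist` validates the whole ancestor list with a single query. An equivalent sketch that also reports which ids are missing (illustrative only):

```python
existing = {img.id for img in Image.select(Image.id).where(Image.id << ancestor_dbids)}
missing = set(ancestor_dbids) - existing  # non-empty means the ancestry is broken
```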
21 tools/backfilluploadingflag.py Normal file

@ -0,0 +1,21 @@
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.CRITICAL)

from data.database import ImageStorage
from app import storage

for image_storage in ImageStorage.select().where(ImageStorage.uploading == None):
  mark_path = storage.image_mark_path(None, None, None, image_storage.uuid)
  json_path = storage.image_json_path(None, None, None, image_storage.uuid)

  logger.info('Mark path: %s Json path: %s', mark_path, json_path)

  if storage.exists(json_path):
    image_storage.uploading = storage.exists(mark_path)
    logger.info('Image existed and was currently uploading: %s', image_storage.uploading)
    image_storage.save()
  else:
    logger.warning('Image does not exist.')
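A note on the query: in peewee, comparing a field to `None` with `==` compiles to `IS NULL`, so the backfill only visits rows whose `uploading` flag was never set. Presumably it is run once from the repository root (hypothetical invocation):

```
python tools/backfilluploadingflag.py
```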
@ -1,13 +1,13 @@
from data import model
from data.database import User
-from app import stripe
+from app import billing as stripe
from data.plans import get_plan

def get_private_allowed(customer):
  if not customer.stripe_id:
    return 0

-  subscription = stripe.Customer.retrieve(customer.stripe_id).subscription
+  subscription = stripe.Customer.retrieve(customer.stripe_id).get('subscription', None)
  if subscription is None:
    return 0
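The switch from attribute access to `.get('subscription', None)` goes hand in hand with importing `billing as stripe`: the billing facade may hand back a plain dict-like object (for example, a fake used outside production), where a missing subscription is an absent key rather than a `None` attribute. A sketch of the difference, under that assumption:

```python
cus = stripe.Customer.retrieve(customer.stripe_id)

# Old style: raises AttributeError on a dict-like fake without the attribute
# subscription = cus.subscription

# New style: works on any mapping; a missing key degrades to None
subscription = cus.get('subscription', None)
```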
@ -4,23 +4,27 @@ LINE_CONTINUATION_REGEX = re.compile('\s*\\\s*\n')
COMMAND_REGEX = re.compile('([A-Za-z]+)\s(.*)')

COMMENT_CHARACTER = '#'
+LATEST_TAG = 'latest'

class ParsedDockerfile(object):
  def __init__(self, commands):
    self.commands = commands

-  def get_commands_of_kind(self, kind):
+  def _get_commands_of_kind(self, kind):
    return [command for command in self.commands if command['command'] == kind]

-  def get_base_image(self):
-    image_and_tag = self.get_base_image_and_tag()
-    if not image_and_tag:
+  def _get_from_image_identifier(self):
+    from_commands = self._get_commands_of_kind('FROM')
+    if not from_commands:
      return None

-    return self.base_image_from_repo_identifier(image_and_tag)
+    return from_commands[-1]['parameters']

  @staticmethod
-  def base_image_from_repo_identifier(image_and_tag):
+  def parse_image_identifier(image_identifier):
+    """ Parses a docker image identifier, and returns a tuple of image name and tag, where the tag
+        is filled in with "latest" if left unspecified.
+    """
    # Note:
    # Dockerfile image references can be of multiple forms:
    # server:port/some/path

@ -28,29 +32,34 @@ class ParsedDockerfile(object):
    # server/some/path
    # server/some/path:tag
    # server:port/some/path:tag
-    parts = image_and_tag.strip().split(':')
+    parts = image_identifier.strip().split(':')

    if len(parts) == 1:
      # somepath
-      return parts[0]
+      return (parts[0], LATEST_TAG)

    # Otherwise, determine if the last part is a port
    # or a tag.
    if parts[-1].find('/') >= 0:
      # Last part is part of the hostname.
-      return image_and_tag
+      return (image_identifier, LATEST_TAG)

    # Remaining cases:
    # server/some/path:tag
    # server:port/some/path:tag
-    return ':'.join(parts[0:-1])
+    return (':'.join(parts[0:-1]), parts[-1])

-  def get_base_image_and_tag(self):
-    from_commands = self.get_commands_of_kind('FROM')
-    if not from_commands:
-      return None
-
-    return from_commands[-1]['parameters']
+  def get_base_image(self):
+    """ Return the base image without the tag name. """
+    return self.get_image_and_tag()[0]
+
+  def get_image_and_tag(self):
+    """ Returns the image and tag from the FROM line of the dockerfile. """
+    image_identifier = self._get_from_image_identifier()
+    if image_identifier is None:
+      return (None, None)
+
+    return self.parse_image_identifier(image_identifier)


def strip_comments(contents):
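The refactored parser now always yields an (image, tag) pair, defaulting the tag to "latest". Its behavior on the reference forms listed in the comments:

```python
ParsedDockerfile.parse_image_identifier('ubuntu')                 # ('ubuntu', 'latest')
ParsedDockerfile.parse_image_identifier('quay.io/some/repo:v1')   # ('quay.io/some/repo', 'v1')
ParsedDockerfile.parse_image_identifier('server:5000/some/path')  # ('server:5000/some/path', 'latest')
```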
28 util/exceptionlog.py Normal file

@ -0,0 +1,28 @@
from raven.contrib.flask import Sentry as FlaskSentry

class FakeSentry(object):
  pass

class Sentry(object):
  def __init__(self, app=None):
    self.app = app
    if app is not None:
      self.state = self.init_app(app)
    else:
      self.state = None

  def init_app(self, app):
    sentry_type = app.config.get('EXCEPTION_LOG_TYPE', 'FakeSentry')

    if sentry_type == 'Sentry':
      sentry = FlaskSentry(app)
    else:
      sentry = FakeSentry()

    # register extension with app
    app.extensions = getattr(app, 'extensions', {})
    app.extensions['sentry'] = sentry
    return sentry

  def __getattr__(self, name):
    return getattr(self.state, name, None)
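Turning the real reporter on is purely a configuration switch; any other value (or no value) falls back to the inert `FakeSentry`. A sketch of the relevant stack config, assuming raven's conventional `SENTRY_DSN` key for the server-side DSN:

```python
# conf/stack sketch; SENTRY_DSN is raven's usual config key, not shown in this diff
EXCEPTION_LOG_TYPE = 'Sentry'  # anything else yields FakeSentry
SENTRY_DSN = 'https://public:secret@app.getsentry.com/1234'
```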
@ -1,23 +0,0 @@
-import logging
-import logstash_formatter
-import gunicorn.glogging
-
-from gunicorn import util
-
-class LogstashLogger(gunicorn.glogging.Logger):
-  def _set_handler(self, log, output, fmt):
-    # remove previous gunicorn log handler
-    h = self._get_gunicorn_handler(log)
-    if h:
-      log.handlers.remove(h)
-
-    if output is not None:
-      if output == "-":
-        h = logging.StreamHandler()
-      else:
-        util.check_is_writeable(output)
-        h = logging.FileHandler(output)
-
-      h.setFormatter(logstash_formatter.LogstashFormatter())
-      h._gunicorn = True
-      log.addHandler(h)
@ -1,6 +1,7 @@
import urllib

from functools import wraps
+from uuid import uuid4


def parse_namespace_repository(repository):

@ -25,8 +26,19 @@ def parse_repository_name(f):
def format_robot_username(parent_username, robot_shortname):
  return '%s+%s' % (parent_username, robot_shortname)


def parse_robot_username(robot_username):
  if not '+' in robot_username:
    return None

  return robot_username.split('+', 2)
+
+
+uuid_generator = lambda: str(uuid4())
+
+
+def urn_generator(namespace_portions, id_generator=uuid_generator):
+  prefix = 'urn:%s:' % ':'.join(namespace_portions)
+  def generate_urn():
+    return prefix + id_generator()
+  return generate_urn
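Each generated URN is the namespace prefix plus a fresh UUID. For example:

```python
gen = urn_generator(['request'])
gen()  # e.g. 'urn:request:1d4c0b2e-8f3a-4c7e-9a21-5f0d6c2b9e77'
```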
@ -1,4 +1,4 @@
-to prepare a new build node host:
+to prepare a new build node host starting from a 14.04 base server:

```
sudo apt-get update

@ -10,28 +10,16 @@ check out the code, install the kernel, custom docker, nsexec, and reboot:
```
git clone https://bitbucket.org/yackob03/quay.git
cd quay
sudo gdebi --n binary_dependencies/builder/linux-headers-3.11.0-17_3.11.0-17.28_all.deb
sudo gdebi --n binary_dependencies/builder/linux-headers-3.11.0-17-generic_3.11.0-17.28_amd64.deb
sudo gdebi --n binary_dependencies/builder/linux-image-3.11.0-17-generic_3.11.0-17.28_amd64.deb
sudo gdebi --n binary_dependencies/builder/linux-image-extra-3.11.0-17-generic_3.11.0-17.28_amd64.deb
sudo gdebi --n binary_dependencies/builder/nsexec_1.22ubuntu1trusty1_amd64.deb
sudo gdebi --n binary_dependencies/builder/lxc-docker-0.9.0-tutum2_0.9.0-tutum2-20140327210604-4c49268-dirty_amd64.deb
sudo chown -R 100000:100000 /var/lib/docker
sudo shutdown -r now

sudo gdebi --n binary_dependencies/builder/lxc-docker-0.9.0_0.9.0-20140501212101-72572f0-dirty_amd64.deb
sudo usermod -v 100000-200000 -w 100000-200000 root
sudo chmod +x /var/lib/lxc
sudo chmod +x /var/lib/docker
cd ~
git clone https://bitbucket.org/yackob03/quayconfig.git
ln -s ../../quayconfig/production/ quay/conf/stack
```

pull some base images if you want (optional)
```
sudo docker pull ubuntu
sudo docker pull stackbrew/ubuntu
sudo docker pull busybox
sudo docker pull lopter/raring-base
```

start the worker

```
@ -8,17 +8,19 @@ import json
import shutil
import tarfile

-from docker import Client, APIError
+from docker import Client
+from docker.errors import APIError
from tempfile import TemporaryFile, mkdtemp
from zipfile import ZipFile
from functools import partial
from datetime import datetime, timedelta
from threading import Event
from uuid import uuid4
+from collections import defaultdict

from data.queue import dockerfile_build_queue
from data import model
-from workers.worker import Worker
+from workers.worker import Worker, WorkerUnhealthyException, JobException
from app import app, userfiles as user_files
from util.safetar import safe_extractall
from util.dockerfileparse import parse_dockerfile, ParsedDockerfile, serialize_dockerfile
@ -36,6 +38,8 @@ build_logs = app.config['BUILDLOGS']

TIMEOUT_PERIOD_MINUTES = 20
CACHE_EXPIRATION_PERIOD_HOURS = 24
+NO_TAGS = ['<none>:<none>']
+RESERVATION_TIME = (TIMEOUT_PERIOD_MINUTES + 5) * 60


class StatusWrapper(object):
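With the defaults above, RESERVATION_TIME works out to (20 + 5) * 60 = 1500 seconds, so each reservation holds the job for 25 minutes at a time.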
@ -45,7 +49,7 @@ class StatusWrapper(object):
      'total_commands': None,
      'current_command': None,
      'push_completion': 0.0,
      'image_completion': {},
      'pull_completion': 0.0,
    }

    self.__exit__(None, None, None)
@ -97,11 +101,8 @@ class StreamingDockerClient(Client):


class DockerfileBuildContext(object):
-  image_id_to_cache_time = {}
-  private_repo_tags = set()
-
  def __init__(self, build_context_dir, dockerfile_subdir, repo, tag_names,
-               push_token, build_uuid, pull_credentials=None):
+               push_token, build_uuid, cache_size_gb, pull_credentials=None):
    self._build_dir = build_context_dir
    self._dockerfile_subdir = dockerfile_subdir
    self._repo = repo
@ -110,7 +111,7 @@ class DockerfileBuildContext(object):
    self._status = StatusWrapper(build_uuid)
    self._build_logger = partial(build_logs.append_log_message, build_uuid)
    self._pull_credentials = pull_credentials
-    self._public_repos = set()
+    self._cache_size_gb = cache_size_gb

    # Note: We have two different clients here because we (potentially) login
    # with both, but with different credentials that we do not want shared between
@ -120,7 +121,9 @@

    dockerfile_path = os.path.join(self._build_dir, dockerfile_subdir,
                                   'Dockerfile')
+
+    if not os.path.exists(dockerfile_path):
+      raise RuntimeError('Build job did not contain a Dockerfile.')

    # Compute the number of steps
    with open(dockerfile_path, 'r') as dockerfileobj:
      self._parsed_dockerfile = parse_dockerfile(dockerfileobj.read())
@ -131,18 +134,22 @@
    with open(dockerfile_path, 'w') as dockerfileobj:
      dockerfileobj.write(serialize_dockerfile(self._parsed_dockerfile))

-    logger.debug('Will build and push to repo %s with tags named: %s' %
-                 (self._repo, self._tag_names))
+    logger.debug('Will build and push to repo %s with tags named: %s', self._repo,
+                 self._tag_names)

  def __enter__(self):
-    self.__cleanup_containers()
-    self.__evict_expired_images()
-    self.__cleanup()
+    try:
+      self.__cleanup_containers()
+      self.__cleanup_images()
+      self.__prune_cache()
+    except APIError:
+      message = 'Docker installation is no longer healthy.'
+      logger.exception(message)
+      raise WorkerUnhealthyException(message)
    return self

  def __exit__(self, exc_type, value, traceback):
    self.__cleanup_containers()
    self.__cleanup()

    shutil.rmtree(self._build_dir)
@ -159,15 +166,41 @@
          parsed_dockerfile.commands.insert(new_command_index, env_command)
          break

  @staticmethod
  def __total_completion(statuses, total_images):
    percentage_with_sizes = float(len(statuses.values()))/total_images
-    sent_bytes = sum([status[u'current'] for status in statuses.values()])
-    total_bytes = sum([status[u'total'] for status in statuses.values()])
+    sent_bytes = sum([status['current'] for status in statuses.values()])
+    total_bytes = sum([status['total'] for status in statuses.values()])
    return float(sent_bytes)/total_bytes*percentage_with_sizes

-  def build(self):
+  @staticmethod
+  def __monitor_completion(status_stream, required_message, status_updater, status_completion_key,
+                           num_images=0):
+    images = {}
+    for status in status_stream:
+      logger.debug('%s: %s', status_completion_key, status)
+      if 'status' in status:
+        status_msg = status['status']
+
+        if status_msg == required_message:
+          if 'progressDetail' in status and 'id' in status:
+            image_id = status['id']
+            detail = status['progressDetail']
+
+            if 'current' in detail and 'total' in detail:
+              images[image_id] = detail
+              with status_updater as status_update:
+                status_update[status_completion_key] = \
+                  DockerfileBuildContext.__total_completion(images, max(len(images), num_images))
+
+      elif 'errorDetail' in status:
+        message = 'Error pushing image.'
+        if 'message' in status['errorDetail']:
+          message = str(status['errorDetail']['message'])
+
+        raise RuntimeError(message)
+
+  def pull(self):
    # Login with the specified credentials (if any).
    if self._pull_credentials:
      logger.debug('Logging in with pull credentials: %s@%s',
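To make the weighting concrete: if two of four images have reported progress, with 15 MB sent out of a combined 20 MB total, __total_completion yields (15/20) * (2/4) = 0.375, so completion climbs as more layers begin reporting sizes.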
@ -176,21 +209,24 @@
                          registry=self._pull_credentials['registry'], reauth=True)

    # Pull the image, in case it was updated since the last build
-    base_image = self._parsed_dockerfile.get_base_image()
-    self._build_logger('Pulling base image: %s' % base_image)
-    self._build_cl.pull(base_image)
+    image_and_tag = ':'.join(self._parsed_dockerfile.get_image_and_tag())
+    self._build_logger('Pulling base image: %s' % image_and_tag)
+    pull_status = self._build_cl.pull(image_and_tag, stream=True)
+
+    self.__monitor_completion(pull_status, 'Downloading', self._status, 'pull_completion')

+  def build(self, reservation_extension_method):
    # Start the build itself.
    logger.debug('Starting build.')

    with self._status as status:
      status['total_commands'] = self._num_steps

-    logger.debug('Building to tags named: %s' % self._tag_names)
+    logger.debug('Building to tags named: %s', self._tag_names)
    context_path = os.path.join(self._build_dir, self._dockerfile_subdir)

-    logger.debug('Final context path: %s exists: %s' %
-                 (context_path, os.path.exists(context_path)))
+    logger.debug('Final context path: %s exists: %s', context_path,
+                 os.path.exists(context_path))

    build_status = self._build_cl.build(path=context_path, stream=True)
@ -216,9 +252,12 @@
        if step_increment:
          self._build_logger(status_str, build_logs.COMMAND)
          current_step = int(step_increment.group(1))
-          logger.debug('Step now: %s/%s' % (current_step, self._num_steps))
+          logger.debug('Step now: %s/%s', current_step, self._num_steps)
          with self._status as status_update:
            status_update['current_command'] = current_step
+
+          # Tell the queue that we're making progress every time we advance a step
+          reservation_extension_method(RESERVATION_TIME)
          continue
        else:
          self._build_logger(status_str)
@ -226,7 +265,7 @@
      complete = re.match(r'Successfully built ([a-z0-9]+)$', status_str)
      if complete:
        built_image = complete.group(1)
-        logger.debug('Final image ID is: %s' % built_image)
+        logger.debug('Final image ID is: %s', built_image)
        continue

      # Get the image count
@ -243,7 +282,7 @@

    for protocol in ['https', 'http']:
      registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
-      logger.debug('Attempting login to registry: %s' % registry_endpoint)
+      logger.debug('Attempting login to registry: %s', registry_endpoint)

      try:
        self._push_cl.login('$token', self._push_token, registry=registry_endpoint)
@ -252,151 +291,103 @@
        pass # Probably the wrong protocol

    for tag in self._tag_names:
-      logger.debug('Tagging image %s as %s:%s' %
-                   (built_image, self._repo, tag))
+      logger.debug('Tagging image %s as %s:%s', built_image, self._repo, tag)
      self._push_cl.tag(built_image, self._repo, tag)

    history = json.loads(self._push_cl.history(built_image))
    num_images = len(history)
    with self._status as status:
      status['total_images'] = num_images

-    logger.debug('Pushing to repo %s' % self._repo)
+    logger.debug('Pushing to repo %s', self._repo)
    resp = self._push_cl.push(self._repo, stream=True)

-    for status in resp:
-      logger.debug('Status: %s', status)
-      if u'status' in status:
-        status_msg = status[u'status']
-
-        if status_msg == 'Pushing':
-          if u'progressDetail' in status and u'id' in status:
-            image_id = status[u'id']
-            detail = status[u'progressDetail']
-
-            if u'current' in detail and 'total' in detail:
-              with self._status as status:
-                images = status['image_completion']
-
-                images[image_id] = detail
-                status['push_completion'] = \
-                  DockerfileBuildContext.__total_completion(images, num_images)
-
-      elif u'errorDetail' in status:
-        message = 'Error pushing image.'
-        if u'message' in status[u'errorDetail']:
-          message = str(status[u'errorDetail'][u'message'])
-
-        raise RuntimeError(message)
-
-  def __is_repo_public(self, repo_name):
-    if repo_name in self._public_repos:
-      return True
-
-    repo_portions = repo_name.split('/')
-    registry_hostname = 'index.docker.io'
-    local_repo_name = repo_name
-    if len(repo_portions) > 2:
-      registry_hostname = repo_portions[0]
-      local_repo_name = '/'.join(repo_portions[1:])
-
-    repo_url_template = '%s://%s/v1/repositories/%s/images'
-    protocols = ['https', 'http']
-    secure_repo_url, repo_url = [repo_url_template % (protocol, registry_hostname, local_repo_name)
-                                 for protocol in protocols]
-
-    try:
-      try:
-        repo_info = requests.get(secure_repo_url)
-      except requests.exceptions.SSLError:
-        repo_info = requests.get(repo_url)
-    except requests.exceptions.ConnectionError:
-      return False
-
-    if repo_info.status_code / 100 == 2:
-      self._public_repos.add(repo_name)
-      return True
-    else:
-      return False
+    self.__monitor_completion(resp, 'Pushing', self._status, 'push_completion', num_images)

  def __cleanup_containers(self):
    # First clean up any containers that might be holding the images
    for running in self._build_cl.containers(quiet=True):
-      logger.debug('Killing container: %s' % running['Id'])
+      logger.debug('Killing container: %s', running['Id'])
      self._build_cl.kill(running['Id'])

    # Next, remove all of the containers (which should all now be killed)
    for container in self._build_cl.containers(all=True, quiet=True):
-      logger.debug('Removing container: %s' % container['Id'])
+      logger.debug('Removing container: %s', container['Id'])
      self._build_cl.remove_container(container['Id'])

-  def __evict_expired_images(self):
-    logger.debug('Cleaning images older than %s hours.', CACHE_EXPIRATION_PERIOD_HOURS)
+  def __cleanup_images(self):
+    """ Remove tags on internal nodes, and remove images older than the expiration time. """
+    ids_to_images, ids_to_children = self.__compute_image_graph()
+
+    # Untag all internal nodes, which are usually the base images
+    for internal_id in ids_to_children.keys():
+      internal = ids_to_images[internal_id]
+      if internal['RepoTags'] != NO_TAGS:
+        for tag_name in internal['RepoTags']:
+          self._build_cl.remove_image(tag_name)
+
+    # Make sure all of the leaves have gibberish tags, and remove those older than our expiration
+    leaves = set(ids_to_images.keys()) - set(ids_to_children.keys())
+    now = datetime.now()
-    verify_removed = set()
+    for leaf_id in leaves:
+      leaf = ids_to_images[leaf_id]

-    for image in self._build_cl.images():
-      image_id = image[u'Id']
-      created = datetime.fromtimestamp(image[u'Created'])
+      created = datetime.fromtimestamp(leaf['Created'])
+      expiration = created + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS)
+      if expiration > now:
+        # Assign a new tag as a uuid to preserve this image
+        new_tag = str(uuid4())
+        self._build_cl.tag(leaf['Id'], new_tag)

-      # If we don't have a cache time, use the created time (e.g. worker reboot)
-      cache_time = self.image_id_to_cache_time.get(image_id, created)
-      expiration = cache_time + timedelta(hours=CACHE_EXPIRATION_PERIOD_HOURS)
+      # Remove all of the existing tags
+      if leaf['RepoTags'] != NO_TAGS:
+        for tag_name in leaf['RepoTags']:
+          self._build_cl.remove_image(tag_name)

-      if expiration < now:
-        logger.debug('Removing expired image: %s' % image_id)
+  def __prune_cache(self):
+    """ Remove the oldest leaf image until the cache size is the desired size. """

-        for tag in image['RepoTags']:
-          # We can forget about this particular tag if it was indeed one of our renamed tags
-          self.private_repo_tags.discard(tag)
+    logger.debug('Pruning cache to size(gb): %s', self._cache_size_gb)
+    while self.__compute_cache_size_gb() > self._cache_size_gb:
+      logger.debug('Locating the oldest image in the cache to prune.')
+      # Find the oldest tagged image and remove it
+      oldest_creation_time = datetime.max
+      oldest_image = None
+      for image in self._build_cl.images():
+        created = datetime.fromtimestamp(image['Created'])
+        if created < oldest_creation_time:
+          oldest_creation_time = created
+          oldest_image = image

-          try:
-            self._build_cl.remove_image(tag)
-          except APIError:
-            # Sometimes an upstream image removed this one
-            pass
+      logger.debug('Removing oldest image from cache: %s', oldest_image['Id'])
+      # Remove all tags on the oldest image
+      if oldest_image['RepoTags'] == NO_TAGS:
+        # Remove the image id directly since there are no tags
+        self._build_cl.remove_image(oldest_image['Id'])
+      else:
+        # Remove all tags
+        for tag_name in oldest_image['RepoTags']:
+          self._build_cl.remove_image(tag_name)

-        try:
-          self._build_cl.remove_image(image_id)
-        except APIError:
-          # Sometimes an upstream image removed this one
-          pass
-        verify_removed.add(image_id)
+  def __compute_cache_size_gb(self):
+    all_images = self._build_cl.images(all=True)
+    size_in_bytes = sum([img['Size'] for img in all_images])
+    size_in_gb = float(size_in_bytes)/1024/1024/1024
+    logger.debug('Computed cache size(gb) of: %s', size_in_gb)
+    return size_in_gb

-    # Verify that our images were actually removed
-    for image in self._build_cl.images():
-      if image['Id'] in verify_removed:
-        logger.warning('Image was not removed: %s' % image['Id'])
+  def __compute_image_graph(self):
+    all_images = self._build_cl.images(all=True)

-  def __cleanup(self):
-    # Iterate all of the images and rename the ones that aren't public. This should preserve
-    # base images and also allow the cache to function.
-    now = datetime.now()
-    for image in self._build_cl.images():
-      image_id = image[u'Id']
+    ids_to_images = {}
+    ids_to_children = defaultdict(list)
+    for image in all_images:
+      if image['ParentId'] != '':
+        ids_to_children[image['ParentId']].append(image)
+      ids_to_images[image['Id']] = image

-      if image_id not in self.image_id_to_cache_time:
-        logger.debug('Setting image %s cache time to %s', image_id, now)
-        self.image_id_to_cache_time[image_id] = now
+    return (ids_to_images, ids_to_children)

-      for tag in image['RepoTags']:
-        tag_repo = ParsedDockerfile.base_image_from_repo_identifier(tag)
-        if tag_repo != '<none>':
-          if tag_repo in self.private_repo_tags:
-            logger.debug('Repo is private and has already been renamed: %s' % tag_repo)
-          elif self.__is_repo_public(tag_repo):
-            logger.debug('Repo was deemed public: %s', tag_repo)
-          else:
-            new_name = str(uuid4())
-            logger.debug('Private repo tag being renamed %s -> %s', tag, new_name)
-            self._build_cl.tag(image_id, new_name)
-            self._build_cl.remove_image(tag)
-            self.private_repo_tags.add(new_name)


class DockerfileBuildWorker(Worker):
-  def __init__(self, *vargs, **kwargs):
+  def __init__(self, cache_size_gb, *vargs, **kwargs):
    super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)

    self._mime_processors = {
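For scale: __compute_cache_size_gb sums the raw Size fields, so 21474836480 bytes of cached images is 21474836480 / 1024^3 = 20.0 GB, exactly the default --cachegb threshold added below.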
@ -410,6 +401,7 @@ class DockerfileBuildWorker(Worker):
    }

    self._timeout = Event()
+    self._cache_size_gb = cache_size_gb

  @staticmethod
  def __prepare_zip(request_file):
@ -449,12 +441,12 @@ class DockerfileBuildWorker(Worker):

    # Iterate the running containers and kill ones that have been running more than 20 minutes
    for container in docker_cl.containers():
-      start_time = datetime.fromtimestamp(container[u'Created'])
+      start_time = datetime.fromtimestamp(container['Created'])
      running_time = datetime.now() - start_time
      if running_time > timedelta(minutes=TIMEOUT_PERIOD_MINUTES):
        logger.warning('Container has been running too long: %s with command: %s',
-                       container[u'Id'], container[u'Command'])
-        docker_cl.kill(container[u'Id'])
+                       container['Id'], container['Command'])
+        docker_cl.kill(container['Id'])
        self._timeout.set()

  def process_queue_item(self, job_details):
@ -499,30 +491,41 @@
      log_appender('error', build_logs.PHASE)
      repository_build.phase = 'error'
      repository_build.save()
-      log_appender('Unknown mime-type: %s' % c_type, build_logs.ERROR)
-      return True
+      message = 'Unknown mime-type: %s' % c_type
+      log_appender(message, build_logs.ERROR)
+      raise JobException(message)

    build_dir = self._mime_processors[c_type](docker_resource)

-    log_appender('building', build_logs.PHASE)
-    repository_build.phase = 'building'
-    repository_build.save()
-
-    with DockerfileBuildContext(build_dir, build_subdir, repo, tag_names,
-                                access_token,
-                                repository_build.uuid, pull_credentials) as build_ctxt:
-      try:
-        built_image = build_ctxt.build()
+    try:
+      with DockerfileBuildContext(build_dir, build_subdir, repo, tag_names, access_token,
+                                  repository_build.uuid, self._cache_size_gb,
+                                  pull_credentials) as build_ctxt:
+        log_appender('pulling', build_logs.PHASE)
+        repository_build.phase = 'pulling'
+        repository_build.save()
+        build_ctxt.pull()
+
+        self.extend_processing(RESERVATION_TIME)
+
+        log_appender('building', build_logs.PHASE)
+        repository_build.phase = 'building'
+        repository_build.save()
+        built_image = build_ctxt.build(self.extend_processing)

        if not built_image:
          log_appender('error', build_logs.PHASE)
          repository_build.phase = 'error'
          repository_build.save()

+          message = 'Unable to build dockerfile.'
          if self._timeout.is_set():
-            log_appender('Build step was terminated after %s minutes.' % TIMEOUT_PERIOD_MINUTES,
-                         build_logs.ERROR)
-          else:
-            log_appender('Unable to build dockerfile.', build_logs.ERROR)
-          return True
+            message = 'Build step was terminated after %s minutes.' % TIMEOUT_PERIOD_MINUTES
+
+          log_appender(message, build_logs.ERROR)
+          raise JobException(message)

+        self.extend_processing(RESERVATION_TIME)

        log_appender('pushing', build_logs.PHASE)
        repository_build.phase = 'pushing'
@ -534,15 +537,17 @@
        repository_build.phase = 'complete'
        repository_build.save()

+      return True
-    except Exception as exc:
-      log_appender('error', build_logs.PHASE)
-      logger.exception('Exception when processing request.')
-      repository_build.phase = 'error'
-      repository_build.save()
-      log_appender(str(exc), build_logs.ERROR)
-      return True
+    except WorkerUnhealthyException as exc:
+      # Need a separate handler for this so it doesn't get caught by catch all below
+      raise exc
+    except Exception as exc:
+      log_appender('error', build_logs.PHASE)
+      logger.exception('Exception when processing request.')
+      repository_build.phase = 'error'
+      repository_build.save()
+      log_appender(str(exc), build_logs.ERROR)
+      raise JobException(str(exc))


desc = 'Worker daemon to monitor dockerfile build'
@ -551,10 +556,13 @@ parser.add_argument('-D', action='store_true', default=False,
                    help='Run the worker in daemon mode.')
parser.add_argument('--log', default='dockerfilebuild.log',
                    help='Specify the log file for the worker as a daemon.')
+parser.add_argument('--cachegb', default=20, type=float,
+                    help='Maximum cache size in gigabytes.')
args = parser.parse_args()


-worker = DockerfileBuildWorker(dockerfile_build_queue, reservation_seconds=60*60) # 1 hour
+worker = DockerfileBuildWorker(args.cachegb, dockerfile_build_queue,
+                               reservation_seconds=RESERVATION_TIME)

if args.D:
  handler = logging.FileHandler(args.log)
@ -9,6 +9,19 @@ from datetime import datetime, timedelta

logger = logging.getLogger(__name__)

+class JobException(Exception):
+  """ A job exception is an exception that is caused by something being malformed in the job. When
+      a worker raises this exception the job will be terminated and the retry will not be returned
+      to the queue. """
+  pass
+
+
+class WorkerUnhealthyException(Exception):
+  """ When this exception is raised, the worker is no longer healthy and will not accept any more
+      work. When this is raised while processing a queue item, the item should be returned to the
+      queue along with another retry. """
+  pass
+
+
class Worker(object):
  def __init__(self, queue, poll_period_seconds=30, reservation_seconds=300,
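The two exception types give process_queue_item three distinct outcomes: return normally to complete the item, raise JobException to fail it and consume a retry, or raise WorkerUnhealthyException to hand the item back untouched and stop the worker. A minimal sketch of a subclass using them (a hypothetical worker, not part of this commit):

```python
class EchoWorker(Worker):
  def process_queue_item(self, job_details):
    if 'payload' not in job_details:
      # Malformed job: terminate it and burn the retry
      raise JobException('Job is missing its payload.')

    if not backend_is_reachable():  # hypothetical health check
      # Return the item with its retry restored and stop taking work
      raise WorkerUnhealthyException('Backend is unreachable.')

    print(job_details['payload'])
```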
@ -19,6 +32,7 @@ class Worker(object):
    self._watchdog_period_seconds = watchdog_period_seconds
    self._stop = Event()
    self._queue = queue
+    self.current_queue_item = None

  def process_queue_item(self, job_details):
    """ Return True if complete, False if it should be retried. """
@ -28,24 +42,37 @@ class Worker(object):
    """ Function that gets run once every watchdog_period_seconds. """
    pass

+  def extend_processing(self, seconds_from_now):
+    if self.current_queue_item is not None:
+      self._queue.extend_processing(self.current_queue_item, seconds_from_now)
+
  def poll_queue(self):
    logger.debug('Getting work item from queue.')

-    item = self._queue.get()
-    while item:
-      logger.debug('Queue gave us some work: %s' % item.body)
+    self.current_queue_item = self._queue.get()
+    while self.current_queue_item:
+      logger.debug('Queue gave us some work: %s' % self.current_queue_item.body)

-      job_details = json.loads(item.body)
+      job_details = json.loads(self.current_queue_item.body)

-      if self.process_queue_item(job_details):
-        self._queue.complete(item)
-      else:
-        logger.warning('An error occurred processing request: %s' % item.body)
-        self._queue.incomplete(item)
+      try:
+        self.process_queue_item(job_details)
+        self._queue.complete(self.current_queue_item)
+      except JobException:
+        logger.warning('An error occurred processing request: %s', self.current_queue_item.body)
+        self._queue.incomplete(self.current_queue_item)
+      except WorkerUnhealthyException:
+        logger.error('The worker has encountered an error and will not take new jobs.')
+        self._stop.set()
+        self._queue.incomplete(self.current_queue_item, restore_retry=True)
+      finally:
+        self.current_queue_item = None

-      item = self._queue.get(processing_time=self._reservation_seconds)
+      if not self._stop.is_set():
+        self.current_queue_item = self._queue.get(processing_time=self._reservation_seconds)

-    logger.debug('No more work.')
+    if not self._stop.is_set():
+      logger.debug('No more work.')

  def start(self):
    logger.debug("Scheduling worker.")
@ -58,7 +85,7 @@ class Worker(object):
    self._sched.add_interval_job(self.watchdog, seconds=self._watchdog_period_seconds)

    signal.signal(signal.SIGTERM, self.join)
-    signal.signal(signal.SIGINT, self.join)
+    signal.signal(signal.SIGINT, self.join)

    while not self._stop.wait(1):
      pass
@ -70,3 +97,8 @@ class Worker(object):
  def join(self, signal_num=None, stack_frame=None):
    logger.debug('Shutting down worker gracefully.')
    self._stop.set()
+
+    # Give back the retry that we took for this queue item so that if it were down to zero
+    # retries it will still be picked up by another worker
+    if self.current_queue_item is not None:
+      self._queue.incomplete(self.current_queue_item, restore_retry=True)