Merge branch 'master' into ackbar

Commit 045614c6c8 by Joseph Schorr, 2015-02-09 17:16:42 -05:00
29 changed files with 920 additions and 246 deletions


@ -9,7 +9,7 @@ ENV HOME /root
RUN apt-get update # 29JAN2015
# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev
RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev
# Build the python dependencies
ADD requirements.txt requirements.txt
@ -28,7 +28,7 @@ RUN npm install -g grunt-cli
ADD grunt grunt
RUN cd grunt && npm install
RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev
RUN apt-get autoremove -y
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

app.py (38 changed lines)

@ -9,6 +9,7 @@ from flask.ext.mail import Mail
import features
from avatars.avatars import Avatar
from storage import Storage
@ -22,16 +23,50 @@ from data.buildlogs import BuildLogs
from data.archivedlogs import LogArchive
from data.userevent import UserEventsBuilderModule
from data.queue import WorkQueue
from util.analytics import Analytics
from util.exceptionlog import Sentry
from util.names import urn_generator
from util.oauth import GoogleOAuthConfig, GithubOAuthConfig
from util.signing import Signer
from util.queuemetrics import QueueMetrics
from util.config.provider import FileConfigProvider, TestConfigProvider
from util.config.configutil import generate_secret_key
from util.config.superusermanager import SuperUserManager
# pylint: disable=invalid-name,too-many-public-methods,too-few-public-methods,too-many-ancestors
class Config(BaseConfig):
""" Flask config enhanced with a `from_yamlfile` method """
def from_yamlfile(self, config_file):
with open(config_file) as f:
c = yaml.load(f)
if not c:
logger.debug('Empty YAML config file')
return
if isinstance(c, str):
raise Exception('Invalid YAML config file: ' + str(c))
for key in c.iterkeys():
if key.isupper():
self[key] = c[key]
class Flask(BaseFlask):
""" Extends the Flask class to implement our custom Config class. """
def make_config(self, instance_relative=False):
root_path = self.instance_path if instance_relative else self.root_path
return Config(root_path, self.default_config)
OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/'
OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml'
OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py'
OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
LICENSE_FILENAME = 'conf/stack/license.enc'
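A minimal sketch, assuming the `app` object created just below and an `import os` at the top of the module, of how the YAML override file would be layered onto the defaults via the Config class above (the actual loading order in app.py is not shown in this hunk):
# Hypothetical usage sketch; only UPPERCASE keys from the YAML file are copied in.
if os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME):
  app.config.from_yamlfile(OVERRIDE_CONFIG_YAML_FILENAME)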
app = Flask(__name__)
logger = logging.getLogger(__name__)
@ -107,6 +142,7 @@ build_logs = BuildLogs(app)
authentication = UserAuthentication(app)
userevents = UserEventsBuilderModule(app)
superusers = SuperUserManager(app)
signer = Signer(app, OVERRIDE_CONFIG_DIRECTORY)
queue_metrics = QueueMetrics(app)
tf = app.config['DB_TRANSACTION_FACTORY']


@ -3,6 +3,7 @@
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz Joey's Mac
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 jimmyzelinskie
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNvw8qo9m8np7yQ/Smv/oklM8bo8VyNRZriGYBDuolWDL/mZpYCQnZJXphQo7RFdNABYistikjJlBuuwUohLf2uSq0iKoFa2TgwI43wViWzvuzU4nA02/ITD5BZdmWAFNyIoqeB50Ol4qUgDwLAZ+7Kv7uCi6chcgr9gTi99jY3GHyZjrMiXMHGVGi+FExFuzhVC2drKjbz5q6oRfQeLtNfG4psl5GU3MQU6FkX4fgoCx0r9R48/b7l4+TT7pWblJQiRfeldixu6308vyoTUEHasdkU3/X0OTaGz/h5XqTKnGQc6stvvoED3w+L3QFp0H5Z8sZ9stSsitmCBrmbcKZ jakemoshenko
write_files:
- path: /root/overrides.list


@ -382,6 +382,24 @@ class ImageStorageTransformation(BaseModel):
name = CharField(index=True, unique=True)
class ImageStorageSignatureKind(BaseModel):
name = CharField(index=True, unique=True)
class ImageStorageSignature(BaseModel):
storage = ForeignKeyField(ImageStorage, index=True)
kind = ForeignKeyField(ImageStorageSignatureKind)
signature = TextField(null=True)
uploading = BooleanField(default=True, null=True)
class Meta:
database = db
read_slaves = (read_slave,)
indexes = (
(('kind', 'storage'), True),
)
class DerivedImageStorage(BaseModel):
source = ForeignKeyField(ImageStorage, null=True, related_name='source')
derivative = ForeignKeyField(ImageStorage, related_name='derivative')
@ -580,4 +598,4 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
Notification, ImageStorageLocation, ImageStoragePlacement,
ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification,
RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
TeamMemberInvite]
TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind]


@ -0,0 +1,55 @@
"""Add signature storage
Revision ID: 5ad999136045
Revises: 228d1af6af1c
Create Date: 2015-02-05 15:01:54.989573
"""
# revision identifiers, used by Alembic.
revision = '5ad999136045'
down_revision = '228d1af6af1c'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.create_table('imagestoragesignaturekind',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignaturekind'))
)
op.create_index('imagestoragesignaturekind_name', 'imagestoragesignaturekind', ['name'], unique=True)
op.create_table('imagestoragesignature',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('storage_id', sa.Integer(), nullable=False),
sa.Column('kind_id', sa.Integer(), nullable=False),
sa.Column('signature', sa.Text(), nullable=True),
sa.Column('uploading', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['kind_id'], ['imagestoragesignaturekind.id'], name=op.f('fk_imagestoragesignature_kind_id_imagestoragesignaturekind')),
sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestoragesignature_storage_id_imagestorage')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignature'))
)
op.create_index('imagestoragesignature_kind_id', 'imagestoragesignature', ['kind_id'], unique=False)
op.create_index('imagestoragesignature_kind_id_storage_id', 'imagestoragesignature', ['kind_id', 'storage_id'], unique=True)
op.create_index('imagestoragesignature_storage_id', 'imagestoragesignature', ['storage_id'], unique=False)
### end Alembic commands ###
op.bulk_insert(tables.imagestoragetransformation,
[
{'id': 2, 'name':'aci'},
])
op.bulk_insert(tables.imagestoragesignaturekind,
[
{'id': 1, 'name':'gpg2'},
])
def downgrade(tables):
### commands auto generated by Alembic - please adjust! ###
op.drop_table('imagestoragesignature')
op.drop_table('imagestoragesignaturekind')
### end Alembic commands ###


@ -14,7 +14,8 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
ExternalNotificationEvent, ExternalNotificationMethod,
RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
DerivedImageStorage, ImageStorageTransformation, random_string_generator,
db, BUILD_PHASE, QuayUserField, validate_database_url, db_for_update)
db, BUILD_PHASE, QuayUserField, ImageStorageSignature,
ImageStorageSignatureKind, validate_database_url, db_for_update)
from peewee import JOIN_LEFT_OUTER, fn
from util.validation import (validate_username, validate_email, validate_password,
INVALID_PASSWORD_MESSAGE)
@ -1323,7 +1324,28 @@ def find_create_or_link_image(docker_image_id, repository, username, translation
ancestors='/')
def find_or_create_derived_storage(source, transformation_name, preferred_location):
def find_or_create_storage_signature(storage, signature_kind):
found = lookup_storage_signature(storage, signature_kind)
if found is None:
kind = ImageStorageSignatureKind.get(name=signature_kind)
found = ImageStorageSignature.create(storage=storage, kind=kind)
return found
def lookup_storage_signature(storage, signature_kind):
kind = ImageStorageSignatureKind.get(name=signature_kind)
try:
return (ImageStorageSignature
.select()
.where(ImageStorageSignature.storage == storage,
ImageStorageSignature.kind == kind)
.get())
except ImageStorageSignature.DoesNotExist:
return None
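A short usage sketch of the two helpers above ('derived' stands in for any ImageStorage row; 'gpg2' is the signature kind seeded by the migration earlier in this commit):
# Illustrative only; detached_signature_text is a placeholder for the ASCII-armored signature.
entry = find_or_create_storage_signature(derived, 'gpg2')
entry.signature = detached_signature_text
entry.uploading = False
entry.save()
assert lookup_storage_signature(derived, 'gpg2') is not None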
def find_derived_storage(source, transformation_name):
try:
found = (ImageStorage
.select(ImageStorage, DerivedImageStorage)
@ -1336,11 +1358,19 @@ def find_or_create_derived_storage(source, transformation_name, preferred_locati
found.locations = {placement.location.name for placement in found.imagestorageplacement_set}
return found
except ImageStorage.DoesNotExist:
logger.debug('Creating storage derived from source: %s', source.uuid)
trans = ImageStorageTransformation.get(name=transformation_name)
new_storage = _create_storage(preferred_location)
DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
return new_storage
return None
def find_or_create_derived_storage(source, transformation_name, preferred_location):
existing = find_derived_storage(source, transformation_name)
if existing is not None:
return existing
logger.debug('Creating storage derived from source: %s', source.uuid)
trans = ImageStorageTransformation.get(name=transformation_name)
new_storage = _create_storage(preferred_location)
DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
return new_storage
def delete_derived_storage_by_uuid(storage_uuid):


@ -3,6 +3,7 @@ import urlparse
import json
import string
import datetime
import os
# Register the various exceptions via decorators.
import endpoints.decorated
@ -32,6 +33,23 @@ profile = logging.getLogger('application.profiler')
route_data = None
CACHE_BUSTERS_JSON = 'static/dist/cachebusters.json'
CACHE_BUSTERS = None
def get_cache_busters():
""" Retrieves the cache busters hashes. """
global CACHE_BUSTERS
if CACHE_BUSTERS is not None:
return CACHE_BUSTERS
if not os.path.exists(CACHE_BUSTERS_JSON):
return {}
with open(CACHE_BUSTERS_JSON, 'r') as f:
CACHE_BUSTERS = json.loads(f.read())
return CACHE_BUSTERS
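For reference, static/dist/cachebusters.json is generated by the grunt-cachebuster task configured later in this commit; it maps each built asset path to a content hash. A rough sketch of its shape and how it is consumed (paths and hash values are illustrative):
# cachebusters.json roughly looks like:
#   {"dist/quay-frontend.min.js": "3f2a91...", "dist/quay-frontend.css": "9e107d..."}
busters = get_cache_busters()
cache_buster = busters.get('dist/quay-frontend.css', 'missing')
# The templates then append it as a query string: /static/dist/quay-frontend.css?v=<hash>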
class RepoPathConverter(BaseConverter):
regex = '[\.a-zA-Z0-9_\-]+/[\.a-zA-Z0-9_\-]+'
weight = 200
@ -113,17 +131,15 @@ def list_files(path, extension):
filepath = 'static/' + path
return [join_path(dp, f) for dp, dn, files in os.walk(filepath) for f in files if matches(f)]
SAVED_CACHE_STRING = random_string()
def render_page_template(name, **kwargs):
if app.config.get('DEBUGGING', False):
debugging = app.config.get('DEBUGGING', False)
if debugging:
# If DEBUGGING is enabled, then we load the full set of individual JS and CSS files
# from the file system.
library_styles = list_files('lib', 'css')
main_styles = list_files('css', 'css')
library_scripts = list_files('lib', 'js')
main_scripts = list_files('js', 'js')
cache_buster = 'debugging'
file_lists = [library_styles, main_styles, library_scripts, main_scripts]
for file_list in file_lists:
@ -133,7 +149,6 @@ def render_page_template(name, **kwargs):
main_styles = ['dist/quay-frontend.css']
library_scripts = []
main_scripts = ['dist/quay-frontend.min.js']
cache_buster = SAVED_CACHE_STRING
use_cdn = app.config.get('USE_CDN', True)
if request.args.get('use_cdn') is not None:
@ -142,6 +157,12 @@ def render_page_template(name, **kwargs):
external_styles = get_external_css(local=not use_cdn)
external_scripts = get_external_javascript(local=not use_cdn)
def add_cachebusters(filenames):
cachebusters = get_cache_busters()
for filename in filenames:
cache_buster = cachebusters.get(filename, random_string()) if not debugging else 'debugging'
yield (filename, cache_buster)
def get_oauth_config():
oauth_config = {}
for oauth_app in oauth_apps:
@ -153,13 +174,14 @@ def render_page_template(name, **kwargs):
if len(app.config.get('CONTACT_INFO', [])) == 1:
contact_href = app.config['CONTACT_INFO'][0]
resp = make_response(render_template(name, route_data=json.dumps(get_route_data()),
resp = make_response(render_template(name,
route_data=json.dumps(get_route_data()),
external_styles=external_styles,
external_scripts=external_scripts,
main_styles=main_styles,
library_styles=library_styles,
main_scripts=main_scripts,
library_scripts=library_scripts,
main_styles=add_cachebusters(main_styles),
library_styles=add_cachebusters(library_styles),
main_scripts=add_cachebusters(main_scripts),
library_scripts=add_cachebusters(library_scripts),
feature_set=json.dumps(features.get_features()),
config_set=json.dumps(getFrontendVisibleConfig(app.config)),
oauth_set=json.dumps(get_oauth_config()),
@ -169,9 +191,10 @@ def render_page_template(name, **kwargs):
sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
is_debug=str(app.config.get('DEBUGGING', False)).lower(),
show_chat=features.OLARK_CHAT,
cache_buster=cache_buster,
has_billing=features.BILLING,
contact_href=contact_href,
hostname=app.config['SERVER_HOSTNAME'],
preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
**kwargs))
resp.headers['X-FRAME-OPTIONS'] = 'DENY'


@ -2,11 +2,10 @@ import logging
import json
import hashlib
from flask import redirect, Blueprint, abort, send_file, request
from flask import redirect, Blueprint, abort, send_file, make_response
from app import app
from app import app, signer
from auth.auth import process_auth
from auth.auth_context import get_authenticated_user
from auth.permissions import ReadRepositoryPermission
from data import model
from data import database
@ -15,13 +14,16 @@ from storage import Storage
from util.queuefile import QueueFile
from util.queueprocess import QueueProcess
from util.gzipwrap import GzipWrap
from util.dockerloadformat import build_docker_load_stream
from formats.squashed import SquashedDockerImage
from formats.aci import ACIImage
# pylint: disable=invalid-name
verbs = Blueprint('verbs', __name__)
logger = logging.getLogger(__name__)
def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, image_id_list):
def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json,
image_id_list):
store = Storage(app)
# For performance reasons, we load the full image list here, cache it, then disconnect from
@ -42,20 +44,43 @@ def _open_stream(namespace, repository, tag, synthetic_image_id, image_json, ima
current_image_path)
current_image_id = current_image_entry.id
logger.debug('Returning image layer %s: %s' % (current_image_id, current_image_path))
logger.debug('Returning image layer %s: %s', current_image_id, current_image_path)
yield current_image_stream
stream = build_docker_load_stream(namespace, repository, tag, synthetic_image_id, image_json,
stream = formatter.build_stream(namespace, repository, tag, synthetic_image_id, image_json,
get_next_image, get_next_layer)
return stream.read
def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, queue_file):
def _sign_synthetic_image(verb, linked_storage_uuid, queue_file):
signature = None
try:
signature = signer.detached_sign(queue_file)
except:
logger.exception('Exception when signing %s image %s', verb, linked_storage_uuid)
return
# Setup the database (since this is a new process) and then disconnect immediately
# once the operation completes.
if not queue_file.raised_exception:
with database.UseThenDisconnect(app.config):
try:
derived = model.get_storage_by_uuid(linked_storage_uuid)
except model.InvalidImageException:
return
signature_entry = model.find_or_create_storage_signature(derived, signer.name)
signature_entry.signature = signature
signature_entry.uploading = False
signature_entry.save()
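Because the stored value is a detached, ASCII-armored GPG signature over the exact bytes streamed to the client, it can be checked with any standard detached-signature verification. A hedged client-side sketch using the same pygpgme library (file names are illustrative):
# Hypothetical verification sketch.
import gpgme
ctx = gpgme.Context()
with open('quay-signing-key.pub', 'rb') as pub:
  ctx.import_(pub)
with open('image.aci.asc', 'rb') as sig, open('image.aci', 'rb') as data:
  results = ctx.verify(sig, data, None)   # returns a list of signature results to inspect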
def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_locations, queue_file):
store = Storage(app)
def handle_exception(ex):
logger.debug('Exception when building squashed image %s: %s', linked_storage_uuid, ex)
logger.debug('Exception when building %s image %s: %s', verb, linked_storage_uuid, ex)
with database.UseThenDisconnect(app.config):
model.delete_derived_storage_by_uuid(linked_storage_uuid)
@ -67,86 +92,193 @@ def _write_synthetic_image_to_storage(linked_storage_uuid, linked_locations, que
queue_file.close()
if not queue_file.raised_exception:
# Setup the database (since this is a new process) and then disconnect immediately
# once the operation completes.
with database.UseThenDisconnect(app.config):
done_uploading = model.get_storage_by_uuid(linked_storage_uuid)
done_uploading.uploading = False
done_uploading.save()
@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
@process_auth
def get_squashed_tag(namespace, repository, tag):
# pylint: disable=too-many-locals
def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
permission = ReadRepositoryPermission(namespace, repository)
if permission.can() or model.repository_is_public(namespace, repository):
# Lookup the requested tag.
try:
tag_image = model.get_tag_image(namespace, repository, tag)
except model.DataModelException:
abort(404)
# Lookup the tag's image and storage.
repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
if not repo_image:
abort(404)
# pylint: disable=no-member
if not permission.can() and not model.repository_is_public(namespace, repository):
abort(403)
# Log the action.
track_and_log('repo_verb', repo_image.repository, tag=tag, verb='squash')
# Lookup the requested tag.
try:
tag_image = model.get_tag_image(namespace, repository, tag)
except model.DataModelException:
abort(404)
store = Storage(app)
derived = model.find_or_create_derived_storage(repo_image.storage, 'squash',
store.preferred_locations[0])
if not derived.uploading:
logger.debug('Derived image %s exists in storage', derived.uuid)
derived_layer_path = store.image_layer_path(derived.uuid)
download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
if download_url:
logger.debug('Redirecting to download URL for derived image %s', derived.uuid)
return redirect(download_url)
# Lookup the tag's image and storage.
repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
if not repo_image:
abort(404)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
# If there is a data checker, call it first.
uuid = repo_image.storage.uuid
image_json = None
logger.debug('Sending cached derived image %s', derived.uuid)
return send_file(store.stream_read_file(derived.locations, derived_layer_path))
# Load the ancestry for the image.
logger.debug('Building and returning derived image %s', derived.uuid)
uuid = repo_image.storage.uuid
ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
full_image_list = json.loads(ancestry_data)
# Load the image's JSON layer.
if checker is not None:
image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
image_json = json.loads(image_json_data)
# Calculate a synthetic image ID.
synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':squash').hexdigest()
if not checker(image_json):
logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
abort(404)
# Create a queue process to generate the data. The queue files will read from the process
# and send the results to the client and storage.
def _cleanup():
# Close any existing DB connection once the process has exited.
database.close_db_filter(None)
return (repo_image, tag_image, image_json)
args = (namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
queue_process = QueueProcess(_open_stream,
8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
args, finished=_cleanup)
client_queue_file = QueueFile(queue_process.create_queue(), 'client')
storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
# pylint: disable=too-many-locals
def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
# Verify that the image exists and that we have access to it.
store = Storage(app)
result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
(repo_image, tag_image, image_json) = result
# Start building.
queue_process.run()
# Lookup the derived image storage for the verb.
derived = model.find_derived_storage(repo_image.storage, verb)
if derived is None or derived.uploading:
abort(404)
# Start the storage saving.
storage_args = (derived.uuid, derived.locations, storage_queue_file)
QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
# Check if we have a valid signer configured.
if not signer.name:
abort(404)
# Lookup the signature for the verb.
signature_entry = model.lookup_storage_signature(derived, signer.name)
if signature_entry is None:
abort(404)
# Return the signature.
return make_response(signature_entry.signature)
# pylint: disable=too-many-locals
def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
# Verify that the image exists and that we have access to it.
store = Storage(app)
result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
(repo_image, tag_image, image_json) = result
# Log the action.
track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)
# Lookup/create the derived image storage for the verb.
derived = model.find_or_create_derived_storage(repo_image.storage, verb,
store.preferred_locations[0])
if not derived.uploading:
logger.debug('Derived %s image %s exists in storage', verb, derived.uuid)
derived_layer_path = store.image_layer_path(derived.uuid)
download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
if download_url:
logger.debug('Redirecting to download URL for derived %s image %s', verb, derived.uuid)
return redirect(download_url)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
# Return the client's data.
return send_file(client_queue_file)
logger.debug('Sending cached derived %s image %s', verb, derived.uuid)
return send_file(store.stream_read_file(derived.locations, derived_layer_path))
# Load the ancestry for the image.
uuid = repo_image.storage.uuid
logger.debug('Building and returning derived %s image %s', verb, derived.uuid)
ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
full_image_list = json.loads(ancestry_data)
# Load the image's JSON layer.
if not image_json:
image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
image_json = json.loads(image_json_data)
# Calculate a synthetic image ID.
synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest()
def _cleanup():
# Close any existing DB connection once the process has exited.
database.close_db_filter(None)
# Create a queue process to generate the data. The queue files will read from the process
# and send the results to the client and storage.
args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
queue_process = QueueProcess(_open_stream,
8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
args, finished=_cleanup)
client_queue_file = QueueFile(queue_process.create_queue(), 'client')
storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
# If signing is required, add a QueueFile for signing the image as we stream it out.
signing_queue_file = None
if sign and signer.name:
signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
# Start building.
queue_process.run()
# Start the storage saving.
storage_args = (verb, derived.uuid, derived.locations, storage_queue_file)
QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
if sign and signer.name:
signing_args = (verb, derived.uuid, signing_queue_file)
QueueProcess.run_process(_sign_synthetic_image, signing_args, finished=_cleanup)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
# Return the client's data.
return send_file(client_queue_file)
def os_arch_checker(os, arch):
def checker(image_json):
# Verify the architecture and os.
operating_system = image_json.get('os', 'linux')
if operating_system != os:
return False
architecture = image_json.get('architecture', 'amd64')
# Note: Some older Docker images have 'x86_64' rather than 'amd64'.
# We allow the conversion here.
if architecture == 'x86_64' and operating_system == 'linux':
architecture = 'amd64'
if architecture != arch:
return False
return True
return checker
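A quick illustration of the checker produced above (the image JSON dicts are made up):
checker = os_arch_checker('linux', 'amd64')
checker({'os': 'linux', 'architecture': 'amd64'})    # True
checker({'os': 'linux', 'architecture': 'x86_64'})   # True: normalized to amd64
checker({'os': 'linux', 'architecture': 'arm'})      # False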
@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
@process_auth
# pylint: disable=unused-argument
def get_aci_signature(server, namespace, repository, tag, os, arch):
return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
os=os, arch=arch)
@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
@process_auth
# pylint: disable=unused-argument
def get_aci_image(server, namespace, repository, tag, os, arch):
return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)
@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
@process_auth
def get_squashed_tag(namespace, repository, tag):
return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImage())
abort(403)
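Assuming the verbs blueprint keeps the '/c1' URL prefix referenced by the ac-discovery meta tag added later in this commit, the new endpoints are plain HTTP GETs; a hedged sketch (hostname, namespace and repository are illustrative):
import requests
base = 'https://quay.example.com/c1/aci/quay.example.com/myorg/myrepo/latest'
aci = requests.get(base + '/aci/linux/amd64/')   # streams the ACI image, building it on first request
sig = requests.get(base + '/sig/linux/amd64/')   # detached signature; 404 until signing has completed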


@ -1,7 +1,7 @@
import logging
from flask import (abort, redirect, request, url_for, make_response, Response,
Blueprint, send_from_directory, jsonify)
Blueprint, send_from_directory, jsonify, send_file)
from avatar_generator import Avatar
from flask.ext.login import current_user
@ -10,7 +10,7 @@ from health.healthcheck import get_healthchecker
from data import model
from data.model.oauth import DatabaseAuthorizationProvider
from app import app, billing as stripe, build_logs, avatar
from app import app, billing as stripe, build_logs, avatar, signer
from auth.auth import require_session_login, process_oauth
from auth.permissions import (AdministerOrganizationPermission, ReadRepositoryPermission,
SuperUserPermission)
@ -63,6 +63,14 @@ def snapshot(path = ''):
abort(404)
@web.route('/aci-signing-key')
@no_cache
def aci_signing_key():
if not signer.name:
abort(404)
return send_file(signer.public_key_path)
@web.route('/plans/')
@no_cache
@route_show_if(features.BILLING)

formats/__init__.py (new, empty file)

formats/aci.py (new file, 196 lines)

@ -0,0 +1,196 @@
from app import app
from util.streamlayerformat import StreamLayerMerger
from formats.tarimageformatter import TarImageFormatter
import json
import re
# pylint: disable=bad-continuation
class ACIImage(TarImageFormatter):
""" Image formatter which produces an ACI-compatible TAR.
"""
# pylint: disable=too-many-arguments
def stream_generator(self, namespace, repository, tag, synthetic_image_id,
layer_json, get_image_iterator, get_layer_iterator):
# ACI Format (.tar):
# manifest - The JSON manifest
# rootfs - The root file system
# Yield the manifest.
yield self.tar_file('manifest', self._build_manifest(namespace, repository, tag, layer_json,
synthetic_image_id))
# Yield the merged layer data.
yield self.tar_folder('rootfs')
layer_merger = StreamLayerMerger(get_layer_iterator, path_prefix='rootfs/')
for entry in layer_merger.get_generator():
yield entry
@staticmethod
def _build_isolators(docker_config):
""" Builds ACI isolator config from the docker config. """
def _isolate_memory(memory):
return {
"name": "memory/limit",
"value": str(memory) + 'B'
}
def _isolate_swap(memory):
return {
"name": "memory/swap",
"value": str(memory) + 'B'
}
def _isolate_cpu(cpu):
return {
"name": "cpu/shares",
"value": str(cpu)
}
def _isolate_capabilities(capabilities_set_value):
capabilities_set = re.split(r'[\s,]', capabilities_set_value)
return {
"name": "capabilities/bounding-set",
"value": ' '.join(capabilities_set)
}
mappers = {
'Memory': _isolate_memory,
'MemorySwap': _isolate_swap,
'CpuShares': _isolate_cpu,
'Cpuset': _isolate_capabilities
}
isolators = []
for config_key in mappers:
value = docker_config.get(config_key)
if value:
isolators.append(mappers[config_key](value))
return isolators
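For illustration, feeding a docker config with memory and CPU limits through the helper above yields (values made up; list order follows dict iteration and is not guaranteed):
ACIImage._build_isolators({'Memory': 536870912, 'CpuShares': 512})
# -> contains {'name': 'memory/limit', 'value': '536870912B'}
#    and      {'name': 'cpu/shares',   'value': '512'}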
@staticmethod
def _build_ports(docker_config):
""" Builds the ports definitions for the ACI. """
ports = []
for docker_port_definition in docker_config.get('ports', {}):
# Formats:
# port/tcp
# port/udp
# port
protocol = 'tcp'
port_number = -1
if '/' in docker_port_definition:
(port_number, protocol) = docker_port_definition.split('/')
else:
port_number = docker_port_definition
try:
port_number = int(port_number)
ports.append({
"name": "port-%s" % port_number,
"port": port_number,
"protocol": protocol
})
except ValueError:
pass
return ports
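A worked example of the port conversion (note the helper reads a lowercase 'ports' key from the config it is given; the definitions are illustrative):
ACIImage._build_ports({'ports': {'80/tcp': {}, '53/udp': {}, '8080': {}}})
# -> contains {'name': 'port-80',   'port': 80,   'protocol': 'tcp'}
#             {'name': 'port-53',   'port': 53,   'protocol': 'udp'}
#             {'name': 'port-8080', 'port': 8080, 'protocol': 'tcp'}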
@staticmethod
def _build_volumes(docker_config):
""" Builds the volumes definitions for the ACI. """
volumes = []
names = set()
def get_name(docker_volume_path):
parts = docker_volume_path.split('/')
name = ''
while True:
name = name + parts[-1]
parts = parts[0:-1]
# set.add() returns None, so test membership explicitly before adding.
if name not in names:
names.add(name)
break
name = '/' + name
return name
for docker_volume_path in docker_config.get('volumes', {}):
volumes.append({
"name": get_name(docker_volume_path),
"path": docker_volume_path,
"readOnly": False
})
return volumes
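And a small example of the volume conversion (the path is illustrative):
ACIImage._build_volumes({'volumes': {'/var/lib/data': {}}})
# -> [{'name': 'data', 'path': '/var/lib/data', 'readOnly': False}]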
@staticmethod
def _build_manifest(namespace, repository, tag, docker_layer_data, synthetic_image_id):
""" Builds an ACI manifest from the docker layer data. """
config = docker_layer_data.get('config', {})
source_url = "%s://%s/%s/%s:%s" % (app.config['PREFERRED_URL_SCHEME'],
app.config['SERVER_HOSTNAME'],
namespace, repository, tag)
# ACI requires that the execution command be absolutely referenced. Therefore, if we find
# a relative command, we give it as an argument to /bin/sh to resolve and execute for us.
entrypoint = config.get('Entrypoint', []) or []
exec_path = entrypoint + (config.get('Cmd', []) or [])
if exec_path and not exec_path[0].startswith('/'):
exec_path = ['/bin/sh', '-c', '""%s""' % ' '.join(exec_path)]
# TODO(jschorr): ACI doesn't support : in the name, so remove any ports.
hostname = app.config['SERVER_HOSTNAME']
hostname = hostname.split(':', 1)[0]
manifest = {
"acKind": "ImageManifest",
"acVersion": "0.2.0",
"name": '%s/%s/%s/%s' % (hostname, namespace, repository, tag),
"labels": [
{
"name": "version",
"value": "1.0.0"
},
{
"name": "arch",
"value": docker_layer_data.get('architecture', 'amd64')
},
{
"name": "os",
"value": docker_layer_data.get('os', 'linux')
}
],
"app": {
"exec": exec_path,
# Below, `or 'root'` is required to replace empty string from Dockerfiles.
"user": config.get('User', '') or 'root',
"group": config.get('Group', '') or 'root',
"eventHandlers": [],
"workingDirectory": config.get('WorkingDir', '') or '/',
"environment": [{"name": key, "value": value}
for (key, value) in [e.split('=', 1) for e in (config.get('Env') or [])]],
"isolators": ACIImage._build_isolators(config),
"mountPoints": ACIImage._build_volumes(config),
"ports": ACIImage._build_ports(config),
"annotations": [
{"name": "created", "value": docker_layer_data.get('created', '')},
{"name": "homepage", "value": source_url},
{"name": "quay.io/derived-image", "value": synthetic_image_id},
]
},
}
return json.dumps(manifest)

formats/squashed.py (new file, 102 lines)

@ -0,0 +1,102 @@
from app import app
from util.gzipwrap import GZIP_BUFFER_SIZE
from util.streamlayerformat import StreamLayerMerger
from formats.tarimageformatter import TarImageFormatter
import copy
import json
class FileEstimationException(Exception):
""" Exception raised by build_docker_load_stream if the estimated size of the layer TAR
was lower than the actual size. This means the sent TAR header is wrong, and we have
to fail.
"""
pass
class SquashedDockerImage(TarImageFormatter):
""" Image formatter which produces a squashed image compatible with the `docker load`
command.
"""
# pylint: disable=too-many-arguments,too-many-locals
def stream_generator(self, namespace, repository, tag, synthetic_image_id,
layer_json, get_image_iterator, get_layer_iterator):
# Docker import V1 Format (.tar):
# repositories - JSON file containing a repo -> tag -> image map
# {image ID folder}:
# json - The layer JSON
# layer.tar - The TARed contents of the layer
# VERSION - The docker import version: '1.0'
layer_merger = StreamLayerMerger(get_layer_iterator)
# Yield the repositories file:
synthetic_layer_info = {}
synthetic_layer_info[tag + '.squash'] = synthetic_image_id
hostname = app.config['SERVER_HOSTNAME']
repositories = {}
repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info
yield self.tar_file('repositories', json.dumps(repositories))
# Yield the image ID folder.
yield self.tar_folder(synthetic_image_id)
# Yield the JSON layer data.
layer_json = SquashedDockerImage._build_layer_json(layer_json, synthetic_image_id)
yield self.tar_file(synthetic_image_id + '/json', json.dumps(layer_json))
# Yield the VERSION file.
yield self.tar_file(synthetic_image_id + '/VERSION', '1.0')
# Yield the merged layer data's header.
estimated_file_size = 0
for image in get_image_iterator():
estimated_file_size += image.storage.uncompressed_size
yield self.tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size)
# Yield the contents of the merged layer.
yielded_size = 0
for entry in layer_merger.get_generator():
yield entry
yielded_size += len(entry)
# If the yielded size is more than the estimated size (which is unlikely but possible), then
# raise an exception since the tar header will be wrong.
if yielded_size > estimated_file_size:
raise FileEstimationException()
# If the yielded size is less than the estimated size (which is likely), fill the rest with
# zeros.
if yielded_size < estimated_file_size:
to_yield = estimated_file_size - yielded_size
while to_yield > 0:
yielded = min(to_yield, GZIP_BUFFER_SIZE)
yield '\0' * yielded
to_yield -= yielded
# Yield any file padding to 512 bytes that is necessary.
yield self.tar_file_padding(estimated_file_size)
# Last two records are empty in TAR spec.
yield '\0' * 512
yield '\0' * 512
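To make the padding arithmetic concrete: if the recorded uncompressed sizes sum to 10,000 bytes but the merged layer actually yields 9,300 bytes, the already-sent tar header promised 10,000, so 700 zero bytes are appended, followed by block padding up to the next 512-byte boundary (numbers illustrative):
estimated_file_size = 10000   # sum of image.storage.uncompressed_size
yielded_size = 9300           # bytes actually produced by the layer merger
zero_fill = estimated_file_size - yielded_size               # 700 bytes of '\0'
block_padding = (512 - (estimated_file_size % 512)) % 512    # 240 bytes to reach a 512-byte boundary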
@staticmethod
def _build_layer_json(layer_json, synthetic_image_id):
updated_json = copy.deepcopy(layer_json)
updated_json['id'] = synthetic_image_id
if 'parent' in updated_json:
del updated_json['parent']
if 'config' in updated_json and 'Image' in updated_json['config']:
updated_json['config']['Image'] = synthetic_image_id
if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
updated_json['container_config']['Image'] = synthetic_image_id
return updated_json


@ -0,0 +1,46 @@
import tarfile
from util.gzipwrap import GzipWrap
class TarImageFormatter(object):
""" Base class for classes which produce a TAR containing image and layer data. """
def build_stream(self, namespace, repository, tag, synthetic_image_id, layer_json,
get_image_iterator, get_layer_iterator):
""" Builds and streams a synthetic .tar.gz that represents the formatted TAR created by this
class's implementation.
"""
return GzipWrap(self.stream_generator(namespace, repository, tag,
synthetic_image_id, layer_json,
get_image_iterator, get_layer_iterator))
def stream_generator(self, namespace, repository, tag, synthetic_image_id,
layer_json, get_image_iterator, get_layer_iterator):
raise NotImplementedError
def tar_file(self, name, contents):
""" Returns the TAR binary representation for a file with the given name and file contents. """
length = len(contents)
tar_data = self.tar_file_header(name, length)
tar_data += contents
tar_data += self.tar_file_padding(length)
return tar_data
def tar_file_padding(self, length):
""" Returns TAR file padding for file data of the given length. """
if length % 512 != 0:
return '\0' * (512 - (length % 512))
return ''
def tar_file_header(self, name, file_size):
""" Returns TAR file header data for a file with the given name and size. """
info = tarfile.TarInfo(name=name)
info.type = tarfile.REGTYPE
info.size = file_size
return info.tobuf()
def tar_folder(self, name):
""" Returns TAR file header data for a folder with the given name. """
info = tarfile.TarInfo(name=name)
info.type = tarfile.DIRTYPE
return info.tobuf()


@ -69,6 +69,18 @@ module.exports = function(grunt) {
, '../static/directives/config/*.html'],
dest: '../static/dist/template-cache.js'
}
},
cachebuster: {
build: {
options: {
format: 'json',
basedir: '../static/'
},
src: [ '../static/dist/template-cache.js', '../static/dist/<%= pkg.name %>.min.js',
'../static/dist/<%= pkg.name %>.css' ],
dest: '../static/dist/cachebusters.json'
}
}
});
@ -76,7 +88,8 @@ module.exports = function(grunt) {
grunt.loadNpmTasks('grunt-contrib-concat');
grunt.loadNpmTasks('grunt-contrib-cssmin');
grunt.loadNpmTasks('grunt-angular-templates');
grunt.loadNpmTasks('grunt-cachebuster');
// Default task(s).
grunt.registerTask('default', ['ngtemplates', 'concat', 'cssmin', 'uglify']);
grunt.registerTask('default', ['ngtemplates', 'concat', 'cssmin', 'uglify', 'cachebuster']);
};


@ -6,6 +6,7 @@
"grunt-contrib-concat": "~0.4.0",
"grunt-contrib-cssmin": "~0.9.0",
"grunt-angular-templates": "~0.5.4",
"grunt-contrib-uglify": "~0.4.0"
"grunt-contrib-uglify": "~0.4.0",
"grunt-cachebuster": "~0.1.5"
}
}


@ -255,6 +255,9 @@ def initialize_database():
ImageStorageLocation.create(name='local_us')
ImageStorageTransformation.create(name='squash')
ImageStorageTransformation.create(name='aci')
ImageStorageSignatureKind.create(name='gpg2')
# NOTE: These MUST be copied over to NotificationKind, since every external
# notification can also generate a Quay.io notification.


@ -43,5 +43,6 @@ git+https://github.com/DevTable/pygithub.git
git+https://github.com/jplana/python-etcd.git
gipc
pyOpenSSL
pygpgme
cachetools
mock


@ -38,13 +38,14 @@ marisa-trie==0.7
mixpanel-py==3.2.1
mock==1.0.1
paramiko==1.15.2
peewee==2.4.5
peewee==2.4.7
psycopg2==2.5.4
py-bcrypt==0.4
pycrypto==2.6.1
python-dateutil==2.4.0
python-ldap==2.4.19
python-magic==0.4.6
pygpgme==0.3
pytz==2014.10
pyOpenSSL==0.14
raven==5.1.1


@ -7,7 +7,7 @@
</span>
<span class="spacer"></span>
Quay.io is now part of CoreOS! <a href="http://blog.devtable.com/" target="_blank">Read the blog post.</a>
Quay.io is now part of CoreOS! <a href="https://coreos.com/blog/CoreOS-enterprise-docker-registry/" target="_blank">Read the blog post.</a>
</div>
<div class="landing-background" ng-class="user.anonymous ? 'landing': 'signedin'"></div>


@ -28,11 +28,11 @@
<link rel="apple-touch-icon" sizes="152x152" href="/static/img/apple-touch-icon-152x152.png" />
<!-- /Icons -->
{% for style_path in main_styles %}
{% for style_path, cache_buster in main_styles %}
<link rel="stylesheet" href="/static/{{ style_path }}?v={{ cache_buster }}" type="text/css">
{% endfor %}
{% for style_path in library_styles %}
{% for style_path, cache_buster in library_styles %}
<link rel="stylesheet" href="/static/{{ style_path }}?v={{ cache_buster }}" type="text/css">
{% endfor %}
@ -53,7 +53,7 @@
<script src="{{ script_url }}"></script>
{% endfor %}
{% for script_path in library_scripts %}
{% for script_path, cache_buster in library_scripts %}
<script src="/static/{{ script_path }}?v={{ cache_buster }}"></script>
{% endfor %}
@ -61,7 +61,7 @@
{% endblock %}
{% for script_path in main_scripts %}
{% for script_path, cache_buster in main_scripts %}
<script src="/static/{{ script_path }}?v={{ cache_buster }}"></script>
{% endfor %}


@ -10,6 +10,10 @@
<meta id="descriptionTag" name="description" content="Hosted private docker repositories. Includes full user management and history. Free for public repositories."></meta>
<meta name="google-site-verification" content="GalDznToijTsHYmLjJvE4QaB9uk_IP16aaGDz5D75T4" />
<meta name="fragment" content="!" />
<meta name="ac-discovery" content="{{ hostname }} {{ preferred_scheme }}://{{ hostname }}/c1/aci/{name}/{version}/{ext}/{os}/{arch}/">
<meta name="ac-discovery-pubkeys" content="{{ hostname }} {{ preferred_scheme }}://{{ hostname }}/aci-signing-key">
{% endblock %}
{% block body_content %}


@ -28,7 +28,7 @@
{% block body_content %}
<div class="tos container">
<h2>CoreOS Terms of Service</h2>
<h4>Last Revised: February 2, 2015</h4>
<h4>Last Revised: February 5, 2015</h4>
<p>These Quay.io Terms of Service (these “<strong>Terms</strong>”) apply to the features and functions provided by CoreOS, Inc. (“<strong>CoreOS</strong>,” “<strong>our</strong>,” or “<strong>we</strong>”) via quay.io (the “<strong>Site</strong>”) (collectively, the “<strong>Services</strong>”). By accessing or using the Services, you agree to be bound by these Terms. If you do not agree to these Terms, do not use any of the Services. The “<strong>Effective Date</strong>” of these Terms is the date you first access any of the Services.</p>
<p>If you are accessing the Services in your capacity as an employee, consultant or agent of a company (or other entity), you represent that you are an employee, consultant or agent of such company (or other entity) and you have the authority to agree (and be legally bound) on behalf of such company (or other entity) to all of the terms and conditions of these Terms.</p>
@ -105,11 +105,11 @@
<p>Also, in accordance with the Digital Millennium Copyright Act (DMCA) and other applicable law, CoreOS has adopted a policy of terminating, in appropriate circumstances and at our discretion, account holders who are deemed to be repeat infringers. CoreOS also may, at its discretion, limit access to the Services and terminate the accounts of any users who infringe any intellectual property rights of others, whether or not there is any repeat infringement.</p>
<p>If you think that anything on the Services infringes upon any copyright that you own or control, you may file a notification with CoreOS Designated Agent as set forth below:</p>
<table border=0>
<tr><td>Designated Agent:</td><td>[insert name]</td></tr>
<tr><td>Address of Designated Agent:</td><td>[insert address]</td></tr>
<tr><td>Telephone Number of Designated Agent:</td><td>[insert telephone]</td></tr>
<tr><td>Fax Number of Designated Agent:</td><td>[insert telephone number]</td></tr>
<tr><td>Email Address of Designated Agent:</td><td>[insert email address]</td></tr>
<tr><td>Designated Agent:</td><td>DMCA Agent</td></tr>
<tr><td>Address of Designated Agent:</td><td>3043 Mission Street, San Francisco, CA 94110</td></tr>
<tr><td>Telephone Number of Designated Agent:</td><td>(800) 774-3507</td></tr>
<tr><td>Fax Number of Designated Agent:</td><td>(415) 580-7362</td></tr>
<tr><td>Email Address of Designated Agent:</td><td>support@quay.io</td></tr>
</table>
<p>Please see <a href="http://www.copyright.gov/title17/92chap5.html#512">17 U.S.C. § 512(c)(3)</a> for the requirements of a proper notification. If you knowingly misrepresent that any material or activity is infringing, you may be liable for any damages, including costs and attorneys fees, CoreOS or the alleged infringer incurs because we relied on the misrepresentation when removing or disabling access to the material or activity.</p>
</li>

Binary file not shown.


@ -2,14 +2,14 @@ set -e
up_mysql() {
# Run a SQL database on port 3306 inside of Docker.
docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql
docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql:5.7
# Sleep for 10s to let MySQL get started.
echo 'Sleeping for 10...'
sleep 10
# Add the database to mysql.
docker run --rm --link mysql:mysql mysql sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
docker run --rm --link mysql:mysql mysql:5.7 sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
}
down_mysql() {


@ -34,11 +34,11 @@ class TestStreamLayerMerger(unittest.TestCase):
def create_empty_layer(self):
return ''
def squash_layers(self, layers):
def squash_layers(self, layers, path_prefix=None):
def get_layers():
return [StringIO(layer) for layer in layers]
merger = StreamLayerMerger(get_layers)
merger = StreamLayerMerger(get_layers, path_prefix=path_prefix)
merged_data = ''.join(merger.get_generator())
return merged_data
@ -395,5 +395,57 @@ class TestStreamLayerMerger(unittest.TestCase):
except TarLayerReadException as ex:
self.assertEquals('Could not read layer', ex.message)
def test_single_layer_with_prefix(self):
tar_layer = self.create_layer(
foo = 'some_file',
bar = 'another_file',
meh = 'third_file')
squashed = self.squash_layers([tar_layer], path_prefix='foo/')
self.assertHasFile(squashed, 'foo/some_file', 'foo')
self.assertHasFile(squashed, 'foo/another_file', 'bar')
self.assertHasFile(squashed, 'foo/third_file', 'meh')
def test_multiple_layers_overwrite_with_prefix(self):
second_layer = self.create_layer(
foo = 'some_file',
bar = 'another_file',
meh = 'third_file')
first_layer = self.create_layer(
top = 'another_file')
squashed = self.squash_layers([first_layer, second_layer], path_prefix='foo/')
self.assertHasFile(squashed, 'foo/some_file', 'foo')
self.assertHasFile(squashed, 'foo/third_file', 'meh')
self.assertHasFile(squashed, 'foo/another_file', 'top')
def test_superlong_filename(self):
tar_layer = self.create_layer(
meh = 'this_is_the_filename_that_never_ends_it_goes_on_and_on_my_friend_some_people_started')
squashed = self.squash_layers([tar_layer],
path_prefix='foo/')
self.assertHasFile(squashed, 'foo/this_is_the_filename_that_never_ends_it_goes_on_and_on_my_friend_some_people_started', 'meh')
def test_superlong_prefix(self):
tar_layer = self.create_layer(
foo = 'some_file',
bar = 'another_file',
meh = 'third_file')
squashed = self.squash_layers([tar_layer],
path_prefix='foo/bar/baz/something/foo/bar/baz/anotherthing/whatever/this/is/a/really/long/filename/that/goes/here/')
self.assertHasFile(squashed, 'foo/bar/baz/something/foo/bar/baz/anotherthing/whatever/this/is/a/really/long/filename/that/goes/here/some_file', 'foo')
self.assertHasFile(squashed, 'foo/bar/baz/something/foo/bar/baz/anotherthing/whatever/this/is/a/really/long/filename/that/goes/here/another_file', 'bar')
self.assertHasFile(squashed, 'foo/bar/baz/something/foo/bar/baz/anotherthing/whatever/this/is/a/really/long/filename/that/goes/here/third_file', 'meh')
if __name__ == '__main__':
unittest.main()


@ -1,132 +0,0 @@
from util.gzipwrap import GzipWrap, GZIP_BUFFER_SIZE
from util.streamlayerformat import StreamLayerMerger
from app import app
import copy
import json
import tarfile
class FileEstimationException(Exception):
""" Exception raised by build_docker_load_stream if the estimated size of the layer TAR
was lower than the actual size. This means the sent TAR header is wrong, and we have
to fail.
"""
pass
def build_docker_load_stream(namespace, repository, tag, synthetic_image_id,
layer_json, get_image_iterator, get_layer_iterator):
""" Builds and streams a synthetic .tar.gz that represents a squashed version
of the given layers, in `docker load` V1 format.
"""
return GzipWrap(_import_format_generator(namespace, repository, tag,
synthetic_image_id, layer_json,
get_image_iterator, get_layer_iterator))
def _import_format_generator(namespace, repository, tag, synthetic_image_id,
layer_json, get_image_iterator, get_layer_iterator):
# Docker import V1 Format (.tar):
# repositories - JSON file containing a repo -> tag -> image map
# {image ID folder}:
# json - The layer JSON
# layer.tar - The TARed contents of the layer
# VERSION - The docker import version: '1.0'
layer_merger = StreamLayerMerger(get_layer_iterator)
# Yield the repositories file:
synthetic_layer_info = {}
synthetic_layer_info[tag + '.squash'] = synthetic_image_id
hostname = app.config['SERVER_HOSTNAME']
repositories = {}
repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info
yield _tar_file('repositories', json.dumps(repositories))
# Yield the image ID folder.
yield _tar_folder(synthetic_image_id)
# Yield the JSON layer data.
layer_json = _build_layer_json(layer_json, synthetic_image_id)
yield _tar_file(synthetic_image_id + '/json', json.dumps(layer_json))
# Yield the VERSION file.
yield _tar_file(synthetic_image_id + '/VERSION', '1.0')
# Yield the merged layer data's header.
estimated_file_size = 0
for image in get_image_iterator():
estimated_file_size += image.storage.uncompressed_size
yield _tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size)
# Yield the contents of the merged layer.
yielded_size = 0
for entry in layer_merger.get_generator():
yield entry
yielded_size += len(entry)
# If the yielded size is more than the estimated size (which is unlikely but possible), then
# raise an exception since the tar header will be wrong.
if yielded_size > estimated_file_size:
raise FileEstimationException()
# If the yielded size is less than the estimated size (which is likely), fill the rest with
# zeros.
if yielded_size < estimated_file_size:
to_yield = estimated_file_size - yielded_size
while to_yield > 0:
yielded = min(to_yield, GZIP_BUFFER_SIZE)
yield '\0' * yielded
to_yield -= yielded
# Yield any file padding to 512 bytes that is necessary.
yield _tar_file_padding(estimated_file_size)
# Last two records are empty in TAR spec.
yield '\0' * 512
yield '\0' * 512
def _build_layer_json(layer_json, synthetic_image_id):
updated_json = copy.deepcopy(layer_json)
updated_json['id'] = synthetic_image_id
if 'parent' in updated_json:
del updated_json['parent']
if 'config' in updated_json and 'Image' in updated_json['config']:
updated_json['config']['Image'] = synthetic_image_id
if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
updated_json['container_config']['Image'] = synthetic_image_id
return updated_json
def _tar_file(name, contents):
length = len(contents)
tar_data = _tar_file_header(name, length)
tar_data += contents
tar_data += _tar_file_padding(length)
return tar_data
def _tar_file_padding(length):
if length % 512 != 0:
return '\0' * (512 - (length % 512))
return ''
def _tar_file_header(name, file_size):
info = tarfile.TarInfo(name=name)
info.type = tarfile.REGTYPE
info.size = file_size
return info.tobuf()
def _tar_folder(name):
info = tarfile.TarInfo(name=name)
info.type = tarfile.DIRTYPE
return info.tobuf()

util/signing.py (new file, 69 lines)

@ -0,0 +1,69 @@
import gpgme
import os
from StringIO import StringIO
class GPG2Signer(object):
""" Helper class for signing data using GPG2. """
def __init__(self, app, key_directory):
if not app.config.get('GPG2_PRIVATE_KEY_NAME'):
raise Exception('Missing configuration key GPG2_PRIVATE_KEY_NAME')
if not app.config.get('GPG2_PRIVATE_KEY_FILENAME'):
raise Exception('Missing configuration key GPG2_PRIVATE_KEY_FILENAME')
if not app.config.get('GPG2_PUBLIC_KEY_FILENAME'):
raise Exception('Missing configuration key GPG2_PUBLIC_KEY_FILENAME')
self._ctx = gpgme.Context()
self._ctx.armor = True
self._private_key_name = app.config['GPG2_PRIVATE_KEY_NAME']
self._public_key_path = os.path.join(key_directory, app.config['GPG2_PUBLIC_KEY_FILENAME'])
key_file = os.path.join(key_directory, app.config['GPG2_PRIVATE_KEY_FILENAME'])
if not os.path.exists(key_file):
raise Exception('Missing key file %s' % key_file)
with open(key_file, 'rb') as fp:
self._ctx.import_(fp)
@property
def name(self):
return 'gpg2'
@property
def public_key_path(self):
return self._public_key_path
def detached_sign(self, stream):
""" Signs the given stream, returning the signature. """
ctx = self._ctx
ctx.signers = [ctx.get_key(self._private_key_name)]
signature = StringIO()
new_sigs = ctx.sign(stream, signature, gpgme.SIG_MODE_DETACH)
signature.seek(0)
return signature.getvalue()
class Signer(object):
def __init__(self, app=None, key_directory=None):
self.app = app
if app is not None:
self.state = self.init_app(app, key_directory)
else:
self.state = None
def init_app(self, app, key_directory):
preference = app.config.get('SIGNING_ENGINE', None)
if preference is None:
return None
return SIGNING_ENGINES[preference](app, key_directory)
def __getattr__(self, name):
return getattr(self.state, name, None)
SIGNING_ENGINES = {
'gpg2': GPG2Signer
}
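A hedged sketch of the configuration this module expects (the key names come from the checks in GPG2Signer; the values and file names are illustrative, and the key files live in the key_directory passed in, conf/stack/ as wired up in app.py):
# In conf/stack/config.py (illustrative values):
SIGNING_ENGINE = 'gpg2'
GPG2_PRIVATE_KEY_NAME = 'aci-signing@quay.example.com'
GPG2_PRIVATE_KEY_FILENAME = 'signing-private.gpg'
GPG2_PUBLIC_KEY_FILENAME = 'signing-public.gpg'
# Then, as in app.py:
#   signer = Signer(app, OVERRIDE_CONFIG_DIRECTORY)
#   signature = signer.detached_sign(stream)   # ASCII-armored detached signature
#   signer.name is None when SIGNING_ENGINE is not configured.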


@ -11,8 +11,8 @@ AUFS_WHITEOUT_PREFIX_LENGTH = len(AUFS_WHITEOUT)
class StreamLayerMerger(TarLayerFormat):
""" Class which creates a generator of the combined TAR data for a set of Docker layers. """
def __init__(self, layer_iterator):
super(StreamLayerMerger, self).__init__(layer_iterator)
def __init__(self, layer_iterator, path_prefix=None):
super(StreamLayerMerger, self).__init__(layer_iterator, path_prefix)
self.path_trie = marisa_trie.Trie()
self.path_encountered = []


@ -1,5 +1,6 @@
import os
import tarfile
import copy
class TarLayerReadException(Exception):
""" Exception raised when reading a layer has failed. """
@ -8,8 +9,9 @@ class TarLayerReadException(Exception):
class TarLayerFormat(object):
""" Class which creates a generator of the combined TAR data. """
def __init__(self, tar_iterator):
def __init__(self, tar_iterator, path_prefix=None):
self.tar_iterator = tar_iterator
self.path_prefix = path_prefix
def get_generator(self):
for current_tar in self.tar_iterator():
@ -36,7 +38,20 @@ class TarLayerFormat(object):
continue
# Yield the tar header.
yield tar_info.tobuf()
if self.path_prefix:
# Note: We use a copy here because we need to make sure we copy over all the internal
# data of the tar header. We cannot use frombuf(tobuf()), however, because it doesn't
# properly handle large filenames.
clone = copy.deepcopy(tar_info)
clone.name = os.path.join(self.path_prefix, clone.name)
# If the entry is a *hard* link, then prefix it as well. Soft links are relative.
if clone.linkname and clone.type == tarfile.LNKTYPE:
clone.linkname = os.path.join(self.path_prefix, clone.linkname)
yield clone.tobuf()
else:
yield tar_info.tobuf()
# Try to extract any file contents for the tar. If found, we yield them as well.
if tar_info.isreg():