Add signing to the ACI converter
This commit is contained in:
parent 81b5b8d1dc
commit bfb0784abc
14 changed files with 311 additions and 90 deletions

@@ -20,7 +20,7 @@ ADD requirements.txt requirements.txt
 RUN virtualenv --distribute venv
 RUN venv/bin/pip install -r requirements.txt

-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev
+RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev gpgme

 ###############################
 # END COMMON SECTION

app.py (3 changes)

@@ -26,6 +26,7 @@ from data.archivedlogs import LogArchive
 from data.queue import WorkQueue
 from data.userevent import UserEventsBuilderModule
 from avatars.avatars import Avatar
+from util.signing import Signer

 # pylint: disable=invalid-name,too-many-public-methods,too-few-public-methods,too-many-ancestors

@@ -55,6 +56,7 @@ class Flask(BaseFlask):
     return Config(root_path, self.default_config)


+OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/'
 OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml'
 OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py'

@@ -135,6 +137,7 @@ build_logs = BuildLogs(app)
 queue_metrics = QueueMetrics(app)
 authentication = UserAuthentication(app)
 userevents = UserEventsBuilderModule(app)
+signer = Signer(app, OVERRIDE_CONFIG_DIRECTORY)

 github_login = GithubOAuthConfig(app, 'GITHUB_LOGIN_CONFIG')
 github_trigger = GithubOAuthConfig(app, 'GITHUB_TRIGGER_CONFIG')

@@ -44,7 +44,7 @@ class DefaultConfig(object):
   SEND_FILE_MAX_AGE_DEFAULT = 0
   POPULATE_DB_TEST_DATA = True
   PREFERRED_URL_SCHEME = 'http'
-  SERVER_HOSTNAME = 'localhost:5000'
+  SERVER_HOSTNAME = '10.0.2.2'

   AVATAR_KIND = 'local'

@@ -352,6 +352,24 @@ class ImageStorageTransformation(BaseModel):
   name = CharField(index=True, unique=True)


+class ImageStorageSignatureKind(BaseModel):
+  name = CharField(index=True, unique=True)
+
+
+class ImageStorageSignature(BaseModel):
+  storage = ForeignKeyField(ImageStorage, index=True)
+  kind = ForeignKeyField(ImageStorageSignatureKind)
+  signature = TextField(null=True)
+  uploading = BooleanField(default=True, null=True)
+
+  class Meta:
+    database = db
+    read_slaves = (read_slave,)
+    indexes = (
+      (('kind', 'storage'), True),
+    )
+
+
 class DerivedImageStorage(BaseModel):
   source = ForeignKeyField(ImageStorage, null=True, related_name='source')
   derivative = ForeignKeyField(ImageStorage, related_name='derivative')

@@ -550,4 +568,4 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
               Notification, ImageStorageLocation, ImageStoragePlacement,
               ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification,
               RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
-              TeamMemberInvite]
+              TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind]
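
Note on the composite index above: the True flag marks it unique in peewee, so a storage row can carry at most one signature per kind. A minimal sketch of what that enforces; the model names come from this commit, but the helper itself is hypothetical:

# Illustrative only: attach_signature is not part of the commit.
from peewee import IntegrityError
from data.database import ImageStorageSignature, ImageStorageSignatureKind

def attach_signature(storage, kind_name):
  kind = ImageStorageSignatureKind.get(name=kind_name)  # e.g. 'gpg2', seeded by initdb
  try:
    # New rows start as uploading=True with a NULL signature until the data lands.
    return ImageStorageSignature.create(storage=storage, kind=kind)
  except IntegrityError:
    # The (kind, storage) unique index rejects a second row for the same pair.
    return ImageStorageSignature.get(storage=storage, kind=kind)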

@@ -14,7 +14,8 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
                           ExternalNotificationEvent, ExternalNotificationMethod,
                           RepositoryNotification, RepositoryAuthorizedEmail, TeamMemberInvite,
                           DerivedImageStorage, ImageStorageTransformation, random_string_generator,
-                          db, BUILD_PHASE, QuayUserField)
+                          db, BUILD_PHASE, QuayUserField, ImageStorageSignature,
+                          ImageStorageSignatureKind)
 from peewee import JOIN_LEFT_OUTER, fn
 from util.validation import (validate_username, validate_email, validate_password,
                              INVALID_PASSWORD_MESSAGE)

@@ -1317,7 +1318,28 @@ def find_create_or_link_image(docker_image_id, repository, username, translation
                               ancestors='/')


-def find_or_create_derived_storage(source, transformation_name, preferred_location):
+def find_or_create_storage_signature(storage, signature_kind):
+  found = lookup_storage_signature(storage, signature_kind)
+  if found is None:
+    kind = ImageStorageSignatureKind.get(name=signature_kind)
+    found = ImageStorageSignature.create(storage=storage, kind=kind)
+
+  return found
+
+
+def lookup_storage_signature(storage, signature_kind):
+  kind = ImageStorageSignatureKind.get(name=signature_kind)
+  try:
+    return (ImageStorageSignature
+            .select()
+            .where(ImageStorageSignature.storage == storage,
+                   ImageStorageSignature.kind == kind)
+            .get())
+  except ImageStorageSignature.DoesNotExist:
+    return None
+
+
+def find_derived_storage(source, transformation_name):
   try:
     found = (ImageStorage
              .select(ImageStorage, DerivedImageStorage)

@@ -1330,11 +1352,19 @@ def find_or_create_derived_storage(source, transformation_name, preferred_locati
     found.locations = {placement.location.name for placement in found.imagestorageplacement_set}
     return found
   except ImageStorage.DoesNotExist:
-    logger.debug('Creating storage dervied from source: %s', source.uuid)
-    trans = ImageStorageTransformation.get(name=transformation_name)
-    new_storage = _create_storage(preferred_location)
-    DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
-    return new_storage
+    return None
+
+
+def find_or_create_derived_storage(source, transformation_name, preferred_location):
+  existing = find_derived_storage(source, transformation_name)
+  if existing is not None:
+    return existing
+
+  logger.debug('Creating storage derived from source: %s', source.uuid)
+  trans = ImageStorageTransformation.get(name=transformation_name)
+  new_storage = _create_storage(preferred_location)
+  DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
+  return new_storage


 def delete_derived_storage_by_uuid(storage_uuid):
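
A quick sketch of how these helpers compose; this is hypothetical driver code, with `storage` standing in for an ImageStorage row, and it mirrors what the signing worker in endpoints/verbs.py below does:

def store_signature(storage, signature_data):
  # The first call creates a pending row: uploading=True, signature NULL.
  entry = find_or_create_storage_signature(storage, 'gpg2')
  entry.signature = signature_data
  entry.uploading = False
  entry.save()
  # lookup_storage_signature(storage, 'gpg2') now returns this same row.
  return entry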

@@ -2,9 +2,9 @@ import logging
 import json
 import hashlib

-from flask import redirect, Blueprint, abort, send_file
+from flask import redirect, Blueprint, abort, send_file, make_response

-from app import app
+from app import app, signer
 from auth.auth import process_auth
 from auth.permissions import ReadRepositoryPermission
 from data import model

@@ -53,6 +53,26 @@ def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, imag
   return stream.read


+def _sign_synthetic_image(verb, linked_storage_uuid, queue_file):
+  signature = None
+  try:
+    signature = signer.detached_sign(queue_file)
+  except:
+    logger.exception('Exception when signing %s image %s', verb, linked_storage_uuid)
+    return
+
+  with database.UseThenDisconnect(app.config):
+    try:
+      derived = model.get_storage_by_uuid(linked_storage_uuid)
+    except model.InvalidImageException:
+      return
+
+    signature_entry = model.find_or_create_storage_signature(derived, signer.name)
+    signature_entry.signature = signature
+    signature_entry.uploading = False
+    signature_entry.save()
+
+
 def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_locations, queue_file):
   store = Storage(app)

@@ -76,101 +96,145 @@ def _write_synthetic_image_to_storage(verb, linked_storage_uuid, linked_location


 # pylint: disable=too-many-locals
-def _repo_verb(namespace, repository, tag, verb, formatter, checker=None, **kwargs):
+def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
   permission = ReadRepositoryPermission(namespace, repository)

   # pylint: disable=no-member
-  if permission.can() or model.repository_is_public(namespace, repository):
-    # Lookup the requested tag.
-    try:
-      tag_image = model.get_tag_image(namespace, repository, tag)
-    except model.DataModelException:
-      abort(404)
+  if not permission.can() and not model.repository_is_public(namespace, repository):
+    abort(403)
+
+  # Lookup the requested tag.
+  try:
+    tag_image = model.get_tag_image(namespace, repository, tag)
+  except model.DataModelException:
+    abort(404)
+
+  # Lookup the tag's image and storage.
+  repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
+  if not repo_image:
+    abort(404)
+
+  # If there is a data checker, call it first.
+  uuid = repo_image.storage.uuid
+  image_json = None
+
+  if checker is not None:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)
+
+    if not checker(image_json):
+      logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
+      abort(404)
+
+  return (repo_image, tag_image, image_json)

-    # Lookup the tag's image and storage.
-    repo_image = model.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
-    if not repo_image:
-      abort(404)

-    # If there is a data checker, call it first.
-    store = Storage(app)
-    uuid = repo_image.storage.uuid
-    image_json = None
-
-    if checker is not None:
-      image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
-      image_json = json.loads(image_json_data)
-
-      if not checker(image_json):
-        logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
-        abort(404)
+# pylint: disable=too-many-locals
+def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Lookup the derived image storage for the verb.
+  derived = model.find_derived_storage(repo_image.storage, verb)
+  if derived is None or derived.uploading:
+    abort(404)
+
+  # Check if we have a valid signer configured.
+  if not signer.name:
+    abort(404)
+
+  # Lookup the signature for the verb.
+  signature_entry = model.lookup_storage_signature(derived, signer.name)
+  if signature_entry is None:
+    abort(404)
+
+  # Return the signature.
+  return make_response(signature_entry.signature)

-    # Log the action.
-    track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)

-    derived = model.find_or_create_derived_storage(repo_image.storage, verb,
-                                                   store.preferred_locations[0])
+# pylint: disable=too-many-locals
+def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
+  # Verify that the image exists and that we have access to it.
+  store = Storage(app)
+  result = _verify_repo_verb(store, namespace, repository, tag, verb, checker)
+  (repo_image, tag_image, image_json) = result
+
+  # Log the action.
+  track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)
+
+  # Lookup/create the derived image storage for the verb.
+  derived = model.find_or_create_derived_storage(repo_image.storage, verb,
+                                                 store.preferred_locations[0])

-    if not derived.uploading:
-      logger.debug('Derived %s image %s exists in storage', verb, derived.uuid)
-      derived_layer_path = store.image_layer_path(derived.uuid)
-      download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
-      if download_url:
-        logger.debug('Redirecting to download URL for derived %s image %s', verb, derived.uuid)
-        return redirect(download_url)
-
-      # Close the database handle here for this process before we send the long download.
-      database.close_db_filter(None)
-
-      logger.debug('Sending cached derived %s image %s', verb, derived.uuid)
-      return send_file(store.stream_read_file(derived.locations, derived_layer_path))
+  if not derived.uploading:
+    logger.debug('Derived %s image %s exists in storage', verb, derived.uuid)
+    derived_layer_path = store.image_layer_path(derived.uuid)
+    download_url = store.get_direct_download_url(derived.locations, derived_layer_path)
+    if download_url:
+      logger.debug('Redirecting to download URL for derived %s image %s', verb, derived.uuid)
+      return redirect(download_url)
+
+    # Close the database handle here for this process before we send the long download.
+    database.close_db_filter(None)
+
+    logger.debug('Sending cached derived %s image %s', verb, derived.uuid)
+    return send_file(store.stream_read_file(derived.locations, derived_layer_path))

-    # Load the ancestry for the image.
-    logger.debug('Building and returning derived %s image %s', verb, derived.uuid)
-    ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
-    full_image_list = json.loads(ancestry_data)
+  # Load the ancestry for the image.
+  uuid = repo_image.storage.uuid
+
+  logger.debug('Building and returning derived %s image %s', verb, derived.uuid)
+  ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
+  full_image_list = json.loads(ancestry_data)

-    # Load the image's JSON layer.
-    if not image_json:
-      image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
-      image_json = json.loads(image_json_data)
+  # Load the image's JSON layer.
+  if not image_json:
+    image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
+    image_json = json.loads(image_json_data)

-    # Calculate a synthetic image ID.
-    synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest()
+  # Calculate a synthetic image ID.
+  synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest()

-    # Create a queue process to generate the data. The queue files will read from the process
-    # and send the results to the client and storage.
-    def _cleanup():
-      # Close any existing DB connection once the process has exited.
-      database.close_db_filter(None)
+  def _cleanup():
+    # Close any existing DB connection once the process has exited.
+    database.close_db_filter(None)

-    args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
-    queue_process = QueueProcess(_open_stream,
-                                 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
-                                 args, finished=_cleanup)
+  # Create a queue process to generate the data. The queue files will read from the process
+  # and send the results to the client and storage.
+  args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list)
+  queue_process = QueueProcess(_open_stream,
+                               8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
+                               args, finished=_cleanup)

-    client_queue_file = QueueFile(queue_process.create_queue(), 'client')
-    storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
+  client_queue_file = QueueFile(queue_process.create_queue(), 'client')
+  storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
+
+  # If signing is required, add a QueueFile for signing the image as we stream it out.
+  signing_queue_file = None
+  if sign and signer.name:
+    signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')

-    # Start building.
-    queue_process.run()
+  # Start building.
+  queue_process.run()

-    # Start the storage saving.
-    storage_args = (verb, derived.uuid, derived.locations, storage_queue_file)
-    QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
+  # Start the storage saving.
+  storage_args = (verb, derived.uuid, derived.locations, storage_queue_file)
+  QueueProcess.run_process(_write_synthetic_image_to_storage, storage_args, finished=_cleanup)
+
+  if sign and signer.name:
+    signing_args = (verb, derived.uuid, signing_queue_file)
+    QueueProcess.run_process(_sign_synthetic_image, signing_args, finished=_cleanup)

-    # Close the database handle here for this process before we send the long download.
-    database.close_db_filter(None)
+  # Close the database handle here for this process before we send the long download.
+  database.close_db_filter(None)

-    # Return the client's data.
-    return send_file(client_queue_file)
+  # Return the client's data.
+  return send_file(client_queue_file)

-  abort(403)

-@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
-@process_auth
-# pylint: disable=unused-argument
-def get_aci_image(server, namespace, repository, tag, os, arch):
-  def os_arch_checker(os, arch):
+
+def os_arch_checker(os, arch):
   def checker(image_json):
     # Verify the architecture and os.
     operating_system = image_json.get('os', 'linux')

@@ -183,8 +247,23 @@ def get_aci_image(server, namespace, repository, tag, os, arch):
     return True

   return checker


+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_signature(server, namespace, repository, tag, os, arch):
+  return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
+                              os=os, arch=arch)
+
+
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET'])
+@process_auth
+# pylint: disable=unused-argument
+def get_aci_image(server, namespace, repository, tag, os, arch):
   return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
-                    checker=checker, os=os, arch=arch)
+                    sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)


 @verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
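
The design here fans a single generated stream out to several consumers: the builder process pushes chunks onto one queue per consumer, and each QueueFile replays its queue as a file-like object for the client response, the storage writer, and (when sign=True) the signer. QueueProcess and QueueFile are Quay internals; the toy below only illustrates the fan-out idea with stdlib pieces and is not their implementation:

# Toy fan-out: one producer process, several independently-drained queues.
from multiprocessing import Process, Queue

def produce(queues):
  for chunk in [b'layer-1', b'layer-2', b'']:  # b'' signals EOF
    for q in queues:
      q.put(chunk)

def drain(q):
  parts = []
  while True:
    chunk = q.get()
    if not chunk:
      return b''.join(parts)
    parts.append(chunk)

if __name__ == '__main__':
  client_q, storage_q, signing_q = Queue(), Queue(), Queue()
  Process(target=produce, args=([client_q, storage_q, signing_q],)).start()
  # Each consumer sees the full stream without re-reading the source.
  assert drain(client_q) == drain(storage_q) == drain(signing_q) == b'layer-1layer-2'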

@@ -1,7 +1,7 @@
 import logging

 from flask import (abort, redirect, request, url_for, make_response, Response,
-                   Blueprint, send_from_directory, jsonify)
+                   Blueprint, send_from_directory, jsonify, send_file)

 from avatar_generator import Avatar
 from flask.ext.login import current_user

@@ -10,7 +10,7 @@ from health.healthcheck import HealthCheck

 from data import model
 from data.model.oauth import DatabaseAuthorizationProvider
-from app import app, billing as stripe, build_logs, avatar
+from app import app, billing as stripe, build_logs, avatar, signer
 from auth.auth import require_session_login, process_oauth
 from auth.permissions import AdministerOrganizationPermission, ReadRepositoryPermission
 from util.invoice import renderInvoiceToPdf

@@ -57,6 +57,14 @@ def snapshot(path = ''):
   abort(404)


+@web.route('/aci-signing-key')
+@no_cache
+def aci_signing_key():
+  if not signer.name:
+    abort(404)
+
+  return send_file(signer.public_key_path)
+
+
 @web.route('/plans/')
 @no_cache
 @route_show_if(features.BILLING)
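
Together with the verbs above, this gives clients three read-side endpoints: the ACI itself, its detached signature, and the public key. A hedged client sketch; the hostname and repository are made up, requests is an assumed dependency, and the /c1 mount point for the verbs blueprint is inferred from the ac-discovery template below:

# Illustrative client for the new endpoints.
import requests

base = 'https://quay.example.com'  # hypothetical host
name = 'quay.example.com/myorg/myrepo'  # ac-discovery {name}: hostname/namespace/repo

aci = requests.get(base + '/c1/aci/' + name + '/latest/aci/linux/amd64/')
sig = requests.get(base + '/c1/aci/' + name + '/latest/sig/linux/amd64/')
key = requests.get(base + '/aci-signing-key')

# The sig route 404s until the signing worker has saved the armored signature
# (uploading=False), so a client may need to retry shortly after a first build.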

@@ -180,8 +180,11 @@ class ACIImage(TarImageFormatter):
           "group": config.get('Group', '') or 'root',
           "eventHandlers": [],
           "workingDirectory": config.get('WorkingDir', '') or '/',
-          "environment": [{"name": key, "value": value}
-                          for (key, value) in [e.split('=') for e in config.get('Env')]],
+          # TODO(jschorr): Use the commented version once rocket has upgraded to 0.3.0.
+          #"environment": [{"name": key, "value": value}
+          #                for (key, value) in [e.split('=') for e in config.get('Env')]],
+          "environment": {key: value
+                          for (key, value) in [e.split('=') for e in config.get('Env')]},
           "isolators": ACIImage._build_isolators(config),
           "mountPoints": ACIImage._build_volumes(config),
           "ports": ACIImage._build_ports(config),
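
The TODO swaps the appc list-of-objects form for a plain map because pre-0.3.0 rocket expected the latter. For a Docker Env list the two shapes look like this (illustrative values):

# Docker image config supplies the environment as KEY=VALUE strings:
env = ['PATH=/usr/local/bin', 'LANG=C']

# appc 0.3.0+ form (the commented-out version above):
as_list = [{'name': k, 'value': v} for (k, v) in [e.split('=') for e in env]]
# => [{'name': 'PATH', 'value': '/usr/local/bin'}, {'name': 'LANG', 'value': 'C'}]

# What this commit emits for older rocket releases:
as_map = {k: v for (k, v) in [e.split('=') for e in env]}
# => {'PATH': '/usr/local/bin', 'LANG': 'C'}

# Caveat in both forms: e.split('=') breaks values that themselves contain '=',
# so e.split('=', 1) would be the safer split.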

@@ -257,6 +257,8 @@ def initialize_database():
   ImageStorageTransformation.create(name='squash')
   ImageStorageTransformation.create(name='aci')

+  ImageStorageSignatureKind.create(name='gpg2')
+
   # NOTE: These MUST be copied over to NotificationKind, since every external
   # notification can also generate a Quay.io notification.
   ExternalNotificationEvent.create(name='repo_push')

@@ -41,3 +41,4 @@ git+https://github.com/DevTable/anunidecode.git
 git+https://github.com/DevTable/avatar-generator.git
 git+https://github.com/DevTable/pygithub.git
 gipc
+pygpgme

@@ -48,6 +48,7 @@ python-dateutil==2.2
 python-ldap==2.4.18
 python-magic==0.4.6
 pytz==2014.9
+pygpgme==0.3
 raven==5.1.1
 redis==2.10.3
 reportlab==2.7

@@ -11,6 +11,8 @@
   <meta name="google-site-verification" content="GalDznToijTsHYmLjJvE4QaB9uk_IP16aaGDz5D75T4" />
   <meta name="fragment" content="!" />
+  <meta name="ac-discovery" content="{{ hostname }} {{ preferred_scheme }}://{{ hostname }}/c1/aci/{name}/{version}/{ext}/{os}/{arch}/">
+  <meta name="ac-discovery-pubkeys" content="{{ hostname }} {{ preferred_scheme }}://{{ hostname }}/aci-signing-key">

 {% endblock %}

util/signing.py (new file, 69 lines)
@@ -0,0 +1,69 @@
+import gpgme
+import os
+from StringIO import StringIO
+
+class GPG2Signer(object):
+  """ Helper class for signing data using GPG2. """
+  def __init__(self, app, key_directory):
+    if not app.config.get('GPG2_PRIVATE_KEY_NAME'):
+      raise Exception('Missing configuration key GPG2_PRIVATE_KEY_NAME')
+
+    if not app.config.get('GPG2_PRIVATE_KEY_FILENAME'):
+      raise Exception('Missing configuration key GPG2_PRIVATE_KEY_FILENAME')
+
+    if not app.config.get('GPG2_PUBLIC_KEY_FILENAME'):
+      raise Exception('Missing configuration key GPG2_PUBLIC_KEY_FILENAME')
+
+    self._ctx = gpgme.Context()
+    self._ctx.armor = True
+    self._private_key_name = app.config['GPG2_PRIVATE_KEY_NAME']
+    self._public_key_path = os.path.join(key_directory, app.config['GPG2_PUBLIC_KEY_FILENAME'])
+
+    key_file = os.path.join(key_directory, app.config['GPG2_PRIVATE_KEY_FILENAME'])
+    if not os.path.exists(key_file):
+      raise Exception('Missing key file %s' % key_file)
+
+    with open(key_file, 'rb') as fp:
+      self._ctx.import_(fp)
+
+  @property
+  def name(self):
+    return 'gpg2'
+
+  @property
+  def public_key_path(self):
+    return self._public_key_path
+
+  def detached_sign(self, stream):
+    """ Signs the given stream, returning the signature. """
+    ctx = self._ctx
+    ctx.signers = [ctx.get_key(self._private_key_name)]
+    signature = StringIO()
+    new_sigs = ctx.sign(stream, signature, gpgme.SIG_MODE_DETACH)
+
+    signature.seek(0)
+    return signature.getvalue()
+
+
+class Signer(object):
+  def __init__(self, app=None, key_directory=None):
+    self.app = app
+    if app is not None:
+      self.state = self.init_app(app, key_directory)
+    else:
+      self.state = None
+
+  def init_app(self, app, key_directory):
+    preference = app.config.get('SIGNING_ENGINE', None)
+    if preference is None:
+      return None
+
+    return SIGNING_ENGINES[preference](app, key_directory)
+
+  def __getattr__(self, name):
+    return getattr(self.state, name, None)
+
+
+SIGNING_ENGINES = {
+  'gpg2': GPG2Signer
+}
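
Note that when SIGNING_ENGINE is unset, Signer.state stays None and signer.name resolves to None through __getattr__, which is exactly what the abort(404) guards in the endpoints rely on. For the other direction, a consumer can check a detached signature with the same pygpgme bindings; a sketch under stated assumptions (the config values and key names in the comment are examples, not values from this commit):

# Example config (config.py), values illustrative:
#   SIGNING_ENGINE = 'gpg2'
#   GPG2_PRIVATE_KEY_NAME = 'aci-signing-key-id'
#   GPG2_PRIVATE_KEY_FILENAME = 'signing-private.gpg'
#   GPG2_PUBLIC_KEY_FILENAME = 'signing-public.gpg'

import gpgme
from StringIO import StringIO

def verify_detached(public_key_path, signed_data, armored_signature):
  """ Returns the gpgme signature list if the detached signature verifies. """
  ctx = gpgme.Context()
  with open(public_key_path, 'rb') as fp:
    ctx.import_(fp)
  # With both a signature and the signed text supplied, pygpgme's verify()
  # treats the signature as detached; it raises gpgme.GpgmeError on garbage.
  return ctx.verify(StringIO(armored_signature), StringIO(signed_data), None)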

@@ -44,6 +44,11 @@ class TarLayerFormat(object):
       # properly handle large filenames.
       clone = copy.deepcopy(tar_info)
       clone.name = os.path.join(self.path_prefix, clone.name)
+
+      # If the entry is a link of some kind, and it is not relative, then prefix it as well.
+      if clone.linkname and clone.type == tarfile.LNKTYPE:
+        clone.linkname = os.path.join(self.path_prefix, clone.linkname)
+
       yield clone.tobuf()
     else:
       yield tar_info.tobuf()