Merge branch 'master' into ackbar

commit 045614c6c8
29 changed files with 920 additions and 246 deletions
@@ -1,132 +0,0 @@
from util.gzipwrap import GzipWrap, GZIP_BUFFER_SIZE
from util.streamlayerformat import StreamLayerMerger
from app import app

import copy
import json
import tarfile


class FileEstimationException(Exception):
  """ Exception raised by build_docker_load_stream if the estimated size of the layer TAR
      was lower than the actual size. This means the sent TAR header is wrong, and we have
      to fail.
  """
  pass


def build_docker_load_stream(namespace, repository, tag, synthetic_image_id,
                             layer_json, get_image_iterator, get_layer_iterator):
  """ Builds and streams a synthetic .tar.gz that represents a squashed version
      of the given layers, in `docker load` V1 format.
  """
  return GzipWrap(_import_format_generator(namespace, repository, tag,
                                           synthetic_image_id, layer_json,
                                           get_image_iterator, get_layer_iterator))


def _import_format_generator(namespace, repository, tag, synthetic_image_id,
                             layer_json, get_image_iterator, get_layer_iterator):
  # Docker import V1 Format (.tar):
  #  repositories - JSON file containing a repo -> tag -> image map
  #  {image ID folder}:
  #    json - The layer JSON
  #    layer.tar - The TARed contents of the layer
  #    VERSION - The docker import version: '1.0'
  layer_merger = StreamLayerMerger(get_layer_iterator)

  # Yield the repositories file:
  synthetic_layer_info = {}
  synthetic_layer_info[tag + '.squash'] = synthetic_image_id

  hostname = app.config['SERVER_HOSTNAME']
  repositories = {}
  repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info

  yield _tar_file('repositories', json.dumps(repositories))

  # Yield the image ID folder.
  yield _tar_folder(synthetic_image_id)

  # Yield the JSON layer data.
  layer_json = _build_layer_json(layer_json, synthetic_image_id)
  yield _tar_file(synthetic_image_id + '/json', json.dumps(layer_json))

  # Yield the VERSION file.
  yield _tar_file(synthetic_image_id + '/VERSION', '1.0')

  # Yield the merged layer data's header.
  estimated_file_size = 0
  for image in get_image_iterator():
    estimated_file_size += image.storage.uncompressed_size

  yield _tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size)

  # Yield the contents of the merged layer.
  yielded_size = 0
  for entry in layer_merger.get_generator():
    yield entry
    yielded_size += len(entry)

  # If the yielded size is more than the estimated size (which is unlikely but possible), then
  # raise an exception since the tar header will be wrong.
  if yielded_size > estimated_file_size:
    raise FileEstimationException()

  # If the yielded size is less than the estimated size (which is likely), fill the rest with
  # zeros.
  if yielded_size < estimated_file_size:
    to_yield = estimated_file_size - yielded_size
    while to_yield > 0:
      yielded = min(to_yield, GZIP_BUFFER_SIZE)
      yield '\0' * yielded
      to_yield -= yielded

  # Yield any file padding to 512 bytes that is necessary.
  yield _tar_file_padding(estimated_file_size)

  # Last two records are empty in TAR spec.
  yield '\0' * 512
  yield '\0' * 512


def _build_layer_json(layer_json, synthetic_image_id):
  updated_json = copy.deepcopy(layer_json)
  updated_json['id'] = synthetic_image_id

  if 'parent' in updated_json:
    del updated_json['parent']

  if 'config' in updated_json and 'Image' in updated_json['config']:
    updated_json['config']['Image'] = synthetic_image_id

  if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
    updated_json['container_config']['Image'] = synthetic_image_id

  return updated_json


def _tar_file(name, contents):
  length = len(contents)
  tar_data = _tar_file_header(name, length)
  tar_data += contents
  tar_data += _tar_file_padding(length)
  return tar_data


def _tar_file_padding(length):
  if length % 512 != 0:
    return '\0' * (512 - (length % 512))

  return ''


def _tar_file_header(name, file_size):
  info = tarfile.TarInfo(name=name)
  info.type = tarfile.REGTYPE
  info.size = file_size
  return info.tobuf()


def _tar_folder(name):
  info = tarfile.TarInfo(name=name)
  info.type = tarfile.DIRTYPE
  return info.tobuf()
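For context, a minimal sketch of how this stream might be consumed. It is illustrative only and not part of the change: the image/layer iterators and layer JSON are hypothetical stand-ins for whatever the calling endpoint supplies, and GzipWrap is assumed to be file-like with a read() method.

# Illustrative usage sketch, not part of the diff. Assumes the caller supplies
# the image/layer iterators and the leaf layer's JSON, and that GzipWrap
# behaves like a readable file object.
def write_squashed_image(out_path, layer_json, get_image_iterator, get_layer_iterator):
  stream = build_docker_load_stream('myorg', 'myrepo', 'latest',
                                    'ca5e' * 16,  # hypothetical 64-char synthetic image ID
                                    layer_json, get_image_iterator, get_layer_iterator)
  with open(out_path, 'wb') as out:
    while True:
      chunk = stream.read(GZIP_BUFFER_SIZE)
      if not chunk:
        break
      out.write(chunk)
  # The resulting file can then be imported with: docker load < out_path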
util/signing.py (new file, 69 lines)
@@ -0,0 +1,69 @@
import gpgme
import os

from StringIO import StringIO


class GPG2Signer(object):
  """ Helper class for signing data using GPG2. """
  def __init__(self, app, key_directory):
    if not app.config.get('GPG2_PRIVATE_KEY_NAME'):
      raise Exception('Missing configuration key GPG2_PRIVATE_KEY_NAME')

    if not app.config.get('GPG2_PRIVATE_KEY_FILENAME'):
      raise Exception('Missing configuration key GPG2_PRIVATE_KEY_FILENAME')

    if not app.config.get('GPG2_PUBLIC_KEY_FILENAME'):
      raise Exception('Missing configuration key GPG2_PUBLIC_KEY_FILENAME')

    self._ctx = gpgme.Context()
    self._ctx.armor = True
    self._private_key_name = app.config['GPG2_PRIVATE_KEY_NAME']
    self._public_key_path = os.path.join(key_directory, app.config['GPG2_PUBLIC_KEY_FILENAME'])

    key_file = os.path.join(key_directory, app.config['GPG2_PRIVATE_KEY_FILENAME'])
    if not os.path.exists(key_file):
      raise Exception('Missing key file %s' % key_file)

    with open(key_file, 'rb') as fp:
      self._ctx.import_(fp)

  @property
  def name(self):
    return 'gpg2'

  @property
  def public_key_path(self):
    return self._public_key_path

  def detached_sign(self, stream):
    """ Signs the given stream, returning the signature. """
    ctx = self._ctx
    ctx.signers = [ctx.get_key(self._private_key_name)]
    signature = StringIO()
    new_sigs = ctx.sign(stream, signature, gpgme.SIG_MODE_DETACH)

    signature.seek(0)
    return signature.getvalue()


class Signer(object):
  def __init__(self, app=None, key_directory=None):
    self.app = app
    if app is not None:
      self.state = self.init_app(app, key_directory)
    else:
      self.state = None

  def init_app(self, app, key_directory):
    preference = app.config.get('SIGNING_ENGINE', None)
    if preference is None:
      return None

    return SIGNING_ENGINES[preference](app, key_directory)

  def __getattr__(self, name):
    return getattr(self.state, name, None)


SIGNING_ENGINES = {
  'gpg2': GPG2Signer
}
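A minimal sketch of how this extension-style class might be wired up. Only SIGNING_ENGINE and the GPG2_* keys come from the code above; the key names, filenames, and directory are placeholders, and the app object is assumed to be the application's Flask app.

# Illustrative only: configuration values and paths are placeholders; 'app' is
# assumed to be the Flask application object used elsewhere in this commit.
app.config['SIGNING_ENGINE'] = 'gpg2'
app.config['GPG2_PRIVATE_KEY_NAME'] = 'autosign@example.com'
app.config['GPG2_PRIVATE_KEY_FILENAME'] = 'signing-private.gpg'
app.config['GPG2_PUBLIC_KEY_FILENAME'] = 'signing-public.gpg'

signer = Signer(app, key_directory='conf/keys')

# __getattr__ proxies to the configured engine, or returns None when no
# SIGNING_ENGINE is set, so callers can check before signing:
if signer.name is not None:
  with open('some_artifact.tar.gz', 'rb') as fp:
    signature = signer.detached_sign(fp)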
@@ -11,8 +11,8 @@ AUFS_WHITEOUT_PREFIX_LENGTH = len(AUFS_WHITEOUT)

class StreamLayerMerger(TarLayerFormat):
  """ Class which creates a generator of the combined TAR data for a set of Docker layers. """
-  def __init__(self, layer_iterator):
-    super(StreamLayerMerger, self).__init__(layer_iterator)
+  def __init__(self, layer_iterator, path_prefix=None):
+    super(StreamLayerMerger, self).__init__(layer_iterator, path_prefix)

    self.path_trie = marisa_trie.Trie()
    self.path_encountered = []
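A hedged example of what the new path_prefix argument enables. The layer file names and the 'rootfs' prefix are arbitrary illustrations, and the iterator is assumed to yield readable layer tar streams as the existing callers do.

# Illustrative only: merges a set of layer tars and places every entry of the
# combined archive under a common directory.
def layer_iterator():
  for path in ['layer-top.tar', 'layer-base.tar']:  # hypothetical layer files
    yield open(path, 'rb')

merger = StreamLayerMerger(layer_iterator, path_prefix='rootfs')
with open('combined.tar', 'wb') as out:
  for chunk in merger.get_generator():
    out.write(chunk)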
@@ -1,5 +1,6 @@
import os
import tarfile
import copy

class TarLayerReadException(Exception):
  """ Exception raised when reading a layer has failed. """
@@ -8,8 +9,9 @@ class TarLayerReadException(Exception):

class TarLayerFormat(object):
  """ Class which creates a generator of the combined TAR data. """
-  def __init__(self, tar_iterator):
+  def __init__(self, tar_iterator, path_prefix=None):
    self.tar_iterator = tar_iterator
+    self.path_prefix = path_prefix

  def get_generator(self):
    for current_tar in self.tar_iterator():
@@ -36,7 +38,20 @@ class TarLayerFormat(object):
          continue

        # Yield the tar header.
-        yield tar_info.tobuf()
+        if self.path_prefix:
+          # Note: We use a copy here because we need to make sure we copy over all the internal
+          # data of the tar header. We cannot use frombuf(tobuf()), however, because it doesn't
+          # properly handle large filenames.
+          clone = copy.deepcopy(tar_info)
+          clone.name = os.path.join(self.path_prefix, clone.name)
+
+          # If the entry is a *hard* link, then prefix it as well. Soft links are relative.
+          if clone.linkname and clone.type == tarfile.LNKTYPE:
+            clone.linkname = os.path.join(self.path_prefix, clone.linkname)
+
+          yield clone.tobuf()
+        else:
+          yield tar_info.tobuf()

        # Try to extract any file contents for the tar. If found, we yield them as well.
        if tar_info.isreg():
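The frombuf(tobuf()) caveat mentioned in the comment above can be illustrated in isolation. Under Python 2's tarfile (which this code targets, given the StringIO import elsewhere in the commit), tobuf() for an over-long name emits an extra GNU longname record, so the buffer is larger than the single 512-byte block frombuf() expects; deep-copying the header avoids that round trip. A small standalone sketch under those assumptions:

# Illustrative only: shows why the code clones the header instead of using
# frombuf(tobuf()) when a path prefix must be applied.
import copy
import os
import tarfile

info = tarfile.TarInfo(name='directory/' + ('x' * 150))  # exceeds the 100-byte ustar name field
buf = info.tobuf()  # with the default GNU format this spans more than one 512-byte block
try:
  tarfile.TarInfo.frombuf(buf)  # frombuf parses exactly one 512-byte header block
except tarfile.HeaderError as exc:
  print('round-trip via frombuf(tobuf()) fails: %r' % exc)

# Deep-copying preserves the long name and other internal header state:
clone = copy.deepcopy(info)
clone.name = os.path.join('prefix', clone.name)
assert clone.tobuf()  # still serializable after prefixing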