Get squashed endpoint for docker import working

parent e3c52fa0eb
commit 9344839295
3 changed files with 4 additions and 136 deletions
@@ -44,7 +44,7 @@ application.register_blueprint(callback, url_prefix='/oauth2')
 application.register_blueprint(index, url_prefix='/v1')
 application.register_blueprint(tags, url_prefix='/v1')
 application.register_blueprint(registry, url_prefix='/v1')
-application.register_blueprint(verbs, url_prefix='/v1/repositories')
+application.register_blueprint(verbs, url_prefix='/verbs/v1/repositories')
 application.register_blueprint(api_bp, url_prefix='/api')
 application.register_blueprint(webhooks, url_prefix='/webhooks')
 application.register_blueprint(realtime, url_prefix='/realtime')
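Flask builds the final URL rule for each blueprint route by joining the blueprint's url_prefix with the route path, so moving verbs from '/v1/repositories' to '/verbs/v1/repositories' relocates every verb route under its own /verbs namespace and keeps it out of the registry blueprint's /v1 space. A minimal sketch of that composition (the route path below is hypothetical, not taken from the verbs blueprint):

from flask import Blueprint, Flask

verbs = Blueprint('verbs', __name__)

# Hypothetical route, for illustration only.
@verbs.route('/<namespace>/<repository>/<tag>/squash')
def get_squashed_tag(namespace, repository, tag):
  return 'squashed image stream would go here'

app = Flask(__name__)
app.register_blueprint(verbs, url_prefix='/verbs/v1/repositories')

# Resulting rule: /verbs/v1/repositories/<namespace>/<repository>/<tag>/squash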
@@ -11,7 +11,8 @@ from auth.permissions import ReadRepositoryPermission
 from data import model
 from endpoints.registry import set_cache_headers

-from util.dockerimportformat import build_docker_import_stream
+from util.gzipwrap import GzipWrap
+from util.streamlayerformat import StreamLayerMerger

 from werkzeug.wsgi import wrap_file

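The two added imports take over from the deleted build_docker_import_stream helper. Their implementations live in util/gzipwrap.py and util/streamlayerformat.py and are not part of this diff; judging only from how they are called below, the assumed interfaces are roughly these stubs (illustration only):

class StreamLayerMerger(object):
  """ Assumed interface: merges an ordered series of layer tars into one layer. """
  def __init__(self, get_layer_iterator):
    # get_layer_iterator is a callable returning an iterator of layer streams.
    self.get_layer_iterator = get_layer_iterator

  def get_generator(self):
    # Yields byte chunks of the single merged layer.tar.
    raise NotImplementedError


class GzipWrap(object):
  """ Assumed interface: presents a chunk generator as a readable gzip stream. """
  def __init__(self, generator):
    self.generator = generator

  def read(self, size=-1):
    raise NotImplementedError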
@@ -46,14 +47,6 @@ def get_squashed_tag(namespace, repository, tag, headers):
   ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid))
   full_image_list = json.loads(ancestry_data)

-  # Load the JSON for the image.
-  json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid))
-  layer_json = json.loads(json_data)
-
-  def get_next_image():
-    for current_image_id in full_image_list:
-      yield model.get_repo_image(namespace, repository, current_image_id)
-
   def get_next_layer():
     for current_image_id in full_image_list:
       current_image_entry = model.get_repo_image(namespace, repository, current_image_id)
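As background, the ancestry document read above is, in the V1 registry layout, a JSON array of image IDs ordered from the image itself back to its base layer, which is why iterating full_image_list visits every layer that has to be merged into the squashed tar. A tiny hypothetical example (IDs shortened):

import json

# Hypothetical ancestry contents for a three-layer image.
ancestry_data = '["f2b8a1c0", "9d4e11aa", "4a30bb07"]'

full_image_list = json.loads(ancestry_data)
for current_image_id in full_image_list:
  print(current_image_id)  # newest layer first, base layer last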
@@ -64,9 +57,7 @@ def get_squashed_tag(namespace, repository, tag, headers):
       logger.debug('Returning image layer %s: %s' % (current_image_id, current_image_path))
       yield current_image_stream

-  stream = build_docker_import_stream(namespace, repository, tag, synthetic_image_id,
-                                      layer_json, get_next_image, get_next_layer)
-
+  stream = GzipWrap(StreamLayerMerger(get_next_layer).get_generator())
   return app.response_class(wrap_file(request.environ, stream, 1024 * 16),
                             mimetype='application/octet-stream',
                             direct_passthrough=True)
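The endpoint now hands wrap_file a file-like object: StreamLayerMerger produces a generator of merged-layer chunks, and GzipWrap is expected to expose that generator through a read() method, compressing as it goes, so the WSGI server can stream the result with direct_passthrough=True and no full buffering. A self-contained sketch of that kind of wrapper, using only the standard library (an illustration of the idea, not the util.gzipwrap implementation):

import gzip
from io import BytesIO

class GzipStreamSketch(object):
  """ Exposes a generator of byte chunks as a readable, gzip-compressed stream. """
  def __init__(self, generator):
    self.generator = iter(generator)
    self.buffered = BytesIO()
    self.gzipper = gzip.GzipFile(fileobj=self.buffered, mode='wb')
    self.done = False

  def read(self, size=-1):
    # Compress chunks until enough bytes are buffered or the input is exhausted.
    while not self.done and (size < 0 or self.buffered.tell() < size):
      try:
        self.gzipper.write(next(self.generator))
      except StopIteration:
        self.gzipper.close()  # flushes the gzip trailer into the buffer
        self.done = True

    data = self.buffered.getvalue()
    self.buffered.seek(0)
    self.buffered.truncate()
    if 0 <= size < len(data):
      self.buffered.write(data[size:])  # keep the remainder for the next read
      data = data[:size]
    return data

# Usage sketch: wrap_file(request.environ, GzipStreamSketch(chunks), 1024 * 16)
# returns an iterator the WSGI server drives chunk by chunk; direct_passthrough=True
# keeps Flask from materializing the whole response in memory first.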
@@ -1,123 +0,0 @@
-from util.gzipwrap import GzipWrap
-from util.streamlayerformat import StreamLayerMerger
-
-import copy
-import json
-import tarfile
-
-from itertools import chain, islice
-
-class some_magic_adaptor(object):
-  def __init__(self, src):
-    self.src = chain.from_iterable(src)
-  def read(self, n):
-    return "".join(islice(self.src, None, n))
-
-def build_docker_import_stream(namespace, repository, tag, synthetic_image_id,
-                               layer_json, get_image_iterator, get_layer_iterator):
-  """ Builds and streams a synthetic .tar.gz that represents a squashed version
-      of the given layers, in `docker import` V1 format.
-  """
-  return some_magic_adaptor(_import_format_generator(namespace, repository, tag,
-                                                     synthetic_image_id, layer_json,
-                                                     get_image_iterator, get_layer_iterator))
-
-
-def _import_format_generator(namespace, repository, tag, synthetic_image_id,
-                             layer_json, get_image_iterator, get_layer_iterator):
-  # Docker import V1 Format (.tar):
-  #  repositories - JSON file containing a repo -> tag -> image map
-  #  {image ID folder}:
-  #     json - The layer JSON
-  #     layer.tar - The TARed contents of the layer
-  #     VERSION - The docker import version: '1.0'
-  layer_merger = StreamLayerMerger(get_layer_iterator)
-
-  # Yield the repositories file:
-  synthetic_layer_info = {}
-  synthetic_layer_info[tag + '.squash'] = synthetic_image_id
-
-  repositories = {}
-  repositories[namespace + '/' + repository] = synthetic_layer_info
-
-  yield _tar_file('repositories', json.dumps(repositories))
-
-  # Yield the image ID folder.
-  yield _tar_folder(synthetic_image_id)
-
-  # Yield the JSON layer data.
-  layer_json = _build_layer_json(layer_json, synthetic_image_id)
-  yield _tar_file(synthetic_image_id + '/json', json.dumps(layer_json))
-
-  # Yield the VERSION file.
-  yield _tar_file(synthetic_image_id + '/VERSION', '1.0')
-
-  # Yield the merged layer data's header.
-  estimated_file_size = 0
-  for image in get_image_iterator():
-    estimated_file_size += image.storage.uncompressed_size or 0
-
-  yield _tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size)
-
-  # Yield the contents of the merged layer.
-  yielded_size = 0
-  for entry in layer_merger.get_generator():
-    yield entry
-    yielded_size += len(entry)
-
-  # If the yielded size is less than the estimated size (which is likely), fill the rest with
-  # zeros.
-  if yielded_size < estimated_file_size:
-    yield '\0' * (estimated_file_size - yielded_size)
-
-  print estimated_file_size
-  print yielded_size
-
-  # Yield any file padding to 512 bytes that is necessary.
-  yield _tar_file_padding(estimated_file_size)
-
-  # Last two records are empty in TAR spec.
-  yield '\0' * 512
-  yield '\0' * 512
-
-
-def _build_layer_json(layer_json, synthetic_image_id):
-  updated_json = copy.deepcopy(layer_json)
-  updated_json['id'] = synthetic_image_id
-
-  if 'parent' in updated_json:
-    del updated_json['parent']
-
-  if 'config' in updated_json and 'Image' in updated_json['config']:
-    updated_json['config']['Image'] = synthetic_image_id
-
-  if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
-    updated_json['container_config']['Image'] = synthetic_image_id
-
-  return updated_json
-
-
-def _tar_file(name, contents):
-  length = len(contents)
-  tar_data = _tar_file_header(name, length)
-  tar_data += contents
-  tar_data += _tar_file_padding(length)
-  return tar_data
-
-
-def _tar_file_padding(length):
-  if length % 512 != 0:
-    return '\0' * (512 - (length % 512))
-
-
-def _tar_file_header(name, file_size):
-  info = tarfile.TarInfo(name=name)
-  info.type = tarfile.REGTYPE
-  info.size = file_size
-  return info.tobuf()
-
-
-def _tar_folder(name):
-  info = tarfile.TarInfo(name=name)
-  info.type = tarfile.DIRTYPE
-  return info.tobuf()
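For reference, the deleted module was hand-assembling the `docker import` V1 layout its comment block describes: a repositories JSON map, then a folder per image ID containing json, VERSION, and layer.tar, all padded to 512-byte tar records. A small standalone sketch that produces the same layout with the standard tarfile module, which handles the record padding and the two trailing zero blocks automatically (the image ID, repository name, and layer metadata below are hypothetical):

import io
import json
import tarfile

synthetic_image_id = 'deadbeef' * 8        # hypothetical 64-character image ID
layer_json = {'id': synthetic_image_id}    # hypothetical minimal layer JSON
layer_tar_bytes = b''                      # the merged layer contents would stream here

def add_file(tar, name, data):
  info = tarfile.TarInfo(name=name)
  info.size = len(data)
  tar.addfile(info, io.BytesIO(data))

output = io.BytesIO()
with tarfile.open(fileobj=output, mode='w') as tar:
  # repositories: repo -> tag -> image ID map; the '.squash' suffix mirrors the deleted code.
  repositories = {'mynamespace/myrepo': {'latest.squash': synthetic_image_id}}
  add_file(tar, 'repositories', json.dumps(repositories).encode('utf-8'))

  folder = tarfile.TarInfo(name=synthetic_image_id)
  folder.type = tarfile.DIRTYPE
  tar.addfile(folder)

  add_file(tar, synthetic_image_id + '/json', json.dumps(layer_json).encode('utf-8'))
  add_file(tar, synthetic_image_id + '/VERSION', b'1.0')
  add_file(tar, synthetic_image_id + '/layer.tar', layer_tar_bytes)

import_tar = output.getvalue()  # tar bytes laid out in the V1 import format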