4ec3a6c231
This will ensure that no matter which signature we write for the generated ACI, it is correct for that image.
127 lines
4.8 KiB
Python
127 lines
4.8 KiB
Python
from app import app
|
|
from util.registry.gzipwrap import GZIP_BUFFER_SIZE
|
|
from util.registry.streamlayerformat import StreamLayerMerger
|
|
from formats.tarimageformatter import TarImageFormatter
|
|
|
|
import copy
|
|
import json
|
|
import math
|
|
import calendar
|
|
|
|
class FileEstimationException(Exception):
  """ Exception raised by SquashedDockerImage.stream_generator if the estimated size of the
      layer TAR was lower than the actual size. This means the sent TAR header is wrong, and
      we have to fail.
  """
  # Note: the docstring previously referenced `build_docker_load_stream`, a name that no
  # longer exists in this module; the raiser is stream_generator below.
  pass
|
|
|
|
|
|
class SquashedDockerImage(TarImageFormatter):
  """ Image formatter which produces a squashed image compatible with the `docker load`
      command.
  """

  # Multiplier against the image size reported by Docker to account for the TAR metadata.
  # Note: This multiplier was not formally calculated in anyway and should be adjusted overtime
  # if/when we encounter issues with it. Unfortunately, we cannot make it too large or the Docker
  # daemon dies when trying to load the entire TAR into memory.
  SIZE_MULTIPLIER = 1.2

  def stream_generator(self, namespace, repository, tag, synthetic_image_id,
                       layer_json, get_image_iterator, get_layer_iterator, get_image_json):
    """ Generator yielding the byte chunks of a single squashed-image TAR stream.

        The TAR layout produced is the Docker import V1 format (see the inline
        comment below). All layers returned by get_layer_iterator are merged into
        one synthetic layer whose size must be *estimated up front*, because the
        TAR header for layer.tar is emitted before the merged contents are known.
        If the estimate is too low a FileEstimationException is raised; if too
        high, the remainder is zero-filled.

        namespace/repository/tag: identify the repo entry written to the
          `repositories` file.
        synthetic_image_id: image ID used for the single squashed layer's folder.
        layer_json: JSON metadata of the top layer; rewritten via _build_layer_json.
        get_image_iterator: callable returning an iterator of image records
          (ordered such that the first element carries the `created` timestamp used
          for file mtimes).
        get_layer_iterator: callable passed to StreamLayerMerger to obtain the
          layer streams to merge.
        get_image_json: callable(image) -> dict, used to read the V2 'Size' field
          when no uncompressed size is recorded.

        NOTE(review): tar_file/tar_folder/tar_file_header/tar_file_padding are
        presumably inherited from TarImageFormatter (not visible in this file) —
        assumed to emit well-formed TAR records; confirm against that class.
    """
    # Default the mtime to the epoch; use the first image's created time if present.
    image_mtime = 0
    created = next(get_image_iterator()).created
    if created is not None:
      image_mtime = calendar.timegm(created.utctimetuple())

    # Docker import V1 Format (.tar):
    #  repositories - JSON file containing a repo -> tag -> image map
    #  {image ID folder}:
    #    json - The layer JSON
    #    layer.tar - The TARed contents of the layer
    #    VERSION - The docker import version: '1.0'
    layer_merger = StreamLayerMerger(get_layer_iterator)

    # Yield the repositories file:
    # The synthetic tag is suffixed with '.squash' so it does not collide with the
    # real tag of the same name.
    synthetic_layer_info = {}
    synthetic_layer_info[tag + '.squash'] = synthetic_image_id

    hostname = app.config['SERVER_HOSTNAME']
    repositories = {}
    repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info

    yield self.tar_file('repositories', json.dumps(repositories), mtime=image_mtime)

    # Yield the image ID folder.
    yield self.tar_folder(synthetic_image_id, mtime=image_mtime)

    # Yield the JSON layer data, rewritten to carry the synthetic image ID and no parent.
    layer_json = SquashedDockerImage._build_layer_json(layer_json, synthetic_image_id)
    yield self.tar_file(synthetic_image_id + '/json', json.dumps(layer_json), mtime=image_mtime)

    # Yield the VERSION file.
    yield self.tar_file(synthetic_image_id + '/VERSION', '1.0', mtime=image_mtime)

    # Yield the merged layer data's header.
    # The header must be written before the (streamed) contents, so the total size
    # of the merged layer is estimated by summing the per-image sizes.
    estimated_file_size = 0
    for image in get_image_iterator():
      # In V1 we have the actual uncompressed size, which is needed for back compat with
      # older versions of Docker.
      # In V2, we use the size given in the image JSON.
      if image.storage.uncompressed_size:
        estimated_file_size += image.storage.uncompressed_size
      else:
        # The JSON 'Size' is padded by SIZE_MULTIPLIER to account for TAR metadata
        # overhead (see the class-level comment).
        image_json = get_image_json(image)
        estimated_file_size += image_json.get('Size', 0) * SquashedDockerImage.SIZE_MULTIPLIER

    # Make sure the estimated file size is an integer number of bytes.
    estimated_file_size = int(math.ceil(estimated_file_size))

    yield self.tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size,
                               mtime=image_mtime)

    # Yield the contents of the merged layer, tracking how many bytes were actually sent.
    yielded_size = 0
    for entry in layer_merger.get_generator():
      yield entry
      yielded_size += len(entry)

    # If the yielded size is more than the estimated size (which is unlikely but possible), then
    # raise an exception since the tar header will be wrong.
    if yielded_size > estimated_file_size:
      message = "Expected %s bytes, found %s bytes" % (estimated_file_size, yielded_size)
      raise FileEstimationException(message)

    # If the yielded size is less than the estimated size (which is likely), fill the rest with
    # zeros. Chunked in GZIP_BUFFER_SIZE pieces to bound memory use.
    if yielded_size < estimated_file_size:
      to_yield = estimated_file_size - yielded_size
      while to_yield > 0:
        yielded = min(to_yield, GZIP_BUFFER_SIZE)
        yield '\0' * yielded
        to_yield -= yielded

    # Yield any file padding to 512 bytes that is necessary.
    yield self.tar_file_padding(estimated_file_size)

    # Last two records are empty in TAR spec.
    yield '\0' * 512
    yield '\0' * 512


  @staticmethod
  def _build_layer_json(layer_json, synthetic_image_id):
    """ Return a deep copy of layer_json rewritten for the synthetic squashed image.

        The copy's 'id' is replaced with synthetic_image_id, any 'parent' link is
        removed (the squashed image has no parent), and the 'Image' references in
        'config' and 'container_config' (when present) are pointed at the synthetic
        image ID as well. The input dict is not mutated.
    """
    updated_json = copy.deepcopy(layer_json)
    updated_json['id'] = synthetic_image_id

    if 'parent' in updated_json:
      del updated_json['parent']

    if 'config' in updated_json and 'Image' in updated_json['config']:
      updated_json['config']['Image'] = synthetic_image_id

    if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
      updated_json['container_config']['Image'] = synthetic_image_id

    return updated_json
|