initial import for Open Source 🎉

This commit is contained in:
Jimmy Zelinskie 2019-11-12 11:09:47 -05:00
parent 1898c361f3
commit 9c0dd3b722
2048 changed files with 218743 additions and 0 deletions

0
image/__init__.py Normal file
View file

242
image/appc/__init__.py Normal file
View file

@ -0,0 +1,242 @@
import json
import re
import calendar
from uuid import uuid4
from app import app
from util.registry.streamlayerformat import StreamLayerMerger
from util.dict_wrappers import JSONPathDict
from image.common import TarImageFormatter
# Matches any run of characters that is not valid in an AC name (anything other than
# lowercase ASCII letters and '-'); used to sanitize Docker paths into AC names.
ACNAME_REGEX = re.compile(r'[^a-z-]+')
class AppCImageFormatter(TarImageFormatter):
    """
    Image formatter that produces a tarball laid out according to the AppC (ACI)
    specification: a JSON `manifest` file followed by the merged `rootfs` tree.
    """

    def stream_generator(self, tag, parsed_manifest, synthetic_image_id, layer_iterator,
                         tar_stream_getter_iterator, reporter=None):
        # Use the manifest's creation time for all tar entries, defaulting to the epoch
        # when no creation time is present.
        mtime = 0
        created_dt = parsed_manifest.created_datetime
        if created_dt is not None:
            mtime = calendar.timegm(created_dt.utctimetuple())

        # ACI Format (.tar):
        #   manifest - The JSON manifest
        #   rootfs   - The root file system

        # First entry: the ACI manifest itself.
        manifest_json = json.dumps(DockerV1ToACIManifestTranslator.build_manifest(
            tag, parsed_manifest, synthetic_image_id))
        yield self.tar_file('manifest', manifest_json, mtime=mtime)

        # Remaining entries: the merged layer data, rooted under `rootfs/`.
        yield self.tar_folder('rootfs', mtime=mtime)
        merger = StreamLayerMerger(tar_stream_getter_iterator, path_prefix='rootfs/',
                                   reporter=reporter)
        for merged_entry in merger.get_generator():
            yield merged_entry
class DockerV1ToACIManifestTranslator(object):
    """ Translates Docker v1 image metadata into an AppC (ACI) image manifest dict. """

    @staticmethod
    def _build_isolators(docker_config):
        """
        Builds the ACI isolator configs from the given docker config.

        Returns a list of isolator dicts for each recognized, truthy resource key.
        """
        def _isolate_memory(memory):
            return {
                "name": "memory/limit",
                "value": {
                    "request": str(memory) + 'B',
                }
            }

        def _isolate_swap(memory):
            return {
                "name": "memory/swap",
                "value": {
                    "request": str(memory) + 'B',
                }
            }

        def _isolate_cpu(cpu):
            return {
                "name": "cpu/shares",
                "value": {
                    "request": str(cpu),
                }
            }

        def _isolate_capabilities(capabilities_set_value):
            capabilities_set = re.split(r'[\s,]', capabilities_set_value)
            return {
                "name": "os/linux/capabilities-retain-set",
                "value": {
                    "set": capabilities_set,
                }
            }

        # NOTE(review): mapping 'Cpuset' to the capabilities isolator looks suspicious;
        # confirm whether this was intended to produce a cpuset isolator instead.
        mappers = {
            'Memory': _isolate_memory,
            'MemorySwap': _isolate_swap,
            'CpuShares': _isolate_cpu,
            'Cpuset': _isolate_capabilities
        }

        isolators = []
        for config_key in mappers:
            value = docker_config.get(config_key)
            if value:
                isolators.append(mappers[config_key](value))
        return isolators

    @staticmethod
    def _build_ports(docker_config):
        """
        Builds the ports definitions for the ACI.

        Accepted formats:
          port/tcp
          port/udp
          port

        Entries whose port is not numeric are skipped.
        """
        ports = []

        exposed_ports = docker_config['ExposedPorts']
        if exposed_ports is not None:
            port_list = exposed_ports.keys()
        else:
            port_list = docker_config['Ports'] or docker_config['ports'] or []

        for docker_port in port_list:
            protocol = 'tcp'
            port_number = -1

            if '/' in docker_port:
                (port_number, protocol) = docker_port.split('/')
            else:
                port_number = docker_port

            try:
                port_number = int(port_number)
                ports.append({
                    "name": "port-%s" % port_number,
                    "port": port_number,
                    "protocol": protocol,
                })
            except ValueError:
                # Non-numeric port; ignore the entry.
                pass

        return ports

    @staticmethod
    def _ac_name(value):
        """ Sanitizes the given value into a valid AC name, falling back to a fresh UUID
        when nothing valid remains after sanitization.
        """
        sanitized = ACNAME_REGEX.sub('-', value.lower()).strip('-')
        if sanitized == '':
            return str(uuid4())
        return sanitized

    @staticmethod
    def _build_volumes(docker_config):
        """ Builds the volumes definitions for the ACI. """
        volumes = []

        def get_name(docker_volume_path):
            volume_name = DockerV1ToACIManifestTranslator._ac_name(docker_volume_path)
            return "volume-%s" % volume_name

        volume_list = docker_config['Volumes'] or docker_config['volumes'] or {}

        # Iterate the mapping directly rather than via the Python-2-only `iterkeys()`;
        # iteration over a dict yields its keys on both Python 2 and 3.
        for docker_volume_path in volume_list:
            if not docker_volume_path:
                continue

            volumes.append({
                "name": get_name(docker_volume_path),
                "path": docker_volume_path,
                "readOnly": False,
            })
        return volumes

    @staticmethod
    def build_manifest(tag, manifest, synthetic_image_id):
        """ Builds an ACI manifest dict for an existing repository image.

        `tag` is the repository tag being formatted, `manifest` is the parsed schema1
        manifest, and `synthetic_image_id` identifies the derived image.
        """
        docker_layer_data = JSONPathDict(json.loads(manifest.leaf_layer.raw_v1_metadata))
        config = docker_layer_data['config'] or JSONPathDict({})

        namespace = tag.repository.namespace_name
        repo_name = tag.repository.name
        source_url = "%s://%s/%s/%s:%s" % (app.config['PREFERRED_URL_SCHEME'],
                                           app.config['SERVER_HOSTNAME'],
                                           namespace, repo_name, tag.name)

        # ACI requires that the execution command be absolutely referenced. Therefore, if we find
        # a relative command, we give it as an argument to /bin/sh to resolve and execute for us.
        entrypoint = config['Entrypoint'] or []
        exec_path = entrypoint + (config['Cmd'] or [])
        if exec_path and not exec_path[0].startswith('/'):
            exec_path = ['/bin/sh', '-c', '""%s""' % ' '.join(exec_path)]

        # TODO: ACI doesn't support : in the name, so remove any ports.
        hostname = app.config['SERVER_HOSTNAME']
        hostname = hostname.split(':', 1)[0]

        # Calculate the environment variables. Split on the first '=' only so that
        # values which themselves contain '=' are preserved instead of being dropped.
        docker_env_vars = config.get('Env') or []
        env_vars = []
        for var in docker_env_vars:
            pieces = var.split('=', 1)
            if len(pieces) != 2:
                continue

            env_vars.append(pieces)

        # Named `aci_manifest` to avoid shadowing the `manifest` parameter.
        aci_manifest = {
            "acKind": "ImageManifest",
            "acVersion": "0.6.1",
            "name": '%s/%s/%s' % (hostname.lower(), namespace.lower(), repo_name.lower()),
            "labels": [
                {
                    "name": "version",
                    "value": tag.name,
                },
                {
                    "name": "arch",
                    "value": docker_layer_data.get('architecture') or 'amd64'
                },
                {
                    "name": "os",
                    "value": docker_layer_data.get('os') or 'linux'
                }
            ],
            "app": {
                "exec": exec_path,
                # Below, `or 'root'` is required to replace empty string from Dockerfiles.
                "user": config.get('User') or 'root',
                "group": config.get('Group') or 'root',
                "eventHandlers": [],
                "workingDirectory": config.get('WorkingDir') or '/',
                "environment": [{"name": key, "value": value} for (key, value) in env_vars],
                "isolators": DockerV1ToACIManifestTranslator._build_isolators(config),
                "mountPoints": DockerV1ToACIManifestTranslator._build_volumes(config),
                "ports": DockerV1ToACIManifestTranslator._build_ports(config),
                "annotations": [
                    {"name": "created", "value": docker_layer_data.get('created') or ''},
                    {"name": "homepage", "value": source_url},
                    {"name": "quay.io/derived-image", "value": synthetic_image_id},
                ]
            },
        }

        return aci_manifest

View file

@ -0,0 +1,86 @@
import pytest
from image.appc import DockerV1ToACIManifestTranslator
from util.dict_wrappers import JSONPathDict
# Example Docker v1 image JSON (the kind found in a schema1 `v1Compatibility` entry)
# for an etcd image; shared fixture data for the translator tests in this module.
EXAMPLE_MANIFEST_OBJ = {
    "architecture": "amd64",
    "config": {
        "Hostname": "1d811a9194c4",
        "Domainname": "",
        "User": "",
        "AttachStdin": False,
        "AttachStdout": False,
        "AttachStderr": False,
        "ExposedPorts": {
            "2379/tcp": {},
            "2380/tcp": {}
        },
        "Tty": False,
        "OpenStdin": False,
        "StdinOnce": False,
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
        ],
        "Cmd": [
            "/usr/local/bin/etcd"
        ],
        "ArgsEscaped": True,
        "Image": "sha256:4c86d1f362d42420c137846fae31667ee85ce6f2cab406cdff26a8ff8a2c31c4",
        "Volumes": None,
        "WorkingDir": "",
        "Entrypoint": None,
        "OnBuild": [],
        "Labels": {}
    },
    "container": "5a3565ce9b808a0eb0bcbc966dad624f76ad308ad24e11525b5da1201a1df135",
    "container_config": {
        "Hostname": "1d811a9194c4",
        "Domainname": "",
        "User": "",
        "AttachStdin": False,
        "AttachStdout": False,
        "AttachStderr": False,
        "ExposedPorts": {
            "2379/tcp": {},
            "2380/tcp": {}
        },
        "Tty": False,
        "OpenStdin": False,
        "StdinOnce": False,
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
        ],
        "Cmd": [
            "/bin/sh",
            "-c",
            "#(nop) CMD [\"/usr/local/bin/etcd\"]"
        ],
        "ArgsEscaped": True,
        "Image": "sha256:4c86d1f362d42420c137846fae31667ee85ce6f2cab406cdff26a8ff8a2c31c4",
        "Volumes": None,
        "WorkingDir": "",
        "Entrypoint": None,
        "OnBuild": [],
        "Labels": {}
    },
    "created": "2016-11-11T19:03:55.137387628Z",
    "docker_version": "1.11.1",
    "id": "3314a3781a526fe728e2e96cfcfb3cc0de901b5c102e6204e8b0155c8f7d5fd2",
    "os": "linux",
    "parent": "625342ec4d0f3d7a96fd3bb1ef0b4b0b6bc65ebb3d252fd33af0691f7984440e",
    "throwaway": True
}
@pytest.mark.parametrize("volume_config,expected_volumes", [
    # Both the capitalized and lowercase volume keys must be handled identically.
    ({'Volumes': None}, []),
    ({'Volumes': {}}, []),
    ({'Volumes': {'/bin': {}}}, [{'name': 'volume-bin', 'path': '/bin', 'readOnly': False}]),
    ({'volumes': None}, []),
    ({'volumes': {}}, []),
    ({'volumes': {'/bin': {}}}, [{'name': 'volume-bin', 'path': '/bin', 'readOnly': False}]),
])
def test_volume_version_easy(volume_config, expected_volumes):
    """ Verifies that Docker volume configs are translated into ACI volume definitions. """
    built = DockerV1ToACIManifestTranslator._build_volumes(JSONPathDict(volume_config))
    assert built == expected_volumes

66
image/common.py Normal file
View file

@ -0,0 +1,66 @@
import tarfile
from util.registry.gzipwrap import GzipWrap
class TarImageFormatter(object):
    """
    Base class for classes which produce a tar containing image and layer data.

    Subclasses implement `stream_generator`; the helpers below produce the raw tar
    header/data/padding chunks those generators yield.
    """

    def build_stream(self, tag, manifest, synthetic_image_id, layer_iterator,
                     tar_stream_getter_iterator, reporter=None):
        """
        Builds and streams a synthetic .tar.gz that represents the formatted tar created by this
        class's implementation.
        """
        return GzipWrap(self.stream_generator(tag, manifest, synthetic_image_id, layer_iterator,
                                              tar_stream_getter_iterator, reporter=reporter))

    def stream_generator(self, tag, manifest, synthetic_image_id, layer_iterator,
                         tar_stream_getter_iterator, reporter=None):
        """ Yields the raw tar chunks for the formatted image; implemented by subclasses. """
        raise NotImplementedError

    def tar_file(self, name, contents, mtime=None):
        """
        Returns the tar binary representation for a file with the given name and file contents:
        header, data, then padding to the next 512-byte boundary.
        """
        length = len(contents)
        tar_data = self.tar_file_header(name, length, mtime=mtime)
        tar_data += contents
        tar_data += self.tar_file_padding(length)
        return tar_data

    def tar_file_padding(self, length):
        """
        Returns tar file padding (NULs up to the next 512-byte boundary) for file data of the
        given length.
        """
        if length % 512 != 0:
            return '\0' * (512 - (length % 512))

        return ''

    def tar_file_header(self, name, file_size, mtime=None):
        """
        Returns tar file header data for a file with the given name and size.
        """
        info = tarfile.TarInfo(name=name)
        info.type = tarfile.REGTYPE
        info.size = file_size

        if mtime is not None:
            info.mtime = mtime
        return info.tobuf()

    def tar_folder(self, name, mtime=None):
        """
        Returns tar file header data for a folder with the given name.
        """
        info = tarfile.TarInfo(name=name)
        info.type = tarfile.DIRTYPE

        if mtime is not None:
            info.mtime = mtime

        # Allow the directory to be readable by non-root users. Written with the `0o`
        # prefix (same value as the legacy `0755` literal) so the file also parses
        # under Python 3, where bare leading-zero octals are a syntax error.
        info.mode = 0o755
        return info.tobuf()

10
image/docker/__init__.py Normal file
View file

@ -0,0 +1,10 @@
"""
docker implements pure data transformations according to the many Docker specifications.
"""
class DockerFormatException(Exception):
    """ Base exception for errors raised while processing Docker data formats. """
    pass
class ManifestException(DockerFormatException):
    """ Raised when a Docker manifest cannot be parsed or fails validation. """
    pass

148
image/docker/interfaces.py Normal file
View file

@ -0,0 +1,148 @@
from abc import ABCMeta, abstractproperty, abstractmethod
from six import add_metaclass
@add_metaclass(ABCMeta)
class ManifestInterface(object):
  """ Defines the interface for the various manifests types supported. """

  @abstractproperty
  def is_manifest_list(self):
    """ Returns whether this manifest is a list. """

  @abstractproperty
  def schema_version(self):
    """ The version of the schema. """

  @abstractproperty
  def digest(self):
    """ The digest of the manifest, including type prefix. """
    pass

  @abstractproperty
  def media_type(self):
    """ The media type of the schema. """
    pass

  @abstractproperty
  def manifest_dict(self):
    """ Returns the manifest as a dictionary ready to be serialized to JSON. """
    pass

  @abstractproperty
  def bytes(self):
    """ Returns the bytes of the manifest. """
    pass

  @abstractproperty
  def layers_compressed_size(self):
    """ Returns the total compressed size of all the layers in this manifest. Returns None if this
        cannot be computed locally.
    """

  @abstractmethod
  def validate(self, content_retriever):
    """ Performs validation of required assertions about the manifest. Raises a ManifestException
        on failure.
    """
    pass

  @abstractmethod
  def get_layers(self, content_retriever):
    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
        does not support layers. The layer must be of type ManifestImageLayer. """
    pass

  @abstractmethod
  def get_leaf_layer_v1_image_id(self, content_retriever):
    """ Returns the Docker V1 image ID for the leaf (top) layer, if any, or None if
        not applicable. """
    pass

  @abstractmethod
  def get_legacy_image_ids(self, content_retriever):
    """ Returns the Docker V1 image IDs for the layers of this manifest or None if not applicable.
    """
    pass

  @abstractproperty
  def blob_digests(self):
    """ Returns an iterator over all the blob digests referenced by this manifest,
        from base to leaf. The blob digests are strings with prefixes. For manifests that reference
        config as a blob, the blob will be included here as the last entry.
    """

  @abstractmethod
  def get_blob_digests_for_translation(self):
    """ Returns the blob digests for translation of this manifest into another manifest. This
        method will ignore missing IDs in layers, unlike `blob_digests`.
    """

  @abstractproperty
  def local_blob_digests(self):
    """ Returns an iterator over all the *non-remote* blob digests referenced by this manifest,
        from base to leaf. The blob digests are strings with prefixes. For manifests that reference
        config as a blob, the blob will be included here as the last entry.
    """

  @abstractmethod
  def child_manifests(self, content_retriever):
    """ Returns an iterator of all manifests that live under this manifest, if any or None if not
        applicable.
    """

  @abstractmethod
  def get_manifest_labels(self, content_retriever):
    """ Returns a dictionary of all the labels defined inside this manifest or None if this kind
        of manifest does not support labels. """
    pass

  @abstractmethod
  def get_requires_empty_layer_blob(self, content_retriever):
    """ Whether this schema requires the special empty layer blob. """
    pass

  @abstractmethod
  def unsigned(self):
    """ Returns an unsigned version of this manifest. """

  @abstractproperty
  def has_legacy_image(self):
    """ Returns True if this manifest has a legacy V1 image, or False if not. """

  @abstractmethod
  def generate_legacy_layers(self, images_map, content_retriever):
    """
    Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata, starting
    at the base layer and working towards the leaf.

    If Docker gives us a layer with a v1 image ID that already points to existing
    content, but the checksums don't match, then we need to rewrite the image ID
    to something new in order to ensure consistency.

    Returns None if there are no legacy images associated with the manifest.
    """

  @abstractmethod
  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
    """ Returns a schema1 version of the manifest. If this is a mainfest list, should return the
        manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
        If none, returns None.
    """

  @abstractmethod
  def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
                       content_retriever):
    """ Returns a version of this schema that has a media type found in the given media type set.
        If not possible, or an error occurs, returns None.
    """
@add_metaclass(ABCMeta)
class ContentRetriever(object):
  """ Defines the interface for retrieval of various content referenced by a manifest. """

  @abstractmethod
  def get_manifest_bytes_with_digest(self, digest):
    """ Returns the bytes of the manifest with the given digest or None if none found. """

  @abstractmethod
  def get_blob_bytes_with_digest(self, digest):
    """ Returns the bytes of the blob with the given digest or None if none found. """

683
image/docker/schema1.py Normal file
View file

@ -0,0 +1,683 @@
"""
schema1 implements pure data transformations according to the Docker Manifest v2.1 Specification.
https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md
"""
import hashlib
import json
import logging
from collections import namedtuple, OrderedDict
from datetime import datetime
import dateutil.parser
from jsonschema import validate as validate_schema, ValidationError
from jwkest.jws import SIGNER_ALGS, keyrep, BadSignature
from jwt.utils import base64url_encode, base64url_decode
from digest import digest_tools
from image.docker import ManifestException
from image.docker.types import ManifestImageLayer
from image.docker.interfaces import ManifestInterface
from image.docker.v1 import DockerV1Metadata
from image.docker.schemautil import to_canonical_json
from util.bytes import Bytes
logger = logging.getLogger(__name__)


# Content Types
DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v1+json'
DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v1+prettyjws'
DOCKER_SCHEMA1_CONTENT_TYPES = {DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE,
                                DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE}

# Keys for signature-related data (fields of the JWS `signatures` entries).
DOCKER_SCHEMA1_SIGNATURES_KEY = 'signatures'
DOCKER_SCHEMA1_HEADER_KEY = 'header'
DOCKER_SCHEMA1_SIGNATURE_KEY = 'signature'
DOCKER_SCHEMA1_PROTECTED_KEY = 'protected'
DOCKER_SCHEMA1_FORMAT_LENGTH_KEY = 'formatLength'
DOCKER_SCHEMA1_FORMAT_TAIL_KEY = 'formatTail'

# Keys for manifest-related data (top-level fields of the schema1 manifest body).
DOCKER_SCHEMA1_REPO_NAME_KEY = 'name'
DOCKER_SCHEMA1_REPO_TAG_KEY = 'tag'
DOCKER_SCHEMA1_ARCH_KEY = 'architecture'
DOCKER_SCHEMA1_FS_LAYERS_KEY = 'fsLayers'
DOCKER_SCHEMA1_BLOB_SUM_KEY = 'blobSum'
DOCKER_SCHEMA1_HISTORY_KEY = 'history'
DOCKER_SCHEMA1_V1_COMPAT_KEY = 'v1Compatibility'
DOCKER_SCHEMA1_SCHEMA_VER_KEY = 'schemaVersion'

# Format for time used in the protected payload.
_ISO_DATETIME_FORMAT_ZULU = '%Y-%m-%dT%H:%M:%SZ'

# The algorithm we use to sign the JWS.
_JWS_SIGNING_ALGORITHM = 'RS256'
class MalformedSchema1Manifest(ManifestException):
    """
    Raised when a manifest violates an assertion that the Docker Manifest v2.1
    Specification requires to hold.
    """
class InvalidSchema1Signature(ManifestException):
    """
    Raised when signature verification fails for a signed Docker 2.1 manifest.
    """
class Schema1Layer(namedtuple('Schema1Layer', ['digest', 'v1_metadata', 'raw_v1_metadata',
                                               'compressed_size', 'is_remote', 'urls'])):
    """
    All of the data for one layer of a manifest: the union of an fsLayers entry
    (its digest) and the matching history entry (its v1 compatibility metadata).
    """
class Schema1V1Metadata(namedtuple('Schema1V1Metadata', ['image_id', 'parent_image_id', 'created',
                                                         'comment', 'command', 'author',
                                                         'labels'])):
    """
    The fields extracted from the v1 compatibility JSON string carried by a single
    layer of a manifest.
    """
class DockerSchema1Manifest(ManifestInterface):
  """ Parses, validates, and exposes a Docker schema1 (v2.1) manifest, optionally
      verifying its JWS signatures.
  """

  # JSON schema used to check the overall structure of the manifest body before any
  # deeper parsing takes place.
  METASCHEMA = {
    'type': 'object',
    'properties': {
      DOCKER_SCHEMA1_SIGNATURES_KEY: {
        'type': 'array',
        'items': {
          'type': 'object',
          'properties': {
            DOCKER_SCHEMA1_PROTECTED_KEY: {
              'type': 'string',
            },
            DOCKER_SCHEMA1_HEADER_KEY: {
              'type': 'object',
              'properties': {
                'alg': {
                  'type': 'string',
                },
                'jwk': {
                  'type': 'object',
                },
              },
              'required': ['alg', 'jwk'],
            },
            DOCKER_SCHEMA1_SIGNATURE_KEY: {
              'type': 'string',
            },
          },
          'required': [DOCKER_SCHEMA1_PROTECTED_KEY, DOCKER_SCHEMA1_HEADER_KEY,
                       DOCKER_SCHEMA1_SIGNATURE_KEY],
        },
      },
      DOCKER_SCHEMA1_REPO_TAG_KEY: {
        'type': 'string',
      },
      DOCKER_SCHEMA1_REPO_NAME_KEY: {
        'type': 'string',
      },
      DOCKER_SCHEMA1_HISTORY_KEY: {
        'type': 'array',
        'items': {
          'type': 'object',
          'properties': {
            DOCKER_SCHEMA1_V1_COMPAT_KEY: {
              'type': 'string',
            },
          },
          'required': [DOCKER_SCHEMA1_V1_COMPAT_KEY],
        },
      },
      DOCKER_SCHEMA1_FS_LAYERS_KEY: {
        'type': 'array',
        'items': {
          'type': 'object',
          'properties': {
            DOCKER_SCHEMA1_BLOB_SUM_KEY: {
              'type': 'string',
            },
          },
          'required': [DOCKER_SCHEMA1_BLOB_SUM_KEY],
        },
      },
    },
    'required': [DOCKER_SCHEMA1_REPO_TAG_KEY,
                 DOCKER_SCHEMA1_REPO_NAME_KEY, DOCKER_SCHEMA1_FS_LAYERS_KEY,
                 DOCKER_SCHEMA1_HISTORY_KEY],
  }

  def __init__(self, manifest_bytes, validate=True):
    """ Parses the given manifest bytes; when `validate` is True, also verifies any
        JWS signatures present. Raises MalformedSchema1Manifest on bad input and
        InvalidSchema1Signature on a failed signature check.
    """
    assert isinstance(manifest_bytes, Bytes)

    # Layers are parsed lazily; see the `layers` property.
    self._layers = None
    self._bytes = manifest_bytes

    try:
      self._parsed = json.loads(manifest_bytes.as_encoded_str())
    except ValueError as ve:
      raise MalformedSchema1Manifest('malformed manifest data: %s' % ve)

    try:
      validate_schema(self._parsed, DockerSchema1Manifest.METASCHEMA)
    except ValidationError as ve:
      raise MalformedSchema1Manifest('manifest data does not match schema: %s' % ve)

    self._signatures = self._parsed.get(DOCKER_SCHEMA1_SIGNATURES_KEY)
    self._architecture = self._parsed.get(DOCKER_SCHEMA1_ARCH_KEY)

    self._tag = self._parsed[DOCKER_SCHEMA1_REPO_TAG_KEY]

    # Split `namespace/repo` into its parts; a bare repo name means an empty namespace.
    # NOTE(review): a name with more than one '/' will raise ValueError from the tuple
    # unpacking below rather than MalformedSchema1Manifest -- confirm whether intended.
    repo_name = self._parsed[DOCKER_SCHEMA1_REPO_NAME_KEY]
    repo_name_tuple = repo_name.split('/')
    if len(repo_name_tuple) > 1:
      self._namespace, self._repo_name = repo_name_tuple
    elif len(repo_name_tuple) == 1:
      self._namespace = ''
      self._repo_name = repo_name_tuple[0]
    else:
      raise MalformedSchema1Manifest('malformed repository name: %s' % repo_name)

    if validate:
      self._validate()

  def _validate(self):
    """ Verifies each JWS signature over the protected payload; raises
        InvalidSchema1Signature if any signature is bad. No-op for unsigned manifests.
    """
    if not self._signatures:
      return

    payload_str = self._payload
    for signature in self._signatures:
      # The signed input is `<protected>.<base64url(payload)>` per the JWS spec.
      bytes_to_verify = '{0}.{1}'.format(signature['protected'], base64url_encode(payload_str))
      signer = SIGNER_ALGS[signature['header']['alg']]
      key = keyrep(signature['header']['jwk'])
      gk = key.get_key()
      sig = base64url_decode(signature['signature'].encode('utf-8'))

      try:
        verified = signer.verify(bytes_to_verify, sig, gk)
      except BadSignature:
        raise InvalidSchema1Signature()

      if not verified:
        raise InvalidSchema1Signature()

  def validate(self, content_retriever):
    """ Performs validation of required assertions about the manifest. Raises a ManifestException
        on failure.
    """
    # Already validated.

  @property
  def is_signed(self):
    """ Returns whether the schema is signed. """
    return bool(self._signatures)

  @property
  def architecture(self):
    # Architecture string from the manifest body, if present.
    return self._architecture

  @property
  def is_manifest_list(self):
    # schema1 has no manifest-list form.
    return False

  @property
  def schema_version(self):
    return 1

  @property
  def content_type(self):
    # Signed manifests use the `prettyjws` content type.
    return (DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
            if self._signatures else DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE)

  @property
  def media_type(self):
    return self.content_type

  @property
  def signatures(self):
    return self._signatures

  @property
  def namespace(self):
    return self._namespace

  @property
  def repo_name(self):
    return self._repo_name

  @property
  def tag(self):
    return self._tag

  @property
  def bytes(self):
    return self._bytes

  @property
  def manifest_json(self):
    return self._parsed

  @property
  def manifest_dict(self):
    return self._parsed

  @property
  def layers_compressed_size(self):
    # Not computable locally for schema1 (layer sizes are optional in v1 metadata).
    return None

  @property
  def digest(self):
    # The digest is computed over the signed payload, not the raw bytes.
    return digest_tools.sha256_digest(self._payload)

  @property
  def image_ids(self):
    return {mdata.v1_metadata.image_id for mdata in self.layers}

  @property
  def parent_image_ids(self):
    return {mdata.v1_metadata.parent_image_id for mdata in self.layers
            if mdata.v1_metadata.parent_image_id}

  @property
  def checksums(self):
    return list({str(mdata.digest) for mdata in self.layers})

  @property
  def leaf_layer(self):
    # Layers are ordered base to leaf, so the leaf is last.
    return self.layers[-1]

  @property
  def created_datetime(self):
    """ Returns the creation time of the leaf layer as a naive datetime, or None. """
    created_datetime_str = self.leaf_layer.v1_metadata.created
    if created_datetime_str is None:
      return None

    try:
      return dateutil.parser.parse(created_datetime_str).replace(tzinfo=None)
    except:
      # parse raises different exceptions, so we cannot use a specific kind of handler here.
      return None

  @property
  def layers(self):
    # Parsed lazily and cached; ordered from base to leaf.
    if self._layers is None:
      self._layers = list(self._generate_layers())
    return self._layers

  def get_layers(self, content_retriever):
    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
        does not support layers. """
    for layer in self.layers:
      created_datetime = None
      try:
        created_datetime = dateutil.parser.parse(layer.v1_metadata.created).replace(tzinfo=None)
      except:
        # Best-effort: an unparseable created time is reported as None.
        pass

      yield ManifestImageLayer(layer_id=layer.v1_metadata.image_id,
                               compressed_size=layer.compressed_size,
                               is_remote=False,
                               urls=None,
                               command=layer.v1_metadata.command,
                               comment=layer.v1_metadata.comment,
                               author=layer.v1_metadata.author,
                               blob_digest=layer.digest,
                               created_datetime=created_datetime,
                               internal_layer=layer)

  @property
  def blob_digests(self):
    return [str(layer.digest) for layer in self.layers]

  @property
  def local_blob_digests(self):
    # schema1 has no remote layers, so all blob digests are local.
    return self.blob_digests

  def get_blob_digests_for_translation(self):
    """ Returns the blob digests for translation of this manifest into another manifest. This
        method will ignore missing IDs in layers, unlike `blob_digests`.
    """
    layers = self._generate_layers(allow_missing_ids=True)
    return [str(layer.digest) for layer in layers]

  def child_manifests(self, content_retriever):
    return None

  def get_manifest_labels(self, content_retriever):
    # Labels come from the leaf layer's v1 config.
    return self.layers[-1].v1_metadata.labels

  def get_requires_empty_layer_blob(self, content_retriever):
    return False

  def _unsigned_builder(self):
    """ Returns a builder seeded with this manifest's identity and layers. """
    builder = DockerSchema1ManifestBuilder(self._namespace, self._repo_name, self._tag,
                                           self._architecture)
    # Builder expects layers leaf-first, the reverse of our base-first ordering.
    for layer in reversed(self.layers):
      builder.add_layer(str(layer.digest), layer.raw_v1_metadata)

    return builder

  def unsigned(self):
    if self.media_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
      return self

    # Create an unsigned version of the manifest.
    return self._unsigned_builder().build()

  def with_tag_name(self, tag_name, json_web_key=None):
    """ Returns a copy of this manifest, with the tag changed to the given tag name. """
    builder = DockerSchema1ManifestBuilder(self._namespace, self._repo_name, tag_name,
                                           self._architecture)
    for layer in reversed(self.layers):
      builder.add_layer(str(layer.digest), layer.raw_v1_metadata)

    return builder.build(json_web_key)

  def _generate_layers(self, allow_missing_ids=False):
    """
    Returns a generator of objects that have the blobSum and v1Compatibility keys in them,
    starting from the base image and working toward the leaf node.
    """
    # NOTE(review): `reversed(zip(...))` requires zip to return a list, which is
    # Python 2 behavior; under Python 3 this would need `list(zip(...))`.
    for blob_sum_obj, history_obj in reversed(zip(self._parsed[DOCKER_SCHEMA1_FS_LAYERS_KEY],
                                                  self._parsed[DOCKER_SCHEMA1_HISTORY_KEY])):
      try:
        image_digest = digest_tools.Digest.parse_digest(blob_sum_obj[DOCKER_SCHEMA1_BLOB_SUM_KEY])
      except digest_tools.InvalidDigestException:
        raise MalformedSchema1Manifest('could not parse manifest digest: %s' %
                                       blob_sum_obj[DOCKER_SCHEMA1_BLOB_SUM_KEY])

      metadata_string = history_obj[DOCKER_SCHEMA1_V1_COMPAT_KEY]

      try:
        v1_metadata = json.loads(metadata_string)
      except (ValueError, TypeError):
        raise MalformedSchema1Manifest('Could not parse metadata string: %s' % metadata_string)

      container_config = v1_metadata.get('container_config') or {}
      command_list = container_config.get('Cmd', None)
      command = to_canonical_json(command_list) if command_list else None

      if not allow_missing_ids and not 'id' in v1_metadata:
        raise MalformedSchema1Manifest('id field missing from v1Compatibility JSON')

      labels = v1_metadata.get('config', {}).get('Labels', {}) or {}

      extracted = Schema1V1Metadata(image_id=v1_metadata.get('id'),
                                    parent_image_id=v1_metadata.get('parent'),
                                    created=v1_metadata.get('created'),
                                    comment=v1_metadata.get('comment'),
                                    author=v1_metadata.get('author'),
                                    command=command,
                                    labels=labels)

      compressed_size = v1_metadata.get('Size')
      yield Schema1Layer(image_digest, extracted, metadata_string, compressed_size, False, None)

  @property
  def _payload(self):
    """ Returns the bytes that are actually signed: for unsigned manifests, the raw
        bytes; for signed manifests, the head (per `formatLength`) joined with the
        decoded `formatTail` from the first signature's protected header.
    """
    if self._signatures is None:
      return self._bytes.as_encoded_str()

    byte_data = self._bytes.as_encoded_str()
    protected = str(self._signatures[0][DOCKER_SCHEMA1_PROTECTED_KEY])
    parsed_protected = json.loads(base64url_decode(protected))
    signed_content_head = byte_data[:parsed_protected[DOCKER_SCHEMA1_FORMAT_LENGTH_KEY]]
    signed_content_tail = base64url_decode(str(parsed_protected[DOCKER_SCHEMA1_FORMAT_TAIL_KEY]))
    return signed_content_head + signed_content_tail

  def generate_legacy_layers(self, images_map, content_retriever):
    return self.rewrite_invalid_image_ids(images_map)

  def get_legacy_image_ids(self, content_retriever):
    return self.legacy_image_ids

  @property
  def legacy_image_ids(self):
    return {mdata.v1_metadata.image_id for mdata in self.layers}

  @property
  def has_legacy_image(self):
    return True

  @property
  def leaf_layer_v1_image_id(self):
    return self.layers[-1].v1_metadata.image_id

  def get_leaf_layer_v1_image_id(self, content_retriever):
    return self.layers[-1].v1_metadata.image_id

  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
    """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
        If none, returns None.
    """
    # Note: schema1 *technically* supports non-amd64 architectures, but in practice these were never
    # used, so to ensure full backwards compatibility, we just always return the schema.
    return self

  def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
                       content_retriever):
    """ Returns this manifest (or its unsigned form) if its media type is allowed;
        otherwise None.
    """
    if self.media_type in allowed_mediatypes:
      return self

    unsigned = self.unsigned()
    if unsigned.media_type in allowed_mediatypes:
      return unsigned

    return None

  def rewrite_invalid_image_ids(self, images_map):
    """
    Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata.

    If Docker gives us a layer with a v1 image ID that already points to existing
    content, but the checksums don't match, then we need to rewrite the image ID
    to something new in order to ensure consistency.
    """

    # Used to synthesize a new "content addressable" image id
    digest_history = hashlib.sha256()
    has_rewritten_ids = False
    updated_id_map = {}

    for layer in self.layers:
      digest_str = str(layer.digest)
      extracted_v1_metadata = layer.v1_metadata
      working_image_id = extracted_v1_metadata.image_id

      # Update our digest_history hash for the new layer data.
      # NOTE(review): hashing str values directly is Python 2 behavior; Python 3
      # would require encoding each piece to bytes.
      digest_history.update(digest_str)
      digest_history.update("@")
      digest_history.update(layer.raw_v1_metadata.encode('utf-8'))
      digest_history.update("|")

      # Ensure that the v1 image's storage matches the V2 blob. If not, we've
      # found a data inconsistency and need to create a new layer ID for the V1
      # image, and all images that follow it in the ancestry chain.
      digest_mismatch = (extracted_v1_metadata.image_id in images_map and
                         images_map[extracted_v1_metadata.image_id].content_checksum != digest_str)
      if digest_mismatch or has_rewritten_ids:
        working_image_id = digest_history.hexdigest()
        has_rewritten_ids = True

      # Store the new docker id in the map
      updated_id_map[extracted_v1_metadata.image_id] = working_image_id

      # Lookup the parent image for the layer, if any.
      parent_image_id = extracted_v1_metadata.parent_image_id
      if parent_image_id is not None:
        parent_image_id = updated_id_map.get(parent_image_id, parent_image_id)

      # Synthesize and store the v1 metadata in the db.
      v1_metadata_json = layer.raw_v1_metadata
      if has_rewritten_ids:
        v1_metadata_json = _updated_v1_metadata(v1_metadata_json, updated_id_map)

      updated_image = DockerV1Metadata(
        namespace_name=self.namespace,
        repo_name=self.repo_name,
        image_id=working_image_id,
        created=extracted_v1_metadata.created,
        comment=extracted_v1_metadata.comment,
        author=extracted_v1_metadata.author,
        command=extracted_v1_metadata.command,
        compat_json=v1_metadata_json,
        parent_image_id=parent_image_id,
        checksum=None,  # TODO: Check if we need this.
        content_checksum=digest_str,
      )

      yield updated_image
class DockerSchema1ManifestBuilder(object):
  """
  A convenient abstraction around creating new DockerSchema1Manifests.
  """
  def __init__(self, namespace_name, repo_name, tag, architecture='amd64'):
    """ Initializes a builder for a schema 1 manifest of
        `namespace_name/repo_name:tag` for the given architecture.
    """
    # Schema 1 stores the repository as `namespace/repo`, except for
    # empty-namespace (top-level) repositories, which use the bare repo name.
    repo_name_key = '{0}/{1}'.format(namespace_name, repo_name)
    if namespace_name == '':
      repo_name_key = repo_name

    self._base_payload = {
      DOCKER_SCHEMA1_REPO_TAG_KEY: tag,
      DOCKER_SCHEMA1_REPO_NAME_KEY: repo_name_key,
      DOCKER_SCHEMA1_ARCH_KEY: architecture,
      DOCKER_SCHEMA1_SCHEMA_VER_KEY: 1,
    }

    # Layers and history entries are appended in lockstep by add_layer.
    self._fs_layer_digests = []
    self._history = []

    # Kept so with_metadata_removed can construct a fresh builder.
    self._namespace_name = namespace_name
    self._repo_name = repo_name
    self._tag = tag
    self._architecture = architecture

  def add_layer(self, layer_digest, v1_json_metadata):
    """ Adds a layer to the manifest: its blob digest plus its v1 compatibility
        JSON (an empty JSON object is substituted when None). Returns `self`
        to allow chaining.
    """
    self._fs_layer_digests.append({
      DOCKER_SCHEMA1_BLOB_SUM_KEY: layer_digest,
    })
    self._history.append({
      DOCKER_SCHEMA1_V1_COMPAT_KEY: v1_json_metadata or '{}',
    })
    return self

  def with_metadata_removed(self):
    """ Returns a copy of the builder where every layer but the leaf layer has
    its metadata stripped down to the bare essentials.

    Raises MalformedSchema1Manifest if an existing history entry cannot be
    parsed as JSON.
    """
    builder = DockerSchema1ManifestBuilder(self._namespace_name, self._repo_name, self._tag,
                                           self._architecture)

    for index, fs_layer in enumerate(self._fs_layer_digests):
      try:
        metadata = json.loads(self._history[index][DOCKER_SCHEMA1_V1_COMPAT_KEY])
      except (ValueError, TypeError):
        logger.exception('Could not parse existing builder')
        raise MalformedSchema1Manifest

      fixed_metadata = {}
      if index == 0:  # Leaf layer is at index 0 in schema 1.
        fixed_metadata = metadata
      else:
        # Remove all container config from the metadata, keeping only the
        # identity/ancestry fields and a handful of display fields.
        fixed_metadata['id'] = metadata['id']
        if 'parent' in metadata:
          fixed_metadata['parent'] = metadata['parent']

        if 'created' in metadata:
          fixed_metadata['created'] = metadata['created']

        if 'author' in metadata:
          fixed_metadata['author'] = metadata['author']

        if 'comment' in metadata:
          fixed_metadata['comment'] = metadata['comment']

        if 'Size' in metadata:
          fixed_metadata['Size'] = metadata['Size']

        if 'Cmd' in metadata.get('container_config', {}):
          fixed_metadata['container_config'] = {
            'Cmd': metadata['container_config']['Cmd'],
          }

      builder.add_layer(fs_layer[DOCKER_SCHEMA1_BLOB_SUM_KEY], json.dumps(fixed_metadata))

    return builder

  def build(self, json_web_key=None, ensure_ascii=True):
    """
    Builds a DockerSchema1Manifest object, with optional signature.

    If `json_web_key` is None, the unsigned payload is returned; otherwise a
    JWS signature block is computed over the payload and embedded under the
    schema 1 `signatures` key.
    """
    payload = OrderedDict(self._base_payload)
    payload.update({
      DOCKER_SCHEMA1_HISTORY_KEY: self._history,
      DOCKER_SCHEMA1_FS_LAYERS_KEY: self._fs_layer_digests,
    })

    payload_str = json.dumps(payload, indent=3, ensure_ascii=ensure_ascii)
    if json_web_key is None:
      return DockerSchema1Manifest(Bytes.for_string_or_unicode(payload_str))

    payload_str = Bytes.for_string_or_unicode(payload_str).as_encoded_str()

    # The protected header records the tail of the serialized JSON (from the
    # last `\n}`) and the length of the head, so a verifier can reconstruct
    # the exact bytes that were signed.
    split_point = payload_str.rfind('\n}')

    protected_payload = {
      'formatTail': base64url_encode(payload_str[split_point:]),
      'formatLength': split_point,
      'time': datetime.utcnow().strftime(_ISO_DATETIME_FORMAT_ZULU),
    }
    protected = base64url_encode(json.dumps(protected_payload, ensure_ascii=ensure_ascii))
    logger.debug('Generated protected block: %s', protected)

    bytes_to_sign = '{0}.{1}'.format(protected, base64url_encode(payload_str))

    signer = SIGNER_ALGS[_JWS_SIGNING_ALGORITHM]
    signature = base64url_encode(signer.sign(bytes_to_sign, json_web_key.get_key()))
    logger.debug('Generated signature: %s', signature)

    # Embed only the public members of the signing key in the manifest.
    public_members = set(json_web_key.public_members)
    public_key = {comp: value for comp, value in json_web_key.to_dict().items()
                  if comp in public_members}

    signature_block = {
      DOCKER_SCHEMA1_HEADER_KEY: {'jwk': public_key, 'alg': _JWS_SIGNING_ALGORITHM},
      DOCKER_SCHEMA1_SIGNATURE_KEY: signature,
      DOCKER_SCHEMA1_PROTECTED_KEY: protected,
    }

    logger.debug('Encoded signature block: %s', json.dumps(signature_block))
    payload.update({DOCKER_SCHEMA1_SIGNATURES_KEY: [signature_block]})

    json_str = json.dumps(payload, indent=3, ensure_ascii=ensure_ascii)
    return DockerSchema1Manifest(Bytes.for_string_or_unicode(json_str))
def _updated_v1_metadata(v1_metadata_json, updated_id_map):
  """
  Updates v1_metadata with new image IDs.

  Args:
    v1_metadata_json: raw v1 compatibility JSON string for a layer.
    updated_id_map: dict mapping old image IDs to their rewritten IDs. The
      blob's own `id` must be present in the map.

  Returns:
    The canonicalized JSON string with `id`, `parent` and
    `container_config.Image` remapped where applicable.
  """
  parsed = json.loads(v1_metadata_json)
  parsed['id'] = updated_id_map[parsed['id']]

  if parsed.get('parent') and parsed['parent'] in updated_id_map:
    parsed['parent'] = updated_id_map[parsed['parent']]

  if parsed.get('container_config', {}).get('Image'):
    existing_image = parsed['container_config']['Image']
    if existing_image in updated_id_map:
      # Bug fix: write back under the same capitalized 'Image' key that was
      # read above; previously this stored the rewritten ID under a new
      # lowercase 'image' key, leaving the stale ID in place.
      parsed['container_config']['Image'] = updated_id_map[existing_image]

  return to_canonical_json(parsed)

View file

@ -0,0 +1,30 @@
"""
schema2 implements pure data transformations according to the Docker Manifest v2.2 Specification.
https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
"""
# Content Types
DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v2+json'
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.list.v2+json'
DOCKER_SCHEMA2_LAYER_CONTENT_TYPE = 'application/vnd.docker.image.rootfs.diff.tar.gzip'
DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE = 'application/vnd.docker.image.rootfs.foreign.diff.tar.gzip'
DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE = 'application/vnd.docker.container.image.v1+json'
OCI_MANIFEST_CONTENT_TYPE = 'application/vnd.oci.image.manifest.v1+json'
OCI_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.oci.image.index.v1+json'
DOCKER_SCHEMA2_CONTENT_TYPES = {DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE}
OCI_CONTENT_TYPES = {OCI_MANIFEST_CONTENT_TYPE, OCI_MANIFESTLIST_CONTENT_TYPE}
# The magical digest to be used for "empty" layers.
# https://github.com/docker/distribution/blob/749f6afb4572201e3c37325d0ffedb6f32be8950/manifest/schema1/config_builder.go#L22
EMPTY_LAYER_BLOB_DIGEST = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4'
EMPTY_LAYER_SIZE = 32
EMPTY_LAYER_BYTES = "".join(map(chr, [
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
]))

View file

@ -0,0 +1,275 @@
"""
Implements validation and conversion for the Schema2 config JSON.
Example:
{
"architecture": "amd64",
"config": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"HTTP_PROXY=http:\/\/localhost:8080",
"http_proxy=http:\/\/localhost:8080",
"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
],
"Cmd": [
"sh"
],
"Image": "",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
}
},
"container": "b7a43694b435c8e9932615643f61f975a9213e453b15cd6c2a386f144a2d2de9",
"container_config": {
"Hostname": "b7a43694b435",
"Domainname": "",
"User": "",
"AttachStdin": true,
"AttachStdout": true,
"AttachStderr": true,
"Tty": true,
"OpenStdin": true,
"StdinOnce": true,
"Env": [
"HTTP_PROXY=http:\/\/localhost:8080",
"http_proxy=http:\/\/localhost:8080",
"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
],
"Cmd": [
"sh"
],
"Image": "somenamespace\/somerepo",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
}
},
"created": "2018-04-16T10:41:19.079522722Z",
"docker_version": "17.09.0-ce",
"history": [
{
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "\/bin\/sh -c #(nop) ADD file:9e4ca21cbd24dc05b454b6be21c7c639216ae66559b21ba24af0d665c62620dc in \/ "
},
{
"created": "2018-04-03T18:37:09.613317719Z",
"created_by": "\/bin\/sh -c #(nop) CMD [\"sh\"]",
"empty_layer": true
},
{
"created": "2018-04-16T10:37:44.418262777Z",
"created_by": "sh"
},
{
"created": "2018-04-16T10:41:19.079522722Z",
"created_by": "sh"
}
],
"os": "linux",
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:3e596351c689c8827a3c9635bc1083cff17fa4a174f84f0584bd0ae6f384195b",
"sha256:4552be273c71275a88de0b8c8853dcac18cb74d5790f5383d9b38d4ac55062d5",
"sha256:1319c76152ca37fbeb7fb71e0ffa7239bc19ffbe3b95c00417ece39d89d06e6e"
]
}
}
"""
import copy
import json
import hashlib
from collections import namedtuple
from jsonschema import validate as validate_schema, ValidationError
from dateutil.parser import parse as parse_date
from digest import digest_tools
from image.docker import ManifestException
from util.bytes import Bytes
# Key names used inside a schema 2 config JSON blob.
DOCKER_SCHEMA2_CONFIG_HISTORY_KEY = "history"
DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY = "rootfs"
DOCKER_SCHEMA2_CONFIG_CREATED_KEY = "created"
DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY = "created_by"
DOCKER_SCHEMA2_CONFIG_COMMENT_KEY = "comment"
DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY = "author"
DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY = "empty_layer"
DOCKER_SCHEMA2_CONFIG_TYPE_KEY = "type"

# One parsed entry of the config's `history` array, as yielded by
# DockerSchema2Config.history. `raw_entry` is the original dict.
LayerHistory = namedtuple('LayerHistory', ['created', 'created_datetime', 'command', 'is_empty',
                                           'author', 'comment', 'raw_entry'])
class MalformedSchema2Config(ManifestException):
  """ Error raised when a config blob violates an invariant required by the
      Docker Manifest v2.2 Config Specification.
  """
class DockerSchema2Config(object):
  """ Parsed, validated representation of a schema 2 image configuration blob,
      with helpers for converting its history into v1 compatibility metadata.
  """

  # JSON schema against which the raw config blob is validated on construction.
  METASCHEMA = {
    'type': 'object',
    'description': 'The container configuration found in a schema 2 manifest',
    'required': [DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY],
    'properties': {
      DOCKER_SCHEMA2_CONFIG_HISTORY_KEY: {
        'type': 'array',
        'description': 'The history used to create the container image',
        'items': {
          'type': 'object',
          'properties': {
            DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY: {
              'type': 'boolean',
              'description': 'If present, this layer is empty',
            },
            DOCKER_SCHEMA2_CONFIG_CREATED_KEY: {
              'type': 'string',
              'description': 'The date/time that the layer was created',
              'format': 'date-time',
              'x-example': '2018-04-03T18:37:09.284840891Z',
            },
            DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY: {
              'type': 'string',
              'description': 'The command used to create the layer',
              'x-example': '\/bin\/sh -c #(nop) ADD file:somesha in /',
            },
            DOCKER_SCHEMA2_CONFIG_COMMENT_KEY: {
              'type': 'string',
              'description': 'Comment describing the layer',
            },
            DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY: {
              'type': 'string',
              'description': 'The author of the layer',
            },
          },
          'additionalProperties': True,
        },
      },
      DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY: {
        'type': 'object',
        'description': 'Describes the root filesystem for this image',
        'properties': {
          DOCKER_SCHEMA2_CONFIG_TYPE_KEY: {
            'type': 'string',
            'description': 'The type of the root file system entries',
          },
        },
        'required': [DOCKER_SCHEMA2_CONFIG_TYPE_KEY],
        'additionalProperties': True,
      },
    },
    'additionalProperties': True,
  }

  def __init__(self, config_bytes):
    """ Parses and validates the given config blob.
        Raises MalformedSchema2Config if the blob is not valid JSON or does
        not match METASCHEMA.
    """
    assert isinstance(config_bytes, Bytes)

    self._config_bytes = config_bytes

    try:
      self._parsed = json.loads(config_bytes.as_unicode())
    except ValueError as ve:
      raise MalformedSchema2Config('malformed config data: %s' % ve)

    try:
      validate_schema(self._parsed, DockerSchema2Config.METASCHEMA)
    except ValidationError as ve:
      raise MalformedSchema2Config('config data does not match schema: %s' % ve)

  @property
  def digest(self):
    """ Returns the digest of this config object. """
    return digest_tools.sha256_digest(self._config_bytes.as_encoded_str())

  @property
  def size(self):
    """ Returns the size of this config object. """
    return len(self._config_bytes.as_encoded_str())

  @property
  def bytes(self):
    """ Returns the bytes of this config object. """
    return self._config_bytes

  @property
  def labels(self):
    """ Returns a dictionary of all the labels defined in this configuration. """
    # `Labels` may be present-but-null, hence the trailing `or {}`.
    return self._parsed.get('config', {}).get('Labels', {}) or {}

  @property
  def has_empty_layer(self):
    """ Returns whether this config contains an empty layer. """
    for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
      if history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False):
        return True

    return False

  @property
  def history(self):
    """ Returns the history of the image, started at the base layer.
        Yields one LayerHistory per entry in the config's `history` array.
    """
    for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
      # NOTE(review): `created` is optional per METASCHEMA but is indexed
      # directly here; an entry without it would raise KeyError — confirm all
      # producers always include `created`.
      created_datetime = parse_date(history_entry[DOCKER_SCHEMA2_CONFIG_CREATED_KEY])
      yield LayerHistory(created_datetime=created_datetime,
                         created=history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_KEY),
                         command=history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY),
                         author=history_entry.get(DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY),
                         comment=history_entry.get(DOCKER_SCHEMA2_CONFIG_COMMENT_KEY),
                         is_empty=history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False),
                         raw_entry=history_entry)

  def build_v1_compatibility(self, history, v1_id, v1_parent_id, is_leaf, compressed_size=None):
    """ Builds the V1 compatibility block for the given layer.

        Args:
          history: the LayerHistory entry for the layer.
          v1_id: the synthesized v1 image ID for the layer.
          v1_parent_id: the synthesized v1 ID of the parent layer, or None.
          is_leaf: whether this is the leaf (topmost) layer.
          compressed_size: optional compressed blob size to record as `Size`.
    """
    # If the layer is the leaf, it gets the full config (minus 2 fields). Otherwise, it gets only
    # IDs.
    v1_compatibility = copy.deepcopy(self._parsed) if is_leaf else {}
    v1_compatibility['id'] = v1_id
    if v1_parent_id is not None:
      v1_compatibility['parent'] = v1_parent_id

    # Fill display fields from the history entry unless the (leaf) config
    # already provides them.
    if 'created' not in v1_compatibility and history.created:
      v1_compatibility['created'] = history.created

    if 'author' not in v1_compatibility and history.author:
      v1_compatibility['author'] = history.author

    if 'comment' not in v1_compatibility and history.comment:
      v1_compatibility['comment'] = history.comment

    if 'throwaway' not in v1_compatibility and history.is_empty:
      v1_compatibility['throwaway'] = True

    if 'container_config' not in v1_compatibility:
      v1_compatibility['container_config'] = {
        'Cmd': [history.command],
      }

    if compressed_size is not None:
      v1_compatibility['Size'] = compressed_size

    # The history and rootfs keys are schema2-config specific.
    v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, None)
    v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY, None)
    return v1_compatibility

View file

@ -0,0 +1,379 @@
import logging
import json
from cachetools.func import lru_cache
from jsonschema import validate as validate_schema, ValidationError
from digest import digest_tools
from image.docker import ManifestException
from image.docker.interfaces import ManifestInterface
from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
from image.docker.schema1 import DockerSchema1Manifest
from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
from image.docker.schema2.manifest import DockerSchema2Manifest
from util.bytes import Bytes
logger = logging.getLogger(__name__)
# Keys used in the schema 2 manifest list JSON document and its
# per-manifest `platform` objects.
DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY = 'schemaVersion'
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY = 'mediaType'
DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY = 'size'
DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY = 'digest'
DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY = 'manifests'
DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY = 'platform'
DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY = 'architecture'
DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY = 'os'
DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY = 'os.version'
DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY = 'os.features'
DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY = 'features'
DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY = 'variant'
class MalformedSchema2ManifestList(ManifestException):
  """ Error raised when a manifest list violates an invariant required by the
      Docker Manifest v2.2 Specification.
  """
class MismatchManifestException(MalformedSchema2ManifestList):
  """ Error raised when the architecture declared inside an embedded schema 1
      manifest disagrees with the architecture listed for it in its manifest
      list entry.
  """
class LazyManifestLoader(object):
  """ Wrapper around a manifest-list entry that retrieves and parses the
      referenced child manifest from storage only on first access.
  """
  def __init__(self, manifest_data, content_retriever):
    # manifest_data: one entry of the manifest list's `manifests` array.
    # content_retriever: provides get_manifest_bytes_with_digest(digest).
    self._manifest_data = manifest_data
    self._content_retriever = content_retriever
    self._loaded_manifest = None

  @property
  def manifest_obj(self):
    """ The parsed child manifest; loaded lazily and cached thereafter. """
    if self._loaded_manifest is not None:
      return self._loaded_manifest

    self._loaded_manifest = self._load_manifest()
    return self._loaded_manifest

  def _load_manifest(self):
    """ Retrieves the manifest bytes by digest, checks the declared size and
        parses according to the declared media type.

        Raises MalformedSchema2ManifestList if the manifest is missing, its
        size mismatches, or its content type is unknown.
    """
    digest = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
    size = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY]
    manifest_bytes = self._content_retriever.get_manifest_bytes_with_digest(digest)
    if manifest_bytes is None:
      raise MalformedSchema2ManifestList('Could not find child manifest with digest `%s`' % digest)

    if len(manifest_bytes) != size:
      # Bug fix: the sizes were previously passed as extra positional
      # exception arguments (logging-style) instead of being interpolated
      # into the message.
      raise MalformedSchema2ManifestList('Size of manifest does not match that retrieved: %s vs %s' %
                                         (len(manifest_bytes), size))

    content_type = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]
    if content_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
      return DockerSchema2Manifest(Bytes.for_string_or_unicode(manifest_bytes))

    if content_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
      # Schema 1 children embedded in a list are not re-validated here.
      return DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=False)

    raise MalformedSchema2ManifestList('Unknown manifest content type')
class DockerSchema2ManifestList(ManifestInterface):
  """ Parsed, validated representation of a Docker schema 2.2 manifest list
      (an index of per-platform child manifests referenced by digest).
  """

  # JSON schema against which the raw manifest list is validated on construction.
  METASCHEMA = {
    'type': 'object',
    'properties': {
      DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: {
        'type': 'number',
        'description': 'The version of the manifest list. Must always be `2`.',
        'minimum': 2,
        'maximum': 2,
      },
      DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {
        'type': 'string',
        'description': 'The media type of the manifest list.',
        'enum': [DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE],
      },
      DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: {
        'type': 'array',
        'description': 'The manifests field contains a list of manifests for specific platforms',
        'items': {
          'type': 'object',
          'properties': {
            DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {
              'type': 'string',
              'description': 'The MIME type of the referenced object. This will generally be ' +
                             'application/vnd.docker.distribution.manifest.v2+json, but it ' +
                             'could also be application/vnd.docker.distribution.manifest.v1+json ' +
                             'if the manifest list references a legacy schema-1 manifest.',
              'enum': [DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE],
            },
            DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: {
              'type': 'number',
              'description': 'The size in bytes of the object. This field exists so that a ' +
                             'client will have an expected size for the content before ' +
                             'validating. If the length of the retrieved content does not ' +
                             'match the specified length, the content should not be trusted.',
            },
            DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: {
              'type': 'string',
              'description': 'The content addressable digest of the manifest in the blob store',
            },
            DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: {
              'type': 'object',
              'description': 'The platform object describes the platform which the image in ' +
                             'the manifest runs on',
              'properties': {
                DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: {
                  'type': 'string',
                  'description': 'Specifies the CPU architecture, for example amd64 or ppc64le.',
                },
                DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: {
                  'type': 'string',
                  'description': 'Specifies the operating system, for example linux or windows',
                },
                DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY: {
                  'type': 'string',
                  'description': 'Specifies the operating system version, for example 10.0.10586',
                },
                DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY: {
                  'type': 'array',
                  'description': 'specifies an array of strings, each listing a required OS ' +
                                 'feature (for example on Windows win32k)',
                  'items': {
                    'type': 'string',
                  },
                },
                DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY: {
                  'type': 'string',
                  'description': 'Specifies a variant of the CPU, for example armv6l to specify ' +
                                 'a particular CPU variant of the ARM CPU',
                },
                DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY: {
                  'type': 'array',
                  'description': 'specifies an array of strings, each listing a required CPU ' +
                                 'feature (for example sse4 or aes).',
                  'items': {
                    'type': 'string',
                  },
                },
              },
              'required': [DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY,
                           DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY],
            },
          },
          'required': [DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY,
                       DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY,
                       DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY,
                       DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY],
        },
      },
    },
    'required': [DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY,
                 DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY,
                 DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY],
  }

  def __init__(self, manifest_bytes):
    """ Parses and validates the given manifest list bytes.
        Raises MalformedSchema2ManifestList on invalid JSON or schema mismatch.
    """
    assert isinstance(manifest_bytes, Bytes)

    # NOTE(review): _layers is assigned here but never read within this class —
    # confirm whether it is vestigial.
    self._layers = None
    self._manifest_bytes = manifest_bytes

    try:
      self._parsed = json.loads(manifest_bytes.as_unicode())
    except ValueError as ve:
      raise MalformedSchema2ManifestList('malformed manifest data: %s' % ve)

    try:
      validate_schema(self._parsed, DockerSchema2ManifestList.METASCHEMA)
    except ValidationError as ve:
      raise MalformedSchema2ManifestList('manifest data does not match schema: %s' % ve)

  @property
  def is_manifest_list(self):
    """ Returns whether this manifest is a list. """
    return True

  @property
  def schema_version(self):
    # Manifest lists only exist in schema version 2.
    return 2

  @property
  def digest(self):
    """ The digest of the manifest, including type prefix. """
    return digest_tools.sha256_digest(self._manifest_bytes.as_encoded_str())

  @property
  def media_type(self):
    """ The media type of the schema. """
    return self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]

  @property
  def manifest_dict(self):
    """ Returns the manifest as a dictionary ready to be serialized to JSON. """
    return self._parsed

  @property
  def bytes(self):
    # The raw bytes the list was constructed from.
    return self._manifest_bytes

  def get_layers(self, content_retriever):
    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
        does not support layers. """
    return None

  @property
  def blob_digests(self):
    # Manifest lists have no blob digests, since everything is stored as a manifest.
    return []

  @property
  def local_blob_digests(self):
    # Same as blob_digests: always empty for a list.
    return self.blob_digests

  def get_blob_digests_for_translation(self):
    # Same as blob_digests: always empty for a list.
    return self.blob_digests

  @property
  def layers_compressed_size(self):
    # A list has no layers of its own, so no aggregate size.
    return None

  # NOTE(review): lru_cache on an instance method keys the cache on
  # (self, content_retriever) and keeps those arguments alive for the cache's
  # lifetime; with maxsize=1 a call with a different retriever evicts the
  # cached result — confirm this is the intended caching behavior.
  @lru_cache(maxsize=1)
  def manifests(self, content_retriever):
    """ Returns the manifests in the list.
        Each entry is wrapped in a LazyManifestLoader so child manifests are
        only fetched when accessed.
    """
    manifests = self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]
    return [LazyManifestLoader(m, content_retriever) for m in manifests]

  def validate(self, content_retriever):
    """ Performs validation of required assertions about the manifest. Raises a ManifestException
        on failure.
    """
    for index, m in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]):
      if m[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY] == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
        platform = m[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]

        # Validate the architecture against the schema 1 architecture defined.
        parsed = self.manifests(content_retriever)[index].manifest_obj
        assert isinstance(parsed, DockerSchema1Manifest)
        if (parsed.architecture and
            parsed.architecture != platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]):
          raise MismatchManifestException('Mismatch in arch for manifest `%s`' % parsed.digest)

  def child_manifests(self, content_retriever):
    # All entries in the list are child manifests.
    return self.manifests(content_retriever)

  def child_manifest_digests(self):
    # Digests straight from the raw document; no child loading required.
    return [m[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
            for m in self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]]

  def get_manifest_labels(self, content_retriever):
    # Labels live on child manifests' configs, not on the list itself.
    return None

  def get_leaf_layer_v1_image_id(self, content_retriever):
    # No v1 image ID for a list itself.
    return None

  def get_legacy_image_ids(self, content_retriever):
    # No legacy image rows for a list itself.
    return None

  @property
  def has_legacy_image(self):
    return False

  def get_requires_empty_layer_blob(self, content_retriever):
    return False

  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
    """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
        If none, returns None.
    """
    legacy_manifest = self._get_legacy_manifest(content_retriever)
    if legacy_manifest is None:
      return None

    return legacy_manifest.get_schema1_manifest(namespace_name, repo_name, tag_name,
                                                content_retriever)

  def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
                       content_retriever):
    """ Returns self if the list's media type is allowed; otherwise delegates
        conversion to the amd64/linux child manifest (or returns None).
    """
    if self.media_type in allowed_mediatypes:
      return self

    legacy_manifest = self._get_legacy_manifest(content_retriever)
    if legacy_manifest is None:
      return None

    return legacy_manifest.convert_manifest(allowed_mediatypes, namespace_name, repo_name,
                                            tag_name, content_retriever)

  def _get_legacy_manifest(self, content_retriever):
    """ Returns the manifest under this list with architecture amd64 and os linux, if any, or None
        if none or error.
    """
    for manifest_ref in self.manifests(content_retriever):
      platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
      architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
      os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
      if architecture != 'amd64' or os != 'linux':
        continue

      try:
        return manifest_ref.manifest_obj
      except (ManifestException, IOError):
        # Best-effort: a broken child means no legacy manifest is available.
        logger.exception('Could not load child manifest')
        return None

    return None

  def unsigned(self):
    # Manifest lists carry no signatures; the unsigned form is the list itself.
    return self

  def generate_legacy_layers(self, images_map, content_retriever):
    # Legacy (v1) layers are generated from child manifests, not from the list.
    return None

View file

@ -0,0 +1,462 @@
import json
import logging
import hashlib
from collections import namedtuple
from jsonschema import validate as validate_schema, ValidationError
from digest import digest_tools
from image.docker import ManifestException
from image.docker.interfaces import ManifestInterface
from image.docker.types import ManifestImageLayer
from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE,
DOCKER_SCHEMA2_LAYER_CONTENT_TYPE,
DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE,
EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_SIZE)
from image.docker.schema1 import DockerSchema1ManifestBuilder
from image.docker.schema2.config import DockerSchema2Config
from util.bytes import Bytes
# Keys used in the schema 2 manifest JSON document.
DOCKER_SCHEMA2_MANIFEST_VERSION_KEY = 'schemaVersion'
DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY = 'mediaType'
DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY = 'config'
DOCKER_SCHEMA2_MANIFEST_SIZE_KEY = 'size'
DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY = 'digest'
DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY = 'layers'
DOCKER_SCHEMA2_MANIFEST_URLS_KEY = 'urls'

# Named tuples.
# Reference to the config blob of a schema 2 manifest.
DockerV2ManifestConfig = namedtuple('DockerV2ManifestConfig', ['size', 'digest'])
# One filesystem layer entry of a schema 2 manifest.
DockerV2ManifestLayer = namedtuple('DockerV2ManifestLayer', ['index', 'digest',
                                                             'is_remote', 'urls',
                                                             'compressed_size'])
# A history entry paired with its backing blob layer (None for empty layers)
# and the synthesized v1 IDs used for legacy conversion.
DockerV2ManifestImageLayer = namedtuple('DockerV2ManifestImageLayer', ['history', 'blob_layer',
                                                                       'v1_id', 'v1_parent_id',
                                                                       'compressed_size',
                                                                       'blob_digest'])

logger = logging.getLogger(__name__)
class MalformedSchema2Manifest(ManifestException):
  """ Error raised when a manifest violates an invariant required by the
      Docker Manifest v2.2 Specification.
  """
class DockerSchema2Manifest(ManifestInterface):
METASCHEMA = {
'type': 'object',
'properties': {
DOCKER_SCHEMA2_MANIFEST_VERSION_KEY: {
'type': 'number',
'description': 'The version of the schema. Must always be `2`.',
'minimum': 2,
'maximum': 2,
},
DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: {
'type': 'string',
'description': 'The media type of the schema.',
'enum': [DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE],
},
DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY: {
'type': 'object',
'description': 'The config field references a configuration object for a container, ' +
'by digest. This configuration item is a JSON blob that the runtime ' +
'uses to set up the container.',
'properties': {
DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: {
'type': 'string',
'description': 'The MIME type of the referenced object. This should generally be ' +
'application/vnd.docker.container.image.v1+json',
'enum': [DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE],
},
DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: {
'type': 'number',
'description': 'The size in bytes of the object. This field exists so that a ' +
'client will have an expected size for the content before ' +
'validating. If the length of the retrieved content does not ' +
'match the specified length, the content should not be trusted.',
},
DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: {
'type': 'string',
'description': 'The content addressable digest of the config in the blob store',
},
},
'required': [DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY,
DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY],
},
DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY: {
'type': 'array',
'description': 'The layer list is ordered starting from the base ' +
'image (opposite order of schema1).',
'items': {
'type': 'object',
'properties': {
DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: {
'type': 'string',
'description': 'The MIME type of the referenced object. This should generally be ' +
'application/vnd.docker.image.rootfs.diff.tar.gzip. Layers of type ' +
'application/vnd.docker.image.rootfs.foreign.diff.tar.gzip may be ' +
'pulled from a remote location but they should never be pushed.',
'enum': [DOCKER_SCHEMA2_LAYER_CONTENT_TYPE, DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE],
},
DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: {
'type': 'number',
'description': 'The size in bytes of the object. This field exists so that a ' +
'client will have an expected size for the content before ' +
'validating. If the length of the retrieved content does not ' +
'match the specified length, the content should not be trusted.',
},
DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: {
'type': 'string',
'description': 'The content addressable digest of the layer in the blob store',
},
},
'required': [
DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY,
DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY,
],
},
},
},
'required': [DOCKER_SCHEMA2_MANIFEST_VERSION_KEY, DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY,
DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY, DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY],
}
def __init__(self, manifest_bytes):
assert isinstance(manifest_bytes, Bytes)
self._payload = manifest_bytes
self._filesystem_layers = None
self._cached_built_config = None
try:
self._parsed = json.loads(self._payload.as_unicode())
except ValueError as ve:
raise MalformedSchema2Manifest('malformed manifest data: %s' % ve)
try:
validate_schema(self._parsed, DockerSchema2Manifest.METASCHEMA)
except ValidationError as ve:
raise MalformedSchema2Manifest('manifest data does not match schema: %s' % ve)
for layer in self.filesystem_layers:
if layer.is_remote and not layer.urls:
raise MalformedSchema2Manifest('missing `urls` for remote layer')
def validate(self, content_retriever):
""" Performs validation of required assertions about the manifest. Raises a ManifestException
on failure.
"""
# Nothing to validate.
@property
def is_manifest_list(self):
return False
@property
def schema_version(self):
return 2
@property
def manifest_dict(self):
return self._parsed
@property
def media_type(self):
return self._parsed[DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY]
@property
def digest(self):
return digest_tools.sha256_digest(self._payload.as_encoded_str())
@property
def config(self):
config = self._parsed[DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY]
return DockerV2ManifestConfig(size=config[DOCKER_SCHEMA2_MANIFEST_SIZE_KEY],
digest=config[DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY])
@property
def filesystem_layers(self):
""" Returns the file system layers of this manifest, from base to leaf. """
if self._filesystem_layers is None:
self._filesystem_layers = list(self._generate_filesystem_layers())
return self._filesystem_layers
@property
def leaf_filesystem_layer(self):
""" Returns the leaf file system layer for this manifest. """
return self.filesystem_layers[-1]
@property
def layers_compressed_size(self):
return sum(layer.compressed_size for layer in self.filesystem_layers)
@property
def has_remote_layer(self):
for layer in self.filesystem_layers:
if layer.is_remote:
return True
return False
@property
def blob_digests(self):
return [str(layer.digest) for layer in self.filesystem_layers] + [str(self.config.digest)]
@property
def local_blob_digests(self):
return ([str(layer.digest) for layer in self.filesystem_layers if not layer.urls] +
[str(self.config.digest)])
def get_blob_digests_for_translation(self):
return self.blob_digests
def get_manifest_labels(self, content_retriever):
  """ Returns the labels defined in the image configuration blob. """
  built_config = self._get_built_config(content_retriever)
  return built_config.labels
def get_layers(self, content_retriever):
  """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
      does not support layers. """
  for synthesized in self._manifest_image_layers(content_retriever):
    # Empty history entries have no backing blob, so blob-derived fields default.
    blob = synthesized.blob_layer
    yield ManifestImageLayer(layer_id=synthesized.v1_id,
                             compressed_size=synthesized.compressed_size,
                             is_remote=blob.is_remote if blob else False,
                             urls=blob.urls if blob else None,
                             command=synthesized.history.command,
                             blob_digest=synthesized.blob_digest,
                             created_datetime=synthesized.history.created_datetime,
                             author=synthesized.history.author,
                             comment=synthesized.history.comment,
                             internal_layer=synthesized)
@property
def bytes(self):
  """ Returns the raw payload backing this manifest. """
  return self._payload
def child_manifests(self, content_retriever):
  """ Returns None: image manifests never contain child manifests (only manifest lists do). """
  return None
def _manifest_image_layers(self, content_retriever):
  """ Yields a DockerV2ManifestImageLayer for every history entry in the image config,
      pairing each non-empty entry with its layer blob and assigning synthesized V1 IDs.

      Raises MalformedSchema2Manifest if history entries and layer blobs disagree.
  """
  # Retrieve the configuration for the manifest.
  config = self._get_built_config(content_retriever)
  history = list(config.history)
  if len(history) < len(self.filesystem_layers):
    raise MalformedSchema2Manifest('Found less history than layer blobs')
  # Rolling hash over everything seen so far; its hexdigest at each step becomes the
  # synthesized V1 ID, so each ID deterministically covers the entire chain beneath it.
  digest_history = hashlib.sha256()
  v1_layer_parent_id = None
  v1_layer_id = None
  blob_index = 0
  for history_index, history_entry in enumerate(history):
    if not history_entry.is_empty and blob_index >= len(self.filesystem_layers):
      raise MalformedSchema2Manifest('Missing history entry #%s' % blob_index)
    v1_layer_parent_id = v1_layer_id
    # Empty history entries (metadata-only instructions) have no blob of their own;
    # they map to the shared empty-layer blob digest and size.
    blob_layer = None if history_entry.is_empty else self.filesystem_layers[blob_index]
    blob_digest = EMPTY_LAYER_BLOB_DIGEST if blob_layer is None else str(blob_layer.digest)
    compressed_size = EMPTY_LAYER_SIZE if blob_layer is None else blob_layer.compressed_size
    # Create a new synthesized V1 ID for the history layer by hashing its content and
    # the blob associated with it.
    digest_history.update(json.dumps(history_entry.raw_entry))
    digest_history.update("|")
    digest_history.update(str(history_index))
    digest_history.update("|")
    digest_history.update(blob_digest)
    digest_history.update("||")
    v1_layer_id = digest_history.hexdigest()
    yield DockerV2ManifestImageLayer(history=history_entry,
                                     blob_layer=blob_layer,
                                     blob_digest=blob_digest,
                                     v1_id=v1_layer_id,
                                     v1_parent_id=v1_layer_parent_id,
                                     compressed_size=compressed_size)
    # Advance the blob cursor only when this history entry consumed a real blob.
    if not history_entry.is_empty:
      blob_index += 1
@property
def has_legacy_image(self):
  """ Returns whether a legacy V1 image can be synthesized from this manifest;
      impossible whenever a layer's content is remote. """
  return not self.has_remote_layer
def generate_legacy_layers(self, images_map, content_retriever):
  """ Generates the legacy V1 image layers for this manifest. All layers must be local. """
  assert not self.has_remote_layer
  # NOTE: We use the DockerSchema1ManifestBuilder here because it already contains
  # the logic for generating the DockerV1Metadata. All of this will go away once we get
  # rid of legacy images in the database, so this is a temporary solution.
  builder = DockerSchema1ManifestBuilder('', '', '')
  self._populate_schema1_builder(builder, content_retriever)
  schema1 = builder.build()
  return schema1.generate_legacy_layers(images_map, content_retriever)
def get_leaf_layer_v1_image_id(self, content_retriever):
  """ Returns the synthesized V1 image ID of the leaf layer, or None if this manifest
      cannot be represented as a legacy image (i.e. it has a remote layer).
  """
  # NOTE: If there exists a layer with remote content, then we consider this manifest
  # to not support legacy images.
  if self.has_remote_layer:
    return None
  # BUGFIX: get_legacy_image_ids already returns the v1 ID strings themselves
  # (it maps each layer to `l.v1_id`), so the previous `[-1].v1_id` would have
  # raised AttributeError on a string. The last entry *is* the leaf's ID.
  return self.get_legacy_image_ids(content_retriever)[-1]
def get_legacy_image_ids(self, content_retriever):
  """ Returns the synthesized V1 image IDs for each layer, ordered base to leaf,
      or None if legacy images are unsupported due to a remote layer. """
  if self.has_remote_layer:
    return None
  return [image_layer.v1_id for image_layer in self._manifest_image_layers(content_retriever)]
def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
                     content_retriever):
  """ Converts this manifest into one whose media type is in `allowed_mediatypes`.

      Returns self when already allowed, the schema 1 conversion otherwise, or None
      when no conversion is possible. """
  if self.media_type in allowed_mediatypes:
    return self
  # Not directly allowed; fall back to the schema 1 representation (if one exists)
  # and delegate the conversion decision to it.
  schema1 = self.get_schema1_manifest(namespace_name, repo_name, tag_name, content_retriever)
  if schema1 is None:
    return None
  return schema1.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name,
                                  content_retriever)
def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
  """ Builds and returns the schema 1 form of this manifest, or None when a remote
      layer makes the legacy representation impossible. """
  if self.has_remote_layer:
    return None
  builder = DockerSchema1ManifestBuilder(namespace_name, repo_name, tag_name)
  self._populate_schema1_builder(builder, content_retriever)
  return builder.build()
def unsigned(self):
  """ Returns this manifest unchanged; schema 2 manifests carry no signature to strip. """
  return self
def get_requires_empty_layer_blob(self, content_retriever):
  """ Returns whether synthesizing legacy images for this manifest requires the shared
      empty-layer blob, or None when the config blob is unavailable. """
  built_config = self._get_built_config(content_retriever)
  if built_config is None:
    return None
  return built_config.has_empty_layer
def _populate_schema1_builder(self, v1_builder, content_retriever):
""" Populates a DockerSchema1ManifestBuilder with the layers and config from
this schema.
"""
assert not self.has_remote_layer
schema2_config = self._get_built_config(content_retriever)
layers = list(self._manifest_image_layers(content_retriever))
for index, layer in enumerate(reversed(layers)): # Schema 1 layers are in reverse order
v1_compatibility = schema2_config.build_v1_compatibility(layer.history,
layer.v1_id,
layer.v1_parent_id,
index == 0,
layer.compressed_size)
v1_builder.add_layer(str(layer.blob_digest), json.dumps(v1_compatibility))
return v1_builder
def _get_built_config(self, content_retriever):
if self._cached_built_config:
return self._cached_built_config
config_bytes = content_retriever.get_blob_bytes_with_digest(self.config.digest)
if config_bytes is None:
raise MalformedSchema2Manifest('Could not load config blob for manifest')
if len(config_bytes) != self.config.size:
msg = 'Size of config does not match that retrieved: %s vs %s' % (len(config_bytes),
self.config.size)
raise MalformedSchema2Manifest(msg)
self._cached_built_config = DockerSchema2Config(Bytes.for_string_or_unicode(config_bytes))
return self._cached_built_config
def _generate_filesystem_layers(self):
  """ Yields a DockerV2ManifestLayer for each entry in the manifest's layers array,
      parsing digests and flagging remote (foreign) layers.

      Raises MalformedSchema2Manifest on an unparseable layer digest.
  """
  for index, layer_dict in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY]):
    is_remote = (layer_dict[DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY] ==
                 DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE)
    raw_digest = layer_dict[DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY]
    try:
      parsed_digest = digest_tools.Digest.parse_digest(raw_digest)
    except digest_tools.InvalidDigestException:
      raise MalformedSchema2Manifest('could not parse manifest digest: %s' % raw_digest)
    yield DockerV2ManifestLayer(index=index,
                                compressed_size=layer_dict[DOCKER_SCHEMA2_MANIFEST_SIZE_KEY],
                                digest=parsed_digest,
                                is_remote=is_remote,
                                urls=layer_dict.get(DOCKER_SCHEMA2_MANIFEST_URLS_KEY))
class DockerSchema2ManifestBuilder(object):
  """
  A convenient abstraction around creating new DockerSchema2Manifests.
  """
  def __init__(self):
    # Config reference and ordered layer list, populated via the setters below.
    self.config = None
    self.filesystem_layers = []

  def set_config(self, schema2_config):
    """ Sets the configuration for the manifest being built. """
    self.set_config_digest(schema2_config.digest, schema2_config.size)

  def set_config_digest(self, config_digest, config_size):
    """ Sets the digest and size of the configuration layer. """
    self.config = DockerV2ManifestConfig(size=config_size, digest=config_digest)

  def add_layer(self, digest, size, urls=None):
    """ Adds a filesystem layer to the manifest. """
    next_index = len(self.filesystem_layers)
    self.filesystem_layers.append(DockerV2ManifestLayer(index=next_index,
                                                        digest=digest,
                                                        compressed_size=size,
                                                        urls=urls,
                                                        is_remote=bool(urls)))

  def build(self, ensure_ascii=True):
    """ Builds and returns the DockerSchema2Manifest. """
    assert self.filesystem_layers
    assert self.config

    def _layer_entry(fs_layer):
      # Remote (foreign) layers carry their URLs and use a distinct media type.
      # NOTE: key order matters — the serialized JSON is what gets digested.
      if fs_layer.urls:
        return {
          DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE,
          DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: fs_layer.compressed_size,
          DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(fs_layer.digest),
          DOCKER_SCHEMA2_MANIFEST_URLS_KEY: fs_layer.urls,
        }
      return {
        DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_LAYER_CONTENT_TYPE,
        DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: fs_layer.compressed_size,
        DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(fs_layer.digest),
      }

    manifest_dict = {
      DOCKER_SCHEMA2_MANIFEST_VERSION_KEY: 2,
      DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,

      # Config
      DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY: {
        DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE,
        DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: self.config.size,
        DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(self.config.digest),
      },

      # Layers
      DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY: [_layer_entry(fs_layer)
                                           for fs_layer in self.filesystem_layers],
    }

    json_str = json.dumps(manifest_dict, ensure_ascii=ensure_ascii, indent=3)
    return DockerSchema2Manifest(Bytes.for_string_or_unicode(json_str))

View file

View file

@ -0,0 +1,129 @@
{
"architecture": "amd64",
"config": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"3306/tcp": {},
"33060/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GOSU_VERSION=1.7", "MYSQL_MAJOR=5.7", "MYSQL_VERSION=5.7.24-1debian9"],
"Cmd": ["mysqld"],
"ArgsEscaped": true,
"Image": "sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265",
"Volumes": {
"/var/lib/mysql": {}
},
"WorkingDir": "",
"Entrypoint": ["docker-entrypoint.sh"],
"OnBuild": [],
"Labels": null
},
"container": "54bd04ff79350d28d0da33fa3e483567156c7c9f87a7254d6fa8267b0878c339",
"container_config": {
"Hostname": "54bd04ff7935",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"3306/tcp": {},
"33060/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GOSU_VERSION=1.7", "MYSQL_MAJOR=5.7", "MYSQL_VERSION=5.7.24-1debian9"],
"Cmd": ["/bin/sh", "-c", "#(nop) ", "CMD [\"mysqld\"]"],
"ArgsEscaped": true,
"Image": "sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265",
"Volumes": {
"/var/lib/mysql": {}
},
"WorkingDir": "",
"Entrypoint": ["docker-entrypoint.sh"],
"OnBuild": [],
"Labels": {}
},
"created": "2018-11-16T01:14:20.755008004Z",
"docker_version": "17.06.2-ce",
"history": [{
"created": "2018-11-15T22:45:06.938205528Z",
"created_by": "/bin/sh -c #(nop) ADD file:dab9baf938799c515ddce14c02f899da5992f0b76a432fa10a2338556a3cb04f in / "
}, {
"created": "2018-11-15T22:45:07.243453424Z",
"created_by": "/bin/sh -c #(nop) CMD [\"bash\"]",
"empty_layer": true
}, {
"created": "2018-11-16T01:11:01.00193007Z",
"created_by": "/bin/sh -c groupadd -r mysql \u0026\u0026 useradd -r -g mysql mysql"
}, {
"created": "2018-11-16T01:11:11.128616814Z",
"created_by": "/bin/sh -c apt-get update \u0026\u0026 apt-get install -y --no-install-recommends gnupg dirmngr \u0026\u0026 rm -rf /var/lib/apt/lists/*"
}, {
"created": "2018-11-16T01:11:11.466721945Z",
"created_by": "/bin/sh -c #(nop) ENV GOSU_VERSION=1.7",
"empty_layer": true
}, {
"created": "2018-11-16T01:11:33.651099664Z",
"created_by": "/bin/sh -c set -x \t\u0026\u0026 apt-get update \u0026\u0026 apt-get install -y --no-install-recommends ca-certificates wget \u0026\u0026 rm -rf /var/lib/apt/lists/* \t\u0026\u0026 wget -O /usr/local/bin/gosu \"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)\" \t\u0026\u0026 wget -O /usr/local/bin/gosu.asc \"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \t\u0026\u0026 gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \t\u0026\u0026 gpgconf --kill all \t\u0026\u0026 rm -rf \"$GNUPGHOME\" /usr/local/bin/gosu.asc \t\u0026\u0026 chmod +x /usr/local/bin/gosu \t\u0026\u0026 gosu nobody true \t\u0026\u0026 apt-get purge -y --auto-remove ca-certificates wget"
}, {
"created": "2018-11-16T01:11:34.772616243Z",
"created_by": "/bin/sh -c mkdir /docker-entrypoint-initdb.d"
}, {
"created": "2018-11-16T01:11:46.048879774Z",
"created_by": "/bin/sh -c apt-get update \u0026\u0026 apt-get install -y --no-install-recommends \t\tpwgen \t\topenssl \t\tperl \t\u0026\u0026 rm -rf /var/lib/apt/lists/*"
}, {
"created": "2018-11-16T01:11:49.672488713Z",
"created_by": "/bin/sh -c set -ex; \tkey='A4A9406876FCBD3C456770C88C718D3B5072E1F5'; \texport GNUPGHOME=\"$(mktemp -d)\"; \tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tgpg --batch --export \"$key\" \u003e /etc/apt/trusted.gpg.d/mysql.gpg; \tgpgconf --kill all; \trm -rf \"$GNUPGHOME\"; \tapt-key list \u003e /dev/null"
}, {
"created": "2018-11-16T01:13:49.699875841Z",
"created_by": "/bin/sh -c #(nop) ENV MYSQL_MAJOR=5.7",
"empty_layer": true
}, {
"created": "2018-11-16T01:13:50.087751031Z",
"created_by": "/bin/sh -c #(nop) ENV MYSQL_VERSION=5.7.24-1debian9",
"empty_layer": true
}, {
"created": "2018-11-16T01:13:51.211877582Z",
"created_by": "/bin/sh -c echo \"deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_MAJOR}\" \u003e /etc/apt/sources.list.d/mysql.list"
}, {
"created": "2018-11-16T01:14:17.521774936Z",
"created_by": "/bin/sh -c { \t\techo mysql-community-server mysql-community-server/data-dir select ''; \t\techo mysql-community-server mysql-community-server/root-pass password ''; \t\techo mysql-community-server mysql-community-server/re-root-pass password ''; \t\techo mysql-community-server mysql-community-server/remove-test-db select false; \t} | debconf-set-selections \t\u0026\u0026 apt-get update \u0026\u0026 apt-get install -y mysql-server=\"${MYSQL_VERSION}\" \u0026\u0026 rm -rf /var/lib/apt/lists/* \t\u0026\u0026 rm -rf /var/lib/mysql \u0026\u0026 mkdir -p /var/lib/mysql /var/run/mysqld \t\u0026\u0026 chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \t\u0026\u0026 chmod 777 /var/run/mysqld \t\u0026\u0026 find /etc/mysql/ -name '*.cnf' -print0 \t\t| xargs -0 grep -lZE '^(bind-address|log)' \t\t| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#\u0026/' \t\u0026\u0026 echo '[mysqld]\\nskip-host-cache\\nskip-name-resolve' \u003e /etc/mysql/conf.d/docker.cnf"
}, {
"created": "2018-11-16T01:14:17.959906008Z",
"created_by": "/bin/sh -c #(nop) VOLUME [/var/lib/mysql]",
"empty_layer": true
}, {
"created": "2018-11-16T01:14:18.574646682Z",
"created_by": "/bin/sh -c #(nop) COPY file:4b5f8335c16a8bc9f76a2164458df1d71cf76facbf16d02f18ce7409122c2146 in /usr/local/bin/ "
}, {
"created": "2018-11-16T01:14:19.715707659Z",
"created_by": "/bin/sh -c ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat"
}, {
"created": "2018-11-16T01:14:20.063426223Z",
"created_by": "/bin/sh -c #(nop) ENTRYPOINT [\"docker-entrypoint.sh\"]",
"empty_layer": true
}, {
"created": "2018-11-16T01:14:20.416001274Z",
"created_by": "/bin/sh -c #(nop) EXPOSE 3306/tcp 33060/tcp",
"empty_layer": true
}, {
"created": "2018-11-16T01:14:20.755008004Z",
"created_by": "/bin/sh -c #(nop) CMD [\"mysqld\"]",
"empty_layer": true
}],
"os": "linux",
"rootfs": {
"type": "layers",
"diff_ids": ["sha256:ef68f6734aa485edf13a8509fe60e4272428deaf63f446a441b79d47fc5d17d3", "sha256:a588c986cf971b87ee2aacd9b57877c47e68e4981b67793d301720a1d0d03a68", "sha256:0f1205f1cd43db6d5f837f792eecb84e773482eb0fb353a4f3f42c3cabb5747f", "sha256:0ad177796f339bf4f5c114bbd97721536d48b452915479884ff3d16acc1c612f", "sha256:2566141f200b8e249db6663d24063a3e1d0e33622e933fa99bee27a4f5b8db02", "sha256:783b13a988e3ec069e08019c89292fdf4e6316141ed74a6d896a422f7ee30077", "sha256:3d4164460bf0c8c4959e6acb51757d63dea47c162a334f65dfbf32537a4b552f", "sha256:ea66b8e6103f47f1934007a9b4c03c28f0398fdc7f9fbe9b5eea335b10448fed", "sha256:347571a8da208bf019b880ef4c73bad7884ad0570ec70dbfe8f95c6c0b37c082", "sha256:ceb15396dc26b48c1dc6222a4cc3934761c1ec06623d895efdb1cb77517a3887", "sha256:0d954c604c768947cd9630283f96bca6c244b971d004565b57f42db100ca3178"]
}
}

View file

@ -0,0 +1,140 @@
{
"schemaVersion": 1,
"name": "user/test",
"tag": "1",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:e81e5181556720e9c821bdb826dd9dbeb078dd28af8fe84586aa904ff212d117"
},
{
"blobSum": "sha256:5f906b8da5fed2070448fed578b93cb1a995be5bdde5624163fbcb842ce4460f"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:cd2a6583723557a1dc58584f53564f791dbb7a5d264bb2f8d71aa8c5d21ac38c"
},
{
"blobSum": "sha256:a7905d9fbbea59dc29d709d1d61a96c06c26a2d1e506ac5c3832a348969052b8"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:0283dc49ef4e5bc0dc8897b14818de315faeceb0a5272464ff3c48cd4ea3b626"
},
{
"blobSum": "sha256:5ed0ae805b65407ddd0ff1aeb8371de3133e5daed97726717d4225cb7a8efaaa"
},
{
"blobSum": "sha256:e2ae0d063e89542abdd8abd1613e8e27743fa669f4a418c8b0a813991621d892"
},
{
"blobSum": "sha256:1f212fb371f936c524c624e6830242a8cb91b3b710942f9241004dae45828f87"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:283fa4c95fb4e349b501ef8c864b2259500d83ca6529253da12d658aa480cbb5"
},
{
"blobSum": "sha256:936836019e67889c1f5a95732c62c476d2450010988792e99d6e7ece84fdce2f"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a5a6f2f73cd8abbdc55d0df0d8834f7262713e87d6c8800ea3851f103025e0f0"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"3306/tcp\":{},\"33060/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOSU_VERSION=1.7\",\"MYSQL_MAJOR=5.7\",\"MYSQL_VERSION=5.7.24-1debian9\"],\"Cmd\":[\"mysqld\"],\"ArgsEscaped\":true,\"Image\":\"sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265\",\"Volumes\":{\"/var/lib/mysql\":{}},\"WorkingDir\":\"\",\"Entrypoint\":[\"docker-entrypoint.sh\"],\"OnBuild\":[],\"Labels\":null},\"container\":\"54bd04ff79350d28d0da33fa3e483567156c7c9f87a7254d6fa8267b0878c339\",\"container_config\":{\"Hostname\":\"54bd04ff7935\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"3306/tcp\":{},\"33060/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOSU_VERSION=1.7\",\"MYSQL_MAJOR=5.7\",\"MYSQL_VERSION=5.7.24-1debian9\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"mysqld\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265\",\"Volumes\":{\"/var/lib/mysql\":{}},\"WorkingDir\":\"\",\"Entrypoint\":[\"docker-entrypoint.sh\"],\"OnBuild\":[],\"Labels\":{}},\"created\":\"2018-11-16T01:14:20.755008004Z\",\"docker_version\":\"17.06.2-ce\",\"id\":\"3cc8ec7719abb3a11fc9ce9b5c5151f368bf3e7e2702d3618e17b4f5055237f8\",\"os\":\"linux\",\"parent\":\"2904b34db6cd1083a7b47ec5e8c1fcb538b9d0ecb790488ec22badabf6143fcb\",\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"2904b34db6cd1083a7b47ec5e8c1fcb538b9d0ecb790488ec22badabf6143fcb\",\"parent\":\"53d4b89c676dd5970862f366ded1a212a24f10a862a12a340ca2f35b5d766308\",\"created\":\"2018-11-16T01:14:20.416001274Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 3306/tcp 33060/tcp\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"53d4b89c676dd5970862f366ded1a212a24f10a862a12a340ca2f35b5d766308\",\"parent\":\"73c0e3215914add0dc50583090572ae5cd78fb16cc3b3427c8874472cdca93fb\",\"created\":\"2018-11-16T01:14:20.063426223Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENTRYPOINT [\\\"docker-entrypoint.sh\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"73c0e3215914add0dc50583090572ae5cd78fb16cc3b3427c8874472cdca93fb\",\"parent\":\"95180e8ac981681a12fa3767b32747b21514581605b20a99cf1713c78cf2ddaa\",\"created\":\"2018-11-16T01:14:19.715707659Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat\"]}}"
},
{
"v1Compatibility": "{\"id\":\"95180e8ac981681a12fa3767b32747b21514581605b20a99cf1713c78cf2ddaa\",\"parent\":\"afb72c06112722395dcb38ffdda4c9564480a69bb0fe587bba8f10d8d0adffaa\",\"created\":\"2018-11-16T01:14:18.574646682Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:4b5f8335c16a8bc9f76a2164458df1d71cf76facbf16d02f18ce7409122c2146 in /usr/local/bin/ \"]}}"
},
{
"v1Compatibility": "{\"id\":\"afb72c06112722395dcb38ffdda4c9564480a69bb0fe587bba8f10d8d0adffaa\",\"parent\":\"ccadd71e7e80b1772df1c309938e1cbac71c6deed75c9b21212f72a662ce11be\",\"created\":\"2018-11-16T01:14:17.959906008Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) VOLUME [/var/lib/mysql]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"ccadd71e7e80b1772df1c309938e1cbac71c6deed75c9b21212f72a662ce11be\",\"parent\":\"e053ced3cc09f28a3ab8547dac6bde4220a5f920c559318ba2c807353c0cbdad\",\"created\":\"2018-11-16T01:14:17.521774936Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c { \\t\\techo mysql-community-server mysql-community-server/data-dir select ''; \\t\\techo mysql-community-server mysql-community-server/root-pass password ''; \\t\\techo mysql-community-server mysql-community-server/re-root-pass password ''; \\t\\techo mysql-community-server mysql-community-server/remove-test-db select false; \\t} | debconf-set-selections \\t\\u0026\\u0026 apt-get update \\u0026\\u0026 apt-get install -y mysql-server=\\\"${MYSQL_VERSION}\\\" \\u0026\\u0026 rm -rf /var/lib/apt/lists/* \\t\\u0026\\u0026 rm -rf /var/lib/mysql \\u0026\\u0026 mkdir -p /var/lib/mysql /var/run/mysqld \\t\\u0026\\u0026 chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \\t\\u0026\\u0026 chmod 777 /var/run/mysqld \\t\\u0026\\u0026 find /etc/mysql/ -name '*.cnf' -print0 \\t\\t| xargs -0 grep -lZE '^(bind-address|log)' \\t\\t| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#\\u0026/' \\t\\u0026\\u0026 echo '[mysqld]\\\\nskip-host-cache\\\\nskip-name-resolve' \\u003e /etc/mysql/conf.d/docker.cnf\"]}}"
},
{
"v1Compatibility": "{\"id\":\"e053ced3cc09f28a3ab8547dac6bde4220a5f920c559318ba2c807353c0cbdad\",\"parent\":\"0dd7718b64000ac1bfb2c1d4bd3226244c9d55e4b741ef2eddf22c03ee638c3b\",\"created\":\"2018-11-16T01:13:51.211877582Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c echo \\\"deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_MAJOR}\\\" \\u003e /etc/apt/sources.list.d/mysql.list\"]}}"
},
{
"v1Compatibility": "{\"id\":\"0dd7718b64000ac1bfb2c1d4bd3226244c9d55e4b741ef2eddf22c03ee638c3b\",\"parent\":\"5e0187996d55a7fa5c81fa75caa2cb57677edbd45abfa68a7a8769d8f640466b\",\"created\":\"2018-11-16T01:13:50.087751031Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV MYSQL_VERSION=5.7.24-1debian9\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"5e0187996d55a7fa5c81fa75caa2cb57677edbd45abfa68a7a8769d8f640466b\",\"parent\":\"2d69915517f4a342dd3b3c719212e7349274a213551239b38c54ac0c44e7fb12\",\"created\":\"2018-11-16T01:13:49.699875841Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV MYSQL_MAJOR=5.7\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"2d69915517f4a342dd3b3c719212e7349274a213551239b38c54ac0c44e7fb12\",\"parent\":\"a3492a643c2e7bd54083848276a38e7569e47ccdf42541abd082191f55632e22\",\"created\":\"2018-11-16T01:11:49.672488713Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -ex; \\tkey='A4A9406876FCBD3C456770C88C718D3B5072E1F5'; \\texport GNUPGHOME=\\\"$(mktemp -d)\\\"; \\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \\\"$key\\\"; \\tgpg --batch --export \\\"$key\\\" \\u003e /etc/apt/trusted.gpg.d/mysql.gpg; \\tgpgconf --kill all; \\trm -rf \\\"$GNUPGHOME\\\"; \\tapt-key list \\u003e /dev/null\"]}}"
},
{
"v1Compatibility": "{\"id\":\"a3492a643c2e7bd54083848276a38e7569e47ccdf42541abd082191f55632e22\",\"parent\":\"2e7e8bdd723f6a45f9d789b8d2595b1f6c0a702c70f6922792296c681cb5a14e\",\"created\":\"2018-11-16T01:11:46.048879774Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tpwgen \\t\\topenssl \\t\\tperl \\t\\u0026\\u0026 rm -rf /var/lib/apt/lists/*\"]}}"
},
{
"v1Compatibility": "{\"id\":\"2e7e8bdd723f6a45f9d789b8d2595b1f6c0a702c70f6922792296c681cb5a14e\",\"parent\":\"855801645898a11047b72b6740ccc614f49a9cd5bd07f60820ade1635180acb3\",\"created\":\"2018-11-16T01:11:34.772616243Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir /docker-entrypoint-initdb.d\"]}}"
},
{
"v1Compatibility": "{\"id\":\"855801645898a11047b72b6740ccc614f49a9cd5bd07f60820ade1635180acb3\",\"parent\":\"123f7f7e13504138215a283c07589c9a506f249305ff2c78567ef3d1eaf27314\",\"created\":\"2018-11-16T01:11:33.651099664Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends ca-certificates wget \\u0026\\u0026 rm -rf /var/lib/apt/lists/* \\t\\u0026\\u0026 wget -O /usr/local/bin/gosu \\\"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)\\\" \\t\\u0026\\u0026 wget -O /usr/local/bin/gosu.asc \\\"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \\t\\u0026\\u0026 gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \\t\\u0026\\u0026 gpgconf --kill all \\t\\u0026\\u0026 rm -rf \\\"$GNUPGHOME\\\" /usr/local/bin/gosu.asc \\t\\u0026\\u0026 chmod +x /usr/local/bin/gosu \\t\\u0026\\u0026 gosu nobody true \\t\\u0026\\u0026 apt-get purge -y --auto-remove ca-certificates wget\"]}}"
},
{
"v1Compatibility": "{\"id\":\"123f7f7e13504138215a283c07589c9a506f249305ff2c78567ef3d1eaf27314\",\"parent\":\"6f3aeec2779f98f81f65151bc886b26eac21c79eecbc79aed3a414e1413643a4\",\"created\":\"2018-11-16T01:11:11.466721945Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV GOSU_VERSION=1.7\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"6f3aeec2779f98f81f65151bc886b26eac21c79eecbc79aed3a414e1413643a4\",\"parent\":\"4597be70a8abf812caed7f0d37ddd738d361ff4fc271e8dc4dde8b7746378d0b\",\"created\":\"2018-11-16T01:11:11.128616814Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends gnupg dirmngr \\u0026\\u0026 rm -rf /var/lib/apt/lists/*\"]}}"
},
{
"v1Compatibility": "{\"id\":\"4597be70a8abf812caed7f0d37ddd738d361ff4fc271e8dc4dde8b7746378d0b\",\"parent\":\"97569d305060de34859e5d55a8bbb010f4026af7cc4b9ca40294689bd6af1909\",\"created\":\"2018-11-16T01:11:01.00193007Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c groupadd -r mysql \\u0026\\u0026 useradd -r -g mysql mysql\"]}}"
},
{
"v1Compatibility": "{\"id\":\"97569d305060de34859e5d55a8bbb010f4026af7cc4b9ca40294689bd6af1909\",\"parent\":\"0454203f6769f870345aa82f55f4699dfaab41bcb3e38f0c44c9ecc11ef2a38f\",\"created\":\"2018-11-15T22:45:07.243453424Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"bash\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"0454203f6769f870345aa82f55f4699dfaab41bcb3e38f0c44c9ecc11ef2a38f\",\"created\":\"2018-11-15T22:45:06.938205528Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:dab9baf938799c515ddce14c02f899da5992f0b76a432fa10a2338556a3cb04f in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "BTGA:CY7S:HZ7T:FEUS:DZJD:FNS5:O5U2:BTGQ:SGZZ:AY5P:R5MA:UJEY",
"kty": "EC",
"x": "0xF2dZ_HLk8VVrqMLMm838LWFAi60P7V5fBjlhlt7xI",
"y": "niBqFvBqOvtABZSpMoQoSMT7H13Pb0POo00OX7Xsmvc"
},
"alg": "ES256"
},
"signature": "w8TITz0xkMNqgchKNSfQ-4OlfIGUnG4MLT4Tt738Z0NiD1bHaWFef8wCCBNuDLiKHllrqcqM6Aj__LhsctSwyA",
"protected": "eyJmb3JtYXRMZW5ndGgiOjEyODM2LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTgtMTEtMjFUMTk6MTU6MTNaIn0"
}
]
}

View file

@ -0,0 +1,66 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 8171,
"digest": "sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 22486277,
"digest": "sha256:a5a6f2f73cd8abbdc55d0df0d8834f7262713e87d6c8800ea3851f103025e0f0"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 1747,
"digest": "sha256:936836019e67889c1f5a95732c62c476d2450010988792e99d6e7ece84fdce2f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 4500948,
"digest": "sha256:283fa4c95fb4e349b501ef8c864b2259500d83ca6529253da12d658aa480cbb5"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 1270313,
"digest": "sha256:1f212fb371f936c524c624e6830242a8cb91b3b710942f9241004dae45828f87"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 115,
"digest": "sha256:e2ae0d063e89542abdd8abd1613e8e27743fa669f4a418c8b0a813991621d892"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 12091270,
"digest": "sha256:5ed0ae805b65407ddd0ff1aeb8371de3133e5daed97726717d4225cb7a8efaaa"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 24045,
"digest": "sha256:0283dc49ef4e5bc0dc8897b14818de315faeceb0a5272464ff3c48cd4ea3b626"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 223,
"digest": "sha256:a7905d9fbbea59dc29d709d1d61a96c06c26a2d1e506ac5c3832a348969052b8"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 83565354,
"digest": "sha256:cd2a6583723557a1dc58584f53564f791dbb7a5d264bb2f8d71aa8c5d21ac38c"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 2876,
"digest": "sha256:5f906b8da5fed2070448fed578b93cb1a995be5bdde5624163fbcb842ce4460f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 121,
"digest": "sha256:e81e5181556720e9c821bdb826dd9dbeb078dd28af8fe84586aa904ff212d117"
}
]
}

View file

@ -0,0 +1,75 @@
{
"architecture": "amd64",
"config": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"sh"
],
"Image": "",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {}
},
"container": "86fff20ea922659929a4716850cc9b3a2cca6c197f7a7ece7da5b6d9d8ac4954",
"container_config": {
"Hostname": "86fff20ea922",
"Domainname": "",
"User": "",
"AttachStdin": true,
"AttachStdout": true,
"AttachStderr": true,
"Tty": true,
"OpenStdin": true,
"StdinOnce": true,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"sh"
],
"Image": "busybox",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {}
},
"created": "2018-11-20T21:15:01.569237Z",
"docker_version": "17.09.0-ce",
"history": [
{
"created": "2018-10-02T17:19:34.03981888Z",
"created_by": "/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / "
},
{
"created": "2018-10-02T17:19:34.239926273Z",
"created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
"empty_layer": true
},
{
"created": "2018-11-20T21:15:01.569237Z",
"created_by": "sh"
}
],
"os": "linux",
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:8a788232037eaf17794408ff3df6b922a1aedf9ef8de36afdae3ed0b0381907b",
"sha256:70d967d052ce14cd372b12663d84046ade5712c3a4ece6078cdb63e75bbfcfa1"
]
}
}

View file

@ -0,0 +1,44 @@
{
"schemaVersion": 1,
"name": "devtable/somerepo",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:28b98663b93a1c984379691300f284ee1536db1b6ecd8a1d59222528f80cee89"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"container\":\"86fff20ea922659929a4716850cc9b3a2cca6c197f7a7ece7da5b6d9d8ac4954\",\"container_config\":{\"Hostname\":\"86fff20ea922\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":true,\"AttachStdout\":true,\"AttachStderr\":true,\"Tty\":true,\"OpenStdin\":true,\"StdinOnce\":true,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"Image\":\"busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-11-20T21:15:01.569237Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"692854afd8718d5285bf99cecfc9d6385f41122d3cea70fc9961b3f23ae0d768\",\"os\":\"linux\",\"parent\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\"}"
},
{
"v1Compatibility": "{\"id\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"parent\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.239926273Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.03981888Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
"kty": "EC",
"x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
"y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
},
"alg": "ES256"
},
"signature": "4-nlo2R9Dn3PIGHuhvPkamCzLgFYURziihwZYAnmw5eMKLRj4ir-VeEJI30mDh8ArTeDo-PnMLRNZGRX2NwXHw",
"protected": "eyJmb3JtYXRMZW5ndGgiOjIzNDEsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMS0yMFQyMToxNzozMVoifQ"
}
]
}

View file

@ -0,0 +1,21 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 1977,
"digest": "sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 727978,
"digest": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 190,
"digest": "sha256:28b98663b93a1c984379691300f284ee1536db1b6ecd8a1d59222528f80cee89"
}
]
}

View file

@ -0,0 +1,90 @@
{
"architecture": "amd64",
"config": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/bash"
],
"ArgsEscaped": true,
"Image": "sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": null
},
"container": "1501390588c62f6c7c0e4fec25d6587c75c2f330536b9d08c610a56ed013f64b",
"container_config": {
"Hostname": "1501390588c6",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/sh",
"-c",
"#(nop) ",
"CMD [\"/bin/bash\"]"
],
"ArgsEscaped": true,
"Image": "sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {}
},
"created": "2018-11-19T21:20:42.235528208Z",
"docker_version": "17.06.2-ce",
"history": [
{
"created": "2018-11-19T21:20:39.739838469Z",
"created_by": "/bin/sh -c #(nop) ADD file:39e5bc157a8be63bbb36a142e18b644b0cfff07a8a02b42f7d0c4ee4ba75a5bc in / "
},
{
"created": "2018-11-19T21:20:40.571619714Z",
"created_by": "/bin/sh -c set -xe \t\t&& echo '#!/bin/sh' > /usr/sbin/policy-rc.d \t&& echo 'exit 101' >> /usr/sbin/policy-rc.d \t&& chmod +x /usr/sbin/policy-rc.d \t\t&& dpkg-divert --local --rename --add /sbin/initctl \t&& cp -a /usr/sbin/policy-rc.d /sbin/initctl \t&& sed -i 's/^exit.*/exit 0/' /sbin/initctl \t\t&& echo 'force-unsafe-io' > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \t\t&& echo 'DPkg::Post-Invoke { \"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\"; };' > /etc/apt/apt.conf.d/docker-clean \t&& echo 'APT::Update::Post-Invoke { \"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\"; };' >> /etc/apt/apt.conf.d/docker-clean \t&& echo 'Dir::Cache::pkgcache \"\"; Dir::Cache::srcpkgcache \"\";' >> /etc/apt/apt.conf.d/docker-clean \t\t&& echo 'Acquire::Languages \"none\";' > /etc/apt/apt.conf.d/docker-no-languages \t\t&& echo 'Acquire::GzipIndexes \"true\"; Acquire::CompressionTypes::Order:: \"gz\";' > /etc/apt/apt.conf.d/docker-gzip-indexes \t\t&& echo 'Apt::AutoRemove::SuggestsImportant \"false\";' > /etc/apt/apt.conf.d/docker-autoremove-suggests"
},
{
"created": "2018-11-19T21:20:41.293060457Z",
"created_by": "/bin/sh -c rm -rf /var/lib/apt/lists/*"
},
{
"created": "2018-11-19T21:20:42.002883522Z",
"created_by": "/bin/sh -c mkdir -p /run/systemd && echo 'docker' > /run/systemd/container"
},
{
"created": "2018-11-19T21:20:42.235528208Z",
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
"empty_layer": true
}
],
"os": "linux",
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:bc7f4b25d0ae3524466891c41cefc7c6833c533e00ba80f8063c68da9a8b65fe",
"sha256:a768c3f3878e96565d2bf0dcf90508261862847b2e7b8fc804a0770c07f0d5d5",
"sha256:ca2991e4676cba899ad9bc6ad3a044cd0816915f9e97a6f2e67b6accbc779ba5",
"sha256:b9b7103af585bd8ae9130de947817be7ce76092aa19cf6d2f9d5290440c645eb"
]
}
}

View file

@ -0,0 +1,56 @@
{
"schemaVersion": 1,
"name": "devtable/ubuntu",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:f85999a86bef2603a9e9a4fa488a7c1f82e471cbb76c3b5068e54e1a9320964a"
},
{
"blobSum": "sha256:fa83472a3562898caaf8d77542181a473a84039376f2ba56254619d9317ba00d"
},
{
"blobSum": "sha256:da1315cffa03c17988ae5c66f56d5f50517652a622afc1611a8bdd6c00b1fde3"
},
{
"blobSum": "sha256:32802c0cfa4defde2981bec336096350d0bb490469c494e21f678b1dcf6d831f"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"ArgsEscaped\":true,\"Image\":\"sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"1501390588c62f6c7c0e4fec25d6587c75c2f330536b9d08c610a56ed013f64b\",\"container_config\":{\"Hostname\":\"1501390588c6\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"/bin/bash\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-11-19T21:20:42.235528208Z\",\"docker_version\":\"17.06.2-ce\",\"id\":\"d71fc6939e162a01d90cefeeb3d7f6d6b2583fac2ef98833ec69a95d12ffeeaa\",\"os\":\"linux\",\"parent\":\"ba7177adc95198e86c00039d17d22f35ed1eed39f4e2c3ffc7b2c29a3e81271a\",\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"ba7177adc95198e86c00039d17d22f35ed1eed39f4e2c3ffc7b2c29a3e81271a\",\"parent\":\"69d0081dfb37f77fa9c971f367c6b86a3eb4090e7ab56741da954523ec3a786f\",\"created\":\"2018-11-19T21:20:42.002883522Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p /run/systemd \\u0026\\u0026 echo 'docker' \\u003e /run/systemd/container\"]}}"
},
{
"v1Compatibility": "{\"id\":\"69d0081dfb37f77fa9c971f367c6b86a3eb4090e7ab56741da954523ec3a786f\",\"parent\":\"1bd3843430506ff885fc1a3c1d050c19e2dcf70f8ef6cea1536692fd396c87bc\",\"created\":\"2018-11-19T21:20:41.293060457Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -rf /var/lib/apt/lists/*\"]}}"
},
{
"v1Compatibility": "{\"id\":\"1bd3843430506ff885fc1a3c1d050c19e2dcf70f8ef6cea1536692fd396c87bc\",\"parent\":\"248632e87271aa5118ebc0ebf46758791e032c481f9702a2a36e7c85e83d33d2\",\"created\":\"2018-11-19T21:20:40.571619714Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -xe \\t\\t\\u0026\\u0026 echo '#!/bin/sh' \\u003e /usr/sbin/policy-rc.d \\t\\u0026\\u0026 echo 'exit 101' \\u003e\\u003e /usr/sbin/policy-rc.d \\t\\u0026\\u0026 chmod +x /usr/sbin/policy-rc.d \\t\\t\\u0026\\u0026 dpkg-divert --local --rename --add /sbin/initctl \\t\\u0026\\u0026 cp -a /usr/sbin/policy-rc.d /sbin/initctl \\t\\u0026\\u0026 sed -i 's/^exit.*/exit 0/' /sbin/initctl \\t\\t\\u0026\\u0026 echo 'force-unsafe-io' \\u003e /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \\t\\t\\u0026\\u0026 echo 'DPkg::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e /etc/apt/apt.conf.d/docker-clean \\t\\u0026\\u0026 echo 'APT::Update::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\t\\u0026\\u0026 echo 'Dir::Cache::pkgcache \\\"\\\"; Dir::Cache::srcpkgcache \\\"\\\";' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\t\\t\\u0026\\u0026 echo 'Acquire::Languages \\\"none\\\";' \\u003e /etc/apt/apt.conf.d/docker-no-languages \\t\\t\\u0026\\u0026 echo 'Acquire::GzipIndexes \\\"true\\\"; Acquire::CompressionTypes::Order:: \\\"gz\\\";' \\u003e /etc/apt/apt.conf.d/docker-gzip-indexes \\t\\t\\u0026\\u0026 echo 'Apt::AutoRemove::SuggestsImportant \\\"false\\\";' \\u003e /etc/apt/apt.conf.d/docker-autoremove-suggests\"]}}"
},
{
"v1Compatibility": "{\"id\":\"248632e87271aa5118ebc0ebf46758791e032c481f9702a2a36e7c85e83d33d2\",\"created\":\"2018-11-19T21:20:39.739838469Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:39e5bc157a8be63bbb36a142e18b644b0cfff07a8a02b42f7d0c4ee4ba75a5bc in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
"kty": "EC",
"x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
"y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
},
"alg": "ES256"
},
"signature": "0wBIubWqf-71Im54gbPlOjFBH7lr6MFLW75bdb-McFvDnfgSdOIMuJ9NHtKEYNF8qFe9hMoO6_GrSDVTJ-pryQ",
"protected": "eyJmb3JtYXRMZW5ndGgiOjQ5MjMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMS0yNlQxMDo0MjozMloifQ"
}
]
}

View file

@ -0,0 +1,31 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 3894,
"digest": "sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32102249,
"digest": "sha256:32802c0cfa4defde2981bec336096350d0bb490469c494e21f678b1dcf6d831f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 847,
"digest": "sha256:da1315cffa03c17988ae5c66f56d5f50517652a622afc1611a8bdd6c00b1fde3"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 556,
"digest": "sha256:fa83472a3562898caaf8d77542181a473a84039376f2ba56254619d9317ba00d"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 162,
"digest": "sha256:f85999a86bef2603a9e9a4fa488a7c1f82e471cbb76c3b5068e54e1a9320964a"
}
]
}

View file

@ -0,0 +1,133 @@
import json
import pytest
from image.docker.schema2.config import MalformedSchema2Config, DockerSchema2Config
from util.bytes import Bytes
@pytest.mark.parametrize('json_data', [
  '',
  '{}',
  """
  {
    "unknown": "key"
  }
  """,
])
def test_malformed_configs(json_data):
  """Config blobs that are empty, non-conforming, or carry unknown keys must
  be rejected at parse time with MalformedSchema2Config."""
  config_bytes = Bytes.for_string_or_unicode(json_data)
  with pytest.raises(MalformedSchema2Config):
    DockerSchema2Config(config_bytes)
# Serialized Docker schema 2 config blob used as the shared fixture for the
# tests below. NOTE(review): the '\/' sequences are copied verbatim from a
# docker-produced blob; Python keeps the unrecognized escape as a literal
# backslash + slash, so do not "normalize" them.
CONFIG_BYTES = json.dumps({
  "architecture": "amd64",
  "config": {
    "Hostname": "",
    "Domainname": "",
    "User": "",
    "AttachStdin": False,
    "AttachStdout": False,
    "AttachStderr": False,
    "Tty": False,
    "OpenStdin": False,
    "StdinOnce": False,
    "Env": [
      "HTTP_PROXY=http:\/\/localhost:8080",
      "http_proxy=http:\/\/localhost:8080",
      "PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
    ],
    "Cmd": [
      "sh"
    ],
    "Image": "",
    "Volumes": None,
    "WorkingDir": "",
    "Entrypoint": None,
    "OnBuild": None,
    "Labels": {
    }
  },
  "container": "b7a43694b435c8e9932615643f61f975a9213e453b15cd6c2a386f144a2d2de9",
  "container_config": {
    "Hostname": "b7a43694b435",
    "Domainname": "",
    "User": "",
    "AttachStdin": True,
    "AttachStdout": True,
    "AttachStderr": True,
    "Tty": True,
    "OpenStdin": True,
    "StdinOnce": True,
    "Env": [
      "HTTP_PROXY=http:\/\/localhost:8080",
      "http_proxy=http:\/\/localhost:8080",
      "PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
    ],
    "Cmd": [
      "sh"
    ],
    "Image": "jschorr\/somerepo",
    "Volumes": None,
    "WorkingDir": "",
    "Entrypoint": None,
    "OnBuild": None,
    "Labels": {
    }
  },
  "created": "2018-04-16T10:41:19.079522722Z",
  "docker_version": "17.09.0-ce",
  # Four history entries; entry 1 is an empty layer (exercised by
  # test_valid_config below).
  "history": [
    {
      "created": "2018-04-03T18:37:09.284840891Z",
      "created_by": "\/bin\/sh -c #(nop) ADD file:9e4ca21cbd24dc05b454b6be21c7c639216ae66559b21ba24af0d665c62620dc in \/ "
    },
    {
      "created": "2018-04-03T18:37:09.613317719Z",
      "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
      "empty_layer": True
    },
    {
      "created": "2018-04-16T10:37:44.418262777Z",
      "created_by": "sh"
    },
    {
      "created": "2018-04-16T10:41:19.079522722Z",
      "created_by": "sh"
    }
  ],
  "os": "linux",
  "rootfs": {
    "type": "layers",
    "diff_ids": [
      "sha256:3e596351c689c8827a3c9635bc1083cff17fa4a174f84f0584bd0ae6f384195b",
      "sha256:4552be273c71275a88de0b8c8853dcac18cb74d5790f5383d9b38d4ac55062d5",
      "sha256:1319c76152ca37fbeb7fb71e0ffa7239bc19ffbe3b95c00417ece39d89d06e6e"
    ]
  }
})
def test_valid_config():
  """Parse CONFIG_BYTES and verify history entries and v1-compat synthesis."""
  parsed = DockerSchema2Config(Bytes.for_string_or_unicode(CONFIG_BYTES))
  entries = list(parsed.history)
  assert len(entries) == 4

  assert not entries[0].is_empty
  assert entries[1].is_empty
  assert entries[0].created_datetime.year == 2018
  assert entries[1].command == '/bin/sh -c #(nop) CMD ["sh"]'
  assert entries[2].command == 'sh'

  for index, entry in enumerate(entries):
    compat = parsed.build_v1_compatibility(entry, 'somev1id', 'someparentid',
                                           index == 3)
    assert compat['id'] == 'somev1id'
    assert compat['parent'] == 'someparentid'
    if index == 3:
      # The leaf image carries the full original container_config.
      assert compat['container_config'] == parsed._parsed['container_config']
    else:
      # Non-leaf images only carry a synthesized Cmd.
      assert 'Hostname' not in compat['container_config']
      assert compat['container_config']['Cmd'] == [entry.command]

  assert parsed.labels == {}

View file

@ -0,0 +1,115 @@
import os
import json
import pytest
from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES
from image.docker.schema2.manifest import DockerSchema2Manifest
from image.docker.schemautil import ContentRetrieverForTesting
from util.bytes import Bytes
def _get_test_file_contents(test_name, kind):
  """Load the conversion fixture `<test_name>.<kind>.json` as Bytes."""
  fixture_path = os.path.join(os.path.dirname(__file__), 'conversion_data',
                              '%s.%s.json' % (test_name, kind))
  with open(fixture_path, 'r') as fixture:
    return Bytes.for_string_or_unicode(fixture.read())
@pytest.mark.parametrize('name, config_sha', [
  ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
  ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
  ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
])
def test_legacy_layers(name, config_sha):
  """Schema 1 and schema 2 manifests of the same image must synthesize
  identical legacy (v1) layer lists."""
  retriever = ContentRetrieverForTesting(
      {config_sha: _get_test_file_contents(name, 'config').as_encoded_str()})
  schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
  schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)

  # Check legacy layers
  from_schema2 = list(schema2.generate_legacy_layers({}, retriever))
  from_schema1 = list(schema1.generate_legacy_layers({}, retriever))
  assert len(from_schema1) == len(from_schema2)
  for v1_layer, v2_layer in zip(from_schema1, from_schema2):
    assert v1_layer.content_checksum == v2_layer.content_checksum
    assert v1_layer.comment == v2_layer.comment
    assert v1_layer.command == v2_layer.command
@pytest.mark.parametrize('name, config_sha', [
  ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
  ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
  ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
])
def test_conversion(name, config_sha):
  """convert_manifest is an identity for a manifest's own media type,
  downgrades schema 2 -> schema 1, and yields None with no acceptable type."""
  retriever = ContentRetrieverForTesting(
      {config_sha: _get_test_file_contents(name, 'config').as_encoded_str()})
  schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
  schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)

  # Converting to the manifest's own media type returns it unchanged.
  assert schema2.convert_manifest([schema2.media_type], 'devtable', 'somerepo', 'latest',
                                  retriever) == schema2
  assert schema1.convert_manifest([schema1.media_type], 'devtable', 'somerepo', 'latest',
                                  retriever) == schema1

  # Downgrade schema 2 to schema 1.
  downgraded = schema2.convert_manifest(DOCKER_SCHEMA1_CONTENT_TYPES, 'devtable', 'somerepo',
                                        'latest', retriever)
  assert downgraded.media_type in DOCKER_SCHEMA1_CONTENT_TYPES
  assert len(downgraded.layers) == len(schema1.layers)

  # With no acceptable media types, no conversion is possible.
  assert schema2.convert_manifest([], 'devtable', 'somerepo', 'latest', retriever) is None
@pytest.mark.parametrize('name, config_sha', [
  ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
  ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
  ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
])
def test_2to1_conversion(name, config_sha):
  """A schema 2 manifest converted to schema 1 must match the reference
  schema 1 fixture layer-for-layer (modulo rewritten v1 image IDs)."""
  retriever = ContentRetrieverForTesting(
      {config_sha: _get_test_file_contents(name, 'config').as_encoded_str()})
  schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
  schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)

  converted = schema2.get_schema1_manifest('devtable', 'somerepo', 'latest', retriever)
  assert len(converted.layers) == len(schema1.layers)

  # Maps the fixture's v1 image IDs onto the freshly synthesized ones.
  image_id_map = {}
  for reference_layer, converted_layer in zip(schema1.layers, converted.layers):
    image_id_map[reference_layer.v1_metadata.image_id] = converted_layer.v1_metadata.image_id
    assert str(reference_layer.digest) == str(converted_layer.digest)

    reference_parent = reference_layer.v1_metadata.parent_image_id
    converted_parent = converted_layer.v1_metadata.parent_image_id
    assert (reference_parent is None) == (converted_parent is None)
    if reference_parent is not None:
      # Parent linkage must be preserved under the ID rewrite.
      assert image_id_map[reference_parent] == converted_parent

    assert reference_layer.v1_metadata.created == converted_layer.v1_metadata.created
    assert reference_layer.v1_metadata.comment == converted_layer.v1_metadata.comment
    assert reference_layer.v1_metadata.command == converted_layer.v1_metadata.command
    assert reference_layer.v1_metadata.labels == converted_layer.v1_metadata.labels

    assert (json.loads(reference_layer.raw_v1_metadata)['container_config'] ==
            json.loads(converted_layer.raw_v1_metadata)['container_config'])

View file

@ -0,0 +1,151 @@
import json
import pytest
from image.docker.schema1 import (DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES,
DockerSchema1ManifestBuilder)
from image.docker.schema2 import DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE
from image.docker.schema2.manifest import DockerSchema2Manifest
from image.docker.schema2.list import (MalformedSchema2ManifestList, DockerSchema2ManifestList,
DockerSchema2ManifestListBuilder, MismatchManifestException)
from image.docker.schema2.test.test_manifest import MANIFEST_BYTES as v22_bytes
from image.docker.schemautil import ContentRetrieverForTesting
from image.docker.test.test_schema1 import MANIFEST_BYTES as v21_bytes
from util.bytes import Bytes
@pytest.mark.parametrize('json_data', [
  '',
  '{}',
  """
  {
    "unknown": "key"
  }
  """,
])
def test_malformed_manifest_lists(json_data):
  """Empty, non-conforming, or unknown-key JSON must be rejected as a
  malformed manifest list."""
  list_bytes = Bytes.for_string_or_unicode(json_data)
  with pytest.raises(MalformedSchema2ManifestList):
    DockerSchema2ManifestList(list_bytes)
# A well-formed manifest list with one schema 2 child (ppc64le) and one
# schema 1 child (amd64). The shortened digests are resolved by the
# `retriever` fixture defined below.
MANIFESTLIST_BYTES = json.dumps({
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 946,
      "digest": "sha256:e6",
      "platform": {
        "architecture": "ppc64le",
        "os": "linux",
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 878,
      "digest": "sha256:5b",
      "platform": {
        "architecture": "amd64",
        "os": "linux",
        "features": [
          "sse4"
        ]
      }
    }
  ]
})
# Manifest list without any amd64 entry; schema 1 fallback retrieval is
# expected to fail for it (see test_get_schema1_manifest_no_matching_list).
NO_AMD_MANIFESTLIST_BYTES = json.dumps({
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 946,
      "digest": "sha256:e6",
      "platform": {
        "architecture": "ppc64le",
        "os": "linux",
      }
    },
  ]
})

# Resolves the shortened child digests above to real manifest bytes taken
# from the schema 1/2 test fixtures.
retriever = ContentRetrieverForTesting({
  'sha256:e6': v22_bytes,
  'sha256:5b': v21_bytes,
})
def test_valid_manifestlist():
  """End-to-end checks on the two-entry list: parsing, child manifest types,
  schema conversion, and validation."""
  parsed = DockerSchema2ManifestList(Bytes.for_string_or_unicode(MANIFESTLIST_BYTES))
  assert len(parsed.manifests(retriever)) == 2
  assert parsed.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'
  assert parsed.bytes.as_encoded_str() == MANIFESTLIST_BYTES
  assert parsed.manifest_dict == json.loads(MANIFESTLIST_BYTES)

  # A manifest list has no layers or blobs of its own.
  assert parsed.get_layers(retriever) is None
  assert not parsed.blob_digests

  # Children resolve to the expected manifest classes, in order.
  expected_children = [(DockerSchema2Manifest, 2), (DockerSchema1Manifest, 1)]
  for child, (child_class, child_version) in zip(parsed.manifests(retriever),
                                                 expected_children):
    assert isinstance(child.manifest_obj, child_class)
    assert child.manifest_obj.schema_version == child_version

  # Check retrieval of a schema 2 manifest. This should return None, because the schema 2 manifest
  # is not amd64-compatible.
  assert parsed.convert_manifest([DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE], 'foo',
                                 'bar', 'baz', retriever) is None

  # Check retrieval of a schema 1 manifest.
  compatible = parsed.get_schema1_manifest('foo', 'bar', 'baz', retriever)
  assert compatible.schema_version == 1

  converted = parsed.convert_manifest(DOCKER_SCHEMA1_CONTENT_TYPES, 'foo',
                                      'bar', 'baz', retriever)
  assert converted.schema_version == 1
  assert converted.digest == compatible.digest

  # Ensure it validates.
  parsed.validate(retriever)
def test_get_schema1_manifest_no_matching_list():
  """Without an amd64/linux child there is no schema 1 fallback manifest."""
  parsed = DockerSchema2ManifestList(
      Bytes.for_string_or_unicode(NO_AMD_MANIFESTLIST_BYTES))
  assert len(parsed.manifests(retriever)) == 1
  assert parsed.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'
  assert parsed.bytes.as_encoded_str() == NO_AMD_MANIFESTLIST_BYTES
  assert parsed.get_schema1_manifest('foo', 'bar', 'baz', retriever) is None
def test_builder():
  """Re-adding every child of an existing list through the builder yields a
  list with the same number of manifests."""
  existing = DockerSchema2ManifestList(Bytes.for_string_or_unicode(MANIFESTLIST_BYTES))
  builder = DockerSchema2ManifestListBuilder()
  # Fix: the original bound an `enumerate` index that was never used.
  for manifest in existing.manifests(retriever):
    builder.add_manifest(manifest.manifest_obj, "amd64", "os")
  built = builder.build()
  assert len(built.manifests(retriever)) == 2
def test_invalid_manifestlist():
  # Build a manifest list with a schema 1 manifest of the wrong architecture.
  child_builder = DockerSchema1ManifestBuilder('foo', 'bar', 'baz')
  child_builder.add_layer('sha:2356', '{"id": "foo"}')
  child = child_builder.build().unsigned()

  list_builder = DockerSchema2ManifestListBuilder()
  list_builder.add_manifest(child, 'amd32', 'linux')
  manifest_list = list_builder.build()

  # Validation resolves the child and must notice the architecture mismatch.
  local_retriever = ContentRetrieverForTesting()
  local_retriever.add_digest(child.digest, child.bytes.as_encoded_str())
  with pytest.raises(MismatchManifestException):
    manifest_list.validate(local_retriever)

View file

@ -0,0 +1,422 @@
# -*- coding: utf-8 -*-
import json
import pytest
import os
from app import docker_v2_signing_key
from image.docker.schema1 import (DockerSchema1ManifestBuilder,
DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE,
DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE)
from image.docker.schema2.manifest import (MalformedSchema2Manifest, DockerSchema2Manifest,
DockerSchema2ManifestBuilder, EMPTY_LAYER_BLOB_DIGEST)
from image.docker.schema2.config import DockerSchema2Config
from image.docker.schema2.test.test_config import CONFIG_BYTES
from image.docker.schemautil import ContentRetrieverForTesting
from util.bytes import Bytes
@pytest.mark.parametrize('json_data', [
  '',
  '{}',
  """
  {
    "unknown": "key"
  }
  """,
])
def test_malformed_manifests(json_data):
  """Empty, non-conforming, or unknown-key JSON must be rejected as a
  malformed schema 2 manifest."""
  manifest_bytes = Bytes.for_string_or_unicode(json_data)
  with pytest.raises(MalformedSchema2Manifest):
    DockerSchema2Manifest(manifest_bytes)
# Schema 2 manifest fixture with four local layers. NOTE: the digest of
# layer 0 deliberately reappears as layer 3, so blob-digest enumeration in
# the tests must preserve duplicates.
MANIFEST_BYTES = json.dumps({
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 1885,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 1234,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 32654,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 16724,
      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 73109,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
    },
  ],
})
# Same manifest as MANIFEST_BYTES, except layer 0 is a *remote* (foreign)
# layer carrying a `urls` entry — used to exercise the has_remote_layer and
# local_blob_digests paths.
REMOTE_MANIFEST_BYTES = json.dumps({
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 1885,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
      "size": 1234,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
      "urls": ['http://some/url'],
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 32654,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 16724,
      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 73109,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
    },
  ],
})
def test_valid_manifest():
  # Parse MANIFEST_BYTES and verify the config/layer accessors, blob digest
  # enumeration, and layer synthesis against a matching config blob.
  manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
  assert manifest.config.size == 1885
  assert str(manifest.config.digest) == 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'
  assert manifest.media_type == "application/vnd.docker.distribution.manifest.v2+json"
  assert not manifest.has_remote_layer
  assert manifest.has_legacy_image

  # Config blob fixture with one history entry per filesystem layer (4 total).
  retriever = ContentRetrieverForTesting.for_config({
    "config": {
      "Labels": {},
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
    ],
  }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

  assert len(manifest.filesystem_layers) == 4
  assert manifest.filesystem_layers[0].compressed_size == 1234
  assert str(manifest.filesystem_layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
  assert not manifest.filesystem_layers[0].is_remote

  assert manifest.leaf_filesystem_layer == manifest.filesystem_layers[3]
  assert not manifest.leaf_filesystem_layer.is_remote
  assert manifest.leaf_filesystem_layer.compressed_size == 73109

  # All layer digests (duplicates preserved) plus the config digest; with no
  # remote layers, the local blob set is identical.
  blob_digests = list(manifest.blob_digests)
  expected = [str(layer.digest) for layer in manifest.filesystem_layers] + [manifest.config.digest]
  assert blob_digests == expected
  assert list(manifest.local_blob_digests) == expected

  # Synthesized image layers line up 1:1 with the filesystem layers.
  manifest_image_layers = list(manifest.get_layers(retriever))
  assert len(manifest_image_layers) == len(list(manifest.filesystem_layers))
  for index in range(0, 4):
    assert manifest_image_layers[index].blob_digest == str(manifest.filesystem_layers[index].digest)
def test_valid_remote_manifest():
  # Same checks as test_valid_manifest, but layer 0 is a remote (foreign)
  # layer: it must be excluded from local_blob_digests and block v1 image ID
  # synthesis.
  manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(REMOTE_MANIFEST_BYTES))
  assert manifest.config.size == 1885
  assert str(manifest.config.digest) == 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'
  assert manifest.media_type == "application/vnd.docker.distribution.manifest.v2+json"
  assert manifest.has_remote_layer

  assert len(manifest.filesystem_layers) == 4
  assert manifest.filesystem_layers[0].compressed_size == 1234
  assert str(manifest.filesystem_layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
  assert manifest.filesystem_layers[0].is_remote
  assert manifest.filesystem_layers[0].urls == ['http://some/url']

  assert manifest.leaf_filesystem_layer == manifest.filesystem_layers[3]
  assert not manifest.leaf_filesystem_layer.is_remote
  assert manifest.leaf_filesystem_layer.compressed_size == 73109

  expected = set([str(layer.digest) for layer in manifest.filesystem_layers] +
                 [manifest.config.digest])

  blob_digests = set(manifest.blob_digests)
  local_digests = set(manifest.local_blob_digests)

  # The remote layer's digest is advertised but not stored locally.
  assert blob_digests == expected
  assert local_digests == (expected - {manifest.filesystem_layers[0].digest})

  # Remote layers make legacy v1 image IDs unavailable.
  assert manifest.has_remote_layer
  assert manifest.get_leaf_layer_v1_image_id(None) is None
  assert manifest.get_legacy_image_ids(None) is None

  # Config blob fixture with one history entry per filesystem layer.
  retriever = ContentRetrieverForTesting.for_config({
    "config": {
      "Labels": {},
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
    ],
  }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

  manifest_image_layers = list(manifest.get_layers(retriever))
  assert len(manifest_image_layers) == len(list(manifest.filesystem_layers))
  for index in range(0, 4):
    assert manifest_image_layers[index].blob_digest == str(manifest.filesystem_layers[index].digest)
def test_schema2_builder():
    """Round-trip a parsed schema2 manifest through the builder and compare parts."""
    parsed = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))

    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(parsed.config.digest, parsed.config.size)
    for fs_layer in parsed.filesystem_layers:
        builder.add_layer(fs_layer.digest, fs_layer.compressed_size, urls=fs_layer.urls)

    rebuilt = builder.build()
    assert rebuilt.filesystem_layers == parsed.filesystem_layers
    assert rebuilt.config == parsed.config
def test_get_manifest_labels():
    """The Labels dict in the config blob is surfaced via get_manifest_labels."""
    expected_labels = {'foo': 'bar', 'baz': 'meh'}
    retriever = ContentRetrieverForTesting.for_config(
        {
            "config": {"Labels": expected_labels},
            "rootfs": {"type": "layers", "diff_ids": []},
            "history": [],
        },
        'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7',
        1885)

    manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
    assert manifest.get_manifest_labels(retriever) == expected_labels
def test_build_schema1():
    """A schema2 manifest without remote layers can populate a schema1 builder."""
    manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
    assert not manifest.has_remote_layer

    retriever = ContentRetrieverForTesting()
    retriever.add_digest('sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7',
                         CONFIG_BYTES)

    builder = DockerSchema1ManifestBuilder('somenamespace', 'somename', 'sometag')
    manifest._populate_schema1_builder(builder, retriever)

    schema1 = builder.build(docker_v2_signing_key)
    assert schema1.media_type == DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
def test_get_schema1_manifest():
    # Config blob backing the manifest under test; its four history entries drive
    # the layer metadata written into the generated schema1 manifest.
    retriever = ContentRetrieverForTesting.for_config({
        "config": {
            "Labels": {},
        },
        "rootfs": {"type": "layers", "diff_ids": []},
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "foo"
            },
            {
                "created": "2018-04-12T18:37:09.284840891Z",
                "created_by": "bar"
            },
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "foo"
            },
            {
                "created": "2018-04-12T18:37:09.284840891Z",
                "created_by": "bar"
            },
        ],
    }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

    # Down-convert the schema2 manifest to schema1 for the given repo/tag.
    manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
    schema1 = manifest.get_schema1_manifest('somenamespace', 'somename', 'sometag', retriever)
    assert schema1 is not None
    assert schema1.media_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE

    # convert_manifest restricted to the schema1 media type must produce the same result.
    via_convert = manifest.convert_manifest([schema1.media_type], 'somenamespace', 'somename',
                                            'sometag', retriever)
    assert via_convert.digest == schema1.digest
def test_generate_legacy_layers():
    """
    Legacy layer generation must honor `empty_layer` history entries: the middle
    (empty) entry maps to the shared empty-layer blob while the real blobs map to
    the first and last entries, forming a parent chain of distinct image ids.
    """
    builder = DockerSchema2ManifestBuilder()
    builder.add_layer('sha256:abc123', 123)
    builder.add_layer('sha256:def456', 789)
    builder.set_config_digest('sha256:def456', 2000)
    manifest = builder.build()

    retriever = ContentRetrieverForTesting.for_config({
        "config": {
        },
        "rootfs": {"type": "layers", "diff_ids": []},
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "base"
            },
            {
                "created": "2018-04-06T18:37:09.284840891Z",
                "created_by": "middle",
                "empty_layer": True,
            },
            {
                "created": "2018-04-12T18:37:09.284840891Z",
                "created_by": "leaf"
            },
        ],
    }, 'sha256:def456', 2000)

    legacy_layers = list(manifest.generate_legacy_layers({}, retriever))
    assert len(legacy_layers) == 3
    assert legacy_layers[0].content_checksum == 'sha256:abc123'
    assert legacy_layers[1].content_checksum == EMPTY_LAYER_BLOB_DIGEST
    assert legacy_layers[2].content_checksum == 'sha256:def456'

    assert legacy_layers[0].created == "2018-04-03T18:37:09.284840891Z"
    assert legacy_layers[1].created == "2018-04-06T18:37:09.284840891Z"
    assert legacy_layers[2].created == "2018-04-12T18:37:09.284840891Z"

    assert legacy_layers[0].command == '["base"]'
    assert legacy_layers[1].command == '["middle"]'
    assert legacy_layers[2].command == '["leaf"]'

    assert legacy_layers[2].parent_image_id == legacy_layers[1].image_id
    assert legacy_layers[1].parent_image_id == legacy_layers[0].image_id
    assert legacy_layers[0].parent_image_id is None

    # Bug fix: the original asserts compared an image id (a string) against a whole
    # layer *object* (`legacy_layers[2]`), so they were vacuously true. Compare
    # image ids to image ids so the uniqueness check actually bites.
    assert legacy_layers[1].image_id != legacy_layers[2].image_id
    assert legacy_layers[0].image_id != legacy_layers[1].image_id
def test_remote_layer_manifest():
    """A manifest containing a remote (urls-backed) layer cannot yield legacy images."""
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest('sha256:abcd', 1234)
    builder.add_layer('sha256:adef', 1234, urls=['http://some/url'])
    builder.add_layer('sha256:1352', 4567)
    builder.add_layer('sha256:1353', 4567)
    manifest = builder.build()

    assert manifest.has_remote_layer
    assert manifest.get_leaf_layer_v1_image_id(None) is None
    assert manifest.get_legacy_image_ids(None) is None
    assert not manifest.has_legacy_image

    # Schema1 conversion is impossible when a layer lives outside the registry.
    assert manifest.get_schema1_manifest('somenamespace', 'somename', 'sometag', None) is None

    all_digests = {'sha256:adef', 'sha256:abcd', 'sha256:1352', 'sha256:1353'}
    assert set(manifest.blob_digests) == all_digests
    # The remote layer's digest is excluded from the locally-stored blobs.
    assert set(manifest.local_blob_digests) == all_digests - {'sha256:adef'}
def test_unencoded_unicode_manifest():
    # Minimal schema2 manifest whose config blob carries raw (unescaped) unicode;
    # ensure_ascii=False below keeps the author string unescaped in the stored JSON.
    builder = DockerSchema2ManifestBuilder()
    builder.add_layer('sha256:abc123', 123)
    builder.set_config_digest('sha256:def456', 2000)
    manifest = builder.build()

    retriever = ContentRetrieverForTesting.for_config({
        "config": {
            "author": u"Sômé guy",
        },
        "rootfs": {"type": "layers", "diff_ids": []},
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "base",
                "author": u"Sômé guy",
            },
        ],
    }, 'sha256:def456', 2000, ensure_ascii=False)

    # The unicode author must round-trip through layer parsing unchanged.
    layers = list(manifest.get_layers(retriever))
    assert layers[0].author == u"Sômé guy"
def test_build_unencoded_unicode_manifest():
    # Serialize a config blob with raw (unescaped) unicode in it; ensure_ascii=False
    # leaves the author characters as-is instead of \uXXXX escapes.
    config_json = json.dumps({
        "config": {
            "author": u"Sômé guy",
        },
        "rootfs": {"type": "layers", "diff_ids": []},
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "base",
                "author": u"Sômé guy",
            },
        ],
    }, ensure_ascii=False)

    # Constructing and building over that config must not raise.
    schema2_config = DockerSchema2Config(Bytes.for_string_or_unicode(config_json))

    builder = DockerSchema2ManifestBuilder()
    builder.set_config(schema2_config)
    builder.add_layer('sha256:abc123', 123)
    builder.build()
def test_load_unicode_manifest():
    """Parse an on-disk manifest + config pair whose metadata contains unicode."""
    test_dir = os.path.dirname(os.path.abspath(__file__))

    retriever = ContentRetrieverForTesting()
    with open(os.path.join(test_dir, 'unicode_manifest_config.json'), 'r') as config_file:
        retriever.add_digest('sha256:5bdd65cdd055c7f3bbaecdc9fd6c75f155322520f85953aa0e2724cab006d407',
                             config_file.read())

    with open(os.path.join(test_dir, 'unicode_manifest.json'), 'r') as manifest_file:
        manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(manifest_file.read()))

    assert manifest.digest == 'sha256:97556fa8c553395bd9d8e19a04acef4716ca287ffbf6bde14dd9966053912613'

    # The unicode author survives the round trip through the stored config blob.
    layers = list(manifest.get_layers(retriever))
    assert layers[-1].author == u"Sômé guy"

View file

@ -0,0 +1,16 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 1661,
"digest": "sha256:5bdd65cdd055c7f3bbaecdc9fd6c75f155322520f85953aa0e2724cab006d407"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 727978,
"digest": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
}
]
}

View file

@ -0,0 +1 @@
{"architecture":"amd64","author":"Sômé guy","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"ArgsEscaped":true,"Image":"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":null},"container":"de786c5a14d0622c39dd9639abf60a4ee299ed0ee4ef3848342f46f13a77d2c8","container_config":{"Hostname":"de786c5a14d0","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","MAINTAINER Sômé guy"],"ArgsEscaped":true,"Image":"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2018-12-17T19:02:18.9295865Z","docker_version":"17.09.0-ce","history":[{"created":"2018-10-02T17:19:34.03981888Z","created_by":"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / "},{"created":"2018-10-02T17:19:34.239926273Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true},{"created":"2018-12-17T19:02:18.9295865Z","author":"Sômé guy","created_by":"/bin/sh -c #(nop) MAINTAINER Sômé guy","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:8a788232037eaf17794408ff3df6b922a1aedf9ef8de36afdae3ed0b0381907b"]}}

25
image/docker/schemas.py Normal file
View file

@ -0,0 +1,25 @@
from image.docker import ManifestException
from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES
from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE)
from image.docker.schema2.manifest import DockerSchema2Manifest
from image.docker.schema2.list import DockerSchema2ManifestList
from util.bytes import Bytes
def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True):
    """ Parses and returns a manifest from the given bytes, for the given media type.
      Raises a ManifestException if the parse fails for some reason. `validate` is
      only honored for schema1 manifests (signature validation).
  """
    assert isinstance(manifest_bytes, Bytes)

    # The two schema2 media types are mutually exclusive, so check order is free.
    if media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
        return DockerSchema2ManifestList(manifest_bytes)

    if media_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
        return DockerSchema2Manifest(manifest_bytes)

    if media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
        return DockerSchema1Manifest(manifest_bytes, validate=validate)

    raise ManifestException('Unknown or unsupported manifest media type `%s`' % media_type)

View file

@ -0,0 +1,44 @@
import json
from image.docker.interfaces import ContentRetriever
class ContentRetrieverForTesting(ContentRetriever):
    """In-memory ContentRetriever backed by a simple digest -> content mapping."""

    def __init__(self, digests=None):
        # digests: optional prepopulated mapping of digest string -> raw content.
        self.digests = digests or {}

    def add_digest(self, digest, content):
        # Register (or overwrite) the content served for `digest`.
        self.digests[digest] = content

    def get_manifest_bytes_with_digest(self, digest):
        # Returns None when the digest is unknown, mirroring a registry miss.
        return self.digests.get(digest)

    def get_blob_bytes_with_digest(self, digest):
        return self.digests.get(digest)

    @classmethod
    def for_config(cls, config_obj, digest, size, ensure_ascii=True):
        """Build a retriever serving `config_obj` as JSON, right-padded to `size` bytes."""
        config_str = json.dumps(config_obj, ensure_ascii=ensure_ascii)
        padding = ' ' * (size - len(config_str))
        return cls({digest: config_str + padding})
class _CustomEncoder(json.JSONEncoder):
    """JSON encoder that escapes HTML-sensitive characters in string values,
    matching Docker's canonical JSON output."""

    def encode(self, o):
        result = super(_CustomEncoder, self).encode(o)
        if isinstance(o, basestring):
            # Replace <, > and & with their \uXXXX escapes, as docker/distribution does.
            for plain, escaped in (('<', '\\u003c'), ('>', '\\u003e'), ('&', '\\u0026')):
                result = result.replace(plain, escaped)
        return result
def to_canonical_json(value, ensure_ascii=True, indent=None):
    """ Returns the canonical JSON string form of the given value,
      as per the guidelines in https://github.com/docker/distribution/blob/master/docs/spec/json.md.

      `indent` is allowed only for the purposes of indenting for debugging.
  """
    # Sorted keys + compact separators + HTML escaping yield the canonical form.
    canonical_kwargs = {
        'ensure_ascii': ensure_ascii,
        'sort_keys': True,
        'separators': (',', ':'),
        'cls': _CustomEncoder,
        'indent': indent,
    }
    return json.dumps(value, **canonical_kwargs)

129
image/docker/squashed.py Normal file
View file

@ -0,0 +1,129 @@
import copy
import json
import math
import calendar
from app import app
from image.common import TarImageFormatter
from util.registry.gzipwrap import GZIP_BUFFER_SIZE
from util.registry.streamlayerformat import StreamLayerMerger
class FileEstimationException(Exception):
    """
    Raised when the actual size of the merged layer tar exceeds the estimate that
    was already written into the stream's tar header, which makes the emitted
    header wrong and forces the download to fail.
    """
    pass
class SquashedDockerImageFormatter(TarImageFormatter):
    """
    Image formatter which produces a squashed image compatible with the `docker load` command.
    """

    # Multiplier against the image size reported by Docker to account for the tar metadata.
    # Note: This multiplier was not formally calculated in anyway and should be adjusted overtime
    # if/when we encounter issues with it. Unfortunately, we cannot make it too large or the Docker
    # daemon dies when trying to load the entire tar into memory.
    SIZE_MULTIPLIER = 1.2

    def stream_generator(self, tag, parsed_manifest, synthetic_image_id, layer_iterator,
                         tar_stream_getter_iterator, reporter=None):
        # Generator yielding the byte chunks of a `docker load`-compatible tar holding the
        # tag's layers squashed into one synthetic layer. The layer tar's size must appear
        # in its tar header *before* the merged data is streamed, so the size is estimated
        # from the per-layer estimates below and the stream is zero-padded (or aborted via
        # FileEstimationException) to match the header exactly.
        image_mtime = 0
        created = parsed_manifest.created_datetime
        if created is not None:
            image_mtime = calendar.timegm(created.utctimetuple())

        # Docker import V1 Format (.tar):
        #  repositories - JSON file containing a repo -> tag -> image map
        #  {image ID folder}:
        #    json - The layer JSON
        #    layer.tar - The tarballed contents of the layer
        #    VERSION - The docker import version: '1.0'
        layer_merger = StreamLayerMerger(tar_stream_getter_iterator, reporter=reporter)

        # Yield the repositories file:
        synthetic_layer_info = {}
        synthetic_layer_info[tag.name + '.squash'] = synthetic_image_id

        hostname = app.config['SERVER_HOSTNAME']
        repositories = {}
        namespace = tag.repository.namespace_name
        repository = tag.repository.name
        repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info

        yield self.tar_file('repositories', json.dumps(repositories), mtime=image_mtime)

        # Yield the image ID folder.
        yield self.tar_folder(synthetic_image_id, mtime=image_mtime)

        # Yield the JSON layer data.
        layer_json = SquashedDockerImageFormatter._build_layer_json(parsed_manifest, synthetic_image_id)
        yield self.tar_file(synthetic_image_id + '/json', json.dumps(layer_json), mtime=image_mtime)

        # Yield the VERSION file.
        yield self.tar_file(synthetic_image_id + '/VERSION', '1.0', mtime=image_mtime)

        # Yield the merged layer data's header.
        estimated_file_size = 0
        for layer in layer_iterator:
            estimated_file_size += layer.estimated_size(SquashedDockerImageFormatter.SIZE_MULTIPLIER)

        # Make sure the estimated file size is an integer number of bytes.
        estimated_file_size = int(math.ceil(estimated_file_size))

        yield self.tar_file_header(synthetic_image_id + '/layer.tar', estimated_file_size,
                                   mtime=image_mtime)

        # Yield the contents of the merged layer.
        yielded_size = 0
        for entry in layer_merger.get_generator():
            yield entry
            yielded_size += len(entry)

        # If the yielded size is more than the estimated size (which is unlikely but possible), then
        # raise an exception since the tar header will be wrong.
        if yielded_size > estimated_file_size:
            leaf_image_id = parsed_manifest.leaf_layer_v1_image_id
            message = "For %s/%s:%s (%s:%s): Expected %s bytes, found %s bytes" % (namespace,
                                                                                   repository,
                                                                                   tag,
                                                                                   parsed_manifest.digest,
                                                                                   leaf_image_id,
                                                                                   estimated_file_size,
                                                                                   yielded_size)
            raise FileEstimationException(message)

        # If the yielded size is less than the estimated size (which is likely), fill the rest with
        # zeros.
        if yielded_size < estimated_file_size:
            to_yield = estimated_file_size - yielded_size
            while to_yield > 0:
                yielded = min(to_yield, GZIP_BUFFER_SIZE)
                yield '\0' * yielded
                to_yield -= yielded

        # Yield any file padding to 512 bytes that is necessary.
        yield self.tar_file_padding(estimated_file_size)

        # Last two records are empty in tar spec.
        yield '\0' * 512
        yield '\0' * 512

    @staticmethod
    def _build_layer_json(manifest, synthetic_image_id):
        # Builds the v1 `json` metadata for the synthetic squashed layer from the
        # manifest's leaf layer: the image id becomes the synthetic id, the parent
        # link is dropped, and Image references are rewritten to the synthetic id.
        updated_json = json.loads(manifest.leaf_layer.raw_v1_metadata)
        updated_json['id'] = synthetic_image_id

        if 'parent' in updated_json:
            del updated_json['parent']

        if 'config' in updated_json and 'Image' in updated_json['config']:
            updated_json['config']['Image'] = synthetic_image_id

        if 'container_config' in updated_json and 'Image' in updated_json['container_config']:
            updated_json['container_config']['Image'] = synthetic_image_id

        return updated_json

View file

View file

@ -0,0 +1,44 @@
{
"schemaVersion": 1,
"name": "devtable/testimage",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"author\":\"Sômé guy\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":null},\"container\":\"de786c5a14d0622c39dd9639abf60a4ee299ed0ee4ef3848342f46f13a77d2c8\",\"container_config\":{\"Hostname\":\"de786c5a14d0\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"MAINTAINER Sômé guy\"],\"ArgsEscaped\":true,\"Image\":\"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2018-12-17T19:02:18.9295865Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"b68e6d1f5027887177ddf83c2b9566e1f9eb38454af649b2c0806d13c4c2f01d\",\"os\":\"linux\",\"parent\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"parent\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.239926273Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.03981888Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
"kty": "EC",
"x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
"y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
},
"alg": "ES256"
},
"signature": "eAhgOTAxmWLK25O5lfpJA9ZuTvEdm-E-8qS4pbaYkKwWq9Nc0iLmJ9tKy3QBWP0QtXmK8dz2J0CpCvV0xCheSw",
"protected": "eyJmb3JtYXRMZW5ndGgiOjI2MTQsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMi0xN1QxOToxMDo1M1oifQ"
}
]
}

View file

@ -0,0 +1 @@
[{"id":"13080314","tag_id":"93362429","digest":"sha256:dde3714ce7e23edc6413aa85c0b42792e4f2f79e9ea36afc154d63ff3d04e86c","json_data":"{\n \"schemaVersion\": 1,\n \"name\": \"josephschorr\/buildtest2\",\n \"tag\": \"unicode\",\n \"architecture\": \"amd64\",\n \"fsLayers\": [\n {\n \"blobSum\": \"sha256:9dcda8e13dc6f3aa30ce7867d8a9e3941dc3a54cfefb5e76cbdfa90d2b56ed2f\"\n },\n {\n \"blobSum\": \"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4\"\n },\n {\n \"blobSum\": \"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4\"\n },\n {\n \"blobSum\": \"sha256:8c5a7da1afbc602695fcb2cd6445743cec5ff32053ea589ea9bd8773b7068185\"\n }\n ],\n \"history\": [\n {\n \"v1Compatibility\": \"{\\\"architecture\\\":\\\"amd64\\\",\\\"config\\\":{\\\"Hostname\\\":\\\"\\\",\\\"Domainname\\\":\\\"\\\",\\\"User\\\":\\\"\\\",\\\"AttachStdin\\\":false,\\\"AttachStdout\\\":false,\\\"AttachStderr\\\":false,\\\"Tty\\\":false,\\\"OpenStdin\\\":false,\\\"StdinOnce\\\":false,\\\"Env\\\":[\\\"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\\\"],\\\"Cmd\\\":[\\\"sh\\\"],\\\"ArgsEscaped\\\":true,\\\"Image\\\":\\\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\\\",\\\"Volumes\\\":null,\\\"WorkingDir\\\":\\\"\\\",\\\"Entrypoint\\\":null,\\\"OnBuild\\\":[],\\\"Labels\\\":{\\\"maintainer\\\":\\\"Ge\u00e9 L\u00e9fleur\\\"}},\\\"container\\\":\\\"654ee2461cf64a54484624d8b7efbb76c5e197ba6f3322538b6810dad097c11f\\\",\\\"container_config\\\":{\\\"Hostname\\\":\\\"\\\",\\\"Domainname\\\":\\\"\\\",\\\"User\\\":\\\"\\\",\\\"AttachStdin\\\":false,\\\"AttachStdout\\\":false,\\\"AttachStderr\\\":false,\\\"Tty\\\":false,\\\"OpenStdin\\\":false,\\\"StdinOnce\\\":false,\\\"Env\\\":[\\\"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\\\"],\\\"Cmd\\\":[\\\"\/bin\/sh\\\",\\\"-c\\\",\\\"echo foo \\\\u003e 
bar\\\"],\\\"ArgsEscaped\\\":true,\\\"Image\\\":\\\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\\\",\\\"Volumes\\\":null,\\\"WorkingDir\\\":\\\"\\\",\\\"Entrypoint\\\":null,\\\"OnBuild\\\":[],\\\"Labels\\\":{\\\"maintainer\\\":\\\"Ge\u00e9 L\u00e9fleur\\\"}},\\\"created\\\":\\\"2018-08-14T22:17:55.7294283Z\\\",\\\"docker_version\\\":\\\"17.09.0-ce\\\",\\\"id\\\":\\\"db077d203993a3a1cfeaf4bbaedb34ff1a706452cb598c62d2873ba78dd0d2fe\\\",\\\"os\\\":\\\"linux\\\",\\\"parent\\\":\\\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\\\"}\"\n },\n {\n \"v1Compatibility\": \"{\\\"id\\\":\\\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\\\",\\\"parent\\\":\\\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\\\",\\\"created\\\":\\\"2018-08-14T22:17:54.5902216Z\\\",\\\"container_config\\\":{\\\"Cmd\\\":[\\\"\/bin\/sh -c #(nop) LABEL maintainer=Ge\u00e9 L\u00e9fleur\\\"]},\\\"throwaway\\\":true}\"\n },\n {\n \"v1Compatibility\": \"{\\\"id\\\":\\\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\\\",\\\"parent\\\":\\\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\\\",\\\"created\\\":\\\"2018-07-31T22:20:07.617575594Z\\\",\\\"container_config\\\":{\\\"Cmd\\\":[\\\"\/bin\/sh -c #(nop) CMD [\\\\\\\"sh\\\\\\\"]\\\"]},\\\"throwaway\\\":true}\"\n },\n {\n \"v1Compatibility\": \"{\\\"id\\\":\\\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\\\",\\\"created\\\":\\\"2018-07-31T22:20:07.361628468Z\\\",\\\"container_config\\\":{\\\"Cmd\\\":[\\\"\/bin\/sh -c #(nop) ADD file:96fda64a6b725d4df5249c12e32245e2f02469ff637c38077740f4984cd883dd in \/ \\\"]}}\"\n }\n ],\n \"signatures\": [\n {\n \"header\": {\n \"jwk\": {\n \"crv\": \"P-256\",\n \"kid\": \"AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX\",\n \"kty\": \"EC\",\n \"x\": \"34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY\",\n \"y\": \"LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8\"\n },\n \"alg\": 
\"ES256\"\n },\n \"signature\": \"XvA_yxSPZwnln-pl_VyT5HgfC_NRnVj2IDZjnPy4NRm99Ik82jjliZmoNL4g54AGe3CUD4i6eJiDdCgSCqjxQw\",\n \"protected\": \"eyJmb3JtYXRMZW5ndGgiOjMwODAsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wOC0xNFQyMjoyNTo0M1oifQ\"\n }\n ]\n}"}]

View file

@ -0,0 +1,311 @@
# -*- coding: utf-8 -*-
import os
import json
import pytest
from app import docker_v2_signing_key
from image.docker.schema1 import (MalformedSchema1Manifest, DockerSchema1Manifest,
DockerSchema1ManifestBuilder)
from util.bytes import Bytes
@pytest.mark.parametrize('json_data', [
    '',
    '{}',
    """
    {
      "unknown": "key"
    }
    """,
])
def test_malformed_manifests(json_data):
    """Payloads missing required schema1 fields must raise MalformedSchema1Manifest."""
    with pytest.raises(MalformedSchema1Manifest):
        DockerSchema1Manifest(Bytes.for_string_or_unicode(json_data))
# Canonical schema1 manifest fixture shared by the tests below: two fsLayers whose
# v1Compatibility metadata forms the parent chain someid -> anotherid, plus a canned
# signature block (tests parse this with validate=False, so the signature need not verify).
MANIFEST_BYTES = json.dumps({
    "name": 'hello-world',
    "tag": "latest",
    "architecture": "amd64",
    "fsLayers": [
        {
            "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"
        },
        {
            "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
        }
    ],
    "history": [
        {
            "v1Compatibility": "{\"id\":\"someid\", \"parent\": \"anotherid\"}"
        },
        {
            "v1Compatibility": "{\"id\":\"anotherid\"}"
        },
    ],
    "schemaVersion": 1,
    "signatures": [
        {
            "header": {
                "jwk": {
                    "crv": "P-256",
                    "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4",
                    "kty": "EC",
                    "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A",
                    "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010"
                },
                "alg": "ES256"
            },
            "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg",
            "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ"
        }
    ]
})
def test_valid_manifest():
    # validate=False: the fixture's canned signature is not expected to verify.
    manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES), validate=False)
    assert len(manifest.signatures) == 1
    assert manifest.namespace == ''
    assert manifest.repo_name == 'hello-world'
    assert manifest.tag == 'latest'
    assert manifest.image_ids == {'someid', 'anotherid'}
    assert manifest.parent_image_ids == {'anotherid'}

    # Layers come out base-first: index 0 is the parent image 'anotherid'.
    assert len(manifest.layers) == 2
    assert manifest.layers[0].v1_metadata.image_id == 'anotherid'
    assert manifest.layers[0].v1_metadata.parent_image_id is None
    assert manifest.layers[1].v1_metadata.image_id == 'someid'
    assert manifest.layers[1].v1_metadata.parent_image_id == 'anotherid'
    assert manifest.layers[0].compressed_size is None
    assert manifest.layers[1].compressed_size is None
    assert manifest.leaf_layer == manifest.layers[1]
    assert manifest.created_datetime is None

    # unsigned() strips the signature block: same content facts, different digest.
    unsigned = manifest.unsigned()
    assert unsigned.namespace == manifest.namespace
    assert unsigned.repo_name == manifest.repo_name
    assert unsigned.tag == manifest.tag
    assert unsigned.layers == manifest.layers
    assert unsigned.blob_digests == manifest.blob_digests
    assert unsigned.digest != manifest.digest

    # get_layers must agree with the raw per-layer metadata, in the same order.
    image_layers = list(manifest.get_layers(None))
    assert len(image_layers) == 2
    for index in range(0, 2):
        assert image_layers[index].layer_id == manifest.layers[index].v1_metadata.image_id
        assert image_layers[index].blob_digest == manifest.layers[index].digest
        assert image_layers[index].command == manifest.layers[index].v1_metadata.command
def test_validate_manifest():
    """A known-good signed manifest on disk validates and hashes to a fixed digest."""
    fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'validated_manifest.json')
    with open(fixture, 'r') as f:
        manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(f.read()), validate=True)

    assert manifest.digest == 'sha256:b5dc4f63fdbd64f34f2314c0747ef81008f9fcddce4edfc3fd0e8ec8b358d571'
    assert manifest.created_datetime
def test_validate_manifest_with_unicode():
    """A signed manifest containing escaped unicode validates with a stable digest."""
    fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'validated_manifest_with_unicode.json')
    with open(fixture, 'r') as f:
        manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(f.read()), validate=True)

    assert manifest.digest == 'sha256:815ecf45716a96b19d54d911e6ace91f78bab26ca0dd299645d9995dacd9f1ef'
    assert manifest.created_datetime
def test_validate_manifest_with_unicode_encoded():
    """A manifest stored as an escaped-unicode DB row validates with a stable digest."""
    fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'manifest_unicode_row.json')
    with open(fixture, 'r') as f:
        # The fixture is a JSON array of rows; the manifest lives in 'json_data'.
        manifest_bytes = json.loads(f.read())[0]['json_data']

    manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
    assert manifest.digest == 'sha256:dde3714ce7e23edc6413aa85c0b42792e4f2f79e9ea36afc154d63ff3d04e86c'
    assert manifest.created_datetime
def test_validate_manifest_with_unencoded_unicode():
    """A manifest with raw (unescaped) unicode parses and hashes deterministically."""
    fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'manifest_unencoded_unicode.json')
    with open(fixture, 'r') as f:
        manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(f.read()))

    assert manifest.digest == 'sha256:5d8a0f34744a39bf566ba430251adc0cc86587f86aed3ac2acfb897f349777bc'
    assert manifest.created_datetime

    # The unicode author survives parsing of the leaf layer's metadata.
    assert list(manifest.get_layers(None))[-1].author == u'Sômé guy'
@pytest.mark.parametrize('with_key', [
    None,
    docker_v2_signing_key,
])
def test_build_unencoded_unicode_manifest(with_key):
    """Building a manifest whose v1 metadata holds raw unicode must validate."""
    v1_metadata = json.dumps({
        'id': 'someid',
        'author': u'Sômé guy',
    }, ensure_ascii=False)

    builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
    builder.add_layer('sha256:abcde', v1_metadata)
    builder.build(with_key, ensure_ascii=False)._validate()
def test_validate_manifest_known_issue():
    """Regression fixture for a manifest that previously tripped validation."""
    fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'validate_manifest_known_issue.json')
    with open(fixture, 'r') as f:
        manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(f.read()))

    assert manifest.digest == 'sha256:44518f5a4d1cb5b7a6347763116fb6e10f6a8563b6c40bb389a0a982f0a9f47a'
    assert manifest.created_datetime

    # This fixture has no author field on its leaf layer.
    assert list(manifest.get_layers(None))[-1].author is None
@pytest.mark.parametrize('with_key', [
    None,
    docker_v2_signing_key,
])
def test_validate_manifest_with_emoji(with_key):
    """Astral-plane unicode (emoji) in v1 metadata survives build and reload."""
    builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
    builder.add_layer('sha256:abcde', json.dumps({
        'id': 'someid',
        'author': u'😱',
    }, ensure_ascii=False))

    built = builder.build(with_key, ensure_ascii=False)
    built._validate()

    # Ensure the manifest can be reloaded.
    DockerSchema1Manifest(Bytes.for_string_or_unicode(built.bytes.as_encoded_str()))
@pytest.mark.parametrize('with_key', [
    None,
    docker_v2_signing_key,
])
def test_validate_manifest_with_none_metadata_layer(with_key):
    """A layer registered with no v1 metadata at all can still build and reload."""
    builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
    builder.add_layer('sha256:abcde', None)

    built = builder.build(with_key, ensure_ascii=False)
    built._validate()

    # Ensure the manifest can be reloaded.
    DockerSchema1Manifest(Bytes.for_string_or_unicode(built.bytes.as_encoded_str()))
def test_build_with_metadata_removed():
    # Build a two-layer manifest whose v1 metadata carries extra fields (comment,
    # Size, container_config internals) beyond the structural essentials.
    builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
    builder.add_layer('sha256:abcde', json.dumps({
        'id': 'someid',
        'parent': 'someid',
        'author': u'😱',
        'comment': 'hello world!',
        'created': '1975-01-02 12:34',
        'Size': 5678,
        'container_config': {
            'Cmd': 'foobar',
            'more': 'stuff',
            'goes': 'here',
        },
    }))
    builder.add_layer('sha256:abcde', json.dumps({
        'id': 'anotherid',
        'author': u'😱',
        'created': '1985-02-03 12:34',
        'Size': 1234,
        'container_config': {
            'Cmd': 'barbaz',
            'more': 'stuff',
            'goes': 'here',
        },
    }))
    built = builder.build(None)
    built._validate()
    assert built.leaf_layer_v1_image_id == 'someid'

    with_metadata_removed = builder.with_metadata_removed().build()
    with_metadata_removed._validate()

    # Stripping metadata must not change any of the layer-level facts checked below.
    built_layers = list(built.get_layers(None))
    with_metadata_removed_layers = list(with_metadata_removed.get_layers(None))
    assert len(built_layers) == len(with_metadata_removed_layers)
    for index, built_layer in enumerate(built_layers):
        with_metadata_removed_layer = with_metadata_removed_layers[index]
        assert built_layer.layer_id == with_metadata_removed_layer.layer_id
        assert built_layer.compressed_size == with_metadata_removed_layer.compressed_size
        assert built_layer.command == with_metadata_removed_layer.command
        assert built_layer.comment == with_metadata_removed_layer.comment
        assert built_layer.author == with_metadata_removed_layer.author
        assert built_layer.blob_digest == with_metadata_removed_layer.blob_digest
        assert built_layer.created_datetime == with_metadata_removed_layer.created_datetime

    assert built.leaf_layer_v1_image_id == with_metadata_removed.leaf_layer_v1_image_id
    assert built_layers[-1].layer_id == built.leaf_layer_v1_image_id

    # The leaf layer's raw v1 metadata must be semantically equal after the strip.
    assert (json.loads(built_layers[-1].internal_layer.raw_v1_metadata) ==
            json.loads(with_metadata_removed_layers[-1].internal_layer.raw_v1_metadata))
def test_validate_manifest_without_metadata():
  """
  Load a known-good signed manifest fixture and verify that stripping its
  metadata changes the digest but preserves all layer and manifest identity.
  """
  test_dir = os.path.dirname(os.path.abspath(__file__))
  with open(os.path.join(test_dir, 'validated_manifest.json'), 'r') as f:
    manifest_bytes = f.read()

  manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)

  digest = manifest.digest
  assert digest == 'sha256:b5dc4f63fdbd64f34f2314c0747ef81008f9fcddce4edfc3fd0e8ec8b358d571'
  assert manifest.created_datetime

  stripped = manifest._unsigned_builder().with_metadata_removed().build()
  assert stripped.leaf_layer_v1_image_id == manifest.leaf_layer_v1_image_id

  original_layers = list(manifest.get_layers(None))
  stripped_layers = list(stripped.get_layers(None))
  assert len(original_layers) == len(stripped_layers)

  # Layer-by-layer, every identity-bearing field must be unchanged.
  for original_layer, stripped_layer in zip(original_layers, stripped_layers):
    for field in ('layer_id', 'compressed_size', 'command', 'comment', 'author',
                  'blob_digest', 'created_datetime'):
      assert getattr(original_layer, field) == getattr(stripped_layer, field)

  # Removing the signatures must change the digest but nothing else at the
  # manifest level.
  assert stripped.digest != manifest.digest
  for prop in ('namespace', 'repo_name', 'tag', 'created_datetime', 'checksums',
               'image_ids', 'parent_image_ids'):
    assert getattr(stripped, prop) == getattr(manifest, prop)

View file

@ -0,0 +1,20 @@
import pytest
from image.docker.schemas import parse_manifest_from_bytes
from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
from image.docker.schema2 import DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE
from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
from image.docker.test.test_schema1 import MANIFEST_BYTES as SCHEMA1_BYTES
from image.docker.schema2.test.test_list import MANIFESTLIST_BYTES
from image.docker.schema2.test.test_manifest import MANIFEST_BYTES as SCHEMA2_BYTES
from util.bytes import Bytes
@pytest.mark.parametrize('media_type, manifest_bytes', [
  (DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE, SCHEMA1_BYTES),
  (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, SCHEMA2_BYTES),
  (DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE, MANIFESTLIST_BYTES),
])
def test_parse_manifest_from_bytes(media_type, manifest_bytes):
  """Every supported media type must parse into a truthy manifest object."""
  wrapped = Bytes.for_string_or_unicode(manifest_bytes)
  parsed = parse_manifest_from_bytes(wrapped, media_type, validate=False)
  assert parsed

View file

@ -0,0 +1,23 @@
import pytest
from image.docker.schemautil import to_canonical_json
@pytest.mark.parametrize('input, expected_output', [
  pytest.param({}, '{}', id='empty object'),
  pytest.param({'b': 2, 'a': 1}, '{"a":1,"b":2}', id='object with sorted keys'),
  pytest.param('hello world', '"hello world"', id='basic string'),
  pytest.param('hey & hi', '"hey \\u0026 hi"', id='string with &'),
  pytest.param('<hey>', '"\\u003chey\\u003e"', id='string with brackets'),
  pytest.param({
    "zxcv": [{}, True, 1000000000, 'tyui'],
    "asdf": 1,
    "qwer": [],
  }, '{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}', id='example canonical'),
])
def test_to_canonical_json(input, expected_output):
  """Canonical JSON must sort keys, drop whitespace and escape HTML-unsafe chars."""
  canonicalized = to_canonical_json(input)
  assert canonicalized == expected_output

  # The canonical form is emitted as a (Python 2) byte string; decoding
  # raises if the bytes are not valid utf-8.
  assert isinstance(canonicalized, str)
  canonicalized.decode('utf-8')

View file

@ -0,0 +1,56 @@
{
"schemaVersion": 1,
"name": "quaymonitor/monitortest2",
"tag": "latest",
"architecture": "x86_64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:184dc3db39b5e19dc39547f43db46ea48cd6cc779e806a3c8a5e5396acd20206"
},
{
"blobSum": "sha256:db80bcab0e8b69656505332fcdff3ef2b9f664a2029d1b2f97224cffcf689afc"
},
{
"blobSum": "sha256:184dc3db39b5e19dc39547f43db46ea48cd6cc779e806a3c8a5e5396acd20206"
},
{
"blobSum": "sha256:f0a98344d604e54694fc6118cf7a0cbd10dc7b2e9be8607ba8c5bfd7ba3c1067"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"x86_64\",\"config\":{\"Hostname\":\"4c9181ab6b87\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"HOME=/\",\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\",\"echo\",\"\\\"2019-01-08 19:13:20 +0000\\\" \\u003e foo\"],\"Image\":\"quay.io/quay/busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"container\":\"4c9181ab6b87fe75b5c0955c6c78983dec337914b05e65fb0073cce0ad076106\",\"container_config\":{\"Hostname\":\"4c9181ab6b87\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"HOME=/\",\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\",\"echo\",\"\\\"2019-01-08 19:13:20 +0000\\\" \\u003e foo\"],\"Image\":\"quay.io/quay/busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2019-01-08T19:13:20.674196032Z\",\"docker_version\":\"18.06.1-ce\",\"id\":\"7da7c4e4bcb121915fb33eb5c76ffef194cdcc14608010692cfce5734bd84751\",\"os\":\"linux\",\"parent\":\"ec75e623647b299585bdb0991293bd446e5545e9a4dabf9d37922d5671d9d860\",\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"ec75e623647b299585bdb0991293bd446e5545e9a4dabf9d37922d5671d9d860\",\"parent\":\"f32bc6daa02c76f0b1773688684bf3bee719a69db06192432e6c28a238f4cf4a\",\"created\":\"2014-02-03T15:58:08.872585903Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [/bin/sh -c /bin/sh]\"]},\"author\":\"Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"}"
},
{
"v1Compatibility": "{\"id\":\"f32bc6daa02c76f0b1773688684bf3bee719a69db06192432e6c28a238f4cf4a\",\"parent\":\"02feaf4fdc57dba2b142dae9d8dd0c90e710be710bea25ce63269e65d8f32872\",\"created\":\"2014-02-03T15:58:08.72383042Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD rootfs.tar in /\"]},\"author\":\"Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"}"
},
{
"v1Compatibility": "{\"id\":\"02feaf4fdc57dba2b142dae9d8dd0c90e710be710bea25ce63269e65d8f32872\",\"parent\":\"f9a6e54178f312aa3686d7305b970e7d908d58b32e3f4554731b647e07b48fd2\",\"created\":\"2014-02-03T15:58:08.52236968Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) MAINTAINER Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"]},\"author\":\"Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"}"
},
{
"v1Compatibility": "{\"id\":\"f9a6e54178f312aa3686d7305b970e7d908d58b32e3f4554731b647e07b48fd2\",\"comment\":\"Imported from -\",\"created\":\"2013-06-13T14:03:50.821769-07:00\",\"container_config\":{\"Cmd\":[\"\"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "XPAM:RVQE:4LWW:ABXI:QLLK:O2LK:XJ4V:UAOJ:WM24:ZG6J:UIJ3:JAYM",
"kty": "EC",
"x": "ijnW3d93SINE1y3GjNsCMYghAb7NT21vSiYK8pWdBkM",
"y": "7t-mGjoYOhEIGVaCSEclLLkMgHz2S9WXkReZJEBx-_U"
},
"alg": "ES256"
},
"signature": "N9m-NNL8CdGwxEHHHaJDhbT5_FFKBSdyy-7lP4jnWG3AQmOWbPEXTFANTeH2CNPvAbaM9ZqQm0dQFQVnOe5GNQ",
"protected": "eyJmb3JtYXRMZW5ndGgiOjM1OTgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOS0wMS0wOFQxOToxMzoyM1oifQ"
}
]
}

View file

@ -0,0 +1,62 @@
{
"schemaVersion": 1,
"name": "josephschorr/buildtest2",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:f0880d1639d2e72499fe0cfb218a98ca7aa3bffda6e0b808861505a1536cca10"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:8e3ba11ec2a2b39ab372c60c16b421536e50e5ce64a0bc81765c2e38381bcff6"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:ebd938adb98827e85616f288beb990fd9f07335305c3d77ff783253b97d84b99\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{\"foo\":\"bar\",\"this.is.cool\":\"{\\\"some\\\": \\\"json\\\"}\"}},\"container\":\"a06cd9c29efac778d1e670a2d26971cf21360f9c59eb250e771f5852ff9f49ca\",\"container_config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cat baz\"],\"ArgsEscaped\":true,\"Image\":\"sha256:ebd938adb98827e85616f288beb990fd9f07335305c3d77ff783253b97d84b99\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{\"foo\":\"bar\",\"this.is.cool\":\"{\\\"some\\\": \\\"json\\\"}\"}},\"created\":\"2018-08-13T19:19:53.358734989Z\",\"docker_version\":\"18.02.0-ce\",\"id\":\"08b0a1239a30dc9c12585c415538a3a43fab399a07cb341881b46e2fb69ae8f7\",\"os\":\"linux\",\"parent\":\"bc560233cb7ec4158c1858fd24fb093dc70a6fb7ad80b25f2a6f36a2138dd724\",\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"bc560233cb7ec4158c1858fd24fb093dc70a6fb7ad80b25f2a6f36a2138dd724\",\"parent\":\"cefdff8f1be4330d2e0414f598b0f38def3fb5c6a383d3b709162d51efe859b9\",\"created\":\"2018-08-13T19:19:52.919686159Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) LABEL this.is.cool={\\\"some\\\": \\\"json\\\"}\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"cefdff8f1be4330d2e0414f598b0f38def3fb5c6a383d3b709162d51efe859b9\",\"parent\":\"a86432a6eeb137d2342ee5ddcbc0dd32b5e58dfe3301dd09991147bb458ad6a9\",\"created\":\"2018-08-13T19:19:52.834827335Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) LABEL foo=bar\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"a86432a6eeb137d2342ee5ddcbc0dd32b5e58dfe3301dd09991147bb458ad6a9\",\"parent\":\"8b5fc1032bbcc570c28adc9b13525051c83bbf37ce305735f9c7be6e36ebff7d\",\"created\":\"2018-08-13T19:19:52.766315533Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:9990e969595bc050f081c07b0bdf71524f3c46e6ffe8537c1778516c123f9f55 in baz \"]}}"
},
{
"v1Compatibility": "{\"id\":\"8b5fc1032bbcc570c28adc9b13525051c83bbf37ce305735f9c7be6e36ebff7d\",\"parent\":\"f18ee96f0b1656cab52554b270f19e8df5046d307296d2146539c04565d67747\",\"created\":\"2018-07-06T14:14:06.393355914Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/sh\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"f18ee96f0b1656cab52554b270f19e8df5046d307296d2146539c04565d67747\",\"created\":\"2018-07-06T14:14:06.165546783Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:25f61d70254b9807a40cd3e8d820f6a5ec0e1e596de04e325f6a33810393e95a in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "H4QD:5X6G:2G7T:QXGN:EH3X:3UQU:REXP:7LAH:SGCZ:4FBI:EUSI:3P7Z",
"kty": "EC",
"x": "FowcV0YK1Dsn8FldhFJQJnxE247QUH43EchdZSmWrsQ",
"y": "4uUZBA9U1jC-AxmNzrwb1r9Oh2SXNXE3yqSpz7pwoiI"
},
"alg": "ES256"
},
"signature": "rJNUkqKUZ2_d2JTWTLu4XWFcNpNIMDEH6qoiOie9o_BlD_Ifhrw31OIUT23eKa-HyVm5sYOfx4DY3N5Xy1kr9A",
"protected": "eyJmb3JtYXRMZW5ndGgiOjQxMzksImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wOC0xM1QxOToyMDowMVoifQ"
}
]
}

View file

@ -0,0 +1,50 @@
{
"schemaVersion": 1,
"name": "devtable/simple",
"tag": "unicode",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:9dcda8e13dc6f3aa30ce7867d8a9e3941dc3a54cfefb5e76cbdfa90d2b56ed2f"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:8c5a7da1afbc602695fcb2cd6445743cec5ff32053ea589ea9bd8773b7068185"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"maintainer\":\"Geé Léfleur\"}},\"container\":\"654ee2461cf64a54484624d8b7efbb76c5e197ba6f3322538b6810dad097c11f\",\"container_config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"echo foo \\u003e bar\"],\"ArgsEscaped\":true,\"Image\":\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"maintainer\":\"Geé Léfleur\"}},\"created\":\"2018-08-14T22:17:55.7294283Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"db077d203993a3a1cfeaf4bbaedb34ff1a706452cb598c62d2873ba78dd0d2fe\",\"os\":\"linux\",\"parent\":\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\"}"
},
{
"v1Compatibility": "{\"id\":\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\",\"parent\":\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\",\"created\":\"2018-08-14T22:17:54.5902216Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) LABEL maintainer=Geé Léfleur\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\",\"parent\":\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\",\"created\":\"2018-07-31T22:20:07.617575594Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\",\"created\":\"2018-07-31T22:20:07.361628468Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:96fda64a6b725d4df5249c12e32245e2f02469ff637c38077740f4984cd883dd in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
"kty": "EC",
"x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
"y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
},
"alg": "ES256"
},
"signature": "WCTPkAwHteVVjQCbY4GWRtoFJewKnZ9b0syTm72hi3n3Z_G30Gn5EDTU3adyXQx24aMzTFI_vryexeuypHv2Rw",
"protected": "eyJmb3JtYXRMZW5ndGgiOjMwNzIsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wOC0xNFQyMjoxOTozOFoifQ"
}
]
}

7
image/docker/types.py Normal file
View file

@ -0,0 +1,7 @@
from collections import namedtuple
# Normalized, schema-agnostic view of a single image layer within a manifest.
# `internal_layer` carries the schema-specific layer object it was built from.
ManifestImageLayer = namedtuple('ManifestImageLayer', [
  'layer_id',
  'compressed_size',
  'is_remote',
  'urls',
  'command',
  'blob_digest',
  'created_datetime',
  'author',
  'comment',
  'internal_layer',
])

16
image/docker/v1.py Normal file
View file

@ -0,0 +1,16 @@
"""
v1 implements pure data transformations according to the Docker Image Specification v1.1.
https://github.com/docker/docker/blob/master/image/spec/v1.1.md
"""
from collections import namedtuple
class DockerV1Metadata(namedtuple('DockerV1Metadata',
                                  ['namespace_name', 'repo_name', 'image_id', 'checksum',
                                   'content_checksum', 'created', 'comment', 'command',
                                   'author', 'parent_image_id', 'compat_json'])):
  """
  DockerV1Metadata represents all of the metadata for a given Docker v1 Image.
  The original form of the metadata is stored in the compat_json field.

  Fields:
    namespace_name, repo_name: repository coordinates of the image.
    image_id, parent_image_id: v1 image IDs linking the layer chain.
    checksum, content_checksum: layer checksums — presumably the tarsum-style
      checksum and the content digest respectively; TODO(review) confirm against callers.
    created, comment, command, author: per-layer metadata from the v1 JSON.
    compat_json: the raw v1 compatibility JSON string this record was parsed from.
  """