Merge pull request #3294 from quay/joseph.schorr/v22
V2_2 support in Quay!
commit 50dc57acdf
74 changed files with 4474 additions and 764 deletions
@@ -0,0 +1,31 @@
+from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
+
+"""Add schema2 media types
+
+Revision ID: c00a1f15968b
+Revises: 67f0abd172ae
+Create Date: 2018-11-13 09:20:21.968503
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c00a1f15968b'
+down_revision = '67f0abd172ae'
+
+from alembic import op
+
+
+def upgrade(tables, tester):
+  for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
+    op.bulk_insert(tables.mediatype,
+                   [
+                     {'name': media_type},
+                   ])
+
+
+def downgrade(tables, tester):
+  for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
+    op.execute(tables
+               .mediatype
+               .delete()
+               .where(tables.
+                      mediatype.c.name == op.inline_literal(media_type)))
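For reference, DOCKER_SCHEMA2_CONTENT_TYPES names the standard Docker schema 2 media type strings. A minimal sketch of the constants involved, assuming the usual definitions (the authoritative list lives in image/docker/schema2):

# Sketch only; the canonical definitions live in image/docker/schema2.
DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v2+json'
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.list.v2+json'

DOCKER_SCHEMA2_CONTENT_TYPES = {DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
                                DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE}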
@@ -1,3 +1,5 @@
+import logging
+
 from datetime import datetime
 from uuid import uuid4
 
@@ -7,6 +9,9 @@ from data.database import (Repository, Namespace, ImageStorage, Image, ImageStor
                            BlobUpload, ImageStorageLocation, db_random_func)
 
 
+logger = logging.getLogger(__name__)
+
+
 def get_repository_blob_by_digest(repository, blob_digest):
   """ Find the content-addressable blob linked to the specified repository.
   """
@@ -157,3 +162,31 @@ def initiate_upload(namespace, repo_name, uuid, location_name, storage_metadata)
   location = storage_model.get_image_location_for_name(location_name)
   return BlobUpload.create(repository=repo, location=location.id, uuid=uuid,
                            storage_metadata=storage_metadata)
+
+
+def get_or_create_shared_blob(digest, byte_data, storage):
+  """ Returns the ImageStorage blob with the given digest or, if not present,
+      adds a row and writes the given byte data to the storage engine.
+      This method is *only* to be used for shared blobs that are globally
+      accessible, such as the special empty gzipped tar layer that Docker
+      no longer pushes to us.
+  """
+  try:
+    return ImageStorage.get(content_checksum=digest, uploading=False)
+  except ImageStorage.DoesNotExist:
+    record = ImageStorage.create(image_size=len(byte_data), content_checksum=digest,
+                                 cas_path=True, uploading=True)
+    preferred = storage.preferred_locations[0]
+    location_obj = ImageStorageLocation.get(name=preferred)
+    try:
+      storage.put_content([preferred], storage_model.get_layer_path(record), byte_data)
+      ImageStoragePlacement.create(storage=record, location=location_obj)
+
+      record.uploading = False
+      record.save()
+    except:
+      logger.exception('Exception when trying to write special layer %s', digest)
+      record.delete_instance()
+      raise
+
+    return record
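A usage sketch for the new helper, mirroring the call that manifest creation makes later in this change (the EMPTY_LAYER_* constants come from image.docker.schema2):

from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES

# The first call writes the empty gzipped-tar layer to the preferred storage
# location; repeat calls are idempotent and return the existing row.
shared = get_or_create_shared_blob(EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage)
assert not shared.uploading
assert shared.content_checksum == EMPTY_LAYER_BLOB_DIGEST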
@@ -1,6 +1,7 @@
 # There MUST NOT be any circular dependencies between these subsections. If there are fix it by
 # moving the minimal number of things to shared
 from data.model.oci import (
+  blob,
   label,
   manifest,
   shared,
26  data/model/oci/blob.py  Normal file
@@ -0,0 +1,26 @@
+from data.database import ImageStorage, ManifestBlob
+from data.model import BlobDoesNotExist
+from data.model.storage import get_storage_by_subquery, InvalidImageException
+from data.model.blob import get_repository_blob_by_digest as legacy_get
+
+def get_repository_blob_by_digest(repository, blob_digest):
+  """ Find the content-addressable blob linked to the specified repository and
+      returns it or None if none.
+  """
+  try:
+    storage_id_query = (ImageStorage
+                        .select(ImageStorage.id)
+                        .join(ManifestBlob)
+                        .where(ManifestBlob.repository == repository,
+                               ImageStorage.content_checksum == blob_digest,
+                               ImageStorage.uploading == False)
+                        .limit(1))
+
+    return get_storage_by_subquery(storage_id_query)
+  except InvalidImageException:
+    # TODO(jschorr): Remove once we are no longer using the legacy tables.
+    # Try the legacy call.
+    try:
+      return legacy_get(repository, blob_digest)
+    except BlobDoesNotExist:
+      return None
@@ -1,15 +1,29 @@
 import logging
 
-from peewee import IntegrityError
+from collections import namedtuple
 
-from data.database import Tag, Manifest, ManifestBlob, ManifestLegacyImage, db_transaction
+from peewee import IntegrityError, JOIN
+
+from data.database import (Tag, Manifest, ManifestBlob, ManifestLegacyImage, ManifestChild,
+                           db_transaction)
+from data.model import BlobDoesNotExist
+from data.model.blob import get_or_create_shared_blob
 from data.model.oci.tag import filter_to_alive_tags
+from data.model.oci.label import create_manifest_label
+from data.model.oci.retriever import RepositoryContentRetriever
 from data.model.storage import lookup_repo_storages_by_content_checksum
 from data.model.image import lookup_repository_images, get_image, synthesize_v1_image
-from image.docker.schema1 import DockerSchema1Manifest, ManifestException
+from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES
+from image.docker.schema1 import ManifestException
+from image.docker.schema2.list import MalformedSchema2ManifestList
+from util.validation import is_json
 
 
 logger = logging.getLogger(__name__)
 
+CreatedManifest = namedtuple('CreatedManifest', ['manifest', 'newly_created', 'labels_to_apply'])
+
 
 def lookup_manifest(repository_id, manifest_digest, allow_dead=False):
   """ Returns the manifest with the specified digest under the specified repository
       or None if none. If allow_dead is True, then manifests referenced by only
@@ -20,8 +34,24 @@ def lookup_manifest(repository_id, manifest_digest, allow_dead=False):
            .where(Manifest.repository == repository_id)
            .where(Manifest.digest == manifest_digest))
 
-  if not allow_dead:
-    query = filter_to_alive_tags(query.join(Tag)).group_by(Manifest.id)
+  if allow_dead:
+    try:
+      return query.get()
+    except Manifest.DoesNotExist:
+      return None
+
+  # Try first to filter to those manifests referenced by an alive tag.
+  try:
+    return filter_to_alive_tags(query.join(Tag)).get()
+  except Manifest.DoesNotExist:
+    pass
+
+  # Try referenced as the child of a manifest that has an alive tag.
+  query = (query
+           .join(ManifestChild, on=(ManifestChild.child_manifest == Manifest.id))
+           .join(Tag, on=(Tag.manifest == ManifestChild.manifest)))
+
+  query = filter_to_alive_tags(query)
 
   try:
     return query.get()
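The effect of the new ManifestChild join: a manifest that carries no tag of its own is still alive if a tagged manifest list references it as a child. A hedged illustration with placeholder digests:

# child_digest: referenced only by a tagged manifest list; still resolvable.
# orphan_digest: referenced by nothing alive; only visible with allow_dead.
assert lookup_manifest(repository_id, child_digest) is not None
assert lookup_manifest(repository_id, orphan_digest) is None
assert lookup_manifest(repository_id, orphan_digest, allow_dead=True) is not None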
@@ -29,43 +59,88 @@ def lookup_manifest(repository_id, manifest_digest, allow_dead=False):
     return None
 
 
-def get_or_create_manifest(repository_id, manifest_interface_instance):
-  """ Returns a tuple of the manifest in the specified repository with the matching digest
-      (if it already exists) or, if not yet created, creates and returns the manifest, as well as
-      if the manifest was created. Returns (None, None) if there was an error creating the manifest.
+def get_or_create_manifest(repository_id, manifest_interface_instance, storage):
+  """ Returns a CreatedManifest for the manifest in the specified repository with the matching
+      digest (if it already exists) or, if not yet created, creates and returns the manifest.
+      Returns None if there was an error creating the manifest.
       Note that *all* blobs referenced by the manifest must exist already in the repository or this
-      method will fail with a (None, None).
+      method will fail with a None.
   """
   existing = lookup_manifest(repository_id, manifest_interface_instance.digest, allow_dead=True)
   if existing is not None:
-    return existing, False
+    return CreatedManifest(manifest=existing, newly_created=False, labels_to_apply=None)
 
-  assert len(list(manifest_interface_instance.layers)) > 0
+  return _create_manifest(repository_id, manifest_interface_instance, storage)
 
-  # TODO(jschorr): Switch this to supporting schema2 once we're ready.
-  assert isinstance(manifest_interface_instance, DockerSchema1Manifest)
+
+def _create_manifest(repository_id, manifest_interface_instance, storage):
+  # Load, parse and get/create the child manifests, if any.
+  retriever = RepositoryContentRetriever.for_repository(repository_id, storage)
+  child_manifest_refs = manifest_interface_instance.child_manifests(retriever)
+  child_manifest_rows = {}
+  child_manifest_label_dicts = []
+
+  if child_manifest_refs is not None:
+    for child_manifest_ref in child_manifest_refs:
+      # Load and parse the child manifest.
+      try:
+        child_manifest = child_manifest_ref.manifest_obj
+      except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError):
+        logger.exception('Could not load manifest list for manifest `%s`',
+                         manifest_interface_instance.digest)
+        return None
+
+      # Retrieve its labels.
+      labels = child_manifest.get_manifest_labels(retriever)
+      if labels is None:
+        logger.exception('Could not load manifest labels for child manifest')
+        return None
+
+      # Get/create the child manifest in the database.
+      child_manifest_info = get_or_create_manifest(repository_id, child_manifest, storage)
+      if child_manifest_info is None:
+        logger.error('Could not get/create child manifest')
+        return None
+
+      child_manifest_rows[child_manifest_info.manifest.digest] = child_manifest_info.manifest
+      child_manifest_label_dicts.append(labels)
 
   # Ensure all the blobs in the manifest exist.
-  digests = manifest_interface_instance.checksums
-  query = lookup_repo_storages_by_content_checksum(repository_id, digests)
-  blob_map = {s.content_checksum: s for s in query}
-  for digest_str in manifest_interface_instance.blob_digests:
-    if digest_str not in blob_map:
-      logger.warning('Unknown blob `%s` under manifest `%s` for repository `%s`', digest_str,
-                     manifest_interface_instance.digest, repository_id)
-      return None, None
+  digests = set(manifest_interface_instance.local_blob_digests)
+  blob_map = {}
+  if digests:
+    query = lookup_repo_storages_by_content_checksum(repository_id, digests)
+    blob_map = {s.content_checksum: s for s in query}
+    for digest_str in digests:
+      if digest_str not in blob_map:
+        logger.warning('Unknown blob `%s` under manifest `%s` for repository `%s`', digest_str,
+                       manifest_interface_instance.digest, repository_id)
+        return None
 
-  # Determine and populate the legacy image if necessary.
-  legacy_image_id = _populate_legacy_image(repository_id, manifest_interface_instance, blob_map)
-  if legacy_image_id is None:
-    return None, None
+  # Special check: If the empty layer blob is needed for this manifest, add it to the
+  # blob map. This is necessary because Docker decided to elide sending of this special
+  # empty layer in schema version 2, but we need to have it referenced for GC and schema version 1.
+  if manifest_interface_instance.get_requires_empty_layer_blob(retriever):
+    shared_blob = get_or_create_shared_blob(EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage)
+    assert not shared_blob.uploading
+    assert shared_blob.content_checksum == EMPTY_LAYER_BLOB_DIGEST
+    blob_map[EMPTY_LAYER_BLOB_DIGEST] = shared_blob
 
-  legacy_image = get_image(repository_id, legacy_image_id)
-  if legacy_image is None:
-    return None, None
+  # Determine and populate the legacy image if necessary. Manifest lists will not have a legacy
+  # image.
+  legacy_image = None
+  if manifest_interface_instance.has_legacy_image:
+    legacy_image_id = _populate_legacy_image(repository_id, manifest_interface_instance, blob_map,
+                                             retriever)
+    if legacy_image_id is None:
+      return None
+
+    legacy_image = get_image(repository_id, legacy_image_id)
+    if legacy_image is None:
+      return None
 
   # Create the manifest and its blobs.
-  media_type = Manifest.media_type.get_id(manifest_interface_instance.content_type)
+  media_type = Manifest.media_type.get_id(manifest_interface_instance.media_type)
   storage_ids = {storage.id for storage in blob_map.values()}
 
   with db_transaction():
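Because the return type changed from a (manifest, newly_created) pair to a CreatedManifest namedtuple (or None on failure), call sites unpack by field. A hypothetical call site, with handle_labels standing in for whatever the caller does with action-bearing labels:

created = get_or_create_manifest(repository_id, parsed_manifest, storage)
if created is None:
  raise RuntimeError('manifest could not be created')  # hypothetical error handling

manifest_row = created.manifest
if created.newly_created and created.labels_to_apply:
  # Hand the action-bearing labels (e.g. expiration) to the label handlers.
  handle_labels(created.labels_to_apply)  # hypothetical helper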
@@ -77,7 +152,7 @@ def get_or_create_manifest(repository_id, manifest_interface_instance):
                              manifest_bytes=manifest_interface_instance.bytes)
   except IntegrityError:
     manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
-    return manifest, False
+    return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
 
   # Insert the blobs.
   blobs_to_insert = [dict(manifest=manifest, repository=repository_id,
@@ -86,21 +161,51 @@ def get_or_create_manifest(repository_id, manifest_interface_instance):
     ManifestBlob.insert_many(blobs_to_insert).execute()
 
     # Set the legacy image (if applicable).
-    ManifestLegacyImage.create(repository=repository_id, image=legacy_image, manifest=manifest)
+    if legacy_image is not None:
+      ManifestLegacyImage.create(repository=repository_id, image=legacy_image, manifest=manifest)
 
-  return manifest, True
+    # Insert the manifest child rows (if applicable).
+    if child_manifest_rows:
+      children_to_insert = [dict(manifest=manifest, child_manifest=child_manifest,
+                                 repository=repository_id)
+                            for child_manifest in child_manifest_rows.values()]
+      ManifestChild.insert_many(children_to_insert).execute()
+
+  # Define the labels for the manifest (if any).
+  labels = manifest_interface_instance.get_manifest_labels(retriever)
+  if labels:
+    for key, value in labels.iteritems():
+      media_type = 'application/json' if is_json(value) else 'text/plain'
+      create_manifest_label(manifest, key, value, 'manifest', media_type)
+
+  # Return the dictionary of labels to apply (i.e. those labels that cause an action to be taken
+  # on the manifest or its resulting tags). We only return those labels either defined on
+  # the manifest or shared amongst all the child manifests. We intersect amongst all child manifests
+  # to ensure that any action performed is defined in all manifests.
+  labels_to_apply = labels or {}
+  if child_manifest_label_dicts:
+    labels_to_apply = child_manifest_label_dicts[0].viewitems()
+    for child_manifest_label_dict in child_manifest_label_dicts[1:]:
+      # Intersect the key+values of the labels to ensure we get the exact same result
+      # for all the child manifests.
+      labels_to_apply = labels_to_apply & child_manifest_label_dict.viewitems()
+
+    labels_to_apply = dict(labels_to_apply)
+
+  return CreatedManifest(manifest=manifest, newly_created=True, labels_to_apply=labels_to_apply)
 
 
-def _populate_legacy_image(repository_id, manifest_interface_instance, blob_map):
+def _populate_legacy_image(repository_id, manifest_interface_instance, blob_map, retriever):
   # Lookup all the images and their parent images (if any) inside the manifest.
   # This will let us know which v1 images we need to synthesize and which ones are invalid.
-  docker_image_ids = list(manifest_interface_instance.legacy_image_ids)
+  docker_image_ids = list(manifest_interface_instance.get_legacy_image_ids(retriever))
   images_query = lookup_repository_images(repository_id, docker_image_ids)
   image_storage_map = {i.docker_image_id: i.storage for i in images_query}
 
   # Rewrite any v1 image IDs that do not match the checksum in the database.
   try:
-    rewritten_images = manifest_interface_instance.rewrite_invalid_image_ids(image_storage_map)
+    rewritten_images = manifest_interface_instance.generate_legacy_layers(image_storage_map,
+                                                                          retriever)
+    rewritten_images = list(rewritten_images)
     parent_image_map = {}
 
@@ -114,10 +219,11 @@ def _populate_legacy_image(repository_id, manifest_interface_instance, blob_map)
     if parent_image is None:
       return None
 
+    storage_reference = blob_map[rewritten_image.content_checksum]
     synthesized = synthesize_v1_image(
       repository_id,
-      blob_map[rewritten_image.content_checksum].id,
-      blob_map[rewritten_image.content_checksum].image_size,
+      storage_reference.id,
+      storage_reference.image_size,
       rewritten_image.image_id,
      rewritten_image.created,
       rewritten_image.comment,
37  data/model/oci/retriever.py  Normal file
@@ -0,0 +1,37 @@
+from image.docker.interfaces import ContentRetriever
+from data.database import Manifest
+from data.model.blob import get_repository_blob_by_digest
+from data.model.storage import get_storage_locations, get_layer_path
+
+class RepositoryContentRetriever(ContentRetriever):
+  """ Implementation of the ContentRetriever interface for manifests that retrieves
+      config blobs and child manifests for the specified repository.
+  """
+  def __init__(self, repository_id, storage):
+    self.repository_id = repository_id
+    self.storage = storage
+
+  @classmethod
+  def for_repository(cls, repository_id, storage):
+    return RepositoryContentRetriever(repository_id, storage)
+
+  def get_manifest_bytes_with_digest(self, digest):
+    """ Returns the bytes of the manifest with the given digest or None if none found. """
+    query = (Manifest
+             .select()
+             .where(Manifest.repository == self.repository_id)
+             .where(Manifest.digest == digest))
+
+    try:
+      return query.get().manifest_bytes
+    except Manifest.DoesNotExist:
+      return None
+
+  def get_blob_bytes_with_digest(self, digest):
+    """ Returns the bytes of the blob with the given digest or None if none found. """
+    blob = get_repository_blob_by_digest(self.repository_id, digest)
+    if blob is None:
+      return None
+
+    assert blob.locations is not None
+    return self.storage.get_content(blob.locations, get_layer_path(blob))
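For orientation, ContentRetriever (defined in image/docker/interfaces.py) is essentially a two-method abstraction that keeps manifest parsing independent of the database and storage layers. A rough sketch of it, not the verbatim definition:

from abc import ABCMeta, abstractmethod
from six import add_metaclass

@add_metaclass(ABCMeta)
class ContentRetriever(object):
  """ Fetches raw manifest and config-blob bytes on demand. """
  @abstractmethod
  def get_manifest_bytes_with_digest(self, digest):
    """ Returns the bytes of the manifest with the given digest or None. """

  @abstractmethod
  def get_blob_bytes_with_digest(self, digest):
    """ Returns the bytes of the blob with the given digest or None. """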
@@ -1,3 +1,4 @@
+import uuid
 import logging
 
 from calendar import timegm
@@ -98,7 +99,7 @@ def get_legacy_images_for_tags(tags):
            .where(ManifestLegacyImage.manifest << [tag.manifest_id for tag in tags]))
 
   by_manifest = {mli.manifest_id: mli.image for mli in query}
-  return {tag.id: by_manifest[tag.manifest_id] for tag in tags}
+  return {tag.id: by_manifest[tag.manifest_id] for tag in tags if tag.manifest_id in by_manifest}
 
 
 def find_matching_tag(repository_id, tag_names, tag_kinds=None):
@@ -155,6 +156,47 @@ def get_expired_tag(repository_id, tag_name):
   return None
 
 
+def create_temporary_tag(manifest, expiration_sec):
+  """ Creates a temporary tag pointing to the given manifest, with the given expiration in seconds.
+  """
+  tag_name = '$temp-%s' % str(uuid.uuid4())
+  now_ms = get_epoch_timestamp_ms()
+  end_ms = now_ms + (expiration_sec * 1000)
+
+  legacy_image = get_legacy_image_for_manifest(manifest)
+
+  with db_transaction():
+    created_tag = Tag.create(name=tag_name,
+                             repository=manifest.repository_id,
+                             lifetime_start_ms=now_ms,
+                             lifetime_end_ms=end_ms,
+                             reversion=False,
+                             hidden=True,
+                             manifest=manifest,
+                             tag_kind=Tag.tag_kind.get_id('tag'))
+
+    # TODO(jschorr): Remove the linkage code once RepositoryTag is gone.
+    # If this is a schema 1 manifest, then add a TagManifest linkage to it. Otherwise, it will only
+    # be pullable via the new OCI model.
+    if manifest.media_type.name in DOCKER_SCHEMA1_CONTENT_TYPES and legacy_image is not None:
+      now_ts = int(now_ms / 1000)
+      end_ts = int(end_ms / 1000)
+
+      old_style_tag = RepositoryTag.create(repository=manifest.repository_id, image=legacy_image,
+                                           name=tag_name, lifetime_start_ts=now_ts,
+                                           lifetime_end_ts=end_ts,
+                                           reversion=False, hidden=True)
+      TagToRepositoryTag.create(tag=created_tag, repository_tag=old_style_tag,
+                                repository=manifest.repository_id)
+
+      tag_manifest = TagManifest.create(tag=old_style_tag, digest=manifest.digest,
+                                        json_data=manifest.manifest_bytes)
+      TagManifestToManifest.create(tag_manifest=tag_manifest, manifest=manifest,
+                                   repository=manifest.repository_id)
+
+  return created_tag
+
+
 def retarget_tag(tag_name, manifest_id, is_reversion=False, now_ms=None):
   """ Creates or updates a tag with the specified name to point to the given manifest under
       its repository. If this action is a reversion to a previous manifest, is_reversion
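A quick usage sketch, consistent with the assertions in the tests added later in this change: a temporary tag is hidden, carries a '$temp-' name, and expires exactly expiration_sec * 1000 ms after its start:

temp_tag = create_temporary_tag(manifest, 300)  # hold the manifest alive for 5 minutes
assert temp_tag.hidden
assert temp_tag.name.startswith('$temp-')
assert temp_tag.lifetime_end_ms == temp_tag.lifetime_start_ms + 300 * 1000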
@@ -279,7 +321,7 @@ def filter_to_visible_tags(query):
   return query.where(Tag.hidden == False)
 
 
-def filter_to_alive_tags(query, now_ms=None):
+def filter_to_alive_tags(query, now_ms=None, model=Tag):
   """ Adjusts the specified Tag query to only return those tags alive. If now_ms is specified,
       the given timestamp (in MS) is used in place of the current timestamp for determining whether
       a tag is alive.
@@ -287,7 +329,7 @@ def filter_to_alive_tags(query, now_ms=None):
   if now_ms is None:
     now_ms = get_epoch_timestamp_ms()
 
-  return query.where((Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms > now_ms))
+  return query.where((model.lifetime_end_ms >> None) | (model.lifetime_end_ms > now_ms))
 
 
 def set_tag_expiration_sec_for_manifest(manifest_id, expiration_seconds):
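A note on the peewee idiom above: `field >> None` is peewee's IS NULL comparison, so parameterizing on `model` produces the same predicate for any table with a lifetime_end_ms column, roughly:

# Equivalent SQL, with now_ms bound as a parameter:
#   WHERE lifetime_end_ms IS NULL OR lifetime_end_ms > :now_ms
now_ms = get_epoch_timestamp_ms()
alive_clause = (Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms > now_ms)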
@@ -1,13 +1,23 @@
 import json
 
 from playhouse.test_utils import assert_query_count
 
-from app import docker_v2_signing_key
+from app import docker_v2_signing_key, storage
 
-from data.database import Tag, ManifestBlob, get_epoch_timestamp_ms
+from digest.digest_tools import sha256_digest
+from data.database import (Tag, ManifestBlob, ImageStorageLocation, ManifestChild,
+                           get_epoch_timestamp_ms)
 from data.model.oci.manifest import lookup_manifest, get_or_create_manifest
-from data.model.oci.tag import filter_to_alive_tags, get_tag
+from data.model.oci.tag import filter_to_alive_tags, get_tag, create_temporary_tag
 from data.model.oci.shared import get_legacy_image_for_manifest
-from data.model.repository import get_repository
+from data.model.oci.label import list_manifest_labels
+from data.model.repository import get_repository, create_repository
+from data.model.image import find_create_or_link_image
+from data.model.blob import store_blob_record_and_temp_link
+from data.model.storage import get_layer_path
 from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
+from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
+from image.docker.schema2.list import DockerSchema2ManifestListBuilder
 
 from test.fixtures import *
@@ -38,35 +48,152 @@ def test_lookup_manifest_dead_tag(initialized_db):
                          dead_tag.manifest)
 
 
-def test_get_or_create_manifest(initialized_db):
-  repository = get_repository('devtable', 'simple')
+def test_lookup_manifest_child_tag(initialized_db):
+  repository = create_repository('devtable', 'newrepo', None)
 
-  latest_tag = get_tag(repository, 'latest')
-  legacy_image = get_legacy_image_for_manifest(latest_tag.manifest)
-  parsed = DockerSchema1Manifest(latest_tag.manifest.manifest_bytes, validate=False)
+  # Populate a manifest.
+  layer_json = json.dumps({
+    'config': {},
+    "rootfs": {
+      "type": "layers",
+      "diff_ids": []
+    },
+    "history": [],
+  })
 
-  builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
-  builder.add_layer(parsed.blob_digests[0], '{"id": "%s"}' % legacy_image.docker_image_id)
-  sample_manifest_instance = builder.build(docker_v2_signing_key)
+  # Add a blob containing the config.
+  _, config_digest = _populate_blob(layer_json)
+
+  remote_digest = sha256_digest('something')
+  builder = DockerSchema2ManifestBuilder()
+  builder.set_config_digest(config_digest, len(layer_json))
+  builder.add_layer(remote_digest, 1234, urls=['http://hello/world'])
+  manifest = builder.build()
+
+  assert get_or_create_manifest(repository, manifest, storage)
+
+  # Ensure the manifest cannot currently be looked up, as it is not pointed to by an alive tag.
+  assert lookup_manifest(repository, manifest.digest) is None
+  assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None
+
+  # Populate a manifest list.
+  list_builder = DockerSchema2ManifestListBuilder()
+  list_builder.add_manifest(manifest, 'amd64', 'linux')
+  manifest_list = list_builder.build()
+
+  # Write the manifest list, which should also write the manifests themselves.
+  created_tuple = get_or_create_manifest(repository, manifest_list, storage)
+  assert created_tuple is not None
+
+  assert lookup_manifest(repository, manifest.digest) is None
+  assert lookup_manifest(repository, manifest_list.digest) is None
+
+  # Point a tag at the manifest list. This should make it and its child manifest visible.
+  create_temporary_tag(created_tuple.manifest, 1000)
+
+  assert lookup_manifest(repository, manifest.digest) is not None
+  assert lookup_manifest(repository, manifest_list.digest) is not None
+
+
+def _populate_blob(content):
+  digest = str(sha256_digest(content))
+  location = ImageStorageLocation.get(name='local_us')
+  blob = store_blob_record_and_temp_link('devtable', 'newrepo', digest, location,
+                                         len(content), 120)
+  storage.put_content(['local_us'], get_layer_path(blob), content)
+  return blob, digest
+
+
+@pytest.mark.parametrize('schema_version', [
+  1,
+  2,
+])
+def test_get_or_create_manifest(schema_version, initialized_db):
+  repository = create_repository('devtable', 'newrepo', None)
+
+  expected_labels = {
+    'Foo': 'Bar',
+    'Baz': 'Meh',
+  }
+
+  layer_json = json.dumps({
+    'id': 'somelegacyid',
+    'config': {
+      'Labels': expected_labels,
+    },
+    "rootfs": {
+      "type": "layers",
+      "diff_ids": []
+    },
+    "history": [
+      {
+        "created": "2018-04-03T18:37:09.284840891Z",
+        "created_by": "do something",
+      },
+    ],
+  })
+
+  # Create a legacy image.
+  find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
+
+  # Add a blob containing the config.
+  _, config_digest = _populate_blob(layer_json)
+
+  # Add a blob of random data.
+  random_data = 'hello world'
+  _, random_digest = _populate_blob(random_data)
+
+  # Build the manifest.
+  if schema_version == 1:
+    builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+    builder.add_layer(random_digest, layer_json)
+    sample_manifest_instance = builder.build(docker_v2_signing_key)
+  elif schema_version == 2:
+    builder = DockerSchema2ManifestBuilder()
+    builder.set_config_digest(config_digest, len(layer_json))
+    builder.add_layer(random_digest, len(random_data))
+    sample_manifest_instance = builder.build()
 
   # Create a new manifest.
-  created, newly_created = get_or_create_manifest(repository, sample_manifest_instance)
+  created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
+  created = created_manifest.manifest
+  newly_created = created_manifest.newly_created
 
   assert newly_created
   assert created is not None
   assert created.media_type.name == sample_manifest_instance.media_type
   assert created.digest == sample_manifest_instance.digest
   assert created.manifest_bytes == sample_manifest_instance.bytes
+  assert created_manifest.labels_to_apply == expected_labels
 
-  assert get_legacy_image_for_manifest(created) is not None
+  # Verify the legacy image.
+  legacy_image = get_legacy_image_for_manifest(created)
+  assert legacy_image is not None
+  assert legacy_image.storage.content_checksum == random_digest
 
   # Verify the linked blobs.
   blob_digests = [mb.blob.content_checksum for mb
                   in ManifestBlob.select().where(ManifestBlob.manifest == created)]
-  assert parsed.blob_digests[0] in blob_digests
 
+  assert random_digest in blob_digests
+  if schema_version == 2:
+    assert config_digest in blob_digests
+
   # Retrieve it again and ensure it is the same manifest.
-  created2, newly_created2 = get_or_create_manifest(repository, sample_manifest_instance)
+  created_manifest2 = get_or_create_manifest(repository, sample_manifest_instance, storage)
+  created2 = created_manifest2.manifest
+  newly_created2 = created_manifest2.newly_created
 
   assert not newly_created2
   assert created2 == created
+
+  # Ensure the labels were added.
+  labels = list(list_manifest_labels(created))
+  assert len(labels) == 2
+
+  labels_dict = {label.key: label.value for label in labels}
+  assert labels_dict == expected_labels
 
 
 def test_get_or_create_manifest_invalid_image(initialized_db):
   repository = get_repository('devtable', 'simple')
@@ -78,6 +205,224 @@ def test_get_or_create_manifest_invalid_image(initialized_db):
   builder.add_layer(parsed.blob_digests[0], '{"id": "foo", "parent": "someinvalidimageid"}')
   sample_manifest_instance = builder.build(docker_v2_signing_key)
 
-  created, newly_created = get_or_create_manifest(repository, sample_manifest_instance)
-  assert created is None
-  assert newly_created is None
+  created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
+  assert created_manifest is None
 
 
+def test_get_or_create_manifest_list(initialized_db):
+  repository = create_repository('devtable', 'newrepo', None)
+
+  expected_labels = {
+    'Foo': 'Bar',
+    'Baz': 'Meh',
+  }
+
+  layer_json = json.dumps({
+    'id': 'somelegacyid',
+    'config': {
+      'Labels': expected_labels,
+    },
+    "rootfs": {
+      "type": "layers",
+      "diff_ids": []
+    },
+    "history": [
+      {
+        "created": "2018-04-03T18:37:09.284840891Z",
+        "created_by": "do something",
+      },
+    ],
+  })
+
+  # Create a legacy image.
+  find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
+
+  # Add a blob containing the config.
+  _, config_digest = _populate_blob(layer_json)
+
+  # Add a blob of random data.
+  random_data = 'hello world'
+  _, random_digest = _populate_blob(random_data)
+
+  # Build the manifests.
+  v1_builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+  v1_builder.add_layer(random_digest, layer_json)
+  v1_manifest = v1_builder.build(docker_v2_signing_key).unsigned()
+
+  v2_builder = DockerSchema2ManifestBuilder()
+  v2_builder.set_config_digest(config_digest, len(layer_json))
+  v2_builder.add_layer(random_digest, len(random_data))
+  v2_manifest = v2_builder.build()
+
+  # Write the manifests.
+  v1_created = get_or_create_manifest(repository, v1_manifest, storage)
+  assert v1_created
+  assert v1_created.manifest.digest == v1_manifest.digest
+
+  v2_created = get_or_create_manifest(repository, v2_manifest, storage)
+  assert v2_created
+  assert v2_created.manifest.digest == v2_manifest.digest
+
+  # Build the manifest list.
+  list_builder = DockerSchema2ManifestListBuilder()
+  list_builder.add_manifest(v1_manifest, 'amd64', 'linux')
+  list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
+  manifest_list = list_builder.build()
+
+  # Write the manifest list, which should also write the manifests themselves.
+  created_tuple = get_or_create_manifest(repository, manifest_list, storage)
+  assert created_tuple is not None
+
+  created_list = created_tuple.manifest
+  assert created_list
+  assert created_list.media_type.name == manifest_list.media_type
+  assert created_list.digest == manifest_list.digest
+
+  # Ensure the child manifest links exist.
+  child_manifests = {cm.child_manifest.digest: cm.child_manifest
+                     for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
+  assert len(child_manifests) == 2
+  assert v1_manifest.digest in child_manifests
+  assert v2_manifest.digest in child_manifests
+
+  assert child_manifests[v1_manifest.digest].media_type.name == v1_manifest.media_type
+  assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
+
+
+def test_get_or_create_manifest_list_duplicate_child_manifest(initialized_db):
+  repository = create_repository('devtable', 'newrepo', None)
+
+  expected_labels = {
+    'Foo': 'Bar',
+    'Baz': 'Meh',
+  }
+
+  layer_json = json.dumps({
+    'id': 'somelegacyid',
+    'config': {
+      'Labels': expected_labels,
+    },
+    "rootfs": {
+      "type": "layers",
+      "diff_ids": []
+    },
+    "history": [
+      {
+        "created": "2018-04-03T18:37:09.284840891Z",
+        "created_by": "do something",
+      },
+    ],
+  })
+
+  # Create a legacy image.
+  find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
+
+  # Add a blob containing the config.
+  _, config_digest = _populate_blob(layer_json)
+
+  # Add a blob of random data.
+  random_data = 'hello world'
+  _, random_digest = _populate_blob(random_data)
+
+  # Build the manifest.
+  v2_builder = DockerSchema2ManifestBuilder()
+  v2_builder.set_config_digest(config_digest, len(layer_json))
+  v2_builder.add_layer(random_digest, len(random_data))
+  v2_manifest = v2_builder.build()
+
+  # Write the manifest.
+  v2_created = get_or_create_manifest(repository, v2_manifest, storage)
+  assert v2_created
+  assert v2_created.manifest.digest == v2_manifest.digest
+
+  # Build the manifest list, with the child manifest repeated.
+  list_builder = DockerSchema2ManifestListBuilder()
+  list_builder.add_manifest(v2_manifest, 'amd64', 'linux')
+  list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
+  manifest_list = list_builder.build()
+
+  # Write the manifest list, which should also write the manifests themselves.
+  created_tuple = get_or_create_manifest(repository, manifest_list, storage)
+  assert created_tuple is not None
+
+  created_list = created_tuple.manifest
+  assert created_list
+  assert created_list.media_type.name == manifest_list.media_type
+  assert created_list.digest == manifest_list.digest
+
+  # Ensure the child manifest links exist.
+  child_manifests = {cm.child_manifest.digest: cm.child_manifest
+                     for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
+  assert len(child_manifests) == 1
+  assert v2_manifest.digest in child_manifests
+  assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
+
+  # Try to create again and ensure we get back the same manifest list.
+  created2_tuple = get_or_create_manifest(repository, manifest_list, storage)
+  assert created2_tuple is not None
+  assert created2_tuple.manifest == created_list
+
+
+def test_get_or_create_manifest_with_remote_layers(initialized_db):
+  repository = create_repository('devtable', 'newrepo', None)
+
+  layer_json = json.dumps({
+    'config': {},
+    "rootfs": {
+      "type": "layers",
+      "diff_ids": []
+    },
+    "history": [
+      {
+        "created": "2018-04-03T18:37:09.284840891Z",
+        "created_by": "do something",
+      },
+      {
+        "created": "2018-04-03T18:37:09.284840891Z",
+        "created_by": "do something",
+      },
+    ],
+  })
+
+  # Add a blob containing the config.
+  _, config_digest = _populate_blob(layer_json)
+
+  # Add a blob of random data.
+  random_data = 'hello world'
+  _, random_digest = _populate_blob(random_data)
+
+  remote_digest = sha256_digest('something')
+
+  builder = DockerSchema2ManifestBuilder()
+  builder.set_config_digest(config_digest, len(layer_json))
+  builder.add_layer(remote_digest, 1234, urls=['http://hello/world'])
+  builder.add_layer(random_digest, len(random_data))
+  manifest = builder.build()
+
+  assert remote_digest in manifest.blob_digests
+  assert remote_digest not in manifest.local_blob_digests
+
+  assert manifest.has_remote_layer
+  assert not manifest.has_legacy_image
+  assert manifest.get_schema1_manifest('foo', 'bar', 'baz', None) is None
+
+  # Write the manifest.
+  created_tuple = get_or_create_manifest(repository, manifest, storage)
+  assert created_tuple is not None
+
+  created_manifest = created_tuple.manifest
+  assert created_manifest
+  assert created_manifest.media_type.name == manifest.media_type
+  assert created_manifest.digest == manifest.digest
+
+  # Verify the legacy image.
+  legacy_image = get_legacy_image_for_manifest(created_manifest)
+  assert legacy_image is None
+
+  # Verify the linked blobs.
+  blob_digests = {mb.blob.content_checksum for mb
+                  in ManifestBlob.select().where(ManifestBlob.manifest == created_manifest)}
+
+  assert random_digest in blob_digests
+  assert config_digest in blob_digests
+  assert remote_digest not in blob_digests
@@ -10,7 +10,8 @@ from data.model.oci.tag import (find_matching_tag, get_most_recent_tag, list_ali
                                 filter_to_visible_tags, list_repository_tag_history,
                                 get_expired_tag, get_tag, delete_tag,
                                 delete_tags_for_manifest, change_tag_expiration,
-                                set_tag_expiration_for_manifest, retarget_tag)
+                                set_tag_expiration_for_manifest, retarget_tag,
+                                create_temporary_tag)
 from data.model.repository import get_repository, create_repository
 
 from test.fixtures import *
@@ -207,6 +208,31 @@ def test_set_tag_expiration_for_manifest(initialized_db):
   assert updated_tag.lifetime_end_ms is not None
 
 
+def test_create_temporary_tag(initialized_db):
+  tag = Tag.get()
+  manifest = tag.manifest
+  assert manifest is not None
+
+  created = create_temporary_tag(manifest, 30)
+  assert created is not None
+
+  assert created.hidden
+  assert created.name.startswith('$temp-')
+  assert created.manifest == manifest
+  assert created.lifetime_end_ms is not None
+  assert created.lifetime_end_ms == (created.lifetime_start_ms + 30000)
+
+  # Verify old-style tables.
+  repository_tag = TagToRepositoryTag.get(tag=created).repository_tag
+  assert repository_tag.lifetime_start_ts == int(created.lifetime_start_ms / 1000)
+  assert repository_tag.lifetime_end_ts == int(created.lifetime_end_ms / 1000)
+  assert repository_tag.name == created.name
+  assert repository_tag.hidden
+
+  tag_manifest = TagManifest.get(tag=repository_tag)
+  assert TagManifestToManifest.get(tag_manifest=tag_manifest).manifest == manifest
+
+
 def test_retarget_tag(initialized_db):
   repo = get_repository('devtable', 'history')
   results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
@@ -13,7 +13,7 @@ from data.database import (
     RepositoryPermission, RepositoryActionCount, Role, RepositoryAuthorizedEmail,
     DerivedStorageForImage, Label, db_for_update, get_epoch_timestamp,
     db_random_func, db_concat_func, RepositorySearchScore, RepositoryKind, ApprTag,
-    ManifestLegacyImage, Manifest)
+    ManifestLegacyImage, Manifest, ManifestChild)
 from data.text import prefix_search
 from util.itertoolrecipes import take
@@ -207,6 +207,15 @@ def garbage_collect_repo(repo, is_purge=False):
   """
   logger.debug('Garbage collecting repository %s', repo.id)
 
+  # TODO(jschorr): Update GC for the new data model.
+  if not is_purge:
+    try:
+      # Skip any repos with manifest lists in them, for now.
+      ManifestChild.get(repository=repo)
+      return True
+    except ManifestChild.DoesNotExist:
+      pass
+
   if is_purge:
     tag.purge_all_tags(repo)
     images_for_tags_removed = {i.id for i in Image.select().where(Image.repository == repo)}
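The exception-driven probe above works because ManifestChild.get raises on an empty result; an equivalent formulation without exception control flow would be peewee's exists(), sketched here as a hypothetical alternative:

has_manifest_lists = (ManifestChild
                      .select()
                      .where(ManifestChild.repository == repo)
                      .exists())
if not is_purge and has_manifest_lists:
  # Skip GC until it understands the ManifestChild linkage.
  return True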
@@ -270,7 +270,7 @@ def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum):
   return store.blob_path(content_checksum)
 
 
-def lookup_repo_storages_by_content_checksum(repo, checksums):
+def lookup_repo_storages_by_content_checksum(repo, checksums, by_manifest=False):
   """ Looks up repository storages (without placements) matching the given repository
       and checksum. """
   # There may be many duplicates of the checksums, so for performance reasons we are going
@@ -279,14 +279,29 @@ def lookup_repo_storages_by_content_checksum(repo, checksums):
 
   for counter, checksum in enumerate(set(checksums)):
     query_alias = 'q{0}'.format(counter)
-    candidate_subq = (ImageStorage
-                      .select(ImageStorage.id, ImageStorage.content_checksum,
-                              ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
-                              ImageStorage.uncompressed_size, ImageStorage.uploading)
-                      .join(Image)
-                      .where(Image.repository == repo, ImageStorage.content_checksum == checksum)
-                      .limit(1)
-                      .alias(query_alias))
+
+    # TODO(jschorr): Remove once we have a new-style model for tracking temp uploaded blobs and
+    # all legacy tables have been removed.
+    if by_manifest:
+      candidate_subq = (ImageStorage
+                        .select(ImageStorage.id, ImageStorage.content_checksum,
+                                ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
+                                ImageStorage.uncompressed_size, ImageStorage.uploading)
+                        .join(ManifestBlob)
+                        .where(ManifestBlob.repository == repo,
+                               ImageStorage.content_checksum == checksum)
+                        .limit(1)
+                        .alias(query_alias))
+    else:
+      candidate_subq = (ImageStorage
+                        .select(ImageStorage.id, ImageStorage.content_checksum,
+                                ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
+                                ImageStorage.uncompressed_size, ImageStorage.uploading)
+                        .join(Image)
+                        .where(Image.repository == repo, ImageStorage.content_checksum == checksum)
+                        .limit(1)
+                        .alias(query_alias))
 
     queries.append(ImageStorage
                    .select(SQL('*'))
                    .from_(candidate_subq))
@@ -766,7 +766,7 @@ def _populate_manifest_and_blobs(repository, manifest, storage_id_map, leaf_laye
     raise DataModelException('Invalid image with id: %s' % leaf_layer_id)
 
   storage_ids = set()
-  for blob_digest in manifest.blob_digests:
+  for blob_digest in manifest.local_blob_digests:
     image_storage_id = storage_id_map.get(blob_digest)
     if image_storage_id is None:
       logger.error('Missing blob for manifest `%s` in: %s', blob_digest, storage_id_map)
@@ -6,5 +6,17 @@ from data.registry_model.registry_oci_model import oci_model
 
 logger = logging.getLogger(__name__)
 
-registry_model = oci_model if os.getenv('OCI_DATA_MODEL') == 'true' else pre_oci_model
-logger.debug('Using registry model `%s`', registry_model)
+
+class RegistryModelProxy(object):
+  def __init__(self):
+    self._model = oci_model if os.getenv('OCI_DATA_MODEL') == 'true' else pre_oci_model
+
+  def set_for_testing(self, use_oci_model):
+    self._model = oci_model if use_oci_model else pre_oci_model
+    logger.debug('Changed registry model to `%s` for testing', self._model)
+
+  def __getattr__(self, attr):
+    return getattr(self._model, attr)
+
+registry_model = RegistryModelProxy()
+logger.debug('Using registry model `%s`', registry_model._model)
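The proxy makes the active model swappable at runtime without re-importing modules, since every attribute access delegates through __getattr__. A hypothetical test fixture using it:

from data.registry_model import registry_model

registry_model.set_for_testing(True)   # run against the OCI-backed model
try:
  assert registry_model.supports_schema2('devtable')
finally:
  registry_model.set_for_testing(False)  # restore the pre-OCI model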
@@ -7,7 +7,10 @@ from cachetools import lru_cache
 
 from data import model
 from data.registry_model.datatype import datatype, requiresinput, optionalinput
-from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
+from image.docker import ManifestException
+from image.docker.schemas import parse_manifest_from_bytes
+from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
+from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
 
 
 class RepositoryReference(datatype('Repository', [])):
@@ -137,6 +140,13 @@ class Tag(datatype('Tag', ['name', 'reversion', 'manifest_digest', 'lifetime_sta
     """ Returns the manifest for this tag. Will only apply to new-style OCI tags. """
     return manifest
 
+  @property
+  @optionalinput('manifest')
+  def manifest(self, manifest):
+    """ Returns the manifest for this tag or None if none. Will only apply to new-style OCI tags.
+    """
+    return Manifest.for_manifest(manifest, self.legacy_image_if_present)
+
   @property
   @requiresinput('repository')
   def repository(self, repository):
@@ -152,6 +162,14 @@ class Tag(datatype('Tag', ['name', 'reversion', 'manifest_digest', 'lifetime_sta
     """
     return legacy_image
 
+  @property
+  @optionalinput('legacy_image')
+  def legacy_image_if_present(self, legacy_image):
+    """ Returns the legacy Docker V1-style image for this tag. Note that this
+        will be None for tags whose manifests point to other manifests instead of images.
+    """
+    return legacy_image
+
   @property
   def id(self):
     """ The ID of this tag for pagination purposes only. """
@@ -191,7 +209,22 @@ class Manifest(datatype('Manifest', ['digest', 'media_type', 'manifest_bytes']))
 
   def get_parsed_manifest(self, validate=True):
     """ Returns the parsed manifest for this manifest. """
-    return DockerSchema1Manifest(self.manifest_bytes, validate=validate)
+    return parse_manifest_from_bytes(self.manifest_bytes, self.media_type, validate=validate)
+
+  @property
+  def layers_compressed_size(self):
+    """ Returns the total compressed size of the layers in the manifest or None if this could not
+        be computed.
+    """
+    try:
+      return self.get_parsed_manifest().layers_compressed_size
+    except ManifestException:
+      return None
+
+  @property
+  def is_manifest_list(self):
+    """ Returns True if this manifest points to a list (instead of an image). """
+    return self.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
 
 
 class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'comment', 'command',
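get_parsed_manifest now dispatches on the stored media type rather than assuming schema 1. The real dispatcher lives in image/docker/schemas.py; a rough sketch of its assumed shape, with all names here hypothetical except the imported constants:

# Assumed shape only; see image/docker/schemas.py for the real implementation.
def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True):
  if media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
    return DockerSchema2ManifestList(manifest_bytes)
  if media_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
    return DockerSchema2Manifest(manifest_bytes)
  if media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
    return DockerSchema1Manifest(manifest_bytes, validate=validate)
  raise ManifestException('Unknown or unsupported manifest media type `%s`' % media_type)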
@@ -260,12 +293,14 @@ class SecurityScanStatus(Enum):
   SCANNED = 'scanned'
   FAILED = 'failed'
   QUEUED = 'queued'
+  UNSUPPORTED = 'unsupported'
 
 
 class ManifestLayer(namedtuple('ManifestLayer', ['layer_info', 'blob'])):
   """ Represents a single layer in a manifest. The `layer_info` data will be manifest-type specific,
       but will have a few expected fields (such as `digest`). The `blob` represents the associated
-      blob for this layer, optionally with placements.
+      blob for this layer, optionally with placements. If the layer is a remote layer, the blob will
+      be None.
   """
 
   def estimated_size(self, estimate_multiplier):
@@ -7,6 +7,16 @@ class RegistryDataInterface(object):
   of all tables that store registry-specific information, such as Manifests, Blobs, Images,
   and Labels.
   """
+  @abstractmethod
+  def supports_schema2(self, namespace_name):
+    """ Returns whether the implementation of the data interface supports schema 2 format
+        manifests. """
+
+  @abstractmethod
+  def get_legacy_tags_map(self, repository_ref, storage):
+    """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
+        the repository. Note that this can be a *very* heavy operation.
+    """
+
   @abstractmethod
   def find_matching_tag(self, repository_ref, tag_names):
@@ -36,7 +46,8 @@ class RegistryDataInterface(object):
       or None if none. """
 
   @abstractmethod
-  def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name):
+  def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
+                                       storage):
     """ Creates a manifest in a repository, adding all of the necessary data in the model.
 
        The `manifest_interface_instance` parameter must be an instance of the manifest
@@ -127,7 +138,7 @@ class RegistryDataInterface(object):
 
   @abstractmethod
   def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image,
-                   is_reversion=False):
+                   storage, is_reversion=False):
     """
     Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
     legacy image specified. If is_reversion is set to True, this operation is considered a
@@ -180,10 +191,16 @@ class RegistryDataInterface(object):
     """ Returns whether the given namespace exists and is enabled. """
 
   @abstractmethod
-  def list_manifest_layers(self, manifest, include_placements=False):
-    """ Returns an *ordered list* of the layers found in the manifest, starting at the base and
-        working towards the leaf, including the associated Blob and its placements (if specified).
-        Returns None if the manifest could not be parsed and validated.
+  def get_manifest_local_blobs(self, manifest, include_placements=False):
+    """ Returns the set of local blobs for the given manifest or None if none. """
+
+  @abstractmethod
+  def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
+                                  include_placements=False):
+    """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
+        and working towards the leaf, including the associated Blob and its placements
+        (if specified). The layer information in `layer_info` will be of type
+        `image.docker.types.ManifestImageLayer`.
     """
 
   @abstractmethod
@@ -194,8 +211,8 @@ class RegistryDataInterface(object):
     """
 
   @abstractmethod
-  def lookup_or_create_derived_image(self, manifest, verb, storage_location, varying_metadata=None,
-                                     include_placements=False):
+  def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
+                                     varying_metadata=None, include_placements=False):
     """
     Looks up the derived image for the given manifest, verb and optional varying metadata
     and returns it. If none exists, a new derived image is created.
@@ -277,7 +294,7 @@ class RegistryDataInterface(object):
     """
     Mounts the blob from another repository into the specified target repository, and adds an
     expiration before that blob is automatically GCed. This function is useful during push
-    operations if an existing blob from another repositroy is being pushed. Returns False if
+    operations if an existing blob from another repository is being pushed. Returns False if
     the mounting fails. Note that this function does *not* check security for mounting the blob
     and the caller is responsible for doing this check (an example can be found in
     endpoints/v2/blob.py).
@@ -288,3 +305,14 @@ class RegistryDataInterface(object):
     """
     Sets the expiration on all tags that point to the given manifest to that specified.
     """
+
+  @abstractmethod
+  def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
+    """ Returns the schema 1 version of this manifest, or None if none. """
+
+  @abstractmethod
+  def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
+                                    expiration_sec, storage):
+    """ Creates a manifest under the repository and sets a temporary tag to point to it.
+        Returns the manifest object created or None on error.
+    """
@@ -18,17 +18,17 @@ _BuilderState = namedtuple('_BuilderState', ['builder_id', 'images', 'tags', 'ch
 _SESSION_KEY = '__manifestbuilder'
 
 
-def create_manifest_builder(repository_ref):
+def create_manifest_builder(repository_ref, storage):
   """ Creates a new manifest builder for populating manifests under the specified repository
       and returns it. Returns None if the builder could not be constructed.
   """
   builder_id = str(uuid.uuid4())
-  builder = _ManifestBuilder(repository_ref, _BuilderState(builder_id, {}, {}, {}))
+  builder = _ManifestBuilder(repository_ref, _BuilderState(builder_id, {}, {}, {}), storage)
   builder._save_to_session()
   return builder
 
 
-def lookup_manifest_builder(repository_ref, builder_id):
+def lookup_manifest_builder(repository_ref, builder_id, storage):
   """ Looks up the manifest builder with the given ID under the specified repository and returns
       it or None if none.
   """
@@ -40,16 +40,17 @@ def lookup_manifest_builder(repository_ref, builder_id):
   if builder_state.builder_id != builder_id:
     return None
 
-  return _ManifestBuilder(repository_ref, builder_state)
+  return _ManifestBuilder(repository_ref, builder_state, storage)
 
 
 class _ManifestBuilder(object):
   """ Helper class which provides an interface for bookkeeping the layers and configuration of
       manifests being constructed.
   """
-  def __init__(self, repository_ref, builder_state):
+  def __init__(self, repository_ref, builder_state, storage):
     self._repository_ref = repository_ref
     self._builder_state = builder_state
+    self._storage = storage
 
   @property
   def builder_id(self):
@@ -121,7 +122,7 @@ class _ManifestBuilder(object):
       model.image.set_image_metadata(layer_id, namespace_name, repo_name,
                                      v1_metadata.get('created'),
                                      v1_metadata.get('comment'),
-                                     command, json.dumps(v1_metadata),
+                                     command, v1_metadata_string,
                                      parent=parent_image)
 
     # Save the changes to the builder.
@ -183,7 +184,7 @@ class _ManifestBuilder(object):
|
|||
if legacy_image is None:
|
||||
return None
|
||||
|
||||
tag = registry_model.retarget_tag(self._repository_ref, tag_name, legacy_image)
|
||||
tag = registry_model.retarget_tag(self._repository_ref, tag_name, legacy_image, self._storage)
|
||||
if tag is None:
|
||||
return None
|
||||
|
||||
|
|
|
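A hedged sketch of the builder round-trip these signature changes imply: the storage engine is now supplied both when the builder is created and when it is re-loaded from the session (here `session` stands in for Flask's request session, as used by the v1 endpoints later in this change):

# Sketch: create a builder during the initial push call, then re-load it in a
# later request. `repository_ref`, `storage` and `session` are assumed in scope.
builder = create_manifest_builder(repository_ref, storage)
session['manifest_builder'] = builder.builder_id

# ... in a subsequent request for the same push ...
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage)
if builder is None:
  abort(400)  # unknown or expired builder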
@@ -6,12 +6,15 @@ from contextlib import contextmanager
 from data import database
 from data import model
 from data.model import oci, DataModelException
+from data.model.oci.retriever import RepositoryContentRetriever
 from data.database import db_transaction, Image
 from data.registry_model.interface import RegistryDataInterface
-from data.registry_model.datatypes import Tag, Manifest, LegacyImage, Label, SecurityScanStatus
+from data.registry_model.datatypes import (Tag, Manifest, LegacyImage, Label, SecurityScanStatus,
+                                           Blob)
 from data.registry_model.shared import SharedModel
 from data.registry_model.label_handlers import apply_label_to_manifest
 from util.validation import is_json
 from image.docker import ManifestException
+from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE


 logger = logging.getLogger(__name__)
@@ -22,6 +25,59 @@ class OCIModel(SharedModel, RegistryDataInterface):
   OCIModel implements the data model for the registry API using a database schema
   after it was changed to support the OCI specification.
   """
+  def supports_schema2(self, namespace_name):
+    """ Returns whether the implementation of the data interface supports schema 2 format
+        manifests. """
+    return True
+
+  def get_legacy_tags_map(self, repository_ref, storage):
+    """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
+        the repository. Note that this can be a *very* heavy operation.
+    """
+    tags = oci.tag.list_alive_tags(repository_ref._db_id)
+    legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
+
+    tags_map = {}
+    for tag in tags:
+      legacy_image = legacy_images_map.get(tag.id)
+      if legacy_image is not None:
+        tags_map[tag.name] = legacy_image.docker_image_id
+      else:
+        manifest = Manifest.for_manifest(tag.manifest, None)
+        if legacy_image is None and manifest.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
+          # See if we can lookup a schema1 legacy image.
+          v1_compatible = self.get_schema1_parsed_manifest(manifest, '', '', '', storage)
+          if v1_compatible is not None:
+            v1_id = v1_compatible.leaf_layer_v1_image_id
+            if v1_id is not None:
+              tags_map[tag.name] = v1_id
+
+    return tags_map
+
+  def _get_legacy_compatible_image_for_manifest(self, manifest, storage):
+    # Check for a legacy image directly on the manifest.
+    if manifest.media_type != DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
+      return oci.shared.get_legacy_image_for_manifest(manifest._db_id)
+
+    # Otherwise, lookup a legacy image associated with the v1-compatible manifest
+    # in the list.
+    try:
+      manifest_obj = database.Manifest.get(id=manifest._db_id)
+    except database.Manifest.DoesNotExist:
+      logger.exception('Could not find manifest for manifest `%s`', manifest._db_id)
+      return None
+
+    # See if we can lookup a schema1 legacy image.
+    v1_compatible = self.get_schema1_parsed_manifest(manifest, '', '', '', storage)
+    if v1_compatible is None:
+      return None
+
+    v1_id = v1_compatible.leaf_layer_v1_image_id
+    if v1_id is None:
+      return None
+
+    return model.image.get_image(manifest_obj.repository_id, v1_id)
+
   def find_matching_tag(self, repository_ref, tag_names):
     """ Finds an alive tag in the repository matching one of the given tag names and returns it
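A hedged example of what `get_legacy_tags_map` produces for a repository mixing v1-backed tags and manifest lists (tag names and image IDs below are hypothetical):

# Sketch: consuming the tags map. Plain schema 1/2 tags resolve directly to
# their legacy image ID; manifest-list tags resolve through the schema 1
# rewrite of the amd64+linux entry, when one exists.
tags_map = oci_model.get_legacy_tags_map(repository_ref, storage)
# e.g. {'latest': 'deadbeef1234', 'multiarch': 'cafebabe5678'}  (IDs hypothetical)
for tag_name, v1_image_id in tags_map.items():
  print('%s -> %s' % (tag_name, v1_image_id))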
@@ -176,7 +232,8 @@ class OCIModel(SharedModel, RegistryDataInterface):

     return Tag.for_tag(tag, legacy_image=LegacyImage.for_image(legacy_image))

-  def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name):
+  def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
+                                       storage):
     """ Creates a manifest in a repository, adding all of the necessary data in the model.

        The `manifest_interface_instance` parameter must be an instance of the manifest
@@ -188,40 +245,32 @@ class OCIModel(SharedModel, RegistryDataInterface):
        Returns a reference to the (created manifest, tag) or (None, None) on error.
     """
     # Get or create the manifest itself.
-    manifest, newly_created = oci.manifest.get_or_create_manifest(repository_ref._db_id,
-                                                                  manifest_interface_instance)
-    if manifest is None:
+    created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id,
+                                                           manifest_interface_instance,
+                                                           storage)
+    if created_manifest is None:
       return (None, None)

     # Re-target the tag to it.
-    tag = oci.tag.retarget_tag(tag_name, manifest)
+    tag = oci.tag.retarget_tag(tag_name, created_manifest.manifest)
     if tag is None:
       return (None, None)

-    legacy_image = oci.shared.get_legacy_image_for_manifest(manifest)
-    if legacy_image is None:
-      return (None, None)
+    legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
+    li = LegacyImage.for_image(legacy_image)
+    wrapped_manifest = Manifest.for_manifest(created_manifest.manifest, li)

-    # Save the labels on the manifest. Note that order is important here: This must come after the
-    # tag has been changed.
-    # TODO(jschorr): Support schema2 here when we're ready.
-    if newly_created:
-      has_labels = False
-
-      with self.batch_create_manifest_labels(Manifest.for_manifest(manifest, None)) as add_label:
-        for key, value in manifest_interface_instance.layers[-1].v1_metadata.labels.iteritems():
-          media_type = 'application/json' if is_json(value) else 'text/plain'
-          add_label(key, value, 'manifest', media_type)
-          has_labels = True
+    # Apply any labels that should modify the created tag.
+    if created_manifest.labels_to_apply:
+      for key, value in created_manifest.labels_to_apply.iteritems():
+        apply_label_to_manifest(dict(key=key, value=value), wrapped_manifest, self)

       # Reload the tag in case any updates were applied.
-      if has_labels:
-        tag = database.Tag.get(id=tag.id)
+      tag = database.Tag.get(id=tag.id)

-    li = LegacyImage.for_image(legacy_image)
-    return (Manifest.for_manifest(manifest, li), Tag.for_tag(tag, li))
+    return (wrapped_manifest, Tag.for_tag(tag, li))

-  def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image,
+  def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage,
                    is_reversion=False):
     """
     Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
@@ -240,11 +289,12 @@ class OCIModel(SharedModel, RegistryDataInterface):
       if manifest_instance is None:
         return None

-      manifest, _ = oci.manifest.get_or_create_manifest(repository_ref._db_id, manifest_instance)
-      if manifest is None:
+      created = oci.manifest.get_or_create_manifest(repository_ref._db_id, manifest_instance,
+                                                    storage)
+      if created is None:
         return None

-      manifest_id = manifest.id
+      manifest_id = created.manifest.id

     tag = oci.tag.retarget_tag(tag_name, manifest_id, is_reversion=is_reversion)
     legacy_image = LegacyImage.for_image(oci.shared.get_legacy_image_for_manifest(manifest_id))
@@ -337,7 +387,7 @@ class OCIModel(SharedModel, RegistryDataInterface):
     if isinstance(manifest_or_legacy_image, Manifest):
       image = oci.shared.get_legacy_image_for_manifest(manifest_or_legacy_image._db_id)
       if image is None:
-        return None
+        return SecurityScanStatus.UNSUPPORTED
     else:
       try:
         image = database.Image.get(id=manifest_or_legacy_image._db_id)
@@ -375,7 +425,14 @@ class OCIModel(SharedModel, RegistryDataInterface):
       logger.exception('Could not find manifest for manifest `%s`', manifest._db_id)
       return None

-    return self._list_manifest_layers(manifest, manifest_obj.repository_id, include_placements)
+    try:
+      parsed = manifest.get_parsed_manifest()
+    except ManifestException:
+      logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
+      return None
+
+    return self._list_manifest_layers(manifest_obj.repository_id, parsed, include_placements,
+                                      by_manifest=True)

   def lookup_derived_image(self, manifest, verb, varying_metadata=None, include_placements=False):
     """
@@ -389,13 +446,14 @@ class OCIModel(SharedModel, RegistryDataInterface):
     derived = model.image.find_derived_storage_for_image(legacy_image, verb, varying_metadata)
     return self._build_derived(derived, verb, varying_metadata, include_placements)

-  def lookup_or_create_derived_image(self, manifest, verb, storage_location, varying_metadata=None,
+  def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
+                                     varying_metadata=None,
                                      include_placements=False):
     """
     Looks up the derived image for the given manifest, verb and optional varying metadata
     and returns it. If none exists, a new derived image is created.
     """
-    legacy_image = oci.shared.get_legacy_image_for_manifest(manifest._db_id)
+    legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage)
     if legacy_image is None:
       return None
@@ -409,5 +467,81 @@ class OCIModel(SharedModel, RegistryDataInterface):
     """
     oci.tag.set_tag_expiration_sec_for_manifest(manifest._db_id, expiration_sec)

+  def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
+    """ Returns the schema 1 manifest for this manifest, or None if none. """
+    try:
+      parsed = manifest.get_parsed_manifest()
+    except ManifestException:
+      return None
+
+    try:
+      manifest_row = database.Manifest.get(id=manifest._db_id)
+    except database.Manifest.DoesNotExist:
+      return None
+
+    retriever = RepositoryContentRetriever(manifest_row.repository_id, storage)
+    return parsed.get_schema1_manifest(namespace_name, repo_name, tag_name, retriever)
+
+  def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
+                                    expiration_sec, storage):
+    """ Creates a manifest under the repository and sets a temporary tag to point to it.
+        Returns the manifest object created or None on error.
+    """
+    # Get or create the manifest itself.
+    created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id,
+                                                           manifest_interface_instance,
+                                                           storage)
+    if created_manifest is None:
+      return None
+
+    # Point a temporary tag to the manifest.
+    tag = oci.tag.create_temporary_tag(created_manifest.manifest, expiration_sec)
+    if tag is None:
+      return None
+
+    legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
+    li = LegacyImage.for_image(legacy_image)
+    return Manifest.for_manifest(created_manifest.manifest, li)
+
+  def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
+    """
+    Returns the blob in the repository with the given digest, if any or None if none. Note that
+    there may be multiple records in the same repository for the same blob digest, so the return
+    value of this function may change.
+    """
+    image_storage = oci.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
+    if image_storage is None:
+      return None
+
+    assert image_storage.cas_path is not None
+
+    placements = None
+    if include_placements:
+      placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+    return Blob.for_image_storage(image_storage,
+                                  storage_path=model.storage.get_layer_path(image_storage),
+                                  placements=placements)
+
+  def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
+                                  include_placements=False):
+    """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
+        and working towards the leaf, including the associated Blob and its placements
+        (if specified).
+    """
+    return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage,
+                                      include_placements=include_placements,
+                                      by_manifest=True)
+
+  def get_manifest_local_blobs(self, manifest, include_placements=False):
+    """ Returns the set of local blobs for the given manifest or None if none. """
+    try:
+      manifest_row = database.Manifest.get(id=manifest._db_id)
+    except database.Manifest.DoesNotExist:
+      return None
+
+    return self._get_manifest_local_blobs(manifest, manifest_row.repository_id, include_placements,
+                                          by_manifest=True)
+

 oci_model = OCIModel()

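A hedged sketch of the temporary-tag flow that `create_manifest_with_temp_tag` enables during a manifest-list push (the `PUSH_TEMP_TAG_EXPIRATION_SEC` config key appears later in this change; the surrounding names are illustrative):

# Sketch: each per-architecture manifest pushed by digest is parked behind a
# temporary tag so GC cannot collect it before the manifest list arrives.
expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
child = registry_model.create_manifest_with_temp_tag(repository_ref, parsed_child_manifest,
                                                     expiration_sec, storage)
if child is None:
  raise ManifestInvalid()

# Once the manifest list itself is pushed by tag, the list's tag keeps the
# children alive and the temporary tags are simply allowed to expire.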
@@ -11,7 +11,8 @@ from data import model
 from data.database import db_transaction
 from data.registry_model.interface import RegistryDataInterface
 from data.registry_model.datatypes import (Tag, Manifest, LegacyImage, Label,
-                                           SecurityScanStatus, ManifestLayer, Blob, DerivedImage)
+                                           SecurityScanStatus, ManifestLayer, Blob, DerivedImage,
+                                           RepositoryReference)
 from data.registry_model.shared import SharedModel
 from data.registry_model.label_handlers import apply_label_to_manifest
 from image.docker.schema1 import (DockerSchema1ManifestBuilder, ManifestException,
@@ -27,6 +28,17 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
   PreOCIModel implements the data model for the registry API using a database schema
   before it was changed to support the OCI specification.
   """
+  def supports_schema2(self, namespace_name):
+    """ Returns whether the implementation of the data interface supports schema 2 format
+        manifests. """
+    return False
+
+  def get_legacy_tags_map(self, repository_ref, storage):
+    """ Returns a map from tag name to its legacy image, for all tags with legacy images in
+        the repository.
+    """
+    tags = self.list_repository_tags(repository_ref, include_legacy_images=True)
+    return {tag.name: tag.legacy_image.docker_image_id for tag in tags}
+
   def find_matching_tag(self, repository_ref, tag_names):
     """ Finds an alive tag in the repository matching one of the given tag names and returns it
@@ -79,7 +91,8 @@ class PreOCIModel(SharedModel, RegistryDataInterface):

     return Manifest.for_tag_manifest(tag_manifest, legacy_image)

-  def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name):
+  def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
+                                       storage):
     """ Creates a manifest in a repository, adding all of the necessary data in the model.

        The `manifest_interface_instance` parameter must be an instance of the manifest
@@ -298,7 +311,7 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
     manifest_digest = tag_manifest.digest if tag_manifest else None
     return Tag.for_repository_tag(tag, legacy_image=legacy_image, manifest_digest=manifest_digest)

-  def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image,
+  def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage,
                    is_reversion=False):
     """
     Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
@@ -477,7 +490,14 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
       logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
       return None

-    return self._list_manifest_layers(manifest, tag_manifest.tag.repository_id, include_placements)
+    try:
+      parsed = manifest.get_parsed_manifest()
+    except ManifestException:
+      logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
+      return None
+
+    repo_ref = RepositoryReference.for_id(tag_manifest.tag.repository_id)
+    return self.list_parsed_manifest_layers(repo_ref, parsed, include_placements)

   def lookup_derived_image(self, manifest, verb, varying_metadata=None, include_placements=False):
     """
@@ -494,8 +514,8 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
     derived = model.image.find_derived_storage_for_image(repo_image, verb, varying_metadata)
     return self._build_derived(derived, verb, varying_metadata, include_placements)

-  def lookup_or_create_derived_image(self, manifest, verb, storage_location, varying_metadata=None,
-                                     include_placements=False):
+  def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
+                                     varying_metadata=None, include_placements=False):
     """
     Looks up the derived image for the given manifest, verb and optional varying metadata
     and returns it. If none exists, a new derived image is created.
@@ -522,4 +542,59 @@ class PreOCIModel(SharedModel, RegistryDataInterface):

     model.tag.set_tag_expiration_for_manifest(tag_manifest, expiration_sec)

+  def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
+    """ Returns the schema 1 version of this manifest, or None if none. """
+    try:
+      return manifest.get_parsed_manifest()
+    except ManifestException:
+      return None
+
+  def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
+                                    expiration_sec, storage):
+    """ Creates a manifest under the repository and sets a temporary tag to point to it.
+        Returns the manifest object created or None on error.
+    """
+    raise NotImplementedError('Unsupported in pre OCI model')
+
+  def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
+    """
+    Returns the blob in the repository with the given digest, if any or None if none. Note that
+    there may be multiple records in the same repository for the same blob digest, so the return
+    value of this function may change.
+    """
+    try:
+      image_storage = model.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
+    except model.BlobDoesNotExist:
+      return None
+
+    assert image_storage.cas_path is not None
+
+    placements = None
+    if include_placements:
+      placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+    return Blob.for_image_storage(image_storage,
+                                  storage_path=model.storage.get_layer_path(image_storage),
+                                  placements=placements)
+
+  def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
+                                  include_placements=False):
+    """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
+        and working towards the leaf, including the associated Blob and its placements
+        (if specified).
+    """
+    return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage,
+                                      include_placements=include_placements)
+
+  def get_manifest_local_blobs(self, manifest, include_placements=False):
+    """ Returns the set of local blobs for the given manifest or None if none. """
+    try:
+      tag_manifest = database.TagManifest.get(id=manifest._db_id)
+    except database.TagManifest.DoesNotExist:
+      return None
+
+    return self._get_manifest_local_blobs(manifest, tag_manifest.tag.repository_id,
+                                          include_placements)
+

 pre_oci_model = PreOCIModel()

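Because the pre-OCI model raises `NotImplementedError` for the temporary-tag path, callers are expected to gate on `supports_schema2` first. A hedged sketch:

# Sketch: guard before using schema 2-only operations.
if registry_model.supports_schema2(namespace_name):
  manifest = registry_model.create_manifest_with_temp_tag(repository_ref, parsed,
                                                          expiration_sec, storage)
else:
  # Pre-OCI namespaces stay on the schema 1 push path.
  pass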
@@ -1,11 +1,13 @@
 # pylint: disable=protected-access
+import logging

 from abc import abstractmethod
 from collections import defaultdict

 from data import database
 from data import model
 from data.cache import cache_key
+from data.model.oci.retriever import RepositoryContentRetriever
 from data.registry_model.datatype import FromDictionaryException
 from data.registry_model.datatypes import (RepositoryReference, Blob, TorrentInfo, BlobUpload,
                                            LegacyImage, ManifestLayer, DerivedImage)
@@ -149,26 +151,9 @@ class SharedModel:

     return self.get_repo_blob_by_digest(repository_ref, blob_digest, include_placements=True)

+  @abstractmethod
   def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
     """
     Returns the blob in the repository with the given digest, if any or None if none. Note that
     there may be multiple records in the same repository for the same blob digest, so the return
     value of this function may change.
     """
-    try:
-      image_storage = model.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
-    except model.BlobDoesNotExist:
-      return None
-
-    assert image_storage.cas_path is not None
-
-    placements = None
-    if include_placements:
-      placements = list(model.storage.get_storage_locations(image_storage.uuid))
-
-    return Blob.for_image_storage(image_storage,
-                                  storage_path=model.storage.get_layer_path(image_storage),
-                                  placements=placements)
+    pass

   def create_blob_upload(self, repository_ref, new_upload_id, location_name, storage_metadata):
     """ Creates a new blob upload and returns a reference. If the blob upload could not be
@@ -306,29 +291,65 @@ class SharedModel:

     return LegacyImage.for_image(image, images_map=parent_images_map, blob=blob)

-  def _list_manifest_layers(self, manifest, repo_id, include_placements=False):
+  def _get_manifest_local_blobs(self, manifest, repo_id, include_placements=False,
+                                by_manifest=False):
+    parsed = manifest.get_parsed_manifest()
+    if parsed is None:
+      return None
+
+    local_blob_digests = list(set(parsed.local_blob_digests))
+    if not len(local_blob_digests):
+      return []
+
+    blob_query = model.storage.lookup_repo_storages_by_content_checksum(repo_id,
+                                                                        local_blob_digests,
+                                                                        by_manifest=by_manifest)
+    blobs = []
+    for image_storage in blob_query:
+      placements = None
+      if include_placements:
+        placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+      blob = Blob.for_image_storage(image_storage,
+                                    storage_path=model.storage.get_layer_path(image_storage),
+                                    placements=placements)
+      blobs.append(blob)
+
+    return blobs
+
+  def _list_manifest_layers(self, repo_id, parsed, storage, include_placements=False,
+                            by_manifest=False):
     """ Returns an *ordered list* of the layers found in the manifest, starting at the base and
         working towards the leaf, including the associated Blob and its placements (if specified).
         Returns None if the manifest could not be parsed and validated.
     """
-    try:
-      parsed = manifest.get_parsed_manifest()
-    except ManifestException:
-      logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
+    storage_map = {}
+    if parsed.local_blob_digests:
+      blob_query = model.storage.lookup_repo_storages_by_content_checksum(repo_id,
+                                                                          parsed.local_blob_digests,
+                                                                          by_manifest=by_manifest)
+      storage_map = {blob.content_checksum: blob for blob in blob_query}
+
+    retriever = RepositoryContentRetriever(repo_id, storage)
+    layers = parsed.get_layers(retriever)
+    if layers is None:
+      logger.error('Could not load layers for manifest `%s`', parsed.digest)
       return None

-    blob_query = model.storage.lookup_repo_storages_by_content_checksum(repo_id, parsed.checksums)
-    storage_map = {blob.content_checksum: blob for blob in blob_query}
-
     manifest_layers = []
-    for layer in parsed.layers:
-      digest_str = str(layer.digest)
+    for layer in layers:
+      if layer.is_remote:
+        manifest_layers.append(ManifestLayer(layer, None))
+        continue
+
+      digest_str = str(layer.blob_digest)
       if digest_str not in storage_map:
-        logger.error('Missing digest `%s` for manifest `%s`', layer.digest, manifest._db_id)
+        logger.error('Missing digest `%s` for manifest `%s`', layer.blob_digest, parsed.digest)
         return None

       image_storage = storage_map[digest_str]
       assert image_storage.cas_path is not None
       assert image_storage.image_size is not None

       placements = None
       if include_placements:

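A hedged sketch of the local/remote split `_list_manifest_layers` now performs (the assertions mirror the new tests further below; the surrounding names are assumed to be in scope):

# Sketch: remote (URL-backed) layers yield a ManifestLayer with no Blob, while
# local layers are resolved through the storage map keyed by content checksum.
layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage)
for manifest_layer in layers:
  if manifest_layer.layer_info.is_remote:
    assert manifest_layer.blob is None  # content is fetched from layer_info.urls
  else:
    assert manifest_layer.blob.digest == str(manifest_layer.layer_info.blob_digest)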
@@ -1,5 +1,6 @@
 import hashlib
 import os
+import tarfile

 from io import BytesIO

@@ -113,3 +114,36 @@ def test_extra_blob_stream_handlers(pre_oci_model):

   assert ''.join(handler1_result) == 'hello world'
   assert ''.join(handler2_result) == 'hello world'
+
+
+def valid_tar_gz(contents):
+  layer_data = BytesIO()
+  tar_file = tarfile.open(fileobj=layer_data, mode='w|gz')
+
+  tar_file_info = tarfile.TarInfo(name='somefile')
+  tar_file_info.type = tarfile.REGTYPE
+  tar_file_info.size = len(contents)
+  tar_file_info.mtime = 1
+
+  tar_file.addfile(tar_file_info, BytesIO(contents))
+  tar_file.close()
+
+  layer_bytes = layer_data.getvalue()
+  layer_data.close()
+  return layer_bytes
+
+
+def test_uncompressed_size(pre_oci_model):
+  repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+  storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+  settings = BlobUploadSettings('1K', 512 * 1024, 3600)
+  app_config = {'TESTING': True}
+
+  with upload_blob(repository_ref, storage, settings) as manager:
+    manager.upload_chunk(app_config, BytesIO(valid_tar_gz('hello world')))
+
+    blob = manager.commit_to_blob(app_config)
+
+  assert blob.compressed_size is not None
+  assert blob.uncompressed_size is not None

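A quick way to confirm what the `valid_tar_gz` helper emits; this check is a sketch, not part of the commit:

# Sketch: round-trip the helper's output to show it is a well-formed gzipped tar.
import tarfile
from io import BytesIO

data = valid_tar_gz('hello world')
tar = tarfile.open(fileobj=BytesIO(data), mode='r|gz')
member = tar.next()
assert member.name == 'somefile'
assert tar.extractfile(member).read() == 'hello world'
tar.close()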
@@ -3,13 +3,14 @@ import json
 import uuid

 from datetime import datetime, timedelta
+from io import BytesIO

 import pytest

 from mock import patch
 from playhouse.test_utils import assert_query_count

-from app import docker_v2_signing_key
+from app import docker_v2_signing_key, storage
 from data import model
 from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest, ManifestBlob,
                            ManifestLegacyImage, ManifestLabel, TagManifest, RepositoryTag, Image,
@@ -19,7 +20,10 @@ from data.cache.impl import InMemoryDataModelCache
 from data.registry_model.registry_pre_oci_model import PreOCIModel
 from data.registry_model.registry_oci_model import OCIModel
 from data.registry_model.datatypes import RepositoryReference
+from data.registry_model.blobuploader import upload_blob, BlobUploadSettings
+from image.docker.types import ManifestImageLayer
 from image.docker.schema1 import DockerSchema1ManifestBuilder
+from image.docker.schema2.manifest import DockerSchema2ManifestBuilder

 from test.fixtures import *
@@ -32,6 +36,10 @@ def registry_model(request, initialized_db):
 def pre_oci_model(initialized_db):
   return PreOCIModel()

+@pytest.fixture()
+def oci_model(initialized_db):
+  return OCIModel()
+

 @pytest.mark.parametrize('names, expected', [
   (['unknown'], None),
@@ -95,6 +103,9 @@ def test_lookup_manifests(repo_namespace, repo_name, registry_model):
   assert found.legacy_image
   assert found.legacy_image.parents

+  schema1_parsed = registry_model.get_schema1_parsed_manifest(found, 'foo', 'bar', 'baz', storage)
+  assert schema1_parsed is not None
+

 def test_lookup_unknown_manifest(registry_model):
   repo = model.repository.get_repository('devtable', 'simple')
@@ -221,6 +232,8 @@ def test_repository_tags(repo_namespace, repo_name, registry_model):
   tags = registry_model.list_repository_tags(repository_ref, include_legacy_images=True)
   assert len(tags)

+  tags_map = registry_model.get_legacy_tags_map(repository_ref, storage)
+
   for tag in tags:
     found_tag = registry_model.get_repo_tag(repository_ref, tag.name, include_legacy_image=True)
     assert found_tag == tag
@@ -231,6 +244,8 @@ def test_repository_tags(repo_namespace, repo_name, registry_model):
     found_image = registry_model.get_legacy_image(repository_ref,
                                                   found_tag.legacy_image.docker_image_id)
     assert found_image == found_tag.legacy_image
+    assert tag.name in tags_map
+    assert tags_map[tag.name] == found_image.docker_image_id


 def test_repository_tag_history(registry_model):
@@ -306,7 +321,7 @@ def test_retarget_tag_history(use_manifest, registry_model):
   # Retarget the tag.
   assert manifest_or_legacy_image
   updated_tag = registry_model.retarget_tag(repository_ref, 'latest', manifest_or_legacy_image,
-                                            is_reversion=True)
+                                            storage, is_reversion=True)

   # Ensure the tag has changed targets.
   if use_manifest:
@@ -448,7 +463,7 @@ def test_is_namespace_enabled(namespace, expect_enabled, registry_model):
   ('devtable', 'history'),
   ('buynlarge', 'orgrepo'),
 ])
-def test_list_manifest_layers(repo_namespace, repo_name, registry_model):
+def test_layers_and_blobs(repo_namespace, repo_name, registry_model):
   repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
   tags = registry_model.list_repository_tags(repository_ref)
   assert tags
@@ -457,18 +472,17 @@ def test_list_manifest_layers(repo_namespace, repo_name, registry_model):
   manifest = registry_model.get_manifest_for_tag(tag)
   assert manifest

-  layers = registry_model.list_manifest_layers(manifest)
+  parsed = manifest.get_parsed_manifest()
+  assert parsed
+
+  layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage)
   assert layers

-  layers = registry_model.list_manifest_layers(manifest, include_placements=True)
+  layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage,
+                                                      include_placements=True)
   assert layers

-  parsed_layers = list(manifest.get_parsed_manifest().layers)
-  assert len(layers) == len(parsed_layers)
-
   for index, manifest_layer in enumerate(layers):
-    assert manifest_layer.layer_info == parsed_layers[index]
-    assert manifest_layer.blob.digest == str(parsed_layers[index].digest)
     assert manifest_layer.blob.storage_path
     assert manifest_layer.blob.placements
@@ -476,6 +490,51 @@ def test_layers_and_blobs(repo_namespace, repo_name, registry_model):
     assert repo_blob.digest == manifest_layer.blob.digest

     assert manifest_layer.estimated_size(1) is not None
+    assert isinstance(manifest_layer.layer_info, ManifestImageLayer)
+
+  blobs = registry_model.get_manifest_local_blobs(manifest, include_placements=True)
+  assert {b.digest for b in blobs} == set(parsed.local_blob_digests)
+
+
+def test_manifest_remote_layers(oci_model):
+  # Create a config blob for testing.
+  config_json = json.dumps({
+    'config': {},
+    "rootfs": {
+      "type": "layers",
+      "diff_ids": []
+    },
+    "history": [
+      {
+        "created": "2018-04-03T18:37:09.284840891Z",
+        "created_by": "do something",
+      },
+    ],
+  })
+
+  app_config = {'TESTING': True}
+  repository_ref = oci_model.lookup_repository('devtable', 'simple')
+  with upload_blob(repository_ref, storage, BlobUploadSettings(500, 500, 500)) as upload:
+    upload.upload_chunk(app_config, BytesIO(config_json))
+    blob = upload.commit_to_blob(app_config)
+
+  # Create the manifest in the repo.
+  builder = DockerSchema2ManifestBuilder()
+  builder.set_config_digest(blob.digest, blob.compressed_size)
+  builder.add_layer('sha256:abcd', 1234, urls=['http://hello/world'])
+  manifest = builder.build()
+
+  created_manifest, _ = oci_model.create_manifest_and_retarget_tag(repository_ref, manifest,
+                                                                   'sometag', storage)
+  assert created_manifest
+
+  layers = oci_model.list_parsed_manifest_layers(repository_ref,
+                                                 created_manifest.get_parsed_manifest(),
+                                                 storage)
+  assert len(layers) == 1
+  assert layers[0].layer_info.is_remote
+  assert layers[0].layer_info.urls == ['http://hello/world']
+  assert layers[0].blob is None
+
+
 def test_derived_image(registry_model):
@@ -490,8 +549,10 @@ def test_derived_image(registry_model):
   assert registry_model.lookup_derived_image(manifest, 'squash', {}) is None

   # Create a new one.
-  squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', {})
-  assert registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', {}) == squashed
+  squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash',
+                                                           'local_us', storage, {})
+  assert registry_model.lookup_or_create_derived_image(manifest, 'squash',
+                                                       'local_us', storage, {}) == squashed
   assert squashed.unique_id

   # Check and set the size.
@@ -507,15 +568,15 @@ def test_derived_image(registry_model):
   assert registry_model.lookup_derived_image(manifest, 'squash', {'foo': 'bar'}) is None

   squashed_foo = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us',
-                                                               {'foo': 'bar'})
+                                                               storage, {'foo': 'bar'})
   assert squashed_foo != squashed
   assert registry_model.lookup_derived_image(manifest, 'squash', {'foo': 'bar'}) == squashed_foo

   assert squashed.unique_id != squashed_foo.unique_id

   # Lookup with placements.
-  squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', {},
-                                                           include_placements=True)
+  squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us',
+                                                           storage, {}, include_placements=True)
   assert squashed.blob.placements

   # Delete the derived image.
@@ -546,25 +607,25 @@ def test_torrent_info(registry_model):
   tag = registry_model.get_repo_tag(repository_ref, 'latest')
   manifest = registry_model.get_manifest_for_tag(tag)

-  layers = registry_model.list_manifest_layers(manifest)
-  assert layers
+  blobs = registry_model.get_manifest_local_blobs(manifest)
+  assert blobs

-  assert registry_model.get_torrent_info(layers[0].blob) is None
-  registry_model.set_torrent_info(layers[0].blob, 2, 'foo')
+  assert registry_model.get_torrent_info(blobs[0]) is None
+  registry_model.set_torrent_info(blobs[0], 2, 'foo')

   # Set it again exactly, which should be a no-op.
-  registry_model.set_torrent_info(layers[0].blob, 2, 'foo')
+  registry_model.set_torrent_info(blobs[0], 2, 'foo')

   # Check the information we've set.
-  torrent_info = registry_model.get_torrent_info(layers[0].blob)
+  torrent_info = registry_model.get_torrent_info(blobs[0])
   assert torrent_info is not None
   assert torrent_info.piece_length == 2
   assert torrent_info.pieces == 'foo'

   # Try setting it again. Nothing should happen.
-  registry_model.set_torrent_info(layers[0].blob, 3, 'bar')
+  registry_model.set_torrent_info(blobs[0], 3, 'bar')

-  torrent_info = registry_model.get_torrent_info(layers[0].blob)
+  torrent_info = registry_model.get_torrent_info(blobs[0])
   assert torrent_info is not None
   assert torrent_info.piece_length == 2
   assert torrent_info.pieces == 'foo'
@@ -617,26 +678,27 @@ def test_commit_blob_upload(registry_model):
   assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)


-def test_mount_blob_into_repository(registry_model):
-  repository_ref = registry_model.lookup_repository('devtable', 'simple')
-  latest_tag = registry_model.get_repo_tag(repository_ref, 'latest')
-  manifest = registry_model.get_manifest_for_tag(latest_tag)
+# TODO(jschorr): Re-enable for OCI model once we have a new table for temporary blobs.
+def test_mount_blob_into_repository(pre_oci_model):
+  repository_ref = pre_oci_model.lookup_repository('devtable', 'simple')
+  latest_tag = pre_oci_model.get_repo_tag(repository_ref, 'latest')
+  manifest = pre_oci_model.get_manifest_for_tag(latest_tag)

-  target_repository_ref = registry_model.lookup_repository('devtable', 'complex')
+  target_repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')

-  layers = registry_model.list_manifest_layers(manifest, include_placements=True)
-  assert layers
+  blobs = pre_oci_model.get_manifest_local_blobs(manifest, include_placements=True)
+  assert blobs

-  for layer in layers:
+  for blob in blobs:
     # Ensure the blob doesn't exist under the repository.
-    assert not registry_model.get_repo_blob_by_digest(target_repository_ref, layer.blob.digest)
+    assert not pre_oci_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)

     # Mount the blob into the repository.
-    assert registry_model.mount_blob_into_repository(layer.blob, target_repository_ref, 60)
+    assert pre_oci_model.mount_blob_into_repository(blob, target_repository_ref, 60)

     # Ensure it now exists.
-    found = registry_model.get_repo_blob_by_digest(target_repository_ref, layer.blob.digest)
-    assert found == layer.blob
+    found = pre_oci_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
+    assert found == blob


 class SomeException(Exception):
@@ -650,10 +712,10 @@ def test_get_cached_repo_blob(registry_model):
   latest_tag = registry_model.get_repo_tag(repository_ref, 'latest')
   manifest = registry_model.get_manifest_for_tag(latest_tag)

-  layers = registry_model.list_manifest_layers(manifest, include_placements=True)
-  assert layers
+  blobs = registry_model.get_manifest_local_blobs(manifest, include_placements=True)
+  assert blobs

-  blob = layers[0].blob
+  blob = blobs[0]

   # Load a blob to add it to the cache.
   found = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
@@ -670,19 +732,21 @@ def test_get_cached_repo_blob(registry_model):

   with patch('data.registry_model.registry_pre_oci_model.model.blob.get_repository_blob_by_digest',
              fail):
-    # Make sure we can load again, which should hit the cache.
-    cached = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
-    assert cached.digest == blob.digest
-    assert cached.uuid == blob.uuid
-    assert cached.compressed_size == blob.compressed_size
-    assert cached.uncompressed_size == blob.uncompressed_size
-    assert cached.uploading == blob.uploading
-    assert cached.placements == blob.placements
+    with patch('data.registry_model.registry_oci_model.model.oci.blob.get_repository_blob_by_digest',
+               fail):
+      # Make sure we can load again, which should hit the cache.
+      cached = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
+      assert cached.digest == blob.digest
+      assert cached.uuid == blob.uuid
+      assert cached.compressed_size == blob.compressed_size
+      assert cached.uncompressed_size == blob.uncompressed_size
+      assert cached.uploading == blob.uploading
+      assert cached.placements == blob.placements

-  # Try another blob, which should fail since the DB is not connected and the cache
-  # does not contain the blob.
-  with pytest.raises(SomeException):
-    registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', 'some other digest')
+      # Try another blob, which should fail since the DB is not connected and the cache
+      # does not contain the blob.
+      with pytest.raises(SomeException):
+        registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', 'some other digest')
@@ -698,15 +762,20 @@ def test_create_manifest_and_retarget_tag(registry_model):

   another_manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref,
                                                                           sample_manifest,
-                                                                          'anothertag')
+                                                                          'anothertag',
+                                                                          storage)
   assert another_manifest is not None
   assert tag is not None

   assert tag.name == 'anothertag'
   assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict

-  layers = registry_model.list_manifest_layers(another_manifest)
-  assert len(layers) == 1
+
+def test_get_schema1_parsed_manifest(registry_model):
+  repository_ref = registry_model.lookup_repository('devtable', 'simple')
+  latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
+  manifest = registry_model.get_manifest_for_tag(latest_tag)
+  assert registry_model.get_schema1_parsed_manifest(manifest, '', '', '', storage)


 def test_create_manifest_and_retarget_tag_with_labels(registry_model):
@@ -730,15 +799,13 @@ def test_create_manifest_and_retarget_tag_with_labels(registry_model):

   another_manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref,
                                                                           sample_manifest,
-                                                                          'anothertag')
+                                                                          'anothertag',
+                                                                          storage)
   assert another_manifest is not None
   assert tag is not None

   assert tag.name == 'anothertag'
   assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict

-  layers = registry_model.list_manifest_layers(another_manifest)
-  assert len(layers) == 1
-
   # Ensure the labels were applied.
   assert tag.lifetime_end_ms is not None

@@ -40,9 +40,9 @@ def test_build_manifest(layers, fake_session, registry_model):
   settings = BlobUploadSettings('2M', 512 * 1024, 3600)
   app_config = {'TESTING': True}

-  builder = create_manifest_builder(repository_ref)
-  assert lookup_manifest_builder(repository_ref, 'anotherid') is None
-  assert lookup_manifest_builder(repository_ref, builder.builder_id) is not None
+  builder = create_manifest_builder(repository_ref, storage)
+  assert lookup_manifest_builder(repository_ref, 'anotherid', storage) is None
+  assert lookup_manifest_builder(repository_ref, builder.builder_id, storage) is not None

   blobs_by_layer = {}
   for layer_id, parent_id, layer_bytes in layers:
@@ -89,8 +89,9 @@ def test_build_manifest(layers, fake_session, registry_model):


 def test_build_manifest_missing_parent(fake_session, registry_model):
+  storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
   repository_ref = registry_model.lookup_repository('devtable', 'complex')
-  builder = create_manifest_builder(repository_ref)
+  builder = create_manifest_builder(repository_ref, storage)

   assert builder.start_layer('somelayer', json.dumps({'id': 'somelayer', 'parent': 'someparent'}),
                              'local_us', None, 60) is None

@@ -1,8 +1,10 @@
 """ Manage the tags of a repository. """
+import json

 from datetime import datetime
 from flask import request, abort

+from app import storage
 from auth.auth_context import get_authenticated_user
 from data.registry_model import registry_model
 from endpoints.api import (resource, nickname, require_repo_read, require_repo_write,
@@ -26,14 +28,26 @@ def _tag_dict(tag):
   if tag.lifetime_end_ts > 0:
     tag_info['end_ts'] = tag.lifetime_end_ts

-  if tag.manifest_digest:
-    tag_info['manifest_digest'] = tag.manifest_digest
-
-  if tag.legacy_image:
+  # TODO(jschorr): Remove this once fully on OCI data model.
+  if tag.legacy_image_if_present:
     tag_info['docker_image_id'] = tag.legacy_image.docker_image_id
     tag_info['image_id'] = tag.legacy_image.docker_image_id
     tag_info['size'] = tag.legacy_image.aggregate_size

+  # TODO(jschorr): Remove this check once fully on OCI data model.
+  if tag.manifest_digest:
+    tag_info['manifest_digest'] = tag.manifest_digest
+
+  if tag.manifest:
+    try:
+      tag_info['manifest'] = json.loads(tag.manifest.manifest_bytes)
+    except (TypeError, ValueError):
+      pass
+
+    tag_info['is_manifest_list'] = tag.manifest.is_manifest_list
+
+    if 'size' not in tag_info:
+      tag_info['size'] = tag.manifest.layers_compressed_size
+
   if tag.lifetime_start_ts > 0:
     last_modified = format_date(datetime.utcfromtimestamp(tag.lifetime_start_ts))
     tag_info['last_modified'] = last_modified
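A hedged example of the tag dictionary the rewritten `_tag_dict` can now emit for a schema 2 tag (all values below are hypothetical):

# Sketch: fields produced by the logic above for a tag whose manifest is known.
tag_info = {
  'end_ts': 1542100000,                    # only when the tag has expired
  'docker_image_id': 'deadbeef1234',       # only when a legacy image is present
  'image_id': 'deadbeef1234',
  'manifest_digest': 'sha256:abcd1234',
  'manifest': {},                          # the parsed manifest bytes, when valid JSON
  'is_manifest_list': False,
  'size': 12345,                           # falls back to layers_compressed_size
  'last_modified': 'Tue, 13 Nov 2018 09:20:21 -0000',
}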
@@ -146,15 +160,30 @@ class RepositoryTag(RepositoryParamResource):
       else:
         raise InvalidRequest('Could not update tag expiration; Tag has probably changed')

-    if 'image' in request.get_json():
+    if 'image' in request.get_json() or 'manifest_digest' in request.get_json():
       existing_tag = registry_model.get_repo_tag(repo_ref, tag, include_legacy_image=True)

-      image_id = request.get_json()['image']
-      image = registry_model.get_legacy_image(repo_ref, image_id)
-      if image is None:
+      manifest_or_image = None
+      image_id = None
+      manifest_digest = None
+
+      if 'image' in request.get_json():
+        image_id = request.get_json()['image']
+        manifest_or_image = registry_model.get_legacy_image(repo_ref, image_id)
+      else:
+        manifest_digest = request.get_json()['manifest_digest']
+        manifest_or_image = registry_model.lookup_manifest_by_digest(repo_ref, manifest_digest)
+
+      if manifest_or_image is None:
         raise NotFound()

-      if not registry_model.retarget_tag(repo_ref, tag, image):
+      # TODO(jschorr): Remove this check once fully on V22
+      existing_manifest_digest = None
+      if existing_tag:
+        existing_manifest = registry_model.get_manifest_for_tag(existing_tag)
+        existing_manifest_digest = existing_manifest.digest if existing_manifest else None
+
+      if not registry_model.retarget_tag(repo_ref, tag, manifest_or_image, storage):
         raise InvalidRequest('Could not move tag')

       username = get_authenticated_user().username
@@ -165,7 +194,11 @@ class RepositoryTag(RepositoryParamResource):
         'tag': tag,
         'namespace': namespace,
         'image': image_id,
-        'original_image': existing_tag.legacy_image.docker_image_id if existing_tag else None,
+        'manifest_digest': manifest_digest,
+        'original_image': (existing_tag.legacy_image.docker_image_id
+                           if existing_tag and existing_tag.legacy_image_if_present
+                           else None),
+        'original_manifest_digest': existing_manifest_digest,
       }, repo_name=repository)

       return 'Updated', 201
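The handler above now accepts two request shapes; a hedged pair of example payloads (IDs and digests hypothetical):

# Sketch: bodies accepted by the PUT tag endpoint after this change.
move_by_image = {'image': 'deadbeef1234'}                 # legacy path, by docker image ID
move_by_digest = {'manifest_digest': 'sha256:abcd1234'}   # new path, by manifest digest
# Exactly one key is consulted; 'image' wins when both are present.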
@@ -213,6 +246,9 @@ class RepositoryTagImages(RepositoryParamResource):
     if tag_ref is None:
       raise NotFound()

+    if tag_ref.legacy_image_if_present is None:
+      return {'images': []}
+
     image_id = tag_ref.legacy_image.docker_image_id

     all_images = None
@@ -287,7 +323,8 @@ class RestoreTag(RepositoryParamResource):
     if manifest_or_legacy_image is None:
       raise NotFound()

-    if not registry_model.retarget_tag(repo_ref, tag, manifest_or_legacy_image, is_reversion=True):
+    if not registry_model.retarget_tag(repo_ref, tag, manifest_or_legacy_image, storage,
+                                       is_reversion=True):
       raise InvalidRequest('Could not restore tag')

     log_action('revert_tag', namespace, log_data, repo_name=repository)

@@ -6,7 +6,7 @@ from functools import wraps

 from flask import request, make_response, jsonify, session

-from app import userevents, metric_queue
+from app import userevents, metric_queue, storage
 from auth.auth_context import get_authenticated_context, get_authenticated_user
 from auth.credentials import validate_credentials, CredentialKind
 from auth.decorators import process_auth
@@ -217,7 +217,7 @@ def create_repository(namespace_name, repo_name):

   # Start a new builder for the repository and save its ID in the session.
   assert repository_ref
-  builder = create_manifest_builder(repository_ref)
+  builder = create_manifest_builder(repository_ref, storage)
   logger.debug('Started repo push with manifest builder %s', builder)
   if builder is None:
     abort(404, message='Unknown repository', issue='unknown-repo')
@@ -243,7 +243,7 @@ def update_images(namespace_name, repo_name):
     # Make sure the repo actually exists.
     abort(404, message='Unknown repository', issue='unknown-repo')

-  builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
+  builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage)
   if builder is None:
     abort(400)

|
@ -166,7 +166,7 @@ def put_image_layer(namespace, repository, image_id):
|
|||
exact_abort(409, 'Image already exists')
|
||||
|
||||
logger.debug('Checking for image in manifest builder')
|
||||
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
|
||||
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store)
|
||||
if builder is None:
|
||||
abort(400)
|
||||
|
||||
|
@ -268,7 +268,7 @@ def put_image_checksum(namespace, repository, image_id):
|
|||
image_id=image_id)
|
||||
|
||||
logger.debug('Checking for image in manifest builder')
|
||||
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
|
||||
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store)
|
||||
if builder is None:
|
||||
abort(400)
|
||||
|
||||
|
@ -361,7 +361,7 @@ def put_image_json(namespace, repository, image_id):
|
|||
if repository_ref is None:
|
||||
abort(403)
|
||||
|
||||
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
|
||||
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store)
|
||||
if builder is None:
|
||||
abort(400)
|
||||
|
||||
|
|
|
@@ -3,6 +3,7 @@ import json

 from flask import abort, request, jsonify, make_response, session

+from app import storage
 from auth.decorators import process_auth
 from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
 from data.registry_model import registry_model
@@ -26,8 +27,7 @@ def get_tags(namespace_name, repo_name):
     if repository_ref is None:
       abort(404)

-    tags = registry_model.list_repository_tags(repository_ref, include_legacy_images=True)
-    tag_map = {tag.name: tag.legacy_image.docker_image_id for tag in tags}
+    tag_map = registry_model.get_legacy_tags_map(repository_ref, storage)
     return jsonify(tag_map)

   abort(403)
@@ -70,7 +70,7 @@ def put_tag(namespace_name, repo_name, tag):
     image_id = json.loads(request.data)

     # Check for the image ID first in a builder (for an in-progress push).
-    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
+    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage)
     if builder is not None:
       layer = builder.lookup_layer(image_id)
       if layer is not None:
@@ -86,7 +86,7 @@ def put_tag(namespace_name, repo_name, tag):
     if legacy_image is None:
       abort(400)

-    if registry_model.retarget_tag(repository_ref, tag, legacy_image) is None:
+    if registry_model.retarget_tag(repository_ref, tag, legacy_image, storage) is None:
       abort(400)

     return make_response('Created', 200)

@@ -6,7 +6,7 @@ from flask import request, url_for, Response

 import features

-from app import app, metric_queue
+from app import app, metric_queue, storage, model_cache
 from auth.registry_jwt_auth import process_registry_jwt_auth
 from digest import digest_tools
 from data.registry_model import registry_model
@@ -17,6 +17,7 @@ from endpoints.v2.errors import (ManifestInvalid, ManifestUnknown, TagInvalid,
 from image.docker import ManifestException
 from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE, DockerSchema1Manifest
 from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES, OCI_CONTENT_TYPES
+from image.docker.schemas import parse_manifest_from_bytes
 from notifications import spawn_notification
 from util.audit import track_and_log
 from util.names import VALID_TAG_PATTERN
@@ -55,16 +56,27 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
       # Something went wrong.
       raise ManifestInvalid()

+  try:
+    parsed = manifest.get_parsed_manifest()
+  except ManifestException:
+    logger.exception('Got exception when trying to parse manifest `%s`', manifest_ref)
+    raise ManifestInvalid()
+
+  supported = _rewrite_to_schema1_if_necessary(namespace_name, repo_name, manifest_ref, manifest,
+                                               parsed)
+  if supported is None:
+    raise ManifestUnknown()
+
   track_and_log('pull_repo', repository_ref, analytics_name='pull_repo_100x', analytics_sample=0.01,
                 tag=manifest_ref)
   metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

   return Response(
-    manifest.manifest_bytes,
+    supported.bytes,
     status=200,
     headers={
-      'Content-Type': manifest.media_type,
-      'Docker-Content-Digest': manifest.digest,
+      'Content-Type': supported.media_type,
+      'Docker-Content-Digest': supported.digest,
     },
   )
@@ -83,18 +95,46 @@ def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
   if manifest is None:
     raise ManifestUnknown()

+  try:
+    parsed = manifest.get_parsed_manifest()
+  except ManifestException:
+    logger.exception('Got exception when trying to parse manifest `%s`', manifest_ref)
+    raise ManifestInvalid()
+
+  supported = _rewrite_to_schema1_if_necessary(namespace_name, repo_name, '$digest', manifest,
+                                               parsed)
+  if supported is None:
+    raise ManifestUnknown()
+
   track_and_log('pull_repo', repository_ref, manifest_digest=manifest_ref)
   metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

-  return Response(manifest.manifest_bytes, status=200, headers={
-    'Content-Type': manifest.media_type,
-    'Docker-Content-Digest': manifest.digest,
+  return Response(supported.bytes, status=200, headers={
+    'Content-Type': supported.media_type,
+    'Docker-Content-Digest': supported.digest,
   })


+def _rewrite_to_schema1_if_necessary(namespace_name, repo_name, tag_name, manifest, parsed):
+  # As per the Docker protocol, if the manifest is not schema version 1 and the manifest's
+  # media type is not in the Accept header, we return a schema 1 version of the manifest for
+  # the amd64+linux platform, if any, or None if none.
+  # See: https://docs.docker.com/registry/spec/manifest-v2-2
+  mimetypes = [mimetype for mimetype, _ in request.accept_mimetypes]
+  if parsed.media_type in mimetypes:
+    return parsed
+
+  return registry_model.get_schema1_parsed_manifest(manifest, namespace_name, repo_name, tag_name,
+                                                    storage)
+
+
 def _reject_manifest2_schema2(func):
   @wraps(func)
   def wrapped(*args, **kwargs):
+    namespace_name = kwargs['namespace_name']
+    if registry_model.supports_schema2(namespace_name):
+      return func(*args, **kwargs)
+
     if _doesnt_accept_schema_v1() or \
        request.content_type in DOCKER_SCHEMA2_CONTENT_TYPES | OCI_CONTENT_TYPES:
       raise ManifestInvalid(detail={'message': 'manifest schema version not supported'},
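The Accept-header check in _rewrite_to_schema1_if_necessary is the heart of the v2_2 backwards-compatibility story: schema2 content is only returned verbatim when the client advertises support for it. A minimal standalone sketch of the same negotiation, with a plain `accept` list standing in for Flask's parsed `request.accept_mimetypes` (a hypothetical helper, not part of this change):

def negotiate_manifest(parsed_manifest, accept, schema1_fallback):
  # Client explicitly accepts this media type: serve the manifest unchanged.
  if parsed_manifest.media_type in accept:
    return parsed_manifest

  # Otherwise fall back to a schema1 rewrite for amd64+linux; this can be
  # None (e.g. a manifest list with no such platform), which the endpoint
  # surfaces as ManifestUnknown.
  return schema1_fallback()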
@@ -111,41 +151,67 @@ def _doesnt_accept_schema_v1():


 @v2_bp.route(MANIFEST_TAGNAME_ROUTE, methods=['PUT'])
-@_reject_manifest2_schema2
 @parse_repository_name()
+@_reject_manifest2_schema2
 @process_registry_jwt_auth(scopes=['pull', 'push'])
 @require_repo_write
 @anon_protect
 def write_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
-  try:
-    manifest = DockerSchema1Manifest(request.data)
-  except ManifestException as me:
-    logger.exception("failed to parse manifest when writing by tagname")
-    raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me.message})
-
-  if manifest.tag != manifest_ref:
-    raise TagInvalid()
-
-  return _write_manifest_and_log(namespace_name, repo_name, manifest)
+  parsed = _parse_manifest()
+  return _write_manifest_and_log(namespace_name, repo_name, manifest_ref, parsed)


 @v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['PUT'])
-@_reject_manifest2_schema2
 @parse_repository_name()
+@_reject_manifest2_schema2
 @process_registry_jwt_auth(scopes=['pull', 'push'])
 @require_repo_write
 @anon_protect
 def write_manifest_by_digest(namespace_name, repo_name, manifest_ref):
-  try:
-    manifest = DockerSchema1Manifest(request.data)
-  except ManifestException as me:
-    logger.exception("failed to parse manifest when writing by digest")
-    raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me.message})
-
-  if manifest.digest != manifest_ref:
+  parsed = _parse_manifest()
+  if parsed.digest != manifest_ref:
     raise ManifestInvalid(detail={'message': 'manifest digest mismatch'})

-  return _write_manifest_and_log(namespace_name, repo_name, manifest)
+  if parsed.schema_version != 2:
+    return _write_manifest_and_log(namespace_name, repo_name, parsed.tag, parsed)
+
+  # If the manifest is schema version 2, then this cannot be a normal tag-based push, as the
+  # manifest does not contain the tag and this call was not given a tag name. Instead, we write the
+  # manifest with a temporary tag, as it is being pushed as part of a call for a manifest list.
+  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
+  if repository_ref is None:
+    raise NameUnknown()
+
+  expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
+  manifest = registry_model.create_manifest_with_temp_tag(repository_ref, parsed, expiration_sec,
+                                                          storage)
+  if manifest is None:
+    raise ManifestInvalid()
+
+  return Response(
+    'OK',
+    status=202,
+    headers={
+      'Docker-Content-Digest': manifest.digest,
+      'Location':
+        url_for('v2.fetch_manifest_by_digest',
+                repository='%s/%s' % (namespace_name, repo_name),
+                manifest_ref=manifest.digest),
+    },
+  )
+
+
+def _parse_manifest():
+  content_type = request.content_type or DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+  if content_type == 'application/json':
+    # For back-compat.
+    content_type = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+
+  try:
+    return parse_manifest_from_bytes(request.data, content_type)
+  except ManifestException as me:
+    logger.exception("failed to parse manifest when writing by tagname")
+    raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me.message})


 @v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE'])
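Why the temporary tag: when a client pushes a manifest list, it pushes each child manifest by digest first and only then pushes the list itself under the real tag, so a by-digest schema2 push has no tag of its own to target. A hedged sketch of that client-side ordering (the two `push_*` callables are hypothetical stand-ins for the PUT endpoints above):

def push_manifest_list(manifest_list, child_manifests, push_by_digest, push_by_tag):
  # Children land first via PUT /v2/<repo>/manifests/<digest>; each receives a
  # temporary tag (PUSH_TEMP_TAG_EXPIRATION_SEC) so GC cannot reclaim it before
  # the list that references it arrives.
  for child in child_manifests:
    push_by_digest(child.digest, child.bytes)

  # The list itself arrives last via PUT /v2/<repo>/manifests/<tag>.
  push_by_tag(manifest_list.bytes)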
@@ -178,21 +244,22 @@ def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
   return Response(status=202)


-def _write_manifest_and_log(namespace_name, repo_name, manifest_impl):
-  repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name, manifest_impl)
+def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl):
+  repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name, tag_name,
+                                                  manifest_impl)

   # Queue all blob manifests for replication.
   if features.STORAGE_REPLICATION:
-    layers = registry_model.list_manifest_layers(manifest)
-    if layers is None:
-      raise ManifestInvalid()
-
-    with queue_replication_batch(namespace_name) as queue_storage_replication:
-      for layer in layers:
-        queue_storage_replication(layer.blob)
+    blobs = registry_model.get_manifest_local_blobs(manifest)
+    if blobs is None:
+      logger.error('Could not lookup blobs for manifest `%s`', manifest.digest)
+    else:
+      with queue_replication_batch(namespace_name) as queue_storage_replication:
+        for blob_digest in blobs:
+          queue_storage_replication(blob_digest)

-  track_and_log('push_repo', repository_ref, tag=manifest_impl.tag)
-  spawn_notification(repository_ref, 'repo_push', {'updated_tags': [manifest_impl.tag]})
+  track_and_log('push_repo', repository_ref, tag=tag_name)
+  spawn_notification(repository_ref, 'repo_push', {'updated_tags': [tag_name]})
   metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

   return Response(
@@ -208,26 +275,30 @@ def _write_manifest_and_log(namespace_name, repo_name, manifest_impl):
   )


-def _write_manifest(namespace_name, repo_name, manifest_impl):
-  if (manifest_impl.namespace == '' and features.LIBRARY_SUPPORT and
-      namespace_name == app.config['LIBRARY_NAMESPACE']):
-    pass
-  elif manifest_impl.namespace != namespace_name:
-    raise NameInvalid()
+def _write_manifest(namespace_name, repo_name, tag_name, manifest_impl):
+  # NOTE: These extra checks are needed for schema version 1 because the manifests
+  # contain the repo namespace, name and tag name.
+  if manifest_impl.schema_version == 1:
+    if (manifest_impl.namespace == '' and features.LIBRARY_SUPPORT and
+        namespace_name == app.config['LIBRARY_NAMESPACE']):
+      pass
+    elif manifest_impl.namespace != namespace_name:
+      raise NameInvalid()

-  if manifest_impl.repo_name != repo_name:
-    raise NameInvalid()
+    if manifest_impl.repo_name != repo_name:
+      raise NameInvalid()

-  if not manifest_impl.layers:
-    raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})
+    if not manifest_impl.layers:
+      raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})

   # Ensure that the repository exists.
   repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
   if repository_ref is None:
     raise NameUnknown()

   # Create the manifest(s) and retarget the tag to point to it.
   manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest_impl,
-                                                                  manifest_impl.tag)
+                                                                  tag_name, storage)
   if manifest is None:
     raise ManifestInvalid()
@@ -65,7 +65,7 @@ def test_missing_link(initialized_db):
                       .add_layer(first_blob_sha, '{"id": "first"}')
                       .build(docker_v2_signing_key))

-  _write_manifest(ADMIN_ACCESS_USER, REPO, first_manifest)
+  _write_manifest(ADMIN_ACCESS_USER, REPO, FIRST_TAG, first_manifest)

   # Delete all temp tags and perform GC.
   _perform_cleanup()

@@ -88,7 +88,7 @@ def test_missing_link(initialized_db):
                       .add_layer(second_blob_sha, '{"id": "first"}')
                       .build(docker_v2_signing_key))

-  _write_manifest(ADMIN_ACCESS_USER, REPO, second_manifest)
+  _write_manifest(ADMIN_ACCESS_USER, REPO, SECOND_TAG, second_manifest)

   # Delete all temp tags and perform GC.
   _perform_cleanup()

@@ -117,7 +117,7 @@ def test_missing_link(initialized_db):
                       .add_layer(fourth_blob_sha, '{"id": "first"}')  # Note the change in BLOB from the second manifest.
                       .build(docker_v2_signing_key))

-  _write_manifest(ADMIN_ACCESS_USER, REPO, third_manifest)
+  _write_manifest(ADMIN_ACCESS_USER, REPO, THIRD_TAG, third_manifest)

   # Delete all temp tags and perform GC.
   _perform_cleanup()
@@ -16,6 +16,7 @@ from data.registry_model import registry_model
 from endpoints.decorators import anon_protect, anon_allowed, route_show_if, parse_repository_name
 from endpoints.v2.blob import BLOB_DIGEST_ROUTE
 from image.appc import AppCImageFormatter
+from image.docker import ManifestException
 from image.docker.squashed import SquashedDockerImageFormatter
 from storage import Storage
 from util.audit import track_and_log, wrap_repository

@@ -42,7 +43,7 @@ class VerbReporter(TarLayerFormatterReporter):
     metric_queue.verb_action_passes.Inc(labelvalues=[self.kind, pass_count])


-def _open_stream(formatter, tag, manifest, derived_image_id, handlers, reporter):
+def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, reporter):
   """
   This method generates a stream of data which will be replicated and read from the queue files.
   This method runs in a separate process.

@@ -50,7 +51,8 @@ def _open_stream(formatter, tag, manifest, derived_image_id, handlers, reporter)
   # For performance reasons, we load the full image list here, cache it, then disconnect from
   # the database.
   with database.UseThenDisconnect(app.config):
-    layers = registry_model.list_manifest_layers(manifest, include_placements=True)
+    layers = registry_model.list_parsed_manifest_layers(tag.repository, schema1_manifest, storage,
+                                                        include_placements=True)

   def image_stream_getter(store, blob):
     def get_stream_for_storage():

@@ -68,7 +70,7 @@ def _open_stream(formatter, tag, manifest, derived_image_id, handlers, reporter)
   for layer in reversed(layers):
     yield image_stream_getter(store, layer.blob)

-  stream = formatter.build_stream(tag, manifest, derived_image_id, layers,
+  stream = formatter.build_stream(tag, schema1_manifest, derived_image_id, layers,
                                   tar_stream_getter_iterator, reporter=reporter)

   for handler_fn in handlers:

@@ -208,6 +210,9 @@ def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):

   # Lookup the requested tag.
+  repo_ref = registry_model.lookup_repository(namespace, repo_name)
+  if repo_ref is None:
+    abort(404)
+
   tag = registry_model.get_repo_tag(repo_ref, tag_name)
   if tag is None:
     logger.debug('Tag %s does not exist in repository %s/%s for user %s', tag, namespace, repo_name,

@@ -220,9 +225,21 @@ def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):
     logger.debug('Could not get manifest on %s/%s:%s::%s', namespace, repo_name, tag.name, verb)
     abort(404)

+  # Retrieve the schema1-compatible version of the manifest.
+  try:
+    schema1_manifest = registry_model.get_schema1_parsed_manifest(manifest, namespace,
+                                                                  repo_name, tag.name,
+                                                                  storage)
+  except ManifestException:
+    logger.exception('Could not get manifest on %s/%s:%s::%s', namespace, repo_name, tag.name, verb)
+    abort(400)
+
+  if schema1_manifest is None:
+    abort(404)
+
   # If there is a data checker, call it first.
   if checker is not None:
-    if not checker(tag, manifest):
+    if not checker(tag, schema1_manifest):
       logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repo_name, tag.name, verb)
       abort(404)

@@ -230,12 +247,12 @@ def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):
   assert tag.repository.namespace_name
   assert tag.repository.name

-  return tag, manifest
+  return tag, manifest, schema1_manifest


 def _repo_verb_signature(namespace, repository, tag_name, verb, checker=None, **kwargs):
   # Verify that the tag exists and that we have access to it.
-  tag, manifest = _verify_repo_verb(storage, namespace, repository, tag_name, verb, checker)
+  tag, manifest, _ = _verify_repo_verb(storage, namespace, repository, tag_name, verb, checker)

   # Find the derived image storage for the verb.
   derived_image = registry_model.lookup_derived_image(manifest, verb,

@@ -261,7 +278,8 @@ def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, che
   # Verify that the image exists and that we have access to it.
   logger.debug('Verifying repo verb %s for repository %s/%s with user %s with mimetype %s',
                verb, namespace, repository, get_authenticated_user(), request.accept_mimetypes.best)
-  tag, manifest = _verify_repo_verb(storage, namespace, repository, tag_name, verb, checker)
+  tag, manifest, schema1_manifest = _verify_repo_verb(storage, namespace, repository,
+                                                      tag_name, verb, checker)

   # Load the repository for later.
   repo = model.repository.get_repository(namespace, repository)

@@ -280,8 +298,13 @@ def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, che

   # Lookup/create the derived image for the verb and repo image.
   derived_image = registry_model.lookup_or_create_derived_image(
-    manifest, verb, storage.preferred_locations[0], varying_metadata={'tag': tag.name},
+    manifest, verb, storage.preferred_locations[0], storage,
+    varying_metadata={'tag': tag.name},
     include_placements=True)
+  if derived_image is None:
+    logger.error('Could not create or lookup a derived image for manifest %s', manifest)
+    abort(400)

   if not derived_image.blob.uploading:
     logger.debug('Derived %s image %s exists in storage', verb, derived_image)
     is_head_request = request.method == 'HEAD'

@@ -323,7 +346,7 @@ def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, che
   # and send the results to the client and storage.
   handlers = [hasher.update]
   reporter = VerbReporter(verb)
-  args = (formatter, tag, manifest, derived_image.unique_id, handlers, reporter)
+  args = (formatter, tag, schema1_manifest, derived_image.unique_id, handlers, reporter)
   queue_process = QueueProcess(
     _open_stream,
     8 * 1024,

@@ -360,7 +383,7 @@ def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, che
 def os_arch_checker(os, arch):
   def checker(tag, manifest):
     try:
-      image_json = json.loads(manifest.get_parsed_manifest().leaf_layer.raw_v1_metadata)
+      image_json = json.loads(manifest.leaf_layer.raw_v1_metadata)
     except ValueError:
       logger.exception('Could not parse leaf layer JSON for manifest %s', manifest)
       return False
@@ -18,10 +18,9 @@ class AppCImageFormatter(TarImageFormatter):
   Image formatter which produces an tarball according to the AppC specification.
   """

-  def stream_generator(self, tag, manifest, synthetic_image_id, layer_iterator,
+  def stream_generator(self, tag, parsed_manifest, synthetic_image_id, layer_iterator,
                        tar_stream_getter_iterator, reporter=None):
     image_mtime = 0
-    parsed_manifest = manifest.get_parsed_manifest()
     created = parsed_manifest.created_datetime
     if created is not None:
       image_mtime = calendar.timegm(created.utctimetuple())
@@ -1,9 +1,17 @@
-from abc import ABCMeta, abstractproperty
+from abc import ABCMeta, abstractproperty, abstractmethod
 from six import add_metaclass

 @add_metaclass(ABCMeta)
 class ManifestInterface(object):
   """ Defines the interface for the various manifests types supported. """
+  @abstractproperty
+  def is_manifest_list(self):
+    """ Returns whether this manifest is a list. """
+
   @abstractproperty
   def schema_version(self):
     """ The version of the schema. """

   @abstractproperty
   def digest(self):
     """ The digest of the manifest, including type prefix. """

@@ -25,17 +33,25 @@ class ManifestInterface(object):
     pass

   @abstractproperty
-  def layers(self):
-    """ Returns the layers of this manifest, from base to leaf. """
+  def layers_compressed_size(self):
+    """ Returns the total compressed size of all the layers in this manifest. Returns None if this
+        cannot be computed locally.
+    """

-  @abstractproperty
-  def leaf_layer_v1_image_id(self):
-    """ Returns the Docker V1 image ID for the leaf (top) layer, if any, or None if none. """
+  @abstractmethod
+  def get_layers(self, content_retriever):
+    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
+        does not support layers. The layer must be of type ManifestImageLayer. """
+    pass

-  @abstractproperty
-  def legacy_image_ids(self):
+  @abstractmethod
+  def get_leaf_layer_v1_image_id(self, content_retriever):
+    """ Returns the Docker V1 image ID for the leaf (top) layer, if any, or None if
+        not applicable. """
+    pass
+
+  @abstractmethod
+  def get_legacy_image_ids(self, content_retriever):
     """ Returns the Docker V1 image IDs for the layers of this manifest or None if not applicable.
     """
     pass

@@ -43,5 +59,70 @@ class ManifestInterface(object):
   @abstractproperty
   def blob_digests(self):
     """ Returns an iterator over all the blob digests referenced by this manifest,
-        from base to leaf. The blob digests are strings with prefixes.
+        from base to leaf. The blob digests are strings with prefixes. For manifests that reference
+        config as a blob, the blob will be included here as the last entry.
     """

+  @abstractproperty
+  def local_blob_digests(self):
+    """ Returns an iterator over all the *non-remote* blob digests referenced by this manifest,
+        from base to leaf. The blob digests are strings with prefixes. For manifests that reference
+        config as a blob, the blob will be included here as the last entry.
+    """
+
+  @abstractmethod
+  def child_manifests(self, content_retriever):
+    """ Returns an iterator of all manifests that live under this manifest, if any or None if not
+        applicable.
+    """
+
+  @abstractmethod
+  def get_manifest_labels(self, content_retriever):
+    """ Returns a dictionary of all the labels defined inside this manifest or None if this kind
+        of manifest does not support labels. """
+    pass
+
+  @abstractmethod
+  def get_requires_empty_layer_blob(self, content_retriever):
+    """ Whether this schema requires the special empty layer blob. """
+    pass
+
+  @abstractmethod
+  def unsigned(self):
+    """ Returns an unsigned version of this manifest. """
+
+  @abstractproperty
+  def has_legacy_image(self):
+    """ Returns True if this manifest has a legacy V1 image, or False if not. """
+
+  @abstractmethod
+  def generate_legacy_layers(self, images_map, content_retriever):
+    """
+    Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata, starting
+    at the base layer and working towards the leaf.
+
+    If Docker gives us a layer with a v1 image ID that already points to existing
+    content, but the checksums don't match, then we need to rewrite the image ID
+    to something new in order to ensure consistency.
+
+    Returns None if there are no legacy images associated with the manifest.
+    """
+
+  @abstractmethod
+  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
+    """ Returns a schema1 version of the manifest. If this is a manifest list, should return the
+        manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
+        If none, returns None.
+    """
+
+
+@add_metaclass(ABCMeta)
+class ContentRetriever(object):
+  """ Defines the interface for retrieval of various content referenced by a manifest. """
+  @abstractmethod
+  def get_manifest_bytes_with_digest(self, digest):
+    """ Returns the bytes of the manifest with the given digest or None if none found. """
+
+  @abstractmethod
+  def get_blob_bytes_with_digest(self, digest):
+    """ Returns the bytes of the blob with the given digest or None if none found. """
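The new ContentRetriever interface is what lets a parsed manifest lazily pull child manifests and config blobs without binding the image package to the data model. A minimal in-memory implementation, useful as a mental model or test double (the two dicts are hypothetical fixtures, not part of this change):

class InMemoryContentRetriever(ContentRetriever):
  """ Serves manifest and blob bytes out of plain dicts keyed by digest. """
  def __init__(self, manifests_by_digest, blobs_by_digest):
    self._manifests = manifests_by_digest
    self._blobs = blobs_by_digest

  def get_manifest_bytes_with_digest(self, digest):
    return self._manifests.get(digest)

  def get_blob_bytes_with_digest(self, digest):
    return self._blobs.get(digest)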
@@ -20,6 +20,7 @@ from jwt.utils import base64url_encode, base64url_decode

 from digest import digest_tools
 from image.docker import ManifestException
+from image.docker.types import ManifestImageLayer
 from image.docker.interfaces import ManifestInterface
 from image.docker.v1 import DockerV1Metadata

@@ -73,7 +74,7 @@ class InvalidSchema1Signature(ManifestException):


 class Schema1Layer(namedtuple('Schema1Layer', ['digest', 'v1_metadata', 'raw_v1_metadata',
-                                               'compressed_size'])):
+                                               'compressed_size', 'is_remote', 'urls'])):
   """
   Represents all of the data about an individual layer in a given Manifest.
   This is the union of the fsLayers (digest) and the history entries (v1_compatibility).

@@ -151,7 +152,7 @@ class DockerSchema1Manifest(ManifestInterface):
         },
       },
     },
-    'required': [DOCKER_SCHEMA1_SIGNATURES_KEY, DOCKER_SCHEMA1_REPO_TAG_KEY,
+    'required': [DOCKER_SCHEMA1_REPO_TAG_KEY,
                  DOCKER_SCHEMA1_REPO_NAME_KEY, DOCKER_SCHEMA1_FS_LAYERS_KEY,
                  DOCKER_SCHEMA1_HISTORY_KEY],
   }

@@ -170,7 +171,9 @@ class DockerSchema1Manifest(ManifestInterface):
     except ValidationError as ve:
       raise MalformedSchema1Manifest('manifest data does not match schema: %s' % ve)

-    self._signatures = self._parsed[DOCKER_SCHEMA1_SIGNATURES_KEY]
+    self._signatures = self._parsed.get(DOCKER_SCHEMA1_SIGNATURES_KEY)
+    self._architecture = self._parsed.get(DOCKER_SCHEMA1_ARCH_KEY)
+
     self._tag = self._parsed[DOCKER_SCHEMA1_REPO_TAG_KEY]

     repo_name = self._parsed[DOCKER_SCHEMA1_REPO_NAME_KEY]

@@ -191,6 +194,9 @@ class DockerSchema1Manifest(ManifestInterface):
     return DockerSchema1Manifest(encoded_bytes.encode('utf-8'), validate)

   def _validate(self):
+    if not self._signatures:
+      return
+
     for signature in self._signatures:
       bytes_to_verify = '{0}.{1}'.format(signature['protected'],
                                          base64url_encode(self._payload))

@@ -202,17 +208,22 @@ class DockerSchema1Manifest(ManifestInterface):
     if not verified:
       raise InvalidSchema1Signature()

+  @property
+  def is_manifest_list(self):
+    return False
+
   @property
   def schema_version(self):
     return 1

   @property
   def content_type(self):
-    return DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
+    return (DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
+            if self._signatures else DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE)

   @property
   def media_type(self):
-    return DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
+    return self.content_type

   @property
   def signatures(self):

@@ -246,6 +257,10 @@ class DockerSchema1Manifest(ManifestInterface):
   def manifest_dict(self):
     return self._parsed

+  @property
+  def layers_compressed_size(self):
+    return None
+
   @property
   def digest(self):
     return digest_tools.sha256_digest(self._payload)

@@ -254,10 +269,6 @@ class DockerSchema1Manifest(ManifestInterface):
   def image_ids(self):
     return {mdata.v1_metadata.image_id for mdata in self.layers}

-  @property
-  def legacy_image_ids(self):
-    return {mdata.v1_metadata.image_id for mdata in self.layers}
-
   @property
   def parent_image_ids(self):
     return {mdata.v1_metadata.parent_image_id for mdata in self.layers

@@ -267,10 +278,6 @@ class DockerSchema1Manifest(ManifestInterface):
   def checksums(self):
     return list({str(mdata.digest) for mdata in self.layers})

-  @property
-  def leaf_layer_v1_image_id(self):
-    return self.layers[-1].v1_metadata.image_id
-
   @property
   def leaf_layer(self):
     return self.layers[-1]

@@ -293,10 +300,54 @@ class DockerSchema1Manifest(ManifestInterface):
       self._layers = list(self._generate_layers())
     return self._layers

+  def get_layers(self, content_retriever):
+    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
+        does not support layers. """
+    for layer in self.layers:
+      created_datetime = None
+      try:
+        created_datetime = dateutil.parser.parse(layer.v1_metadata.created).replace(tzinfo=None)
+      except:
+        pass
+
+      yield ManifestImageLayer(layer_id=layer.v1_metadata.image_id,
+                               compressed_size=layer.compressed_size,
+                               is_remote=False,
+                               urls=None,
+                               command=layer.v1_metadata.command,
+                               blob_digest=layer.digest,
+                               created_datetime=created_datetime,
+                               internal_layer=layer)
+
   @property
   def blob_digests(self):
     return [str(layer.digest) for layer in self.layers]

+  @property
+  def local_blob_digests(self):
+    return self.blob_digests
+
+  def child_manifests(self, content_retriever):
+    return None
+
+  def get_manifest_labels(self, content_retriever):
+    return self.layers[-1].v1_metadata.labels
+
+  def get_requires_empty_layer_blob(self, content_retriever):
+    return False
+
+  def unsigned(self):
+    if self.media_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
+      return self
+
+    # Create an unsigned version of the manifest.
+    builder = DockerSchema1ManifestBuilder(self._namespace, self._repo_name, self._tag,
+                                           self._architecture)
+    for layer in reversed(self.layers):
+      builder.add_layer(str(layer.digest), layer.raw_v1_metadata)
+
+    return builder.build()
+
   def _generate_layers(self):
     """
     Returns a generator of objects that have the blobSum and v1Compatibility keys in them,

@@ -326,16 +377,48 @@ class DockerSchema1Manifest(ManifestInterface):
                                   command, labels)

     compressed_size = v1_metadata.get('Size')
-    yield Schema1Layer(image_digest, extracted, metadata_string, compressed_size)
+    yield Schema1Layer(image_digest, extracted, metadata_string, compressed_size, False, None)

   @property
   def _payload(self):
+    if self._signatures is None:
+      return self._bytes
+
     protected = str(self._signatures[0][DOCKER_SCHEMA1_PROTECTED_KEY])
     parsed_protected = json.loads(base64url_decode(protected))
     signed_content_head = self._bytes[:parsed_protected[DOCKER_SCHEMA1_FORMAT_LENGTH_KEY]]
     signed_content_tail = base64url_decode(str(parsed_protected[DOCKER_SCHEMA1_FORMAT_TAIL_KEY]))
     return signed_content_head + signed_content_tail

+  def generate_legacy_layers(self, images_map, content_retriever):
+    return self.rewrite_invalid_image_ids(images_map)
+
+  def get_legacy_image_ids(self, content_retriever):
+    return self.legacy_image_ids
+
+  @property
+  def legacy_image_ids(self):
+    return {mdata.v1_metadata.image_id for mdata in self.layers}
+
+  @property
+  def has_legacy_image(self):
+    return True
+
+  @property
+  def leaf_layer_v1_image_id(self):
+    return self.layers[-1].v1_metadata.image_id
+
+  def get_leaf_layer_v1_image_id(self, content_retriever):
+    return self.layers[-1].v1_metadata.image_id
+
+  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
+    """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
+        If none, returns None.
+    """
+    # Note: schema1 *technically* supports non-amd64 architectures, but in practice these were never
+    # used, so to ensure full backwards compatibility, we just always return the schema.
+    return self
+
   def rewrite_invalid_image_ids(self, images_map):
     """
     Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata.

@@ -428,9 +511,9 @@ class DockerSchema1ManifestBuilder(object):
     return self


-  def build(self, json_web_key):
+  def build(self, json_web_key=None):
     """
-    Builds a DockerSchema1Manifest object complete with signature.
+    Builds a DockerSchema1Manifest object, with optional signature.
     """
     payload = OrderedDict(self._base_payload)
     payload.update({

@@ -439,6 +522,8 @@ class DockerSchema1ManifestBuilder(object):
     })

     payload_str = json.dumps(payload, indent=3)
+    if json_web_key is None:
+      return DockerSchema1Manifest(payload_str)

     split_point = payload_str.rfind('\n}')
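With `json_web_key` now optional, unsigned schema1 manifests can be produced directly, which the schema2-to-schema1 conversion path relies on. A short usage sketch (the repository names and layer metadata here are placeholders):

builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'latest')
builder.add_layer('sha256:' + 'ab' * 32, '{"id": "someid"}')
unsigned = builder.build()              # no key: plain v1+json manifest
# signed = builder.build(json_web_key)  # with a key: v1+prettyjws, as before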
|
@ -19,3 +19,12 @@ OCI_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.oci.image.index.v1+json'
|
|||
DOCKER_SCHEMA2_CONTENT_TYPES = {DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
|
||||
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE}
|
||||
OCI_CONTENT_TYPES = {OCI_MANIFEST_CONTENT_TYPE, OCI_MANIFESTLIST_CONTENT_TYPE}
|
||||
|
||||
# The magical digest to be used for "empty" layers.
|
||||
# https://github.com/docker/distribution/blob/749f6afb4572201e3c37325d0ffedb6f32be8950/manifest/schema1/config_builder.go#L22
|
||||
EMPTY_LAYER_BLOB_DIGEST = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4'
|
||||
EMPTY_LAYER_SIZE = 32
|
||||
EMPTY_LAYER_BYTES = "".join(map(chr, [
|
||||
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
|
||||
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
|
||||
]))
|
||||
|
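The digest, size and bytes above are mutually consistent, which is easy to verify: the "magical" digest is simply the SHA-256 of the 32 gzip bytes (an empty gzipped tar). A runnable check, on Python 2 to match the codebase:

import hashlib

EMPTY_LAYER_BYTES = "".join(map(chr, [
  31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
  0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
]))

assert len(EMPTY_LAYER_BYTES) == 32
assert (hashlib.sha256(EMPTY_LAYER_BYTES).hexdigest() ==
        'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4')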
|
|
@ -94,11 +94,16 @@ Example:
|
|||
|
||||
import copy
|
||||
import json
|
||||
import hashlib
|
||||
|
||||
from collections import namedtuple
|
||||
from jsonschema import validate as validate_schema, ValidationError
|
||||
from dateutil.parser import parse as parse_date
|
||||
|
||||
from digest import digest_tools
|
||||
from image.docker import ManifestException
|
||||
|
||||
|
||||
DOCKER_SCHEMA2_CONFIG_HISTORY_KEY = "history"
|
||||
DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY = "rootfs"
|
||||
DOCKER_SCHEMA2_CONFIG_CREATED_KEY = "created"
|
||||
|
@ -107,10 +112,11 @@ DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY = "empty_layer"
|
|||
DOCKER_SCHEMA2_CONFIG_TYPE_KEY = "type"
|
||||
|
||||
|
||||
LayerHistory = namedtuple('LayerHistory', ['created', 'created_datetime', 'command', 'is_empty'])
|
||||
LayerHistory = namedtuple('LayerHistory', ['created', 'created_datetime', 'command', 'is_empty',
|
||||
'raw_entry'])
|
||||
|
||||
|
||||
class MalformedSchema2Config(Exception):
|
||||
class MalformedSchema2Config(ManifestException):
|
||||
"""
|
||||
Raised when a config fails an assertion that should be true according to the Docker Manifest
|
||||
v2.2 Config Specification.
|
||||
|
@ -167,6 +173,8 @@ class DockerSchema2Config(object):
|
|||
}
|
||||
|
||||
def __init__(self, config_bytes):
|
||||
self._config_bytes = config_bytes
|
||||
|
||||
try:
|
||||
self._parsed = json.loads(config_bytes)
|
||||
except ValueError as ve:
|
||||
|
@ -177,6 +185,35 @@ class DockerSchema2Config(object):
|
|||
except ValidationError as ve:
|
||||
raise MalformedSchema2Config('config data does not match schema: %s' % ve)
|
||||
|
||||
@property
|
||||
def digest(self):
|
||||
""" Returns the digest of this config object. """
|
||||
return digest_tools.sha256_digest(self._config_bytes)
|
||||
|
||||
@property
|
||||
def size(self):
|
||||
""" Returns the size of this config object. """
|
||||
return len(self._config_bytes)
|
||||
|
||||
@property
|
||||
def bytes(self):
|
||||
""" Returns the bytes of this config object. """
|
||||
return self._config_bytes
|
||||
|
||||
@property
|
||||
def labels(self):
|
||||
""" Returns a dictionary of all the labels defined in this configuration. """
|
||||
return self._parsed.get('config', {}).get('Labels', {}) or {}
|
||||
|
||||
@property
|
||||
def has_empty_layer(self):
|
||||
""" Returns whether this config contains an empty layer. """
|
||||
for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
|
||||
if history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@property
|
||||
def history(self):
|
||||
""" Returns the history of the image, started at the base layer. """
|
||||
|
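The new digest and size properties make the config addressable exactly like any other blob, so a manifest's config reference can be cross-checked against the stored bytes. An equivalent standalone computation over a hypothetical minimal config body:

import hashlib
import json

# Hypothetical minimal config body; a schema2 manifest's config entry must
# declare exactly this digest and size for the bytes to be accepted.
config_bytes = json.dumps({'history': [], 'rootfs': {'type': 'layers', 'diff_ids': []}})
digest = 'sha256:' + hashlib.sha256(config_bytes).hexdigest()
size = len(config_bytes)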
@@ -185,31 +222,30 @@ class DockerSchema2Config(object):
       yield LayerHistory(created_datetime=created_datetime,
                          created=history_entry[DOCKER_SCHEMA2_CONFIG_CREATED_KEY],
                          command=history_entry[DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY],
-                         is_empty=history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False))
+                         is_empty=history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False),
+                         raw_entry=history_entry)

-  def build_v1_compatibility(self, layer_index, v1_id, v1_parent_id):
+  def build_v1_compatibility(self, history, v1_id, v1_parent_id, is_leaf, compressed_size=None):
     """ Builds the V1 compatibility block for the given layer.
-
-    Note that the layer_index is 0-indexed, with the *base* layer being 0, and the leaf
-    layer being last.
     """
-    history = list(self.history)
-
     # If the layer is the leaf, it gets the full config (minus 2 fields). Otherwise, it gets only
     # IDs.
-    v1_compatibility = copy.deepcopy(self._parsed) if layer_index == len(history) - 1 else {}
+    v1_compatibility = copy.deepcopy(self._parsed) if is_leaf else {}
     v1_compatibility['id'] = v1_id
     if v1_parent_id is not None:
       v1_compatibility['parent'] = v1_parent_id

     if 'created' not in v1_compatibility:
-      v1_compatibility['created'] = history[layer_index].created
+      v1_compatibility['created'] = history.created

     if 'container_config' not in v1_compatibility:
       v1_compatibility['container_config'] = {
-        'Cmd': history[layer_index].command,
+        'Cmd': [history.command],
       }

+    if compressed_size is not None:
+      v1_compatibility['Size'] = compressed_size
+
     # The history and rootfs keys are schema2-config specific.
     v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, None)
     v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY, None)
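The reworked build_v1_compatibility takes the history entry itself (plus a leaf flag and an optional size) instead of a layer index, so callers that interleave empty and non-empty layers no longer need index arithmetic. A hedged usage sketch against a hypothetical `config` instance of DockerSchema2Config:

history_entry = LayerHistory(created='2018-04-03T18:37:09.284840891Z',
                             created_datetime=None,
                             command='/bin/sh -c #(nop) ADD file:abc in /',
                             is_empty=False,
                             raw_entry={})
block = config.build_v1_compatibility(history_entry, v1_id='abc123',
                                      v1_parent_id=None, is_leaf=False,
                                      compressed_size=1234)
# Non-leaf: block carries only id/created/container_config/Size, not the full config.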
@@ -1,14 +1,21 @@
+import logging
 import json

 from cachetools import lru_cache
 from jsonschema import validate as validate_schema, ValidationError

 from digest import digest_tools
+from image.docker import ManifestException
+from image.docker.interfaces import ManifestInterface
+from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
 from image.docker.schema1 import DockerSchema1Manifest
 from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
                                   DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
 from image.docker.schema2.manifest import DockerSchema2Manifest


+logger = logging.getLogger(__name__)
+
 # Keys.
 DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY = 'schemaVersion'
 DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY = 'mediaType'

@@ -24,7 +31,7 @@ DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY = 'features'
 DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY = 'variant'


-class MalformedSchema2ManifestList(Exception):
+class MalformedSchema2ManifestList(ManifestException):
   """
   Raised when a manifest list fails an assertion that should be true according to the
   Docker Manifest v2.2 Specification.

@@ -33,9 +40,9 @@ class MalformedSchema2ManifestList(ManifestException):


 class LazyManifestLoader(object):
-  def __init__(self, manifest_data, lookup_manifest_fn):
+  def __init__(self, manifest_data, content_retriever):
     self._manifest_data = manifest_data
-    self._lookup_manifest_fn = lookup_manifest_fn
+    self._content_retriever = content_retriever
     self._loaded_manifest = None

   @property

@@ -49,7 +56,10 @@ class LazyManifestLoader(object):
   def _load_manifest(self):
     digest = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
     size = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY]
-    manifest_bytes = self._lookup_manifest_fn(digest)
+    manifest_bytes = self._content_retriever.get_manifest_bytes_with_digest(digest)
+    if manifest_bytes is None:
+      raise MalformedSchema2ManifestList('Could not find child manifest with digest `%s`' % digest)
+
     if len(manifest_bytes) != size:
       raise MalformedSchema2ManifestList('Size of manifest does not match that retrieved: %s vs %s',
                                          len(manifest_bytes), size)

@@ -64,7 +74,7 @@ class LazyManifestLoader(object):
       raise MalformedSchema2ManifestList('Unknown manifest content type')


-class DockerSchema2ManifestList(object):
+class DockerSchema2ManifestList(ManifestInterface):
   METASCHEMA = {
     'type': 'object',
     'properties': {

@@ -161,6 +171,7 @@ class DockerSchema2ManifestList(ManifestInterface):

   def __init__(self, manifest_bytes):
+    self._layers = None
     self._manifest_bytes = manifest_bytes

     try:
       self._parsed = json.loads(manifest_bytes)

@@ -172,23 +183,144 @@ class DockerSchema2ManifestList(ManifestInterface):
     except ValidationError as ve:
       raise MalformedSchema2ManifestList('manifest data does not match schema: %s' % ve)

+  @property
+  def is_manifest_list(self):
+    """ Returns whether this manifest is a list. """
+    return True
+
+  @property
+  def schema_version(self):
+    return 2
+
   @property
   def digest(self):
     """ The digest of the manifest, including type prefix. """
     return digest_tools.sha256_digest(self._manifest_bytes)

   @property
   def media_type(self):
     """ The media type of the schema. """
     return self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]

   @property
   def manifest_dict(self):
     """ Returns the manifest as a dictionary ready to be serialized to JSON. """
     return self._parsed

+  @property
+  def bytes(self):
+    return self._manifest_bytes
+
+  def get_layers(self, content_retriever):
+    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
+        does not support layers. """
+    return None
+
+  @property
+  def blob_digests(self):
+    # Manifest lists have no blob digests, since everything is stored as a manifest.
+    return []
+
+  @property
+  def local_blob_digests(self):
+    return self.blob_digests
+
+  @property
+  def layers_compressed_size(self):
+    return None
+
   @lru_cache(maxsize=1)
-  def manifests(self, lookup_manifest_fn):
-    """ Returns the manifests in the list. The `lookup_manifest_fn` is a function
-        that returns the manifest bytes for the specified digest.
+  def manifests(self, content_retriever):
+    """ Returns the manifests in the list.
     """
     manifests = self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]
-    return [LazyManifestLoader(m, lookup_manifest_fn) for m in manifests]
+    return [LazyManifestLoader(m, content_retriever) for m in manifests]

-  def get_v1_compatible_manifest(self, lookup_manifest_fn):
+  def child_manifests(self, content_retriever):
+    return self.manifests(content_retriever)
+
+  def child_manifest_digests(self):
+    return [m[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
+            for m in self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]]
+
+  def get_manifest_labels(self, content_retriever):
+    return None
+
+  def get_leaf_layer_v1_image_id(self, content_retriever):
+    return None
+
+  def get_legacy_image_ids(self, content_retriever):
+    return None
+
+  @property
+  def has_legacy_image(self):
+    return False
+
+  def get_requires_empty_layer_blob(self, content_retriever):
+    return False
+
+  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
     """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
         If none, returns None.
     """
-    for manifest in self.manifests(lookup_manifest_fn):
-      platform = manifest._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
+    for manifest_ref in self.manifests(content_retriever):
+      platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
       architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
       os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
-      if architecture == 'amd64' and os == 'linux':
-        return manifest
+      if architecture != 'amd64' or os != 'linux':
+        continue
+
+      try:
+        manifest = manifest_ref.manifest_obj
+      except (ManifestException, IOError):
+        logger.exception('Could not load child manifest')
+        return None
+
+      return manifest.get_schema1_manifest(namespace_name, repo_name, tag_name, content_retriever)

     return None

+  def unsigned(self):
+    return self
+
+  def generate_legacy_layers(self, images_map, content_retriever):
+    return None
+

 class DockerSchema2ManifestListBuilder(object):
   """
   A convenient abstraction around creating new DockerSchema2ManifestList's.
   """
   def __init__(self):
     self.manifests = []

   def add_manifest(self, manifest, architecture, os):
     """ Adds a manifest to the list. """
+    manifest = manifest.unsigned()  # Make sure we add the unsigned version to the list.
     self.add_manifest_digest(manifest.digest, len(manifest.bytes), manifest.media_type,
                              architecture, os)

   def add_manifest_digest(self, manifest_digest, manifest_size, media_type, architecture, os):
     """ Adds a manifest to the list. """
     self.manifests.append((manifest_digest, manifest_size, media_type, {
       DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: architecture,
       DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: os,
     }))

   def build(self):
     """ Builds and returns the DockerSchema2ManifestList. """
     assert self.manifests

     manifest_list_dict = {
       DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: 2,
       DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
       DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: [
         {
           DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: manifest[2],
           DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: manifest[0],
           DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: manifest[1],
           DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: manifest[3],
         } for manifest in self.manifests
       ],
     }
     return DockerSchema2ManifestList(json.dumps(manifest_list_dict, indent=3))
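Putting the list pieces together: only the amd64+linux child participates in the schema1 fallback, and a failure to load that child now logs and degrades to None instead of raising. A usage sketch of the builder above (digests and sizes are placeholders):

builder = DockerSchema2ManifestListBuilder()
builder.add_manifest_digest('sha256:' + 'ab' * 32, 123,
                            DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, 'amd64', 'linux')
builder.add_manifest_digest('sha256:' + 'cd' * 32, 456,
                            DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, 'arm64', 'linux')
manifest_list = builder.build()
# manifest_list.get_schema1_manifest(ns, repo, tag, retriever) loads only the
# amd64+linux child through the retriever and converts that one.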
@@ -8,10 +8,13 @@ from jsonschema import validate as validate_schema, ValidationError
 from digest import digest_tools
 from image.docker import ManifestException
 from image.docker.interfaces import ManifestInterface
+from image.docker.types import ManifestImageLayer
 from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
                                   DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE,
                                   DOCKER_SCHEMA2_LAYER_CONTENT_TYPE,
-                                  DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE)
+                                  DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE,
+                                  EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_SIZE)
+from image.docker.schema1 import DockerSchema1ManifestBuilder
 from image.docker.schema2.config import DockerSchema2Config

 # Keys.

@@ -29,7 +32,10 @@ DockerV2ManifestLayer = namedtuple('DockerV2ManifestLayer', ['index', 'digest',
                                                              'is_remote', 'urls',
                                                              'compressed_size'])

-LayerWithV1ID = namedtuple('LayerWithV1ID', ['layer', 'v1_id', 'v1_parent_id'])
+DockerV2ManifestImageLayer = namedtuple('DockerV2ManifestImageLayer', ['history', 'blob_layer',
+                                                                       'v1_id', 'v1_parent_id',
+                                                                       'compressed_size',
+                                                                       'blob_digest'])

 logger = logging.getLogger(__name__)

@@ -122,8 +128,9 @@ class DockerSchema2Manifest(ManifestInterface):
   }

   def __init__(self, manifest_bytes):
-    self._layers = None
+    self._filesystem_layers = None
     self._payload = manifest_bytes
+    self._cached_built_config = None

     try:
       self._parsed = json.loads(manifest_bytes)

@@ -135,6 +142,14 @@ class DockerSchema2Manifest(ManifestInterface):
     except ValidationError as ve:
       raise MalformedSchema2Manifest('manifest data does not match schema: %s' % ve)

+    for layer in self.filesystem_layers:
+      if layer.is_remote and not layer.urls:
+        raise MalformedSchema2Manifest('missing `urls` for remote layer')
+
+  @property
+  def is_manifest_list(self):
+    return False
+
   @property
   def schema_version(self):
     return 2

@@ -158,33 +173,185 @@ class DockerSchema2Manifest(ManifestInterface):
                                  digest=config[DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY])

   @property
-  def layers(self):
-    """ Returns the layers of this manifest, from base to leaf. """
-    if self._layers is None:
-      self._layers = list(self._generate_layers())
-    return self._layers
+  def filesystem_layers(self):
+    """ Returns the file system layers of this manifest, from base to leaf. """
+    if self._filesystem_layers is None:
+      self._filesystem_layers = list(self._generate_filesystem_layers())
+    return self._filesystem_layers

   @property
-  def leaf_layer(self):
-    return self.layers[-1]
+  def leaf_filesystem_layer(self):
+    """ Returns the leaf file system layer for this manifest. """
+    return self.filesystem_layers[-1]

   @property
-  def leaf_layer_v1_image_id(self):
-    return list(self.layers_with_v1_ids)[-1].v1_id
+  def layers_compressed_size(self):
+    return sum(layer.compressed_size for layer in self.filesystem_layers)

   @property
-  def legacy_image_ids(self):
-    return [l.v1_id for l in self.layers_with_v1_ids]
+  def has_remote_layer(self):
+    for layer in self.filesystem_layers:
+      if layer.is_remote:
+        return True
+
+    return False

   @property
   def blob_digests(self):
-    return [str(layer.digest) for layer in self.layers]
+    return [str(layer.digest) for layer in self.filesystem_layers] + [str(self.config.digest)]
+
+  @property
+  def local_blob_digests(self):
+    return ([str(layer.digest) for layer in self.filesystem_layers if not layer.urls] +
+            [str(self.config.digest)])
+
+  def get_manifest_labels(self, content_retriever):
+    return self._get_built_config(content_retriever).labels
+
+  def get_layers(self, content_retriever):
+    """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
+        does not support layers. """
+    for image_layer in self._manifest_image_layers(content_retriever):
+      is_remote = image_layer.blob_layer.is_remote if image_layer.blob_layer else False
+      urls = image_layer.blob_layer.urls if image_layer.blob_layer else None
+      yield ManifestImageLayer(layer_id=image_layer.v1_id,
+                               compressed_size=image_layer.compressed_size,
+                               is_remote=is_remote,
+                               urls=urls,
+                               command=image_layer.history.command,
+                               blob_digest=image_layer.blob_digest,
+                               created_datetime=image_layer.history.created_datetime,
+                               internal_layer=image_layer)

   @property
   def bytes(self):
     return self._payload

-  def _generate_layers(self):
+  def child_manifests(self, content_retriever):
+    return None
+
+  def _manifest_image_layers(self, content_retriever):
+    # Retrieve the configuration for the manifest.
+    config = self._get_built_config(content_retriever)
+    history = list(config.history)
+    if len(history) < len(self.filesystem_layers):
+      raise MalformedSchema2Manifest('Found less history than layer blobs')
+
+    digest_history = hashlib.sha256()
+    v1_layer_parent_id = None
+    v1_layer_id = None
+    blob_index = 0
+
+    for history_index, history_entry in enumerate(history):
+      if not history_entry.is_empty and blob_index >= len(self.filesystem_layers):
+        raise MalformedSchema2Manifest('Missing history entry #%s' % blob_index)
+
+      v1_layer_parent_id = v1_layer_id
+      blob_layer = None if history_entry.is_empty else self.filesystem_layers[blob_index]
+      blob_digest = EMPTY_LAYER_BLOB_DIGEST if blob_layer is None else str(blob_layer.digest)
+      compressed_size = EMPTY_LAYER_SIZE if blob_layer is None else blob_layer.compressed_size
+
+      # Create a new synthesized V1 ID for the history layer by hashing its content and
+      # the blob associated with it.
+      digest_history.update(json.dumps(history_entry.raw_entry))
+      digest_history.update("|")
+      digest_history.update(str(history_index))
+      digest_history.update("|")
+      digest_history.update(blob_digest)
+      digest_history.update("||")
+
+      v1_layer_id = digest_history.hexdigest()
+      yield DockerV2ManifestImageLayer(history=history_entry,
+                                       blob_layer=blob_layer,
+                                       blob_digest=blob_digest,
+                                       v1_id=v1_layer_id,
+                                       v1_parent_id=v1_layer_parent_id,
+                                       compressed_size=compressed_size)
+
+      if not history_entry.is_empty:
+        blob_index += 1
+
+  @property
+  def has_legacy_image(self):
+    return not self.has_remote_layer
+
+  def generate_legacy_layers(self, images_map, content_retriever):
+    assert not self.has_remote_layer
+
+    # NOTE: We use the DockerSchema1ManifestBuilder here because it already contains
+    # the logic for generating the DockerV1Metadata. All of this will go away once we get
+    # rid of legacy images in the database, so this is a temporary solution.
+    v1_builder = DockerSchema1ManifestBuilder('', '', '')
+    self._populate_schema1_builder(v1_builder, content_retriever)
+    return v1_builder.build().generate_legacy_layers(images_map, content_retriever)
+
+  def get_leaf_layer_v1_image_id(self, content_retriever):
+    # NOTE: If there exists a layer with remote content, then we consider this manifest
+    # to not support legacy images.
+    if self.has_remote_layer:
+      return None
+
+    return self.get_legacy_image_ids(content_retriever)[-1].v1_id
+
+  def get_legacy_image_ids(self, content_retriever):
+    if self.has_remote_layer:
+      return None
+
+    return [l.v1_id for l in self._manifest_image_layers(content_retriever)]
+
+  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
+    if self.has_remote_layer:
+      return None
+
+    v1_builder = DockerSchema1ManifestBuilder(namespace_name, repo_name, tag_name)
+    self._populate_schema1_builder(v1_builder, content_retriever)
+    return v1_builder.build()
+
+  def unsigned(self):
+    return self
+
+  def get_requires_empty_layer_blob(self, content_retriever):
+    schema2_config = self._get_built_config(content_retriever)
+    if schema2_config is None:
+      return None
+
+    return schema2_config.has_empty_layer
+
+  def _populate_schema1_builder(self, v1_builder, content_retriever):
+    """ Populates a DockerSchema1ManifestBuilder with the layers and config from
+        this schema.
+    """
+    assert not self.has_remote_layer
+    schema2_config = self._get_built_config(content_retriever)
+    layers = list(self._manifest_image_layers(content_retriever))
+
+    for index, layer in enumerate(reversed(layers)):  # Schema 1 layers are in reverse order
+      v1_compatibility = schema2_config.build_v1_compatibility(layer.history,
+                                                               layer.v1_id,
+                                                               layer.v1_parent_id,
+                                                               index == 0,
+                                                               layer.compressed_size)
+      v1_builder.add_layer(str(layer.blob_digest), json.dumps(v1_compatibility))
+
+    return v1_builder
+
+  def _get_built_config(self, content_retriever):
+    if self._cached_built_config:
+      return self._cached_built_config
+
+    config_bytes = content_retriever.get_blob_bytes_with_digest(self.config.digest)
+    if config_bytes is None:
+      raise MalformedSchema2Manifest('Could not load config blob for manifest')
+
+    if len(config_bytes) != self.config.size:
+      msg = 'Size of config does not match that retrieved: %s vs %s' % (len(config_bytes),
+                                                                        self.config.size)
+      raise MalformedSchema2Manifest(msg)
+
+    self._cached_built_config = DockerSchema2Config(config_bytes)
+    return self._cached_built_config
+
+  def _generate_filesystem_layers(self):
     for index, layer in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY]):
       content_type = layer[DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY]
       is_remote = content_type == DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE
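The V1 ID synthesis in _manifest_image_layers deserves a second look: every history entry, including empty ones, folds its raw JSON, its index and its (possibly magic empty-layer) blob digest into one running SHA-256, so the resulting IDs are deterministic across pushes yet unique per prefix. A standalone restatement of the scheme, runnable on Python 2 with hypothetical inputs:

import hashlib
import json

EMPTY_LAYER_BLOB_DIGEST = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4'

def synthesize_v1_ids(history_entries, blob_digests):
  """ history_entries: raw history dicts from the config; blob_digests: digests
      of the non-empty filesystem layers, base to leaf. """
  digest_history = hashlib.sha256()
  blob_iter = iter(blob_digests)
  ids = []
  for index, entry in enumerate(history_entries):
    blob_digest = (EMPTY_LAYER_BLOB_DIGEST if entry.get('empty_layer')
                   else next(blob_iter))
    digest_history.update(json.dumps(entry))
    digest_history.update("|")
    digest_history.update(str(index))
    digest_history.update("|")
    digest_history.update(blob_digest)
    digest_history.update("||")
    ids.append(digest_history.hexdigest())
  return ids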
@ -201,43 +368,65 @@ class DockerSchema2Manifest(ManifestInterface):
|
|||
is_remote=is_remote,
|
||||
urls=layer.get(DOCKER_SCHEMA2_MANIFEST_URLS_KEY))
|
||||
|
||||
@property
|
||||
def layers_with_v1_ids(self):
|
||||
digest_history = hashlib.sha256()
|
||||
v1_layer_parent_id = None
|
||||
v1_layer_id = None
|
||||
|
||||
for layer in self.layers:
|
||||
v1_layer_parent_id = v1_layer_id
|
||||
class DockerSchema2ManifestBuilder(object):
|
||||
"""
|
||||
A convenient abstraction around creating new DockerSchema2Manifests.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.config = None
|
||||
self.filesystem_layers = []
|
||||
|
||||
# Create a new synthesized V1 ID for the layer by adding its digest and index to the
|
||||
# existing digest history hash builder. This will ensure unique V1s across *all* schemas in
|
||||
# a repository.
|
||||
digest_history.update(str(layer.digest))
|
||||
digest_history.update("#")
|
||||
digest_history.update(str(layer.index))
|
||||
digest_history.update("|")
|
||||
v1_layer_id = digest_history.hexdigest()
|
||||
yield LayerWithV1ID(layer=layer, v1_id=v1_layer_id, v1_parent_id=v1_layer_parent_id)

  def set_config(self, schema2_config):
    """ Sets the configuration for the manifest being built. """
    self.set_config_digest(schema2_config.digest, schema2_config.size)

  def populate_schema1_builder(self, v1_builder, lookup_config_fn):
    """ Populates a DockerSchema1ManifestBuilder with the layers and config from
        this schema. The `lookup_config_fn` is a function that, when given the config
        digest SHA, returns the associated configuration JSON bytes for this schema.
    """
    config_bytes = lookup_config_fn(self.config.digest)
    if len(config_bytes) != self.config.size:
      raise MalformedSchema2Manifest('Size of config does not match that retrieved: %s vs %s',
                                     len(config_bytes), self.config.size)

  def set_config_digest(self, config_digest, config_size):
    """ Sets the digest and size of the configuration layer. """
    self.config = DockerV2ManifestConfig(size=config_size, digest=config_digest)

    schema2_config = DockerSchema2Config(config_bytes)

  def add_layer(self, digest, size, urls=None):
    """ Adds a filesystem layer to the manifest. """
    self.filesystem_layers.append(DockerV2ManifestLayer(index=len(self.filesystem_layers),
                                                        digest=digest,
                                                        compressed_size=size,
                                                        urls=urls,
                                                        is_remote=bool(urls)))

    # Build the V1 IDs for the layers.
    layers = list(self.layers_with_v1_ids)
    for layer_with_ids in reversed(layers):  # Schema1 has layers in reverse order
      v1_compatibility = schema2_config.build_v1_compatibility(layer_with_ids.layer.index,
                                                               layer_with_ids.v1_id,
                                                               layer_with_ids.v1_parent_id)
      v1_builder.add_layer(str(layer_with_ids.layer.digest), json.dumps(v1_compatibility))

  def build(self):
    """ Builds and returns the DockerSchema2Manifest. """
    assert self.filesystem_layers
    assert self.config

    return v1_builder

    def _build_layer(layer):
      if layer.urls:
        return {
          DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE,
          DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: layer.compressed_size,
          DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(layer.digest),
          DOCKER_SCHEMA2_MANIFEST_URLS_KEY: layer.urls,
        }

      return {
        DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_LAYER_CONTENT_TYPE,
        DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: layer.compressed_size,
        DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(layer.digest),
      }

    manifest_dict = {
      DOCKER_SCHEMA2_MANIFEST_VERSION_KEY: 2,
      DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,

      # Config
      DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY: {
        DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE,
        DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: self.config.size,
        DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(self.config.digest),
      },

      # Layers
      DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY: [
        _build_layer(layer) for layer in self.filesystem_layers
      ],
    }
    return DockerSchema2Manifest(json.dumps(manifest_dict, indent=3))
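
A hedged usage sketch of the builder API above; the digests and sizes are lifted from the test fixtures further down and are purely illustrative:

builder = DockerSchema2ManifestBuilder()
builder.set_config_digest('sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)
builder.add_layer('sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736', 1234)
builder.add_layer('sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f', 32654,
                  urls=['http://some/url'])  # passing urls marks the layer as remote
manifest = builder.build()
assert manifest.media_type == 'application/vnd.docker.distribution.manifest.v2+json'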

129
image/docker/schema2/test/conversion_data/complex.config.json
Normal file

@ -0,0 +1,129 @@
{
  "architecture": "amd64",
  "config": {
    "Hostname": "",
    "Domainname": "",
    "User": "",
    "AttachStdin": false,
    "AttachStdout": false,
    "AttachStderr": false,
    "ExposedPorts": {
      "3306/tcp": {},
      "33060/tcp": {}
    },
    "Tty": false,
    "OpenStdin": false,
    "StdinOnce": false,
    "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GOSU_VERSION=1.7", "MYSQL_MAJOR=5.7", "MYSQL_VERSION=5.7.24-1debian9"],
    "Cmd": ["mysqld"],
    "ArgsEscaped": true,
    "Image": "sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265",
    "Volumes": {
      "/var/lib/mysql": {}
    },
    "WorkingDir": "",
    "Entrypoint": ["docker-entrypoint.sh"],
    "OnBuild": [],
    "Labels": null
  },
  "container": "54bd04ff79350d28d0da33fa3e483567156c7c9f87a7254d6fa8267b0878c339",
  "container_config": {
    "Hostname": "54bd04ff7935",
    "Domainname": "",
    "User": "",
    "AttachStdin": false,
    "AttachStdout": false,
    "AttachStderr": false,
    "ExposedPorts": {
      "3306/tcp": {},
      "33060/tcp": {}
    },
    "Tty": false,
    "OpenStdin": false,
    "StdinOnce": false,
    "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GOSU_VERSION=1.7", "MYSQL_MAJOR=5.7", "MYSQL_VERSION=5.7.24-1debian9"],
    "Cmd": ["/bin/sh", "-c", "#(nop) ", "CMD [\"mysqld\"]"],
    "ArgsEscaped": true,
    "Image": "sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265",
    "Volumes": {
      "/var/lib/mysql": {}
    },
    "WorkingDir": "",
    "Entrypoint": ["docker-entrypoint.sh"],
    "OnBuild": [],
    "Labels": {}
  },
  "created": "2018-11-16T01:14:20.755008004Z",
  "docker_version": "17.06.2-ce",
  "history": [{
    "created": "2018-11-15T22:45:06.938205528Z",
    "created_by": "/bin/sh -c #(nop) ADD file:dab9baf938799c515ddce14c02f899da5992f0b76a432fa10a2338556a3cb04f in / "
  }, {
    "created": "2018-11-15T22:45:07.243453424Z",
    "created_by": "/bin/sh -c #(nop) CMD [\"bash\"]",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:11:01.00193007Z",
    "created_by": "/bin/sh -c groupadd -r mysql \u0026\u0026 useradd -r -g mysql mysql"
  }, {
    "created": "2018-11-16T01:11:11.128616814Z",
    "created_by": "/bin/sh -c apt-get update \u0026\u0026 apt-get install -y --no-install-recommends gnupg dirmngr \u0026\u0026 rm -rf /var/lib/apt/lists/*"
  }, {
    "created": "2018-11-16T01:11:11.466721945Z",
    "created_by": "/bin/sh -c #(nop) ENV GOSU_VERSION=1.7",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:11:33.651099664Z",
    "created_by": "/bin/sh -c set -x \t\u0026\u0026 apt-get update \u0026\u0026 apt-get install -y --no-install-recommends ca-certificates wget \u0026\u0026 rm -rf /var/lib/apt/lists/* \t\u0026\u0026 wget -O /usr/local/bin/gosu \"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)\" \t\u0026\u0026 wget -O /usr/local/bin/gosu.asc \"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \t\u0026\u0026 gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \t\u0026\u0026 gpgconf --kill all \t\u0026\u0026 rm -rf \"$GNUPGHOME\" /usr/local/bin/gosu.asc \t\u0026\u0026 chmod +x /usr/local/bin/gosu \t\u0026\u0026 gosu nobody true \t\u0026\u0026 apt-get purge -y --auto-remove ca-certificates wget"
  }, {
    "created": "2018-11-16T01:11:34.772616243Z",
    "created_by": "/bin/sh -c mkdir /docker-entrypoint-initdb.d"
  }, {
    "created": "2018-11-16T01:11:46.048879774Z",
    "created_by": "/bin/sh -c apt-get update \u0026\u0026 apt-get install -y --no-install-recommends \t\tpwgen \t\topenssl \t\tperl \t\u0026\u0026 rm -rf /var/lib/apt/lists/*"
  }, {
    "created": "2018-11-16T01:11:49.672488713Z",
    "created_by": "/bin/sh -c set -ex; \tkey='A4A9406876FCBD3C456770C88C718D3B5072E1F5'; \texport GNUPGHOME=\"$(mktemp -d)\"; \tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tgpg --batch --export \"$key\" \u003e /etc/apt/trusted.gpg.d/mysql.gpg; \tgpgconf --kill all; \trm -rf \"$GNUPGHOME\"; \tapt-key list \u003e /dev/null"
  }, {
    "created": "2018-11-16T01:13:49.699875841Z",
    "created_by": "/bin/sh -c #(nop) ENV MYSQL_MAJOR=5.7",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:13:50.087751031Z",
    "created_by": "/bin/sh -c #(nop) ENV MYSQL_VERSION=5.7.24-1debian9",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:13:51.211877582Z",
    "created_by": "/bin/sh -c echo \"deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_MAJOR}\" \u003e /etc/apt/sources.list.d/mysql.list"
  }, {
    "created": "2018-11-16T01:14:17.521774936Z",
    "created_by": "/bin/sh -c { \t\techo mysql-community-server mysql-community-server/data-dir select ''; \t\techo mysql-community-server mysql-community-server/root-pass password ''; \t\techo mysql-community-server mysql-community-server/re-root-pass password ''; \t\techo mysql-community-server mysql-community-server/remove-test-db select false; \t} | debconf-set-selections \t\u0026\u0026 apt-get update \u0026\u0026 apt-get install -y mysql-server=\"${MYSQL_VERSION}\" \u0026\u0026 rm -rf /var/lib/apt/lists/* \t\u0026\u0026 rm -rf /var/lib/mysql \u0026\u0026 mkdir -p /var/lib/mysql /var/run/mysqld \t\u0026\u0026 chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \t\u0026\u0026 chmod 777 /var/run/mysqld \t\u0026\u0026 find /etc/mysql/ -name '*.cnf' -print0 \t\t| xargs -0 grep -lZE '^(bind-address|log)' \t\t| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#\u0026/' \t\u0026\u0026 echo '[mysqld]\\nskip-host-cache\\nskip-name-resolve' \u003e /etc/mysql/conf.d/docker.cnf"
  }, {
    "created": "2018-11-16T01:14:17.959906008Z",
    "created_by": "/bin/sh -c #(nop) VOLUME [/var/lib/mysql]",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:14:18.574646682Z",
    "created_by": "/bin/sh -c #(nop) COPY file:4b5f8335c16a8bc9f76a2164458df1d71cf76facbf16d02f18ce7409122c2146 in /usr/local/bin/ "
  }, {
    "created": "2018-11-16T01:14:19.715707659Z",
    "created_by": "/bin/sh -c ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat"
  }, {
    "created": "2018-11-16T01:14:20.063426223Z",
    "created_by": "/bin/sh -c #(nop) ENTRYPOINT [\"docker-entrypoint.sh\"]",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:14:20.416001274Z",
    "created_by": "/bin/sh -c #(nop) EXPOSE 3306/tcp 33060/tcp",
    "empty_layer": true
  }, {
    "created": "2018-11-16T01:14:20.755008004Z",
    "created_by": "/bin/sh -c #(nop) CMD [\"mysqld\"]",
    "empty_layer": true
  }],
  "os": "linux",
  "rootfs": {
    "type": "layers",
    "diff_ids": ["sha256:ef68f6734aa485edf13a8509fe60e4272428deaf63f446a441b79d47fc5d17d3", "sha256:a588c986cf971b87ee2aacd9b57877c47e68e4981b67793d301720a1d0d03a68", "sha256:0f1205f1cd43db6d5f837f792eecb84e773482eb0fb353a4f3f42c3cabb5747f", "sha256:0ad177796f339bf4f5c114bbd97721536d48b452915479884ff3d16acc1c612f", "sha256:2566141f200b8e249db6663d24063a3e1d0e33622e933fa99bee27a4f5b8db02", "sha256:783b13a988e3ec069e08019c89292fdf4e6316141ed74a6d896a422f7ee30077", "sha256:3d4164460bf0c8c4959e6acb51757d63dea47c162a334f65dfbf32537a4b552f", "sha256:ea66b8e6103f47f1934007a9b4c03c28f0398fdc7f9fbe9b5eea335b10448fed", "sha256:347571a8da208bf019b880ef4c73bad7884ad0570ec70dbfe8f95c6c0b37c082", "sha256:ceb15396dc26b48c1dc6222a4cc3934761c1ec06623d895efdb1cb77517a3887", "sha256:0d954c604c768947cd9630283f96bca6c244b971d004565b57f42db100ca3178"]
  }
}

140
image/docker/schema2/test/conversion_data/complex.schema1.json
Normal file

@ -0,0 +1,140 @@
{
  "schemaVersion": 1,
  "name": "user/test",
  "tag": "1",
  "architecture": "amd64",
  "fsLayers": [
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:e81e5181556720e9c821bdb826dd9dbeb078dd28af8fe84586aa904ff212d117"
    },
    {
      "blobSum": "sha256:5f906b8da5fed2070448fed578b93cb1a995be5bdde5624163fbcb842ce4460f"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:cd2a6583723557a1dc58584f53564f791dbb7a5d264bb2f8d71aa8c5d21ac38c"
    },
    {
      "blobSum": "sha256:a7905d9fbbea59dc29d709d1d61a96c06c26a2d1e506ac5c3832a348969052b8"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:0283dc49ef4e5bc0dc8897b14818de315faeceb0a5272464ff3c48cd4ea3b626"
    },
    {
      "blobSum": "sha256:5ed0ae805b65407ddd0ff1aeb8371de3133e5daed97726717d4225cb7a8efaaa"
    },
    {
      "blobSum": "sha256:e2ae0d063e89542abdd8abd1613e8e27743fa669f4a418c8b0a813991621d892"
    },
    {
      "blobSum": "sha256:1f212fb371f936c524c624e6830242a8cb91b3b710942f9241004dae45828f87"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:283fa4c95fb4e349b501ef8c864b2259500d83ca6529253da12d658aa480cbb5"
    },
    {
      "blobSum": "sha256:936836019e67889c1f5a95732c62c476d2450010988792e99d6e7ece84fdce2f"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:a5a6f2f73cd8abbdc55d0df0d8834f7262713e87d6c8800ea3851f103025e0f0"
    }
  ],
  "history": [
    {
      "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"3306/tcp\":{},\"33060/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOSU_VERSION=1.7\",\"MYSQL_MAJOR=5.7\",\"MYSQL_VERSION=5.7.24-1debian9\"],\"Cmd\":[\"mysqld\"],\"ArgsEscaped\":true,\"Image\":\"sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265\",\"Volumes\":{\"/var/lib/mysql\":{}},\"WorkingDir\":\"\",\"Entrypoint\":[\"docker-entrypoint.sh\"],\"OnBuild\":[],\"Labels\":null},\"container\":\"54bd04ff79350d28d0da33fa3e483567156c7c9f87a7254d6fa8267b0878c339\",\"container_config\":{\"Hostname\":\"54bd04ff7935\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"3306/tcp\":{},\"33060/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOSU_VERSION=1.7\",\"MYSQL_MAJOR=5.7\",\"MYSQL_VERSION=5.7.24-1debian9\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"mysqld\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265\",\"Volumes\":{\"/var/lib/mysql\":{}},\"WorkingDir\":\"\",\"Entrypoint\":[\"docker-entrypoint.sh\"],\"OnBuild\":[],\"Labels\":{}},\"created\":\"2018-11-16T01:14:20.755008004Z\",\"docker_version\":\"17.06.2-ce\",\"id\":\"3cc8ec7719abb3a11fc9ce9b5c5151f368bf3e7e2702d3618e17b4f5055237f8\",\"os\":\"linux\",\"parent\":\"2904b34db6cd1083a7b47ec5e8c1fcb538b9d0ecb790488ec22badabf6143fcb\",\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"2904b34db6cd1083a7b47ec5e8c1fcb538b9d0ecb790488ec22badabf6143fcb\",\"parent\":\"53d4b89c676dd5970862f366ded1a212a24f10a862a12a340ca2f35b5d766308\",\"created\":\"2018-11-16T01:14:20.416001274Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 3306/tcp 33060/tcp\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"53d4b89c676dd5970862f366ded1a212a24f10a862a12a340ca2f35b5d766308\",\"parent\":\"73c0e3215914add0dc50583090572ae5cd78fb16cc3b3427c8874472cdca93fb\",\"created\":\"2018-11-16T01:14:20.063426223Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENTRYPOINT [\\\"docker-entrypoint.sh\\\"]\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"73c0e3215914add0dc50583090572ae5cd78fb16cc3b3427c8874472cdca93fb\",\"parent\":\"95180e8ac981681a12fa3767b32747b21514581605b20a99cf1713c78cf2ddaa\",\"created\":\"2018-11-16T01:14:19.715707659Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"95180e8ac981681a12fa3767b32747b21514581605b20a99cf1713c78cf2ddaa\",\"parent\":\"afb72c06112722395dcb38ffdda4c9564480a69bb0fe587bba8f10d8d0adffaa\",\"created\":\"2018-11-16T01:14:18.574646682Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:4b5f8335c16a8bc9f76a2164458df1d71cf76facbf16d02f18ce7409122c2146 in /usr/local/bin/ \"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"afb72c06112722395dcb38ffdda4c9564480a69bb0fe587bba8f10d8d0adffaa\",\"parent\":\"ccadd71e7e80b1772df1c309938e1cbac71c6deed75c9b21212f72a662ce11be\",\"created\":\"2018-11-16T01:14:17.959906008Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) VOLUME [/var/lib/mysql]\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"ccadd71e7e80b1772df1c309938e1cbac71c6deed75c9b21212f72a662ce11be\",\"parent\":\"e053ced3cc09f28a3ab8547dac6bde4220a5f920c559318ba2c807353c0cbdad\",\"created\":\"2018-11-16T01:14:17.521774936Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c { \\t\\techo mysql-community-server mysql-community-server/data-dir select ''; \\t\\techo mysql-community-server mysql-community-server/root-pass password ''; \\t\\techo mysql-community-server mysql-community-server/re-root-pass password ''; \\t\\techo mysql-community-server mysql-community-server/remove-test-db select false; \\t} | debconf-set-selections \\t\\u0026\\u0026 apt-get update \\u0026\\u0026 apt-get install -y mysql-server=\\\"${MYSQL_VERSION}\\\" \\u0026\\u0026 rm -rf /var/lib/apt/lists/* \\t\\u0026\\u0026 rm -rf /var/lib/mysql \\u0026\\u0026 mkdir -p /var/lib/mysql /var/run/mysqld \\t\\u0026\\u0026 chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \\t\\u0026\\u0026 chmod 777 /var/run/mysqld \\t\\u0026\\u0026 find /etc/mysql/ -name '*.cnf' -print0 \\t\\t| xargs -0 grep -lZE '^(bind-address|log)' \\t\\t| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#\\u0026/' \\t\\u0026\\u0026 echo '[mysqld]\\\\nskip-host-cache\\\\nskip-name-resolve' \\u003e /etc/mysql/conf.d/docker.cnf\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"e053ced3cc09f28a3ab8547dac6bde4220a5f920c559318ba2c807353c0cbdad\",\"parent\":\"0dd7718b64000ac1bfb2c1d4bd3226244c9d55e4b741ef2eddf22c03ee638c3b\",\"created\":\"2018-11-16T01:13:51.211877582Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c echo \\\"deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_MAJOR}\\\" \\u003e /etc/apt/sources.list.d/mysql.list\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"0dd7718b64000ac1bfb2c1d4bd3226244c9d55e4b741ef2eddf22c03ee638c3b\",\"parent\":\"5e0187996d55a7fa5c81fa75caa2cb57677edbd45abfa68a7a8769d8f640466b\",\"created\":\"2018-11-16T01:13:50.087751031Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV MYSQL_VERSION=5.7.24-1debian9\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"5e0187996d55a7fa5c81fa75caa2cb57677edbd45abfa68a7a8769d8f640466b\",\"parent\":\"2d69915517f4a342dd3b3c719212e7349274a213551239b38c54ac0c44e7fb12\",\"created\":\"2018-11-16T01:13:49.699875841Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV MYSQL_MAJOR=5.7\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"2d69915517f4a342dd3b3c719212e7349274a213551239b38c54ac0c44e7fb12\",\"parent\":\"a3492a643c2e7bd54083848276a38e7569e47ccdf42541abd082191f55632e22\",\"created\":\"2018-11-16T01:11:49.672488713Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -ex; \\tkey='A4A9406876FCBD3C456770C88C718D3B5072E1F5'; \\texport GNUPGHOME=\\\"$(mktemp -d)\\\"; \\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \\\"$key\\\"; \\tgpg --batch --export \\\"$key\\\" \\u003e /etc/apt/trusted.gpg.d/mysql.gpg; \\tgpgconf --kill all; \\trm -rf \\\"$GNUPGHOME\\\"; \\tapt-key list \\u003e /dev/null\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"a3492a643c2e7bd54083848276a38e7569e47ccdf42541abd082191f55632e22\",\"parent\":\"2e7e8bdd723f6a45f9d789b8d2595b1f6c0a702c70f6922792296c681cb5a14e\",\"created\":\"2018-11-16T01:11:46.048879774Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tpwgen \\t\\topenssl \\t\\tperl \\t\\u0026\\u0026 rm -rf /var/lib/apt/lists/*\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"2e7e8bdd723f6a45f9d789b8d2595b1f6c0a702c70f6922792296c681cb5a14e\",\"parent\":\"855801645898a11047b72b6740ccc614f49a9cd5bd07f60820ade1635180acb3\",\"created\":\"2018-11-16T01:11:34.772616243Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir /docker-entrypoint-initdb.d\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"855801645898a11047b72b6740ccc614f49a9cd5bd07f60820ade1635180acb3\",\"parent\":\"123f7f7e13504138215a283c07589c9a506f249305ff2c78567ef3d1eaf27314\",\"created\":\"2018-11-16T01:11:33.651099664Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends ca-certificates wget \\u0026\\u0026 rm -rf /var/lib/apt/lists/* \\t\\u0026\\u0026 wget -O /usr/local/bin/gosu \\\"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)\\\" \\t\\u0026\\u0026 wget -O /usr/local/bin/gosu.asc \\\"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \\t\\u0026\\u0026 gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \\t\\u0026\\u0026 gpgconf --kill all \\t\\u0026\\u0026 rm -rf \\\"$GNUPGHOME\\\" /usr/local/bin/gosu.asc \\t\\u0026\\u0026 chmod +x /usr/local/bin/gosu \\t\\u0026\\u0026 gosu nobody true \\t\\u0026\\u0026 apt-get purge -y --auto-remove ca-certificates wget\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"123f7f7e13504138215a283c07589c9a506f249305ff2c78567ef3d1eaf27314\",\"parent\":\"6f3aeec2779f98f81f65151bc886b26eac21c79eecbc79aed3a414e1413643a4\",\"created\":\"2018-11-16T01:11:11.466721945Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV GOSU_VERSION=1.7\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"6f3aeec2779f98f81f65151bc886b26eac21c79eecbc79aed3a414e1413643a4\",\"parent\":\"4597be70a8abf812caed7f0d37ddd738d361ff4fc271e8dc4dde8b7746378d0b\",\"created\":\"2018-11-16T01:11:11.128616814Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends gnupg dirmngr \\u0026\\u0026 rm -rf /var/lib/apt/lists/*\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"4597be70a8abf812caed7f0d37ddd738d361ff4fc271e8dc4dde8b7746378d0b\",\"parent\":\"97569d305060de34859e5d55a8bbb010f4026af7cc4b9ca40294689bd6af1909\",\"created\":\"2018-11-16T01:11:01.00193007Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c groupadd -r mysql \\u0026\\u0026 useradd -r -g mysql mysql\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"97569d305060de34859e5d55a8bbb010f4026af7cc4b9ca40294689bd6af1909\",\"parent\":\"0454203f6769f870345aa82f55f4699dfaab41bcb3e38f0c44c9ecc11ef2a38f\",\"created\":\"2018-11-15T22:45:07.243453424Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"bash\\\"]\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"0454203f6769f870345aa82f55f4699dfaab41bcb3e38f0c44c9ecc11ef2a38f\",\"created\":\"2018-11-15T22:45:06.938205528Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:dab9baf938799c515ddce14c02f899da5992f0b76a432fa10a2338556a3cb04f in / \"]}}"
    }
  ],
  "signatures": [
    {
      "header": {
        "jwk": {
          "crv": "P-256",
          "kid": "BTGA:CY7S:HZ7T:FEUS:DZJD:FNS5:O5U2:BTGQ:SGZZ:AY5P:R5MA:UJEY",
          "kty": "EC",
          "x": "0xF2dZ_HLk8VVrqMLMm838LWFAi60P7V5fBjlhlt7xI",
          "y": "niBqFvBqOvtABZSpMoQoSMT7H13Pb0POo00OX7Xsmvc"
        },
        "alg": "ES256"
      },
      "signature": "w8TITz0xkMNqgchKNSfQ-4OlfIGUnG4MLT4Tt738Z0NiD1bHaWFef8wCCBNuDLiKHllrqcqM6Aj__LhsctSwyA",
      "protected": "eyJmb3JtYXRMZW5ndGgiOjEyODM2LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTgtMTEtMjFUMTk6MTU6MTNaIn0"
    }
  ]
}

66
image/docker/schema2/test/conversion_data/complex.schema2.json
Normal file

@ -0,0 +1,66 @@
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 8171,
    "digest": "sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 22486277,
      "digest": "sha256:a5a6f2f73cd8abbdc55d0df0d8834f7262713e87d6c8800ea3851f103025e0f0"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 1747,
      "digest": "sha256:936836019e67889c1f5a95732c62c476d2450010988792e99d6e7ece84fdce2f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 4500948,
      "digest": "sha256:283fa4c95fb4e349b501ef8c864b2259500d83ca6529253da12d658aa480cbb5"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 1270313,
      "digest": "sha256:1f212fb371f936c524c624e6830242a8cb91b3b710942f9241004dae45828f87"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 115,
      "digest": "sha256:e2ae0d063e89542abdd8abd1613e8e27743fa669f4a418c8b0a813991621d892"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 12091270,
      "digest": "sha256:5ed0ae805b65407ddd0ff1aeb8371de3133e5daed97726717d4225cb7a8efaaa"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 24045,
      "digest": "sha256:0283dc49ef4e5bc0dc8897b14818de315faeceb0a5272464ff3c48cd4ea3b626"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 223,
      "digest": "sha256:a7905d9fbbea59dc29d709d1d61a96c06c26a2d1e506ac5c3832a348969052b8"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 83565354,
      "digest": "sha256:cd2a6583723557a1dc58584f53564f791dbb7a5d264bb2f8d71aa8c5d21ac38c"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 2876,
      "digest": "sha256:5f906b8da5fed2070448fed578b93cb1a995be5bdde5624163fbcb842ce4460f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 121,
      "digest": "sha256:e81e5181556720e9c821bdb826dd9dbeb078dd28af8fe84586aa904ff212d117"
    }
  ]
}

75
image/docker/schema2/test/conversion_data/simple.config.json
Normal file

@ -0,0 +1,75 @@
{
  "architecture": "amd64",
  "config": {
    "Hostname": "",
    "Domainname": "",
    "User": "",
    "AttachStdin": false,
    "AttachStdout": false,
    "AttachStderr": false,
    "Tty": false,
    "OpenStdin": false,
    "StdinOnce": false,
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Cmd": [
      "sh"
    ],
    "Image": "",
    "Volumes": null,
    "WorkingDir": "",
    "Entrypoint": null,
    "OnBuild": null,
    "Labels": {}
  },
  "container": "86fff20ea922659929a4716850cc9b3a2cca6c197f7a7ece7da5b6d9d8ac4954",
  "container_config": {
    "Hostname": "86fff20ea922",
    "Domainname": "",
    "User": "",
    "AttachStdin": true,
    "AttachStdout": true,
    "AttachStderr": true,
    "Tty": true,
    "OpenStdin": true,
    "StdinOnce": true,
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Cmd": [
      "sh"
    ],
    "Image": "busybox",
    "Volumes": null,
    "WorkingDir": "",
    "Entrypoint": null,
    "OnBuild": null,
    "Labels": {}
  },
  "created": "2018-11-20T21:15:01.569237Z",
  "docker_version": "17.09.0-ce",
  "history": [
    {
      "created": "2018-10-02T17:19:34.03981888Z",
      "created_by": "/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / "
    },
    {
      "created": "2018-10-02T17:19:34.239926273Z",
      "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
      "empty_layer": true
    },
    {
      "created": "2018-11-20T21:15:01.569237Z",
      "created_by": "sh"
    }
  ],
  "os": "linux",
  "rootfs": {
    "type": "layers",
    "diff_ids": [
      "sha256:8a788232037eaf17794408ff3df6b922a1aedf9ef8de36afdae3ed0b0381907b",
      "sha256:70d967d052ce14cd372b12663d84046ade5712c3a4ece6078cdb63e75bbfcfa1"
    ]
  }
}

44
image/docker/schema2/test/conversion_data/simple.schema1.json
Normal file

@ -0,0 +1,44 @@
{
  "schemaVersion": 1,
  "name": "devtable/somerepo",
  "tag": "latest",
  "architecture": "amd64",
  "fsLayers": [
    {
      "blobSum": "sha256:28b98663b93a1c984379691300f284ee1536db1b6ecd8a1d59222528f80cee89"
    },
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
    }
  ],
  "history": [
    {
      "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"container\":\"86fff20ea922659929a4716850cc9b3a2cca6c197f7a7ece7da5b6d9d8ac4954\",\"container_config\":{\"Hostname\":\"86fff20ea922\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":true,\"AttachStdout\":true,\"AttachStderr\":true,\"Tty\":true,\"OpenStdin\":true,\"StdinOnce\":true,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"Image\":\"busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-11-20T21:15:01.569237Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"692854afd8718d5285bf99cecfc9d6385f41122d3cea70fc9961b3f23ae0d768\",\"os\":\"linux\",\"parent\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\"}"
    },
    {
      "v1Compatibility": "{\"id\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"parent\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.239926273Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.03981888Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / \"]}}"
    }
  ],
  "signatures": [
    {
      "header": {
        "jwk": {
          "crv": "P-256",
          "kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
          "kty": "EC",
          "x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
          "y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
        },
        "alg": "ES256"
      },
      "signature": "4-nlo2R9Dn3PIGHuhvPkamCzLgFYURziihwZYAnmw5eMKLRj4ir-VeEJI30mDh8ArTeDo-PnMLRNZGRX2NwXHw",
      "protected": "eyJmb3JtYXRMZW5ndGgiOjIzNDEsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMS0yMFQyMToxNzozMVoifQ"
    }
  ]
}

21
image/docker/schema2/test/conversion_data/simple.schema2.json
Normal file

@ -0,0 +1,21 @@
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 1977,
    "digest": "sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 727978,
      "digest": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 190,
      "digest": "sha256:28b98663b93a1c984379691300f284ee1536db1b6ecd8a1d59222528f80cee89"
    }
  ]
}

90
image/docker/schema2/test/conversion_data/ubuntu.config.json
Normal file

@ -0,0 +1,90 @@
{
  "architecture": "amd64",
  "config": {
    "Hostname": "",
    "Domainname": "",
    "User": "",
    "AttachStdin": false,
    "AttachStdout": false,
    "AttachStderr": false,
    "Tty": false,
    "OpenStdin": false,
    "StdinOnce": false,
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Cmd": [
      "/bin/bash"
    ],
    "ArgsEscaped": true,
    "Image": "sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a",
    "Volumes": null,
    "WorkingDir": "",
    "Entrypoint": null,
    "OnBuild": null,
    "Labels": null
  },
  "container": "1501390588c62f6c7c0e4fec25d6587c75c2f330536b9d08c610a56ed013f64b",
  "container_config": {
    "Hostname": "1501390588c6",
    "Domainname": "",
    "User": "",
    "AttachStdin": false,
    "AttachStdout": false,
    "AttachStderr": false,
    "Tty": false,
    "OpenStdin": false,
    "StdinOnce": false,
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Cmd": [
      "/bin/sh",
      "-c",
      "#(nop) ",
      "CMD [\"/bin/bash\"]"
    ],
    "ArgsEscaped": true,
    "Image": "sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a",
    "Volumes": null,
    "WorkingDir": "",
    "Entrypoint": null,
    "OnBuild": null,
    "Labels": {}
  },
  "created": "2018-11-19T21:20:42.235528208Z",
  "docker_version": "17.06.2-ce",
  "history": [
    {
      "created": "2018-11-19T21:20:39.739838469Z",
      "created_by": "/bin/sh -c #(nop) ADD file:39e5bc157a8be63bbb36a142e18b644b0cfff07a8a02b42f7d0c4ee4ba75a5bc in / "
    },
    {
      "created": "2018-11-19T21:20:40.571619714Z",
      "created_by": "/bin/sh -c set -xe \t\t&& echo '#!/bin/sh' > /usr/sbin/policy-rc.d \t&& echo 'exit 101' >> /usr/sbin/policy-rc.d \t&& chmod +x /usr/sbin/policy-rc.d \t\t&& dpkg-divert --local --rename --add /sbin/initctl \t&& cp -a /usr/sbin/policy-rc.d /sbin/initctl \t&& sed -i 's/^exit.*/exit 0/' /sbin/initctl \t\t&& echo 'force-unsafe-io' > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \t\t&& echo 'DPkg::Post-Invoke { \"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\"; };' > /etc/apt/apt.conf.d/docker-clean \t&& echo 'APT::Update::Post-Invoke { \"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\"; };' >> /etc/apt/apt.conf.d/docker-clean \t&& echo 'Dir::Cache::pkgcache \"\"; Dir::Cache::srcpkgcache \"\";' >> /etc/apt/apt.conf.d/docker-clean \t\t&& echo 'Acquire::Languages \"none\";' > /etc/apt/apt.conf.d/docker-no-languages \t\t&& echo 'Acquire::GzipIndexes \"true\"; Acquire::CompressionTypes::Order:: \"gz\";' > /etc/apt/apt.conf.d/docker-gzip-indexes \t\t&& echo 'Apt::AutoRemove::SuggestsImportant \"false\";' > /etc/apt/apt.conf.d/docker-autoremove-suggests"
    },
    {
      "created": "2018-11-19T21:20:41.293060457Z",
      "created_by": "/bin/sh -c rm -rf /var/lib/apt/lists/*"
    },
    {
      "created": "2018-11-19T21:20:42.002883522Z",
      "created_by": "/bin/sh -c mkdir -p /run/systemd && echo 'docker' > /run/systemd/container"
    },
    {
      "created": "2018-11-19T21:20:42.235528208Z",
      "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
      "empty_layer": true
    }
  ],
  "os": "linux",
  "rootfs": {
    "type": "layers",
    "diff_ids": [
      "sha256:bc7f4b25d0ae3524466891c41cefc7c6833c533e00ba80f8063c68da9a8b65fe",
      "sha256:a768c3f3878e96565d2bf0dcf90508261862847b2e7b8fc804a0770c07f0d5d5",
      "sha256:ca2991e4676cba899ad9bc6ad3a044cd0816915f9e97a6f2e67b6accbc779ba5",
      "sha256:b9b7103af585bd8ae9130de947817be7ce76092aa19cf6d2f9d5290440c645eb"
    ]
  }
}

56
image/docker/schema2/test/conversion_data/ubuntu.schema1.json
Normal file

@ -0,0 +1,56 @@
{
  "schemaVersion": 1,
  "name": "devtable/ubuntu",
  "tag": "latest",
  "architecture": "amd64",
  "fsLayers": [
    {
      "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    },
    {
      "blobSum": "sha256:f85999a86bef2603a9e9a4fa488a7c1f82e471cbb76c3b5068e54e1a9320964a"
    },
    {
      "blobSum": "sha256:fa83472a3562898caaf8d77542181a473a84039376f2ba56254619d9317ba00d"
    },
    {
      "blobSum": "sha256:da1315cffa03c17988ae5c66f56d5f50517652a622afc1611a8bdd6c00b1fde3"
    },
    {
      "blobSum": "sha256:32802c0cfa4defde2981bec336096350d0bb490469c494e21f678b1dcf6d831f"
    }
  ],
  "history": [
    {
      "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"ArgsEscaped\":true,\"Image\":\"sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"1501390588c62f6c7c0e4fec25d6587c75c2f330536b9d08c610a56ed013f64b\",\"container_config\":{\"Hostname\":\"1501390588c6\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"/bin/bash\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-11-19T21:20:42.235528208Z\",\"docker_version\":\"17.06.2-ce\",\"id\":\"d71fc6939e162a01d90cefeeb3d7f6d6b2583fac2ef98833ec69a95d12ffeeaa\",\"os\":\"linux\",\"parent\":\"ba7177adc95198e86c00039d17d22f35ed1eed39f4e2c3ffc7b2c29a3e81271a\",\"throwaway\":true}"
    },
    {
      "v1Compatibility": "{\"id\":\"ba7177adc95198e86c00039d17d22f35ed1eed39f4e2c3ffc7b2c29a3e81271a\",\"parent\":\"69d0081dfb37f77fa9c971f367c6b86a3eb4090e7ab56741da954523ec3a786f\",\"created\":\"2018-11-19T21:20:42.002883522Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p /run/systemd \\u0026\\u0026 echo 'docker' \\u003e /run/systemd/container\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"69d0081dfb37f77fa9c971f367c6b86a3eb4090e7ab56741da954523ec3a786f\",\"parent\":\"1bd3843430506ff885fc1a3c1d050c19e2dcf70f8ef6cea1536692fd396c87bc\",\"created\":\"2018-11-19T21:20:41.293060457Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -rf /var/lib/apt/lists/*\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"1bd3843430506ff885fc1a3c1d050c19e2dcf70f8ef6cea1536692fd396c87bc\",\"parent\":\"248632e87271aa5118ebc0ebf46758791e032c481f9702a2a36e7c85e83d33d2\",\"created\":\"2018-11-19T21:20:40.571619714Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -xe \\t\\t\\u0026\\u0026 echo '#!/bin/sh' \\u003e /usr/sbin/policy-rc.d \\t\\u0026\\u0026 echo 'exit 101' \\u003e\\u003e /usr/sbin/policy-rc.d \\t\\u0026\\u0026 chmod +x /usr/sbin/policy-rc.d \\t\\t\\u0026\\u0026 dpkg-divert --local --rename --add /sbin/initctl \\t\\u0026\\u0026 cp -a /usr/sbin/policy-rc.d /sbin/initctl \\t\\u0026\\u0026 sed -i 's/^exit.*/exit 0/' /sbin/initctl \\t\\t\\u0026\\u0026 echo 'force-unsafe-io' \\u003e /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \\t\\t\\u0026\\u0026 echo 'DPkg::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e /etc/apt/apt.conf.d/docker-clean \\t\\u0026\\u0026 echo 'APT::Update::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\t\\u0026\\u0026 echo 'Dir::Cache::pkgcache \\\"\\\"; Dir::Cache::srcpkgcache \\\"\\\";' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\t\\t\\u0026\\u0026 echo 'Acquire::Languages \\\"none\\\";' \\u003e /etc/apt/apt.conf.d/docker-no-languages \\t\\t\\u0026\\u0026 echo 'Acquire::GzipIndexes \\\"true\\\"; Acquire::CompressionTypes::Order:: \\\"gz\\\";' \\u003e /etc/apt/apt.conf.d/docker-gzip-indexes \\t\\t\\u0026\\u0026 echo 'Apt::AutoRemove::SuggestsImportant \\\"false\\\";' \\u003e /etc/apt/apt.conf.d/docker-autoremove-suggests\"]}}"
    },
    {
      "v1Compatibility": "{\"id\":\"248632e87271aa5118ebc0ebf46758791e032c481f9702a2a36e7c85e83d33d2\",\"created\":\"2018-11-19T21:20:39.739838469Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:39e5bc157a8be63bbb36a142e18b644b0cfff07a8a02b42f7d0c4ee4ba75a5bc in / \"]}}"
    }
  ],
  "signatures": [
    {
      "header": {
        "jwk": {
          "crv": "P-256",
          "kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
          "kty": "EC",
          "x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
          "y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
        },
        "alg": "ES256"
      },
      "signature": "0wBIubWqf-71Im54gbPlOjFBH7lr6MFLW75bdb-McFvDnfgSdOIMuJ9NHtKEYNF8qFe9hMoO6_GrSDVTJ-pryQ",
      "protected": "eyJmb3JtYXRMZW5ndGgiOjQ5MjMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMS0yNlQxMDo0MjozMloifQ"
    }
  ]
}

31
image/docker/schema2/test/conversion_data/ubuntu.schema2.json
Normal file

@ -0,0 +1,31 @@
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 3894,
    "digest": "sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 32102249,
      "digest": "sha256:32802c0cfa4defde2981bec336096350d0bb490469c494e21f678b1dcf6d831f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 847,
      "digest": "sha256:da1315cffa03c17988ae5c66f56d5f50517652a622afc1611a8bdd6c00b1fde3"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 556,
      "digest": "sha256:fa83472a3562898caaf8d77542181a473a84039376f2ba56254619d9317ba00d"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 162,
      "digest": "sha256:f85999a86bef2603a9e9a4fa488a7c1f82e471cbb76c3b5068e54e1a9320964a"
    }
  ]
}
@ -118,7 +118,8 @@ def test_valid_config():
    assert history[2].command == 'sh'

    for index, history_entry in enumerate(history):
      v1_compat = config.build_v1_compatibility(index, 'somev1id', 'someparentid')
      v1_compat = config.build_v1_compatibility(history_entry, 'somev1id', 'someparentid',
                                                index == 3)
      assert v1_compat['id'] == 'somev1id'
      assert v1_compat['parent'] == 'someparentid'

@ -126,4 +127,6 @@ def test_valid_config():
        assert v1_compat['container_config'] == config._parsed['container_config']
      else:
        assert 'Hostname' not in v1_compat['container_config']
        assert v1_compat['container_config']['Cmd'] == history_entry.command
        assert v1_compat['container_config']['Cmd'] == [history_entry.command]

    assert config.labels == {}
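
The hunk above tracks the signature change to build_v1_compatibility: it now receives the history entry itself plus a flag that, judging by these tests and the manifest code earlier, marks the most recent (leaf) entry, rather than a bare index; the manifest code additionally passes the layer's compressed size. A sketch of the new call shape with placeholder IDs:

v1_compat = config.build_v1_compatibility(history_entry,   # the full history entry, not an index
                                          'somev1id',      # synthesized v1 image ID
                                          'someparentid',  # parent's synthesized ID
                                          index == 3)      # True only for the last (leaf) entry here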

83
image/docker/schema2/test/test_conversion.py
Normal file

@ -0,0 +1,83 @@
import os
import json

import pytest

from image.docker.schema1 import DockerSchema1Manifest
from image.docker.schema2.manifest import DockerSchema2Manifest
from image.docker.schemautil import ContentRetrieverForTesting

def _get_test_file_contents(test_name, kind):
  filename = '%s.%s.json' % (test_name, kind)
  data_dir = os.path.dirname(__file__)
  with open(os.path.join(data_dir, 'conversion_data', filename), 'r') as f:
    return f.read()


@pytest.mark.parametrize('name, config_sha', [
  ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
  ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
  ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
])
def test_legacy_layers(name, config_sha):
  cr = {}
  cr[config_sha] = _get_test_file_contents(name, 'config')
  retriever = ContentRetrieverForTesting(cr)

  schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
  schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)

  # Check legacy layers
  schema2_legacy_layers = list(schema2.generate_legacy_layers({}, retriever))
  schema1_legacy_layers = list(schema1.generate_legacy_layers({}, retriever))
  assert len(schema1_legacy_layers) == len(schema2_legacy_layers)

  for index in range(0, len(schema1_legacy_layers)):
    schema1_legacy_layer = schema1_legacy_layers[index]
    schema2_legacy_layer = schema2_legacy_layers[index]
    assert schema1_legacy_layer.content_checksum == schema2_legacy_layer.content_checksum
    assert schema1_legacy_layer.comment == schema2_legacy_layer.comment
    assert schema1_legacy_layer.command == schema2_legacy_layer.command


@pytest.mark.parametrize('name, config_sha', [
  ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
  ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
  ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
])
def test_conversion(name, config_sha):
  cr = {}
  cr[config_sha] = _get_test_file_contents(name, 'config')
  retriever = ContentRetrieverForTesting(cr)

  schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
  schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)

  converted = schema2.get_schema1_manifest('devtable', 'somerepo', 'latest', retriever)
  assert len(converted.layers) == len(schema1.layers)

  image_id_map = {}
  for index in range(0, len(converted.layers)):
    converted_layer = converted.layers[index]
    schema1_layer = schema1.layers[index]

    image_id_map[schema1_layer.v1_metadata.image_id] = converted_layer.v1_metadata.image_id

    assert str(schema1_layer.digest) == str(converted_layer.digest)

    schema1_parent_id = schema1_layer.v1_metadata.parent_image_id
    converted_parent_id = converted_layer.v1_metadata.parent_image_id
    assert (schema1_parent_id is None) == (converted_parent_id is None)

    if schema1_parent_id is not None:
      assert image_id_map[schema1_parent_id] == converted_parent_id

    assert schema1_layer.v1_metadata.created == converted_layer.v1_metadata.created
    assert schema1_layer.v1_metadata.comment == converted_layer.v1_metadata.comment
    assert schema1_layer.v1_metadata.command == converted_layer.v1_metadata.command
    assert schema1_layer.v1_metadata.labels == converted_layer.v1_metadata.labels

    schema1_container_config = json.loads(schema1_layer.raw_v1_metadata)['container_config']
    converted_container_config = json.loads(converted_layer.raw_v1_metadata)['container_config']

    assert schema1_container_config == converted_container_config
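
The image_id_map remapping exists because converted v1 image IDs are synthesized from the digest history, so they never literally equal the Docker-generated IDs in the schema1 fixtures; the test verifies that the parent/child chain survives conversion, not the raw IDs. The same fixtures can be exercised ad hoc, roughly:

retriever = ContentRetrieverForTesting({
  'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e':
      _get_test_file_contents('complex', 'config'),
})
schema2 = DockerSchema2Manifest(_get_test_file_contents('complex', 'schema2'))
converted = schema2.get_schema1_manifest('devtable', 'somerepo', 'latest', retriever)
assert converted.schema_version == 1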
@ -3,8 +3,10 @@ import pytest

from image.docker.schema1 import DockerSchema1Manifest
from image.docker.schema2.manifest import DockerSchema2Manifest
from image.docker.schema2.list import MalformedSchema2ManifestList, DockerSchema2ManifestList
from image.docker.schema2.list import (MalformedSchema2ManifestList, DockerSchema2ManifestList,
                                       DockerSchema2ManifestListBuilder)
from image.docker.schema2.test.test_manifest import MANIFEST_BYTES as v22_bytes
from image.docker.schemautil import ContentRetrieverForTesting
from image.docker.test.test_schema1 import MANIFEST_BYTES as v21_bytes

@pytest.mark.parametrize('json_data', [
@ -27,7 +29,7 @@ MANIFESTLIST_BYTES = json.dumps({
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 983,
      "size": 946,
      "digest": "sha256:e6",
      "platform": {
        "architecture": "ppc64le",
@ -49,17 +51,38 @@ MANIFESTLIST_BYTES = json.dumps({
  ]
})

NO_AMD_MANIFESTLIST_BYTES = json.dumps({
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 946,
      "digest": "sha256:e6",
      "platform": {
        "architecture": "ppc64le",
        "os": "linux",
      }
    },
  ]
})

retriever = ContentRetrieverForTesting({
  'sha256:e6': v22_bytes,
  'sha256:5b': v21_bytes,
})

def test_valid_manifestlist():
  def _get_manifest(digest):
    if digest == 'sha256:e6':
      return v22_bytes
    else:
      return v21_bytes

  manifestlist = DockerSchema2ManifestList(MANIFESTLIST_BYTES)
  assert len(manifestlist.manifests(_get_manifest)) == 2
  assert len(manifestlist.manifests(retriever)) == 2

  for index, manifest in enumerate(manifestlist.manifests(_get_manifest)):
  assert manifestlist.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'
  assert manifestlist.bytes == MANIFESTLIST_BYTES
  assert manifestlist.manifest_dict == json.loads(MANIFESTLIST_BYTES)
  assert manifestlist.get_layers(retriever) is None
  assert not manifestlist.blob_digests

  for index, manifest in enumerate(manifestlist.manifests(retriever)):
    if index == 0:
      assert isinstance(manifest.manifest_obj, DockerSchema2Manifest)
      assert manifest.manifest_obj.schema_version == 2
@ -67,4 +90,26 @@ def test_valid_manifestlist():
      assert isinstance(manifest.manifest_obj, DockerSchema1Manifest)
      assert manifest.manifest_obj.schema_version == 1

  assert manifestlist.get_v1_compatible_manifest(_get_manifest).manifest_obj.schema_version == 1
  compatible_manifest = manifestlist.get_schema1_manifest('foo', 'bar', 'baz', retriever)
  assert compatible_manifest.schema_version == 1


def test_get_schema1_manifest_no_matching_list():
  manifestlist = DockerSchema2ManifestList(NO_AMD_MANIFESTLIST_BYTES)
  assert len(manifestlist.manifests(retriever)) == 1

  assert manifestlist.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'
  assert manifestlist.bytes == NO_AMD_MANIFESTLIST_BYTES

  compatible_manifest = manifestlist.get_schema1_manifest('foo', 'bar', 'baz', retriever)
  assert compatible_manifest is None


def test_builder():
  existing = DockerSchema2ManifestList(MANIFESTLIST_BYTES)
  builder = DockerSchema2ManifestListBuilder()
  for index, manifest in enumerate(existing.manifests(retriever)):
    builder.add_manifest(manifest.manifest_obj, "amd64", "os")

  built = builder.build()
  assert len(built.manifests(retriever)) == 2
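
A hedged sketch of the list-builder API exercised by test_builder, assuming two per-architecture manifests built elsewhere; the test above passes the architecture and OS strings positionally:

list_builder = DockerSchema2ManifestListBuilder()
list_builder.add_manifest(amd64_manifest, 'amd64', 'linux')      # amd64_manifest: a DockerSchema2Manifest
list_builder.add_manifest(ppc64le_manifest, 'ppc64le', 'linux')  # hypothetical second manifest
manifest_list = list_builder.build()
assert manifest_list.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'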
@ -3,9 +3,13 @@ import pytest

from app import docker_v2_signing_key
from image.docker.schema1 import (DockerSchema1ManifestBuilder,
                                  DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE)
from image.docker.schema2.manifest import MalformedSchema2Manifest, DockerSchema2Manifest
                                  DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE,
                                  DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE)
from image.docker.schema2.manifest import (MalformedSchema2Manifest, DockerSchema2Manifest,
                                           DockerSchema2ManifestBuilder, EMPTY_LAYER_BLOB_DIGEST)
from image.docker.schema2.test.test_config import CONFIG_BYTES
from image.docker.schemautil import ContentRetrieverForTesting


@pytest.mark.parametrize('json_data', [
  '',
@ -29,6 +33,38 @@ MANIFEST_BYTES = json.dumps({
    "size": 1885,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 1234,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 32654,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 16724,
      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 73109,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
    },
  ],
})

REMOTE_MANIFEST_BYTES = json.dumps({
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 1885,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
@@ -59,39 +95,255 @@ def test_valid_manifest():
  assert manifest.config.size == 1885
  assert str(manifest.config.digest) == 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'
  assert manifest.media_type == "application/vnd.docker.distribution.manifest.v2+json"
  assert not manifest.has_remote_layer
  assert manifest.has_legacy_image

  assert len(manifest.layers) == 4
  assert manifest.layers[0].is_remote
  assert manifest.layers[0].compressed_size == 1234
  assert str(manifest.layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
  assert manifest.layers[0].urls

  retriever = ContentRetrieverForTesting.for_config({
    "config": {
      "Labels": {},
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
    ],
  }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

  assert manifest.leaf_layer == manifest.layers[3]
  assert not manifest.leaf_layer.is_remote
  assert manifest.leaf_layer.compressed_size == 73109

  assert len(manifest.filesystem_layers) == 4
  assert manifest.filesystem_layers[0].compressed_size == 1234
  assert str(manifest.filesystem_layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
  assert not manifest.filesystem_layers[0].is_remote

  assert manifest.leaf_filesystem_layer == manifest.filesystem_layers[3]
  assert not manifest.leaf_filesystem_layer.is_remote
  assert manifest.leaf_filesystem_layer.compressed_size == 73109

  blob_digests = list(manifest.blob_digests)
  assert len(blob_digests) == len(manifest.layers)
  assert blob_digests == [str(layer.digest) for layer in manifest.layers]
  expected = [str(layer.digest) for layer in manifest.filesystem_layers] + [manifest.config.digest]
  assert blob_digests == expected
  assert list(manifest.local_blob_digests) == expected

  manifest_image_layers = list(manifest.get_layers(retriever))
  assert len(manifest_image_layers) == len(list(manifest.filesystem_layers))
  for index in range(0, 4):
    assert manifest_image_layers[index].blob_digest == str(manifest.filesystem_layers[index].digest)

def test_valid_remote_manifest():
  manifest = DockerSchema2Manifest(REMOTE_MANIFEST_BYTES)
  assert manifest.config.size == 1885
  assert str(manifest.config.digest) == 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'
  assert manifest.media_type == "application/vnd.docker.distribution.manifest.v2+json"
  assert manifest.has_remote_layer

  assert len(manifest.filesystem_layers) == 4
  assert manifest.filesystem_layers[0].compressed_size == 1234
  assert str(manifest.filesystem_layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
  assert manifest.filesystem_layers[0].is_remote
  assert manifest.filesystem_layers[0].urls == ['http://some/url']

  assert manifest.leaf_filesystem_layer == manifest.filesystem_layers[3]
  assert not manifest.leaf_filesystem_layer.is_remote
  assert manifest.leaf_filesystem_layer.compressed_size == 73109

  expected = set([str(layer.digest) for layer in manifest.filesystem_layers] +
                 [manifest.config.digest])

  blob_digests = set(manifest.blob_digests)
  local_digests = set(manifest.local_blob_digests)

  assert blob_digests == expected
  assert local_digests == (expected - {manifest.filesystem_layers[0].digest})

  assert manifest.has_remote_layer
  assert manifest.get_leaf_layer_v1_image_id(None) is None
  assert manifest.get_legacy_image_ids(None) is None

  retriever = ContentRetrieverForTesting.for_config({
    "config": {
      "Labels": {},
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
    ],
  }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

  manifest_image_layers = list(manifest.get_layers(retriever))
  assert len(manifest_image_layers) == len(list(manifest.filesystem_layers))
  for index in range(0, 4):
    assert manifest_image_layers[index].blob_digest == str(manifest.filesystem_layers[index].digest)

def test_schema2_builder():
  manifest = DockerSchema2Manifest(MANIFEST_BYTES)

  builder = DockerSchema2ManifestBuilder()
  builder.set_config_digest(manifest.config.digest, manifest.config.size)

  for layer in manifest.filesystem_layers:
    builder.add_layer(layer.digest, layer.compressed_size, urls=layer.urls)

  built = builder.build()
  assert built.filesystem_layers == manifest.filesystem_layers
  assert built.config == manifest.config


def test_get_manifest_labels():
  labels = dict(foo='bar', baz='meh')
  retriever = ContentRetrieverForTesting.for_config({
    "config": {
      "Labels": labels,
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [],
  }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

  manifest = DockerSchema2Manifest(MANIFEST_BYTES)
  assert manifest.get_manifest_labels(retriever) == labels

def test_build_schema1():
  manifest = DockerSchema2Manifest(MANIFEST_BYTES)
  assert not manifest.has_remote_layer

  retriever = ContentRetrieverForTesting({
    'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7': CONFIG_BYTES,
  })

  builder = DockerSchema1ManifestBuilder('somenamespace', 'somename', 'sometag')
  manifest.populate_schema1_builder(builder, lambda digest: CONFIG_BYTES)
  manifest._populate_schema1_builder(builder, retriever)
  schema1 = builder.build(docker_v2_signing_key)

  assert schema1.media_type == DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
  assert len(schema1.layers) == len(manifest.layers)
  assert set(schema1.image_ids) == set([l.v1_id for l in manifest.layers_with_v1_ids])
  assert set(schema1.parent_image_ids) == set([l.v1_parent_id for l in manifest.layers_with_v1_ids if l.v1_parent_id])

  manifest_layers = list(manifest.layers_with_v1_ids)
  for index, layer in enumerate(schema1.layers):
    assert layer.digest == manifest_layers[index].layer.digest
    assert layer.v1_metadata.image_id == manifest_layers[index].v1_id
    assert layer.v1_metadata.parent_image_id == manifest_layers[index].v1_parent_id

  for index, digest in enumerate(schema1.blob_digests):
    assert digest == str(list(manifest.blob_digests)[index])

def test_get_schema1_manifest():
  retriever = ContentRetrieverForTesting.for_config({
    "config": {
      "Labels": {},
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "foo"
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "bar"
      },
    ],
  }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)

  manifest = DockerSchema2Manifest(MANIFEST_BYTES)
  schema1 = manifest.get_schema1_manifest('somenamespace', 'somename', 'sometag', retriever)
  assert schema1 is not None
  assert schema1.media_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE

def test_generate_legacy_layers():
  builder = DockerSchema2ManifestBuilder()
  builder.add_layer('sha256:abc123', 123)
  builder.add_layer('sha256:def456', 789)
  builder.set_config_digest('sha256:def456', 2000)
  manifest = builder.build()

  retriever = ContentRetrieverForTesting.for_config({
    "config": {
    },
    "rootfs": {"type": "layers", "diff_ids": []},
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "base"
      },
      {
        "created": "2018-04-06T18:37:09.284840891Z",
        "created_by": "middle",
        "empty_layer": True,
      },
      {
        "created": "2018-04-12T18:37:09.284840891Z",
        "created_by": "leaf"
      },
    ],
  }, 'sha256:def456', 2000)

  legacy_layers = list(manifest.generate_legacy_layers({}, retriever))
  assert len(legacy_layers) == 3
  assert legacy_layers[0].content_checksum == 'sha256:abc123'
  assert legacy_layers[1].content_checksum == EMPTY_LAYER_BLOB_DIGEST
  assert legacy_layers[2].content_checksum == 'sha256:def456'

  assert legacy_layers[0].created == "2018-04-03T18:37:09.284840891Z"
  assert legacy_layers[1].created == "2018-04-06T18:37:09.284840891Z"
  assert legacy_layers[2].created == "2018-04-12T18:37:09.284840891Z"

  assert legacy_layers[0].command == '["base"]'
  assert legacy_layers[1].command == '["middle"]'
  assert legacy_layers[2].command == '["leaf"]'

  assert legacy_layers[2].parent_image_id == legacy_layers[1].image_id
  assert legacy_layers[1].parent_image_id == legacy_layers[0].image_id
  assert legacy_layers[0].parent_image_id is None

  assert legacy_layers[1].image_id != legacy_layers[2].image_id
  assert legacy_layers[0].image_id != legacy_layers[1].image_id

def test_remote_layer_manifest():
  builder = DockerSchema2ManifestBuilder()
  builder.set_config_digest('sha256:abcd', 1234)
  builder.add_layer('sha256:adef', 1234, urls=['http://some/url'])
  builder.add_layer('sha256:1352', 4567)
  builder.add_layer('sha256:1353', 4567)
  manifest = builder.build()

  assert manifest.has_remote_layer
  assert manifest.get_leaf_layer_v1_image_id(None) is None
  assert manifest.get_legacy_image_ids(None) is None
  assert not manifest.has_legacy_image

  schema1 = manifest.get_schema1_manifest('somenamespace', 'somename', 'sometag', None)
  assert schema1 is None

  assert set(manifest.blob_digests) == {'sha256:adef', 'sha256:abcd', 'sha256:1352', 'sha256:1353'}
  assert set(manifest.local_blob_digests) == {'sha256:abcd', 'sha256:1352', 'sha256:1353'}

22 image/docker/schemas.py Normal file

@@ -0,0 +1,22 @@
from image.docker import ManifestException
from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES
from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
                                  DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE)
from image.docker.schema2.manifest import DockerSchema2Manifest
from image.docker.schema2.list import DockerSchema2ManifestList


def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True):
  """ Parses and returns a manifest from the given bytes, for the given media type.
      Raises a ManifestException if the parse fails for some reason.
  """
  if media_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
    return DockerSchema2Manifest(manifest_bytes)

  if media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
    return DockerSchema2ManifestList(manifest_bytes)

  if media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
    return DockerSchema1Manifest(manifest_bytes, validate=validate)

  raise ManifestException('Unknown or unsupported manifest media type `%s`' % media_type)
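For reference, a minimal sketch of how this dispatch can be driven; the manifest_bytes value below is an illustrative stand-in for a request body, and the media type stands in for its Content-Type header:

    from image.docker import ManifestException
    from image.docker.schemas import parse_manifest_from_bytes
    from image.docker.schema2 import DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE

    manifest_bytes = '{"schemaVersion": 2}'  # illustrative payload only
    try:
      parsed = parse_manifest_from_bytes(manifest_bytes, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
      print(parsed.digest)
    except ManifestException as me:
      # Malformed or unsupported payloads surface as a ManifestException.
      print('Rejected manifest: %s' % me)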
24 image/docker/schemautil.py Normal file

@@ -0,0 +1,24 @@
import json

from image.docker.interfaces import ContentRetriever


class ContentRetrieverForTesting(ContentRetriever):
  def __init__(self, digests=None):
    self.digests = digests or {}

  def add_digest(self, digest, content):
    self.digests[digest] = content

  def get_manifest_bytes_with_digest(self, digest):
    return self.digests.get(digest)

  def get_blob_bytes_with_digest(self, digest):
    return self.digests.get(digest)

  @classmethod
  def for_config(cls, config_obj, digest, size):
    config_str = json.dumps(config_obj)
    padded_string = config_str + ' ' * (size - len(config_str))
    digests = {}
    digests[digest] = padded_string
    return ContentRetrieverForTesting(digests)
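As the schema2 tests above illustrate, for_config pads the serialized config with spaces so the registered bytes match a declared size. A small usage sketch; the digest here is a placeholder, since for_config does not verify it against the bytes:

    digest = 'sha256:' + ('0' * 64)  # placeholder digest; not validated by for_config
    retriever = ContentRetrieverForTesting.for_config(
      {"config": {"Labels": {}}, "rootfs": {"type": "layers", "diff_ids": []}, "history": []},
      digest, 1885)
    assert len(retriever.get_blob_bytes_with_digest(digest)) == 1885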
@@ -28,10 +28,9 @@ class SquashedDockerImageFormatter(TarImageFormatter):
  # daemon dies when trying to load the entire tar into memory.
  SIZE_MULTIPLIER = 1.2

  def stream_generator(self, tag, manifest, synthetic_image_id, layer_iterator,
  def stream_generator(self, tag, parsed_manifest, synthetic_image_id, layer_iterator,
                       tar_stream_getter_iterator, reporter=None):
    image_mtime = 0
    parsed_manifest = manifest.get_parsed_manifest()
    created = parsed_manifest.created_datetime
    if created is not None:
      image_mtime = calendar.timegm(created.utctimetuple())

@@ -83,6 +83,21 @@ def test_valid_manifest():
  assert manifest.leaf_layer == manifest.layers[1]
  assert manifest.created_datetime is None

  unsigned = manifest.unsigned()
  assert unsigned.namespace == manifest.namespace
  assert unsigned.repo_name == manifest.repo_name
  assert unsigned.tag == manifest.tag
  assert unsigned.layers == manifest.layers
  assert unsigned.blob_digests == manifest.blob_digests
  assert unsigned.digest != manifest.digest

  image_layers = list(manifest.get_layers(None))
  assert len(image_layers) == 2
  for index in range(0, 2):
    assert image_layers[index].layer_id == manifest.layers[index].v1_metadata.image_id
    assert image_layers[index].blob_digest == manifest.layers[index].digest
    assert image_layers[index].command == manifest.layers[index].v1_metadata.command


def test_validate_manifest():
  test_dir = os.path.dirname(os.path.abspath(__file__))

18 image/docker/test/test_schemas.py Normal file

@@ -0,0 +1,18 @@
import pytest

from image.docker.schemas import parse_manifest_from_bytes
from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
from image.docker.schema2 import DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE
from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
from image.docker.test.test_schema1 import MANIFEST_BYTES as SCHEMA1_BYTES
from image.docker.schema2.test.test_list import MANIFESTLIST_BYTES
from image.docker.schema2.test.test_manifest import MANIFEST_BYTES as SCHEMA2_BYTES


@pytest.mark.parametrize('media_type, manifest_bytes', [
  (DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE, SCHEMA1_BYTES),
  (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, SCHEMA2_BYTES),
  (DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE, MANIFESTLIST_BYTES),
])
def test_parse_manifest_from_bytes(media_type, manifest_bytes):
  assert parse_manifest_from_bytes(manifest_bytes, media_type, validate=False)
6 image/docker/types.py Normal file

@@ -0,0 +1,6 @@
from collections import namedtuple

ManifestImageLayer = namedtuple('ManifestImageLayer', ['layer_id', 'compressed_size',
                                                       'is_remote', 'urls', 'command',
                                                       'blob_digest', 'created_datetime',
                                                       'internal_layer'])
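A hypothetical construction, only to show which fields the schema parsers are expected to populate; every value below is illustrative:

    from image.docker.types import ManifestImageLayer

    layer = ManifestImageLayer(layer_id='someid', compressed_size=1234, is_remote=False,
                               urls=None, command='["foo"]',
                               blob_digest='sha256:' + ('0' * 64),  # illustrative digest
                               created_datetime=None, internal_layer=None)
    assert not layer.is_remote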
@@ -29,6 +29,7 @@ from data.registry_model.registry_pre_oci_model import pre_oci_model
from app import app, storage as store, tf
from storage.basestorage import StoragePaths
from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES

from workers import repositoryactioncounter

@@ -435,6 +436,9 @@ def initialize_database():
  for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
    MediaType.create(name=media_type)

  for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
    MediaType.create(name=media_type)

  LabelSourceType.create(name='manifest')
  LabelSourceType.create(name='api', mutable=True)
  LabelSourceType.create(name='internal')

@@ -176,6 +176,10 @@
  padding-top: 0px;
}

.repo-panel-tags-element .manifest-list .labels-col {
  padding-top: 10px;
}

.repo-panel-tags-element .signing-delegations-list {
  margin-top: 8px;
}

@@ -258,4 +262,39 @@
  margin-bottom: 4px;
  text-align: center;
  padding: 4px;
}

.repo-panel-tags-element .manifest-list-icons {
  display: inline-block;
  float: right;
}

.repo-panel-tags-element .manifest-list-manifest-icon {
  display: inline-block;
  margin-right: 4px;
  background-color: #e8f1f6;
  padding: 6px;
  border-radius: 4px;
  font-size: 14px;
  padding-top: 4px;
  padding-bottom: 4px;
  vertical-align: middle;
}

.repo-panel-tags-element .secscan-manifestlist {
  color: #aaa;
  font-size: 12px;
}

.repo-panel-tags-element .manifest-list-view td {
  border-bottom: 0px;
  border-top: 1px dotted #eee;
}

.repo-panel-tags-element .expanded-viewport.manifest-list {
  border-top: 1px solid #eee;
}

.repo-panel-tags-element .manifest-list-view .image-track-line {
  opacity: 0.2;
}

@@ -122,7 +122,7 @@
<td class="hidden-xs hidden-sm"
    ng-class="tablePredicateClass('image_id', options.predicate, options.reverse)"
    style="width: 140px;">
  <a ng-click="orderBy('image_id')">Image</a>
  <a ng-click="orderBy('image_id')">Manifest</a>
</td>
<td class="hidden-xs hidden-sm hidden-md image-track" ng-repeat="it in imageTracks"
    ng-if="imageTracks.length <= maxTrackCount"></td>

@@ -137,7 +137,15 @@
    bindonce>
  <tr ng-class="expandedView ? 'expanded-view': ''">
    <td><span class="cor-checkable-item" controller="checkedTags" item="tag"></span></td>
    <td class="co-flowing-col"><span class="tag-span"><span bo-text="tag.name"></span></span></td>
    <td class="co-flowing-col">
      <span class="tag-span"><span bo-text="tag.name"></span></span>
      <span class="manifest-list-icons" bo-if="tag.is_manifest_list">
        <i class="manifest-list-manifest-icon fa fa-{{ manifest.os }}"
           ng-repeat="manifest in manifestsOf(tag)"
           data-title="{{ manifest.description }}"
           bs-tooltip></i>
      </span>
    </td>
    <td class="signing-col hidden-xs"
        quay-require="['SIGNING']"
        ng-if="repository.trust_enabled">

@@ -151,89 +159,33 @@

<!-- Security scanning -->
<td quay-require="['SECURITY_SCANNER']" class="security-scan-col hidden-sm hidden-xs">
  <span class="cor-loader-inline" ng-if="getTagVulnerabilities(tag).loading"></span>
  <span class="vuln-load-error" ng-if="getTagVulnerabilities(tag).hasError"
        data-title="The vulnerabilities for this tag could not be retrieved at the present time, try again later"
  <!-- Manifest List -->
  <span class="secscan-manifestlist" ng-if="::tag.is_manifest_list"
        ng-click="setExpanded(true)"
        data-title="The tag points to a list of manifests. Click 'Expanded' to view."
        bs-tooltip>
    <i class="fa fa-times-circle"></i>
    Could not load security information
    See Child Manifests
  </span>

  <span ng-if="!getTagVulnerabilities(tag).loading">
    <!-- No Digest -->
    <span class="nodigest" ng-if="getTagVulnerabilities(tag).status == 'nodigest'"
          data-title="The tag does not have a V2 digest and so is unsupported for scan"
          bs-tooltip>
      <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
      Unsupported
    </span>

    <!-- Queued -->
    <span class="scanning" ng-if="getTagVulnerabilities(tag).status == 'queued'"
          data-title="The image for this tag is queued to be scanned for vulnerabilities"
          bs-tooltip>
      <i class="fa fa-ellipsis-h"></i>
      Queued
    </span>

    <!-- Scan Failed -->
    <span class="failed-scan" ng-if="getTagVulnerabilities(tag).status == 'failed'"
          data-title="The image for this tag could not be scanned for vulnerabilities"
          bs-tooltip>
      <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
      Unable to scan
    </span>

    <!-- No Features -->
    <span class="failed-scan"
          ng-if="getTagVulnerabilities(tag).status == 'scanned' && !getTagVulnerabilities(tag).hasFeatures"
          data-title="The image for this tag has an operating system or package manager unsupported by Quay Security Scanner"
          bs-tooltip
          bindonce>
      <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
      Unsupported
    </span>

    <!-- Features and No Vulns -->
    <span class="no-vulns"
          ng-if="getTagVulnerabilities(tag).status == 'scanned' && getTagVulnerabilities(tag).hasFeatures && !getTagVulnerabilities(tag).hasVulnerabilities"
          data-title="The image for this tag has no vulnerabilities as found in our database"
          bs-tooltip
          bindonce>
      <a bo-href-i="/repository/{{ repository.namespace }}/{{ repository.name }}/manifest/{{ tag.manifest_digest }}?tab=vulnerabilities">
        <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#2FC98E'}]"></span>
        Passed
      </a>
    </span>

    <!-- Vulns -->
    <span ng-if="getTagVulnerabilities(tag).status == 'scanned' && getTagVulnerabilities(tag).hasFeatures && getTagVulnerabilities(tag).hasVulnerabilities"
          ng-class="getTagVulnerabilities(tag).highestVulnerability.Priority"
          class="has-vulns" bindonce>

      <a class="vuln-link" bo-href-i="/repository/{{ repository.namespace }}/{{ repository.name }}/manifest/{{ tag.manifest_digest }}?tab=vulnerabilities"
         data-title="This tag has {{ getTagVulnerabilities(tag).vulnerabilities.length }} vulnerabilities across {{ getTagVulnerabilities(tag).featuresInfo.brokenFeaturesCount }} packages"
         bs-tooltip>
        <!-- Donut -->
        <span class="donut-chart" min-percent="10" width="22" data="getTagVulnerabilities(tag).vulnerabilitiesInfo.severityBreakdown"></span>

        <!-- Messaging -->
        <span class="highest-vuln">
          <span class="vulnerability-priority-view" hide-icon="true" priority="getTagVulnerabilities(tag).highestVulnerability.Priority">
            {{ getTagVulnerabilities(tag).highestVulnerability.Count }}
          </span>
        </span>
      </a>
      <span class="dot" ng-if="getTagVulnerabilities(tag).vulnerabilitiesInfo.fixable.length">·</span>
      <a class="vuln-link" bo-href-i="/repository/{{ repository.namespace }}/{{ repository.name }}/manifest/{{ tag.manifest_digest }}?tab=vulnerabilities&fixable=true" ng-if="getTagVulnerabilities(tag).vulnerabilitiesInfo.fixable.length">
        {{ getTagVulnerabilities(tag).vulnerabilitiesInfo.fixable.length }} fixable
      </a>
    </span>
  <!-- No Digest -->
  <span class="nodigest" ng-if="::!tag.manifest_digest"
        data-title="The tag does not have a V2 digest and so is unsupported for scan"
        bs-tooltip>
    <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
    Unsupported
  </span>

  <!-- Manifest security view -->
  <manifest-security-view repository="::repository" manifest-digest="::tag.manifest_digest"
                          ng-if="::(tag.manifest_digest && !tag.is_manifest_list)">
  </manifest-security-view>
</td>

<!-- Size -->
<td class="hidden-xs" bo-text="tag.size | bytes"></td>
<td class="hidden-xs">
  <span bo-text="tag.size | bytes" bo-if="!tag.is_manifest_list"></span>
  <span bo-if="tag.is_manifest_list">N/A</span>
</td>

<!-- Expiration -->
<td class="hidden-xs hidden-sm hidden-md">

@@ -307,7 +259,44 @@
  </td>
  <td class="options-col hidden-xs hidden-sm"><!-- Whitespace col --></td>
</tr>
<tr ng-if="expandedView">

<!-- Manifest List Expanded View -->
<tr class="manifest-list-view" ng-repeat="manifest in manifestsOf(tag)"
    ng-if="expandedView && tag.is_manifest_list">
  <td class="checkbox-col"></td>
  <td colspan="2">
    <i class="manifest-list-manifest-icon fa fa-{{ manifest.os }}"></i>
    {{ manifest.description }}
  </td>

  <!-- Security scanning -->
  <td quay-require="['SECURITY_SCANNER']" class="security-scan-col hidden-sm hidden-xs">
    <manifest-security-view repository="::repository" manifest-digest="::manifest.digest">
    </manifest-security-view>
  </td>

  <td colspan="1" class="hidden-xs"></td>
  <td colspan="1" class="hidden-xs hidden-sm hidden-md"></td>

  <td class="hidden-xs hidden-sm image-id-col">
    <manifest-link repository="repository" manifest-digest="manifest.digest"></manifest-link>
  </td>

  <td class="hidden-xs hidden-sm hidden-md image-track" ng-repeat="it in imageTracks"
      ng-if="imageTracks.length <= maxTrackCount" bindonce>
    <span class="image-track-line"
          ng-if="::getTrackEntryForIndex(it, $parent.$parent.$parent.$parent.$index)"
          ng-class="::trackLineExpandedClass(it, $parent.$parent.$parent.$parent.$parent.$index)"
          ng-style="::{'borderColor': getTrackEntryForIndex(it, $parent.$parent.$parent.$parent.$parent.$index).color}"></span>
  </td>

  <td class="options-col"></td>
  <td class="options-col"></td>
  <td class="options-col hidden-xs hidden-sm"><!-- Whitespace col --></td>
</tr>

<!-- Expanded View -->
<tr ng-if="expandedView" class="expanded-viewport" ng-class="{'manifest-list': tag.is_manifest_list}">
  <td class="checkbox-col"></td>
  <td class="labels-col" colspan="{{6 + (repository.trust_enabled ? 1 : 0) + (Features.SECURITY_SCANNER ? 1 : 0) }}">
    <!-- Image ID -->

@@ -324,6 +313,7 @@
      <tag-signing-display compact="false" tag="tag" delegations="repoDelegationsInfo"></tag-signing-display>
    </div>
  </td>

  <td class="hidden-xs hidden-sm hidden-md image-track" ng-repeat="it in imageTracks"
      ng-if="imageTracks.length <= maxTrackCount" bindonce>
    <span class="image-track-line"

@@ -331,7 +321,8 @@
          ng-class="::trackLineExpandedClass(it, $parent.$parent.$parent.$index)"
          ng-style="::{'borderColor': getTrackEntryForIndex(it, $parent.$parent.$parent.$index).color}"></span>
  </td>
  <td colspan="1" class="hidden-xs hidden-sm hidden-md"></td>
  <td></td>
  <td class="options-col hidden-xs hidden-sm"><!-- Whitespace col --></td>
</tr>
</tbody>
</table>

@@ -47,9 +47,9 @@
<div class="modal-header">
  <button type="button" class="close" data-dismiss="modal"
          aria-hidden="true" ng-show="!addingTag">×</button>
  <h4 class="modal-title">{{ isAnotherImageTag(toTagImage, tagToCreate) ? 'Move' : 'Add' }} Tag to Image {{ toTagImage.substr(0, 12) }}</h4>
  <h4 class="modal-title">{{ isAnotherImageTag(toTagImage, tagToCreate) ? 'Move' : 'Add' }} Tag to {{ toTagManifestDigest ? ('Manifest ' + toTagManifestDigest.substr(0, 19)) : ('Image ' + toTagImage.substr(0, 12)) }}</h4>
</div>
<form name="addTagForm" ng-submit="createOrMoveTag(toTagImage, tagToCreate);">
<form name="addTagForm" ng-submit="createOrMoveTag(toTagImage, tagToCreate, toTagManifestDigest);">
  <div class="modal-body">
    <div class="cor-loader" ng-show="addingTag"></div>
    <div ng-show="!addingTag">

@@ -39,7 +39,7 @@ angular.module('quay').directive('repoPanelTags', function () {
      $scope.expandedView = false;
      $scope.labelCache = {};

      $scope.imageVulnerabilities = {};
      $scope.manifestVulnerabilities = {};
      $scope.repoDelegationsInfo = null;

      $scope.defcon1 = {};

@@ -92,6 +92,10 @@ angular.module('quay').directive('repoPanelTags', function () {
        var imageIndexMap = {};
        for (var i = 0; i < ordered.length; ++i) {
          var tagInfo = ordered[i];
          if (!tagInfo.image_id) {
            continue;
          }

          if (!imageMap[tagInfo.image_id]) {
            imageMap[tagInfo.image_id] = [];
            imageIDs.push(tagInfo.image_id)

@@ -251,76 +255,6 @@ angular.module('quay').directive('repoPanelTags', function () {
        loadRepoSignatures();
      }, true);

      $scope.loadImageVulnerabilities = function(image_id, imageData) {
        VulnerabilityService.loadImageVulnerabilities($scope.repository, image_id, function(resp) {
          imageData.loading = false;
          imageData.status = resp['status'];

          if (imageData.status == 'scanned') {
            var vulnerabilities = [];
            var highest = {
              'Severity': 'Unknown',
              'Count': 0,
              'index': 100000
            };

            VulnerabilityService.forEachVulnerability(resp, function(vuln) {
              if (VulnerabilityService.LEVELS[vuln.Severity].index == 0) {
                $scope.defcon1[vuln.Name] = vuln;
                $scope.hasDefcon1 = true;
              }

              if (VulnerabilityService.LEVELS[vuln.Severity].index < highest.index) {
                highest = {
                  'Priority': vuln.Severity,
                  'Count': 1,
                  'index': VulnerabilityService.LEVELS[vuln.Severity].index,
                  'Color': VulnerabilityService.LEVELS[vuln.Severity].color
                }
              } else if (VulnerabilityService.LEVELS[vuln.Severity].index == highest.index) {
                highest['Count']++;
              }

              vulnerabilities.push(vuln);
            });

            imageData.hasFeatures = VulnerabilityService.hasFeatures(resp);
            imageData.hasVulnerabilities = !!vulnerabilities.length;
            imageData.vulnerabilities = vulnerabilities;
            imageData.highestVulnerability = highest;
            imageData.featuresInfo = VulnerabilityService.buildFeaturesInfo(null, resp);
            imageData.vulnerabilitiesInfo = VulnerabilityService.buildVulnerabilitiesInfo(null, resp);
          }
        }, function() {
          imageData.loading = false;
          imageData.hasError = true;
        });
      };

      $scope.getTagVulnerabilities = function(tag) {
        if (!tag.manifest_digest) {
          return 'nodigest';
        }

        return $scope.getImageVulnerabilities(tag.image_id);
      };

      $scope.getImageVulnerabilities = function(image_id) {
        if (!$scope.repository) {
          return;
        }

        if (!$scope.imageVulnerabilities[image_id]) {
          $scope.imageVulnerabilities[image_id] = {
            'loading': true
          };

          $scope.loadImageVulnerabilities(image_id, $scope.imageVulnerabilities[image_id]);
        }

        return $scope.imageVulnerabilities[image_id];
      };

      $scope.clearSelectedTags = function() {
        $scope.checkedTags.setChecked([]);
      };

@@ -430,7 +364,7 @@ angular.module('quay').directive('repoPanelTags', function () {
      };

      $scope.askAddTag = function(tag) {
        $scope.tagActionHandler.askAddTag(tag.image_id);
        $scope.tagActionHandler.askAddTag(tag.image_id, tag.manifest_digest);
      };

      $scope.showLabelEditor = function(tag) {

@@ -499,6 +433,27 @@ angular.module('quay').directive('repoPanelTags', function () {
      $scope.handleLabelsChanged = function(manifest_digest) {
        delete $scope.labelCache[manifest_digest];
      };

      $scope.manifestsOf = function(tag) {
        if (!tag.is_manifest_list || !tag.manifest) {
          return [];
        }

        if (!tag._mapped_manifests) {
          // Calculate once and cache to avoid angular digest cycles.
          tag._mapped_manifests = tag.manifest.manifests.map(function(manifest) {
            return {
              'raw': manifest,
              'os': manifest.platform.os,
              'size': manifest.size,
              'digest': manifest.digest,
              'description': `${manifest.platform.os} on ${manifest.platform.architecture}`,
            };
          });
        }

        return tag._mapped_manifests;
      };
    }
  };
  return directiveDefinitionObject;

@@ -39,7 +39,7 @@ angular.module('quay').directive('fetchTagDialog', function () {
        });
      }

      if ($scope.repository) {
      if ($scope.repository && $scope.currentTag && !$scope.currentTag.is_manifest_list) {
        $scope.formats.push({
          'title': 'Squashed Docker Image',
          'icon': 'ci-squashed',

@@ -49,7 +49,7 @@ angular.module('quay').directive('fetchTagDialog', function () {
        });
      }

      if (Features.ACI_CONVERSION) {
      if (Features.ACI_CONVERSION && $scope.currentTag && !$scope.currentTag.is_manifest_list) {
        $scope.formats.push({
          'title': 'rkt Fetch',
          'icon': 'rocket-icon',

83 static/js/directives/ui/manifest-security-view/manifest-security-view.component.html Normal file

@@ -0,0 +1,83 @@
<div class="manifest-security-view-element">
  <span class="cor-loader-inline" ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).loading"></span>
  <span class="vuln-load-error" ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).hasError"
        data-title="The vulnerabilities for this tag could not be retrieved at the present time, try again later"
        bs-tooltip>
    <i class="fa fa-times-circle"></i>
    Could not load security information
  </span>

  <span ng-if="!$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).loading">
    <!-- Queued -->
    <span class="scanning" ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).status == 'queued'"
          data-title="The manifest for this tag is queued to be scanned for vulnerabilities"
          bs-tooltip>
      <i class="fa fa-ellipsis-h"></i>
      Queued
    </span>

    <!-- Scan Failed -->
    <span class="failed-scan" ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).status == 'failed'"
          data-title="The manifest for this tag could not be scanned for vulnerabilities"
          bs-tooltip>
      <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
      Unable to scan
    </span>

    <!-- Unsupported -->
    <span class="failed-scan"
          ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).status == 'unsupported'"
          data-title="The manifest for this tag has an operating system or package manager unsupported by Quay Security Scanner"
          bs-tooltip
          bindonce>
      <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
      Unsupported
    </span>

    <!-- No Features -->
    <span class="failed-scan"
          ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).status == 'scanned' && !$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).hasFeatures"
          data-title="The manifest for this tag has an operating system or package manager unsupported by Quay Security Scanner"
          bs-tooltip
          bindonce>
      <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#eee'}]"></span>
      Unsupported
    </span>

    <!-- Features and No Vulns -->
    <span class="no-vulns"
          ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).status == 'scanned' && $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).hasFeatures && !$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).hasVulnerabilities"
          data-title="The manifest for this tag has no vulnerabilities as found in our database"
          bs-tooltip
          bindonce>
      <a bo-href-i="/repository/{{ $ctrl.repository.namespace }}/{{ $ctrl.repository.name }}/manifest/{{ tag.manifest_digest }}?tab=vulnerabilities">
        <span class="donut-chart" width="22" data="[{'index': 0, 'value': 1, 'color': '#2FC98E'}]"></span>
        Passed
      </a>
    </span>

    <!-- Vulns -->
    <span ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).status == 'scanned' && $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).hasFeatures && $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).hasVulnerabilities"
          ng-class="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).highestVulnerability.Priority"
          class="has-vulns" bindonce>

      <a class="vuln-link" bo-href-i="/repository/{{ $ctrl.repository.namespace }}/{{ $ctrl.repository.name }}/manifest/{{ tag.manifest_digest }}?tab=vulnerabilities"
         data-title="This tag has {{ $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).vulnerabilities.length }} vulnerabilities across {{ $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).featuresInfo.brokenFeaturesCount }} packages"
         bs-tooltip>
        <!-- Donut -->
        <span class="donut-chart" min-percent="10" width="22" data="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).vulnerabilitiesInfo.severityBreakdown"></span>

        <!-- Messaging -->
        <span class="highest-vuln">
          <span class="vulnerability-priority-view" hide-icon="true" priority="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).highestVulnerability.Priority">
            {{ $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).highestVulnerability.Count }}
          </span>
        </span>
      </a>
      <span class="dot" ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).vulnerabilitiesInfo.fixable.length">·</span>
      <a class="vuln-link" bo-href-i="/repository/{{ $ctrl.repository.namespace }}/{{ $ctrl.repository.name }}/manifest/{{ tag.manifest_digest }}?tab=vulnerabilities&fixable=true" ng-if="$ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).vulnerabilitiesInfo.fixable.length">
        {{ $ctrl.getSecurityStatus($ctrl.repository, $ctrl.manifestDigest).vulnerabilitiesInfo.fixable.length }} fixable
      </a>
    </span>
  </span>
</div>

76 static/js/directives/ui/manifest-security-view/manifest-security-view.component.ts Normal file

@@ -0,0 +1,76 @@
import { Input, Component, Inject } from 'ng-metadata/core';
import { Repository } from '../../../types/common.types';


/**
 * A component that displays the security status of a manifest.
 */
@Component({
  selector: 'manifest-security-view',
  templateUrl: '/static/js/directives/ui/manifest-security-view/manifest-security-view.component.html',
})
export class ManifestSecurityView {
  @Input('<') public repository: Repository;
  @Input('<') public manifestDigest: string;

  private cachedSecurityStatus: Object = null;

  constructor(@Inject('VulnerabilityService') private VulnerabilityService: any) {
  }

  private getSecurityStatus(repository: Repository, manifestDigest: string): Object {
    if (repository == null || !manifestDigest) {
      return {'status': 'loading'};
    }

    if (this.cachedSecurityStatus) {
      return this.cachedSecurityStatus;
    }

    this.cachedSecurityStatus = {'status': 'loading'};
    this.loadManifestVulnerabilities(this.cachedSecurityStatus);
    return this.cachedSecurityStatus;
  }

  private loadManifestVulnerabilities(securityStatus) {
    this.VulnerabilityService.loadManifestVulnerabilities(this.repository, this.manifestDigest, (resp) => {
      securityStatus.loading = false;
      securityStatus.status = resp['status'];

      if (securityStatus.status == 'scanned') {
        var vulnerabilities = [];
        var highest = {
          'Priority': 'Unknown',
          'Count': 0,
          'index': 100000,
          'Color': 'gray',
        };

        // An arrow function keeps `this` bound to the component instance.
        this.VulnerabilityService.forEachVulnerability(resp, (vuln) => {
          if (this.VulnerabilityService.LEVELS[vuln.Severity].index < highest.index) {
            highest = {
              'Priority': vuln.Severity,
              'Count': 1,
              'index': this.VulnerabilityService.LEVELS[vuln.Severity].index,
              'Color': this.VulnerabilityService.LEVELS[vuln.Severity].color
            }
          } else if (this.VulnerabilityService.LEVELS[vuln.Severity].index == highest.index) {
            highest['Count']++;
          }

          vulnerabilities.push(vuln);
        });

        securityStatus.hasFeatures = this.VulnerabilityService.hasFeatures(resp);
        securityStatus.hasVulnerabilities = !!vulnerabilities.length;
        securityStatus.vulnerabilities = vulnerabilities;
        securityStatus.highestVulnerability = highest;
        securityStatus.featuresInfo = this.VulnerabilityService.buildFeaturesInfo(null, resp);
        securityStatus.vulnerabilitiesInfo = this.VulnerabilityService.buildVulnerabilitiesInfo(null, resp);
      }
    }, function() {
      securityStatus.loading = false;
      securityStatus.hasError = true;
    });
  }
}

@@ -84,7 +84,7 @@ angular.module('quay').directive('tagOperationsDialog', function () {
        return found.image_id == image;
      };

      $scope.createOrMoveTag = function(image, tag) {
      $scope.createOrMoveTag = function(image, tag, opt_manifest_digest) {
        if (!$scope.repository.can_write) { return; }
        if ($scope.alertOnTagOpsDisabled()) {
          return;

@@ -97,9 +97,14 @@ angular.module('quay').directive('tagOperationsDialog', function () {
          'tag': tag
        };

        var data = {
          'image': image
        };
        var data = {};
        if (image) {
          data['image'] = image;
        }

        if (opt_manifest_digest) {
          data['manifest_digest'] = opt_manifest_digest;
        }

        var errorHandler = ApiService.errorDisplay('Cannot create or move tag', function(resp) {
          $element.find('#createOrMoveTagModal').modal('hide');

@@ -330,13 +335,14 @@ angular.module('quay').directive('tagOperationsDialog', function () {
          };
        },

        'askAddTag': function(image) {
        'askAddTag': function(image, opt_manifest_digest) {
          if ($scope.alertOnTagOpsDisabled()) {
            return;
          }

          $scope.tagToCreate = '';
          $scope.toTagImage = image;
          $scope.toTagManifestDigest = opt_manifest_digest;
          $scope.addingTag = false;
          $scope.addTagForm.$setPristine();
          $element.find('#createOrMoveTagModal').modal('show');

@@ -41,6 +41,7 @@ import { TimeAgoComponent } from './directives/ui/time-ago/time-ago.component';
import { TimeDisplayComponent } from './directives/ui/time-display/time-display.component';
import { AppSpecificTokenManagerComponent } from './directives/ui/app-specific-token-manager/app-specific-token-manager.component';
import { ManifestLinkComponent } from './directives/ui/manifest-link/manifest-link.component';
import { ManifestSecurityView } from './directives/ui/manifest-security-view/manifest-security-view.component';
import { MarkdownModule } from './directives/ui/markdown/markdown.module';
import * as Clipboard from 'clipboard';

@@ -87,6 +88,7 @@ import * as Clipboard from 'clipboard';
    TimeDisplayComponent,
    AppSpecificTokenManagerComponent,
    ManifestLinkComponent,
    ManifestSecurityView,
  ],
  providers: [
    ViewArrayImpl,

@@ -17,6 +17,7 @@ from data.database import (close_db_filter, configure, DerivedStorageForImage, Q
                           TagManifest, TagManifestToManifest, Manifest, ManifestLegacyImage,
                           ManifestBlob)
from data import model
from data.registry_model import registry_model
from endpoints.csrf import generate_csrf_token
from util.log import logfile_path

@@ -132,8 +133,16 @@ def registry_server_executor(app):
  return executor


@pytest.fixture(params=['pre_oci_model', 'oci_model'])
def data_model(request):
  return request.param


@pytest.fixture()
def liveserver_app(app, registry_server_executor, init_db_path):
def liveserver_app(app, registry_server_executor, init_db_path, data_model):
  # Change the data model being used.
  registry_model.set_for_testing(data_model == 'oci_model')

  registry_server_executor.apply_blueprint_to_app(app)

  if os.environ.get('DEBUG', 'false').lower() == 'true':

@@ -150,7 +159,7 @@ def liveserver_app(app, registry_server_executor, init_db_path):


@pytest.fixture()
def app_reloader(liveserver, registry_server_executor):
def app_reloader(request, liveserver, registry_server_executor):
  registry_server_executor.on(liveserver).reload_app(liveserver.url)
  yield

@@ -6,6 +6,7 @@ import pytest
from Crypto.PublicKey import RSA
from jwkest.jwk import RSAKey

from test.registry.fixtures import data_model
from test.registry.protocols import Image, layer_bytes_for_contents
from test.registry.protocol_v1 import V1Protocol
from test.registry.protocol_v2 import V2Protocol

@@ -23,7 +24,6 @@ def basic_images():
  ]


@pytest.fixture(scope="session")
def different_images():
  """ Returns different basic images for push and pull testing. """

@@ -36,7 +36,6 @@ def different_images():
  ]


@pytest.fixture(scope="session")
def sized_images():
  """ Returns basic images (with sizes) for push and pull testing. """

@@ -47,7 +46,8 @@ def sized_images():
    Image(id='parentid', bytes=parent_bytes, parent_id=None, size=len(parent_bytes),
          config={'foo': 'bar'}),
    Image(id='someid', bytes=image_bytes, parent_id='parentid', size=len(image_bytes),
          config={'foo': 'childbar', 'Entrypoint': ['hello']}),
          config={'foo': 'childbar', 'Entrypoint': ['hello']},
          created='2018-04-03T18:37:09.284840891Z'),
  ]


@@ -90,6 +90,38 @@ def multi_layer_images():
  ]


@pytest.fixture(scope="session")
def remote_images():
  """ Returns images with at least one remote layer for push and pull testing. """
  # Note: order is from base layer down to leaf.
  remote_bytes = layer_bytes_for_contents('remote contents')
  parent_bytes = layer_bytes_for_contents('parent contents')
  image_bytes = layer_bytes_for_contents('some contents')
  return [
    Image(id='remoteid', bytes=remote_bytes, parent_id=None, urls=['http://some/url']),
    Image(id='parentid', bytes=parent_bytes, parent_id='remoteid'),
    Image(id='someid', bytes=image_bytes, parent_id='parentid'),
  ]


@pytest.fixture(scope="session")
def images_with_empty_layer():
  """ Returns images for push and pull testing that contain an empty layer. """
  # Note: order is from base layer down to leaf.
  parent_bytes = layer_bytes_for_contents('parent contents')
  empty_bytes = layer_bytes_for_contents('', empty=True)
  image_bytes = layer_bytes_for_contents('some contents')
  middle_bytes = layer_bytes_for_contents('middle')

  return [
    Image(id='parentid', bytes=parent_bytes, parent_id=None),
    Image(id='emptyid', bytes=empty_bytes, parent_id='parentid', is_empty=True),
    Image(id='middleid', bytes=middle_bytes, parent_id='emptyid'),
    Image(id='emptyid2', bytes=empty_bytes, parent_id='middleid', is_empty=True),
    Image(id='someid', bytes=image_bytes, parent_id='emptyid2'),
  ]


@pytest.fixture(scope="session")
def jwk():
  return RSAKey(key=RSA.generate(2048))

@@ -100,14 +132,58 @@ def v2_protocol(request, jwk):
  return request.param(jwk)


@pytest.fixture()
def v22_protocol(request, jwk):
  return V2Protocol(jwk, schema2=True)


@pytest.fixture(params=[V1Protocol])
def v1_protocol(request, jwk):
  return request.param(jwk)


@pytest.fixture(params=[V2Protocol])
def manifest_protocol(request, jwk):
  return request.param(jwk)
@pytest.fixture(params=['schema1', 'schema2'])
def manifest_protocol(request, data_model, jwk):
  return V2Protocol(jwk, schema2=(request.param == 'schema2' and data_model == 'oci_model'))


@pytest.fixture(params=['v1', 'v2_1', 'v2_2'])
def pusher(request, data_model, jwk):
  if request.param == 'v1':
    return V1Protocol(jwk)

  if request.param == 'v2_2' and data_model == 'oci_model':
    return V2Protocol(jwk, schema2=True)

  return V2Protocol(jwk)


@pytest.fixture(params=['v1', 'v2_1'])
def legacy_puller(request, data_model, jwk):
  if request.param == 'v1':
    return V1Protocol(jwk)

  return V2Protocol(jwk)


@pytest.fixture(params=['v1', 'v2_1'])
def legacy_pusher(request, data_model, jwk):
  if request.param == 'v1':
    return V1Protocol(jwk)

  return V2Protocol(jwk)


@pytest.fixture(params=['v1', 'v2_1', 'v2_2'])
def puller(request, data_model, jwk):
  if request.param == 'v1':
    return V1Protocol(jwk)

  if request.param == 'v2_2' and data_model == 'oci_model':
    return V2Protocol(jwk, schema2=True)

  return V2Protocol(jwk)


@pytest.fixture(params=[V1Protocol, V2Protocol])

@@ -115,16 +191,6 @@ def loginer(request, jwk):
  return request.param(jwk)


@pytest.fixture(params=[V1Protocol, V2Protocol])
def pusher(request, jwk):
  return request.param(jwk)


@pytest.fixture(params=[V1Protocol, V2Protocol])
def puller(request, jwk):
  return request.param(jwk)


@pytest.fixture(scope="session")
def random_layer_data():
  size = 4096

@@ -93,7 +93,6 @@ class V1Protocol(RegistryProtocol):

    # GET /v1/repositories/{namespace}/{repository}/tags
    image_ids = self.conduct(session, 'GET', prefix + 'tags', headers=headers).json()
    assert len(image_ids.values()) >= len(tag_names)

    for tag_name in tag_names:
      if tag_name not in image_ids:

@@ -101,10 +100,6 @@ class V1Protocol(RegistryProtocol):
        return None

      tag_image_id = image_ids[tag_name]
      if not options.munge_shas:
        # Ensure we have a matching image ID.
        known_ids = {image.id for image in images}
        assert tag_image_id in known_ids

      # Retrieve the ancestry of the tagged image.
      image_prefix = '/v1/images/%s/' % tag_image_id

@@ -149,6 +144,8 @@ class V1Protocol(RegistryProtocol):
      headers['Authorization'] = 'token ' + result.headers['www-authenticate']

    for image in images:
      assert image.urls is None

      # PUT /v1/images/{imageID}/json
      image_json_data = {'id': image.id}
      if image.size is not None:

@@ -160,15 +157,19 @@ class V1Protocol(RegistryProtocol):
      if image.config is not None:
        image_json_data['config'] = image.config

      if image.created is not None:
        image_json_data['created'] = image.created

      image_json = json.dumps(image_json_data)
      response = self.conduct(session, 'PUT', '/v1/images/%s/json' % image.id,
                              json_data=image_json_data, headers=headers,
                              data=image_json, headers=headers,
                              expected_status=(200, expected_failure,
                                               V1ProtocolSteps.PUT_IMAGE_JSON))
      if response.status_code != 200:
        return

      # PUT /v1/images/{imageID}/checksum (old style)
      old_checksum = compute_tarsum(StringIO(image.bytes), json.dumps(image_json_data))
      old_checksum = compute_tarsum(StringIO(image.bytes), image_json)
      checksum_headers = {'X-Docker-Checksum': old_checksum}
      checksum_headers.update(headers)

@@ -180,7 +181,7 @@ class V1Protocol(RegistryProtocol):
                   data=StringIO(image.bytes), headers=headers)

      # PUT /v1/images/{imageID}/checksum (new style)
      checksum = compute_simple(StringIO(image.bytes), json.dumps(image_json_data))
      checksum = compute_simple(StringIO(image.bytes), image_json)
      checksum_headers = {'X-Docker-Checksum-Payload': checksum}
      checksum_headers.update(headers)

@@ -200,7 +201,7 @@ class V1Protocol(RegistryProtocol):
                 '/v1/repositories/%s/images' % self.repo_name(namespace, repo_name),
                 expected_status=204, headers=headers)

    return PushResult(checksums=None, manifests=None, headers=headers)
    return PushResult(manifests=None, headers=headers)

  def delete(self, session, namespace, repo_name, tag_names, credentials=None,
             expected_failure=None, options=None):

@@ -3,7 +3,13 @@ import json

from enum import Enum, unique

from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
from image.docker.schema1 import (DockerSchema1ManifestBuilder, DockerSchema1Manifest,
                                  DOCKER_SCHEMA1_CONTENT_TYPES)
from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from image.docker.schema2.config import DockerSchema2Config
from image.docker.schema2.list import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
from image.docker.schemas import parse_manifest_from_bytes
from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
                                     PullResult)

@@ -14,7 +20,9 @@ class V2ProtocolSteps(Enum):
  AUTH = 'auth'
  BLOB_HEAD_CHECK = 'blob-head-check'
  GET_MANIFEST = 'get-manifest'
  GET_MANIFEST_LIST = 'get-manifest-list'
  PUT_MANIFEST = 'put-manifest'
  PUT_MANIFEST_LIST = 'put-manifest-list'
  MOUNT_BLOB = 'mount-blob'
  CATALOG = 'catalog'
  LIST_TAGS = 'list-tags'

@@ -54,10 +62,14 @@ class V2Protocol(RegistryProtocol):
      Failures.INVALID_BLOB: 400,
      Failures.UNSUPPORTED_CONTENT_TYPE: 415,
    },
    V2ProtocolSteps.PUT_MANIFEST_LIST: {
      Failures.INVALID_MANIFEST: 400,
    }
  }

  def __init__(self, jwk):
  def __init__(self, jwk, schema2=False):
    self.jwk = jwk
    self.schema2 = schema2

  def ping(self, session):
    result = session.get('/v2/')

@@ -118,6 +130,188 @@ class V2Protocol(RegistryProtocol):

    return None, response

  def pull_list(self, session, namespace, repo_name, tag_names, manifestlist,
                credentials=None, expected_failure=None, options=None):
    options = options or ProtocolOptions()
    scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

    # Ping!
    self.ping(session)

    # Perform auth and retrieve a token.
    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                         expected_failure=expected_failure)
    if token is None:
      assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
      return

    headers = {
      'Authorization': 'Bearer ' + token,
      'Accept': ','.join(DOCKER_SCHEMA2_CONTENT_TYPES),
    }

    for tag_name in tag_names:
      # Retrieve the manifest for the tag or digest.
      response = self.conduct(session, 'GET',
                              '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name),
                                                       tag_name),
                              expected_status=(200, expected_failure,
                                               V2ProtocolSteps.GET_MANIFEST_LIST),
                              headers=headers)
      if expected_failure is not None:
        return None

      # Parse the returned manifest list and ensure it matches.
      assert response.headers['Content-Type'] == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
      retrieved = parse_manifest_from_bytes(response.text, response.headers['Content-Type'])
      assert retrieved.schema_version == 2
      assert retrieved.is_manifest_list
      assert retrieved.digest == manifestlist.digest

      # Pull each of the manifests inside and ensure they can be retrieved.
      for manifest_digest in retrieved.child_manifest_digests():
        response = self.conduct(session, 'GET',
                                '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name),
                                                         manifest_digest),
                                expected_status=(200, expected_failure,
                                                 V2ProtocolSteps.GET_MANIFEST),
                                headers=headers)
        if expected_failure is not None:
          return None

        manifest = parse_manifest_from_bytes(response.text, response.headers['Content-Type'])
        assert not manifest.is_manifest_list
        assert manifest.digest == manifest_digest
||||
def push_list(self, session, namespace, repo_name, tag_names, manifestlist, manifests, blobs,
|
||||
credentials=None, expected_failure=None, options=None):
|
||||
options = options or ProtocolOptions()
|
||||
scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
|
||||
tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
|
||||
|
||||
# Ping!
|
||||
self.ping(session)
|
||||
|
||||
# Perform auth and retrieve a token.
|
||||
token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
|
||||
expected_failure=expected_failure)
|
||||
if token is None:
|
||||
assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
|
||||
return
|
||||
|
||||
headers = {
|
||||
'Authorization': 'Bearer ' + token,
|
||||
'Accept': ','.join(options.accept_mimetypes) if options.accept_mimetypes else '*/*',
|
||||
}
|
||||
|
||||
# Push all blobs.
|
||||
if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
|
||||
expected_failure):
|
||||
return
|
||||
|
||||
# Push the individual manifests.
|
||||
for manifest in manifests:
|
||||
manifest_headers = {'Content-Type': manifest.media_type}
|
||||
manifest_headers.update(headers)
|
||||
|
||||
self.conduct(session, 'PUT',
|
||||
'/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), manifest.digest),
|
||||
data=manifest.bytes,
|
||||
expected_status=(202, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
|
||||
headers=manifest_headers)
|
||||
|
||||
# Push the manifest list.
|
||||
for tag_name in tag_names:
|
||||
manifest_headers = {'Content-Type': manifestlist.media_type}
|
||||
manifest_headers.update(headers)
|
||||
|
||||
if options.manifest_content_type is not None:
|
||||
manifest_headers['Content-Type'] = options.manifest_content_type
|
||||
|
||||
self.conduct(session, 'PUT',
|
||||
'/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_name),
|
||||
data=manifestlist.bytes,
|
||||
expected_status=(202, expected_failure, V2ProtocolSteps.PUT_MANIFEST_LIST),
|
||||
headers=manifest_headers)
|
||||
|
||||
return PushResult(manifests=None, headers=headers)
|
||||
|
||||
def build_schema2(self, images, blobs, options):
|
||||
builder = DockerSchema2ManifestBuilder()
|
||||
for image in images:
|
||||
checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
|
||||
|
||||
if image.urls is None:
|
||||
blobs[checksum] = image.bytes
|
||||
|
||||
# If invalid blob references were requested, just make it up.
|
||||
if options.manifest_invalid_blob_references:
|
||||
checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
|
||||
|
||||
if not image.is_empty:
|
||||
builder.add_layer(checksum, len(image.bytes), urls=image.urls)
|
||||
|
||||
def history_for_image(image):
|
||||
history = {
|
||||
'created': '2018-04-03T18:37:09.284840891Z',
|
||||
'created_by': (('/bin/sh -c #(nop) ENTRYPOINT %s' % image.config['Entrypoint'])
|
||||
if image.config and image.config.get('Entrypoint')
|
||||
else '/bin/sh -c #(nop) %s' % image.id),
|
||||
}
|
||||
|
||||
if image.is_empty:
|
||||
history['empty_layer'] = True
|
||||
|
||||
return history
|
||||
|
||||
config = {
|
||||
"os": "linux",
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": []
|
||||
},
|
||||
"history": [history_for_image(image) for image in images],
|
||||
}
|
||||
|
||||
if images[-1].config:
|
||||
config['config'] = images[-1].config
|
||||
|
||||
config_json = json.dumps(config)
|
||||
schema2_config = DockerSchema2Config(config_json)
|
||||
builder.set_config(schema2_config)
|
||||
|
||||
blobs[schema2_config.digest] = schema2_config.bytes
|
||||
return builder.build()
|
||||
|
||||
def build_schema1(self, namespace, repo_name, tag_name, images, blobs, options):
|
||||
builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
|
||||
|
||||
for image in reversed(images):
|
||||
assert image.urls is None
|
||||
|
||||
checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
|
||||
blobs[checksum] = image.bytes
|
||||
|
||||
# If invalid blob references were requested, just make it up.
|
||||
if options.manifest_invalid_blob_references:
|
||||
checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
|
||||
|
||||
layer_dict = {'id': image.id, 'parent': image.parent_id}
|
||||
if image.config is not None:
|
||||
layer_dict['config'] = image.config
|
||||
|
||||
if image.size is not None:
|
||||
layer_dict['Size'] = image.size
|
||||
|
||||
if image.created is not None:
|
||||
layer_dict['created'] = image.created
|
||||
|
||||
builder.add_layer(checksum, json.dumps(layer_dict))
|
||||
|
||||
# Build the manifest.
|
||||
return builder.build(self.jwk)
|
||||
|
||||
def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
|
||||
expected_failure=None, options=None):
|
||||
options = options or ProtocolOptions()
|
||||
|
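
For orientation, a minimal sketch of how a test drives these new list methods (it mirrors the tests added later in this PR; `v22_protocol`, `basic_images`, and `different_images` are the fixture names used there):

# Sketch: build two schema2 manifests, wrap them in a manifest list, push, then pull back.
options = ProtocolOptions()
blobs = {}
first = v22_protocol.build_schema2(basic_images, blobs, options)
second = v22_protocol.build_schema2(different_images, blobs, options)

builder = DockerSchema2ManifestListBuilder()
builder.add_manifest(first, 'amd64', 'linux')
builder.add_manifest(second, 'arm', 'linux')
manifestlist = builder.build()

v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
                       [first, second], blobs, credentials=('devtable', 'password'))
v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
                       credentials=('devtable', 'password'))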
@@ -136,60 +330,69 @@ class V2Protocol(RegistryProtocol):
     headers = {
       'Authorization': 'Bearer ' + token,
-      'Accept': options.accept_mimetypes,
+      'Accept': ','.join(options.accept_mimetypes) if options.accept_mimetypes else '*/*',
     }

     # Build fake manifests.
     manifests = {}
+    blobs = {}
     for tag_name in tag_names:
-      builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
+      if self.schema2:
+        manifests[tag_name] = self.build_schema2(images, blobs, options)
+      else:
+        manifests[tag_name] = self.build_schema1(namespace, repo_name, tag_name, images, blobs,
+                                                 options)

-      for image in reversed(images):
-        checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
+    # Push the blob data.
+    if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
+                            expected_failure):
+      return

-        # If invalid blob references were requested, just make it up.
-        if options.manifest_invalid_blob_references:
-          checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
+    # Write a manifest for each tag.
+    for tag_name in tag_names:
+      manifest = manifests[tag_name]

-        layer_dict = {'id': image.id, 'parent': image.parent_id}
-        if image.config is not None:
-          layer_dict['config'] = image.config
+      # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
+      # expect a 202 response for success.
+      put_code = 404 if options.manifest_invalid_blob_references else 202
+      manifest_headers = {'Content-Type': manifest.media_type}
+      manifest_headers.update(headers)

-        if image.size is not None:
-          layer_dict['Size'] = image.size
+      if options.manifest_content_type is not None:
+        manifest_headers['Content-Type'] = options.manifest_content_type

-        builder.add_layer(checksum, json.dumps(layer_dict))
+      tag_or_digest = tag_name if not options.push_by_manifest_digest else manifest.digest
+      self.conduct(session, 'PUT',
+                   '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_or_digest),
+                   data=manifest.bytes,
+                   expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
+                   headers=manifest_headers)

-      # Build the manifest.
-      manifests[tag_name] = builder.build(self.jwk)
-
-    # Push the layer data.
-    checksums = {}
-    for image in reversed(images):
-      checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
-      checksums[image.id] = checksum
+    return PushResult(manifests=manifests, headers=headers)

+  def _push_blobs(self, blobs, session, namespace, repo_name, headers, options, expected_failure):
+    for blob_digest, blob_bytes in blobs.iteritems():
       if not options.skip_head_checks:
-        # Layer data should not yet exist.
+        # Blob data should not yet exist.
         self.conduct(session, 'HEAD',
-                     '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), checksum),
+                     '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), blob_digest),
                      expected_status=(404, expected_failure, V2ProtocolSteps.BLOB_HEAD_CHECK),
                      headers=headers)

       # Check for mounting of blobs.
-      if options.mount_blobs and image.id in options.mount_blobs:
+      if options.mount_blobs and blob_digest in options.mount_blobs:
         self.conduct(session, 'POST',
                      '/v2/%s/blobs/uploads/' % self.repo_name(namespace, repo_name),
                      params={
-                       'mount': checksum,
-                       'from': options.mount_blobs[image.id],
+                       'mount': blob_digest,
+                       'from': options.mount_blobs[blob_digest],
                      },
                      expected_status=(201, expected_failure, V2ProtocolSteps.MOUNT_BLOB),
                      headers=headers)
         if expected_failure is not None:
           return
       else:
-        # Start a new upload of the layer data.
+        # Start a new upload of the blob data.
         response = self.conduct(session, 'POST',
                                 '/v2/%s/blobs/uploads/' % self.repo_name(namespace, repo_name),
                                 expected_status=(202, expected_failure,

@@ -206,9 +409,9 @@ class V2Protocol(RegistryProtocol):
       # case modifies the port.
       location = response.headers['Location'][len('http://localhost:5000'):]

-      # PATCH the image data into the layer.
+      # PATCH the data into the blob.
       if options.chunks_for_upload is None:
-        self.conduct(session, 'PATCH', location, data=image.bytes, expected_status=204,
+        self.conduct(session, 'PATCH', location, data=blob_bytes, expected_status=204,
                      headers=headers)
       else:
         # If chunked upload is requested, upload the data as a series of chunks, checking

@@ -223,12 +426,12 @@ class V2Protocol(RegistryProtocol):
           patch_headers = {'Range': 'bytes=%s-%s' % (start_byte, end_byte)}
           patch_headers.update(headers)

-          contents_chunk = image.bytes[start_byte:end_byte]
+          contents_chunk = blob_bytes[start_byte:end_byte]
           self.conduct(session, 'PATCH', location, data=contents_chunk,
                        expected_status=expected_code,
                        headers=patch_headers)
           if expected_code != 204:
-            return
+            return False

           # Retrieve the upload status at each point, and ensure it is valid.
           status_url = '/v2/%s/blobs/uploads/%s' % (self.repo_name(namespace, repo_name),

@@ -239,56 +442,36 @@ class V2Protocol(RegistryProtocol):
           assert response.headers['Range'] == "bytes=0-%s" % end_byte

       if options.cancel_blob_upload:
-        self.conduct(session, 'DELETE', location, params=dict(digest=checksum),
+        self.conduct(session, 'DELETE', location, params=dict(digest=blob_digest),
                      expected_status=204, headers=headers)

         # Ensure the upload was canceled.
         status_url = '/v2/%s/blobs/uploads/%s' % (self.repo_name(namespace, repo_name),
                                                   upload_uuid)
         self.conduct(session, 'GET', status_url, expected_status=404, headers=headers)
-        return
+        return False

-      # Finish the layer upload with a PUT.
-      response = self.conduct(session, 'PUT', location, params=dict(digest=checksum),
+      # Finish the blob upload with a PUT.
+      response = self.conduct(session, 'PUT', location, params=dict(digest=blob_digest),
                               expected_status=201, headers=headers)
-      assert response.headers['Docker-Content-Digest'] == checksum
+      assert response.headers['Docker-Content-Digest'] == blob_digest

-      # Ensure the layer exists now.
+      # Ensure the blob exists now.
       response = self.conduct(session, 'HEAD',
-                              '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), checksum),
+                              '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name),
+                                                   blob_digest),
                               expected_status=200, headers=headers)

-      assert response.headers['Docker-Content-Digest'] == checksum
-      assert response.headers['Content-Length'] == str(len(image.bytes))
+      assert response.headers['Docker-Content-Digest'] == blob_digest
+      assert response.headers['Content-Length'] == str(len(blob_bytes))

-      # And retrieve the layer data.
+      # And retrieve the blob data.
       result = self.conduct(session, 'GET',
-                            '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), checksum),
+                            '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), blob_digest),
                             headers=headers, expected_status=200)
-      assert result.content == image.bytes
-
-    # Write a manifest for each tag.
-    for tag_name in tag_names:
-      manifest = manifests[tag_name]
-
-      # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
-      # expect a 202 response for success.
-      put_code = 404 if options.manifest_invalid_blob_references else 202
-      manifest_headers = {'Content-Type': 'application/json'}
-      manifest_headers.update(headers)
-
-      if options.manifest_content_type is not None:
-        manifest_headers['Content-Type'] = options.manifest_content_type
-
-      tag_or_digest = tag_name if not options.push_by_manifest_digest else manifest.digest
-      self.conduct(session, 'PUT',
-                   '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_or_digest),
-                   data=manifest.bytes,
-                   expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
-                   headers=manifest_headers)
-
-    return PushResult(checksums=checksums, manifests=manifests, headers=headers)
+      assert result.content == blob_bytes

+    return True

   def delete(self, session, namespace, repo_name, tag_names, credentials=None,
              expected_failure=None, options=None):
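
The refactored `_push_blobs` exercises the standard Docker V2 monolithic upload flow; schematically (a sketch only, using a bare `requests`-style session like the one these tests run against):

# 1. POST /v2/<repo>/blobs/uploads/     -> 202 Accepted, Location header for the upload
# 2. PATCH <location> with blob bytes   -> 204 No Content
# 3. PUT <location>?digest=sha256:...   -> 201 Created, echoes Docker-Content-Digest
resp = session.post('/v2/%s/blobs/uploads/' % repo, headers=headers)
location = resp.headers['Location']
session.patch(location, data=blob_bytes, headers=headers)
session.put(location, params={'digest': blob_digest}, headers=headers)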
@@ -335,6 +518,9 @@ class V2Protocol(RegistryProtocol):
       'Authorization': 'Bearer ' + token,
     }

+    if self.schema2:
+      headers['Accept'] = ','.join(options.accept_mimetypes or DOCKER_SCHEMA2_CONTENT_TYPES)
+
     manifests = {}
     image_ids = {}
     for tag_name in tag_names:

@@ -348,18 +534,39 @@ class V2Protocol(RegistryProtocol):
         return None

       # Ensure the manifest returned by us is valid.
-      manifest = DockerSchema1Manifest(response.text)
-      manifests[tag_name] = manifest
-      image_ids[tag_name] = manifest.leaf_layer.v1_metadata.image_id
+      if not self.schema2:
+        assert response.headers['Content-Type'] in DOCKER_SCHEMA1_CONTENT_TYPES

-      # Verify the layers.
-      for index, layer in enumerate(manifest.layers):
+      manifest = parse_manifest_from_bytes(response.text, response.headers['Content-Type'])
+      manifests[tag_name] = manifest
+
+      if manifest.schema_version == 1:
+        image_ids[tag_name] = manifest.leaf_layer_v1_image_id
+
+      # Verify the blobs.
+      layer_index = 0
+      empty_count = 0
+      blob_digests = list(manifest.blob_digests)
+      for image in images:
+        if manifest.schema_version == 2 and image.is_empty:
+          empty_count += 1
+          continue
+
+        # If the layer is remote, then we expect the blob to *not* exist in the system.
+        blob_digest = blob_digests[layer_index]
+        expected_status = 404 if image.urls else 200
         result = self.conduct(session, 'GET',
                               '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name),
-                                                   layer.digest),
-                              expected_status=200,
+                                                   blob_digest),
+                              expected_status=expected_status,
                               headers=headers)
-        assert result.content == images[index].bytes

+        if expected_status == 200:
+          assert result.content == image.bytes
+
+        layer_index += 1
+
+      assert (len(blob_digests) + empty_count) >= len(images)  # Schema 2 has 1 extra for config

     return PullResult(manifests=manifests, image_ids=image_ids)

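The closing assertion accounts for the schema 2 config blob: for n images of which e have empty layers, a schema 2 manifest references n - e layer blobs plus one config blob, so len(blob_digests) + empty_count comes to n + 1, which is at least n; for schema 1 there are exactly n layer digests and empty_count stays 0, so the bound holds with equality. For example, three images with one empty layer yield two layer digests plus the config digest: 3 digests + 1 empty = 4 >= 3 images.
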
@@ -7,14 +7,20 @@ from cStringIO import StringIO
 from enum import Enum, unique
 from six import add_metaclass

-Image = namedtuple('Image', ['id', 'parent_id', 'bytes', 'size', 'config'])
-Image.__new__.__defaults__ = (None, None)
+from image.docker.schema2 import EMPTY_LAYER_BYTES

-PushResult = namedtuple('PushResult', ['checksums', 'manifests', 'headers'])
+Image = namedtuple('Image', ['id', 'parent_id', 'bytes', 'size', 'config', 'created', 'urls',
+                             'is_empty'])
+Image.__new__.__defaults__ = (None, None, None, None, False)
+
+PushResult = namedtuple('PushResult', ['manifests', 'headers'])
 PullResult = namedtuple('PullResult', ['manifests', 'image_ids'])


-def layer_bytes_for_contents(contents, mode='|gz', other_files=None):
+def layer_bytes_for_contents(contents, mode='|gz', other_files=None, empty=False):
+  if empty:
+    return EMPTY_LAYER_BYTES
+
   layer_data = StringIO()
   tar_file = tarfile.open(fileobj=layer_data, mode='w' + mode)

@@ -53,6 +59,7 @@ class Failures(Enum):
   DISALLOWED_LIBRARY_NAMESPACE = 'disallowed-library-namespace'
   MISSING_TAG = 'missing-tag'
   INVALID_TAG = 'invalid-tag'
+  INVALID_MANIFEST = 'invalid-manifest'
   INVALID_IMAGES = 'invalid-images'
   UNSUPPORTED_CONTENT_TYPE = 'unsupported-content-type'
   INVALID_BLOB = 'invalid-blob'

@@ -62,14 +69,13 @@ class Failures(Enum):

 class ProtocolOptions(object):
   def __init__(self):
-    self.munge_shas = False
     self.scopes = None
     self.cancel_blob_upload = False
     self.manifest_invalid_blob_references = False
     self.chunks_for_upload = None
     self.skip_head_checks = False
     self.manifest_content_type = None
-    self.accept_mimetypes = '*/*'
+    self.accept_mimetypes = None
     self.mount_blobs = None
     self.push_by_manifest_digest = False

@@ -122,6 +128,6 @@ class RegistryProtocol(object):
     expected_status = failures.get(expected_failure, expected_status)

     result = session.request(method, url, params=params, data=data, headers=headers, auth=auth)
-    msg = "Expected response %s, got %s" % (expected_status, result.status_code)
+    msg = "Expected response %s, got %s: %s" % (expected_status, result.status_code, result.text)
     assert result.status_code == expected_status, msg
     return result
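
Because the defaults tuple now covers the last five fields of the widened namedtuple, existing call sites that pass only `id`, `parent_id`, and `bytes` keep working; a sketch of the two interesting new shapes:

# Old-style image: trailing fields default to size=None, config=None, created=None,
# urls=None, is_empty=False.
img = Image(id='someid', parent_id=None, bytes=layer_bytes_for_contents('hello'))

# Remote (foreign) layer: carries URLs, so its bytes are never pushed to the registry.
remote = Image(id='remoteid', parent_id='someid', bytes=layer_bytes_for_contents('x'),
               urls=['http://some/url'])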
@@ -1,5 +1,4 @@
-# pylint: disable=W0401, W0621, W0613, W0614, R0913
 import os
 import hashlib
 import tarfile

@@ -21,6 +20,8 @@ from test.registry.protocols import Failures, Image, layer_bytes_for_contents, P
 from app import instance_keys
 from data.model.tag import list_repository_tags
 from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+from image.docker.schema2.list import DockerSchema2ManifestListBuilder
+from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
 from util.security.registry_jwt import decode_bearer_header
 from util.timedeltastring import convert_to_timedelta

@@ -38,6 +39,19 @@ def test_basic_push_pull(pusher, puller, basic_images, liveserver_session, app_r
               credentials=credentials)


+def test_empty_layer(pusher, puller, images_with_empty_layer, liveserver_session, app_reloader):
+  """ Test: Push and pull of an image with an empty layer to a new repository. """
+  credentials = ('devtable', 'password')
+
+  # Push a new repository.
+  pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images_with_empty_layer,
+              credentials=credentials)
+
+  # Pull the repository to verify.
+  puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', images_with_empty_layer,
+              credentials=credentials)
+
+
 def test_multi_layer_images_push_pull(pusher, puller, multi_layer_images, liveserver_session,
                                       app_reloader):
   """ Test: Basic push and pull of a multi-layered image to a new repository. """
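
The `images_with_empty_layer` fixture itself is defined elsewhere in this PR; a plausible sketch of its shape, assuming it combines `layer_bytes_for_contents(..., empty=True)` with the new `is_empty` flag (the names and scope here are hypothetical):

@pytest.fixture(scope="session")
def images_with_empty_layer():
  # Hypothetical sketch: a parent layer, the special empty gzipped layer, and a leaf layer.
  return [
    Image(id='parentid', parent_id=None, bytes=layer_bytes_for_contents('parent contents')),
    Image(id='emptyid', parent_id='parentid', bytes=layer_bytes_for_contents('', empty=True),
          is_empty=True),
    Image(id='someid', parent_id='emptyid', bytes=layer_bytes_for_contents('some contents')),
  ]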
@@ -75,10 +89,13 @@ def test_overwrite_tag(pusher, puller, basic_images, different_images, liveserve
               credentials=credentials)


+@pytest.mark.skipif(os.getenv('OCI_DATA_MODEL') == 'true', reason="no backfill in new model")
 def test_no_tag_manifests(pusher, puller, basic_images, liveserver_session, app_reloader,
-                          liveserver, registry_server_executor):
+                          liveserver, registry_server_executor, data_model):
   """ Test: Basic pull without manifests. """
+  if data_model == 'oci_model':
+    # Skip; OCI model doesn't have tag backfill.
+    return
+
   credentials = ('devtable', 'password')

   # Push a new repository.
@@ -173,7 +190,7 @@ def test_application_repo(pusher, puller, basic_images, liveserver_session, app_
               credentials=credentials, expected_failure=Failures.APP_REPOSITORY)


-def test_middle_layer_different_sha(manifest_protocol, v1_protocol, liveserver_session,
+def test_middle_layer_different_sha(v2_protocol, v1_protocol, liveserver_session,
                                     app_reloader):
   """ Test: Pushing of a 3-layer image with the *same* V1 ID's, but the middle layer having
            different bytes, must result in new IDs being generated for the leaf layer, as

@@ -189,10 +206,10 @@ def test_middle_layer_different_sha(manifest_protocol, v1_protocol, liveserver_s
   ]

   # First push and pull the images, to ensure we have the basics setup and working.
-  manifest_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', first_images,
-                         credentials=credentials)
-  first_pull_result = manifest_protocol.pull(liveserver_session, 'devtable', 'newrepo', 'latest',
-                                             first_images, credentials=credentials)
+  v2_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', first_images,
+                   credentials=credentials)
+  first_pull_result = v2_protocol.pull(liveserver_session, 'devtable', 'newrepo', 'latest',
+                                       first_images, credentials=credentials)
   first_manifest = first_pull_result.manifests['latest']
   assert set([image.id for image in first_images]) == set(first_manifest.image_ids)
   assert first_pull_result.image_ids['latest'] == 'leafimage'

@@ -205,11 +222,10 @@ def test_middle_layer_different_sha(manifest_protocol, v1_protocol, liveserver_s
   # Push and pull the image, ensuring that the produced ID for the middle and leaf layers
   # are synthesized.
   options = ProtocolOptions()
-  options.munge_shas = True
   options.skip_head_checks = True

-  manifest_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', second_images,
-                         credentials=credentials, options=options)
+  v2_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', second_images,
+                   credentials=credentials, options=options)
   second_pull_result = v1_protocol.pull(liveserver_session, 'devtable', 'newrepo', 'latest',
                                         second_images, credentials=credentials, options=options)

@@ -425,8 +441,8 @@ def test_pull_library_with_support_disabled(puller, basic_images, liveserver_ses
               expected_failure=Failures.DISALLOWED_LIBRARY_NAMESPACE)


-def test_image_replication(pusher, basic_images, liveserver_session, app_reloader, liveserver,
-                           registry_server_executor):
+def test_image_replication(pusher, puller, basic_images, liveserver_session, app_reloader,
+                           liveserver, registry_server_executor):
   """ Test: Ensure that entries are created for replication of the images pushed. """
   credentials = ('devtable', 'password')

@@ -434,9 +450,30 @@ def test_image_replication(pusher, basic_images, liveserver_session, app_reloade
     pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
                 credentials=credentials)

+    result = puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
+                         credentials=credentials)
+
   # Ensure that entries were created for each image.
-  for image in basic_images:
-    r = registry_server_executor.on(liveserver).get_storage_replication_entry(image.id)
+  for image_id in result.image_ids.values():
+    r = registry_server_executor.on(liveserver).get_storage_replication_entry(image_id)
     assert r.text == 'OK'


+def test_image_replication_empty_layers(pusher, puller, images_with_empty_layer, liveserver_session,
+                                        app_reloader, liveserver, registry_server_executor):
+  """ Test: Ensure that entries are created for replication of the images pushed. """
+  credentials = ('devtable', 'password')
+
+  with FeatureFlagValue('STORAGE_REPLICATION', True, registry_server_executor.on(liveserver)):
+    pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images_with_empty_layer,
+                credentials=credentials)
+
+    result = puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest',
+                         images_with_empty_layer, credentials=credentials)
+
+  # Ensure that entries were created for each image.
+  for image_id in result.image_ids.values():
+    r = registry_server_executor.on(liveserver).get_storage_replication_entry(image_id)
+    assert r.text == 'OK'
+
@@ -481,7 +518,7 @@ def test_tag_validaton(tag_name, expected_failure, pusher, basic_images, liveser
               expected_failure=expected_failure)


-def test_invalid_parent(pusher, liveserver_session, app_reloader):
+def test_invalid_parent(legacy_pusher, liveserver_session, app_reloader):
   """ Test: Attempt to push an image with an invalid/missing parent. """
   images = [
     Image(id='childimage', parent_id='parentimage', size=None,

@@ -490,12 +527,12 @@ def test_invalid_parent(pusher, liveserver_session, app_reloader):

   credentials = ('devtable', 'password')

-  pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images,
-              credentials=credentials,
-              expected_failure=Failures.INVALID_IMAGES)
+  legacy_pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images,
+                     credentials=credentials,
+                     expected_failure=Failures.INVALID_IMAGES)


-def test_wrong_image_order(pusher, liveserver_session, app_reloader):
+def test_wrong_image_order(legacy_pusher, liveserver_session, app_reloader):
   """ Test: Attempt to push an image with its layers in the wrong order. """
   images = [
     Image(id='childimage', parent_id='parentimage', size=None,

@@ -506,9 +543,9 @@ def test_wrong_image_order(pusher, liveserver_session, app_reloader):

   credentials = ('devtable', 'password')

-  pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images,
-              credentials=credentials,
-              expected_failure=Failures.INVALID_IMAGES)
+  legacy_pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images,
+                     credentials=credentials,
+                     expected_failure=Failures.INVALID_IMAGES)


 @pytest.mark.parametrize('labels', [

@@ -590,8 +627,12 @@ def test_expiration_label(label_value, expected_expiration, manifest_protocol, l
   'application/vnd.docker.distribution.manifest.v2+json',
 ])
 def test_unsupported_manifest_content_type(content_type, manifest_protocol, basic_images,
-                                           liveserver_session, app_reloader):
+                                           data_model, liveserver_session, app_reloader):
   """ Test: Attempt to push a manifest with an unsupported media type. """
+  if data_model == 'oci_model':
+    # Skip; OCI requires the new manifest content types.
+    return
+
   credentials = ('devtable', 'password')

   options = ProtocolOptions()

@@ -605,19 +646,23 @@ def test_unsupported_manifest_content_type(content_type, manifest_protocol, basi


 @pytest.mark.parametrize('accept_mimetypes', [
-  [('application/vnd.oci.image.manifest.v1+json', 1)],
-  [('application/vnd.docker.distribution.manifest.v2+json', 1),
-   ('application/vnd.docker.distribution.manifest.list.v2+json', 1)],
-  [('application/vnd.foo.bar', 1)],
+  ['application/vnd.oci.image.manifest.v1+json'],
+  ['application/vnd.docker.distribution.manifest.v2+json',
+   'application/vnd.docker.distribution.manifest.list.v2+json'],
+  ['application/vnd.foo.bar'],
 ])
 def test_unsupported_manifest_accept_headers(accept_mimetypes, manifest_protocol, basic_images,
-                                             liveserver_session, app_reloader):
+                                             data_model, liveserver_session, app_reloader):
   """ Test: Attempt to push a manifest with an unsupported accept headers. """
+  if data_model == 'oci_model':
+    # Skip; OCI requires the new manifest content types.
+    return
+
   credentials = ('devtable', 'password')

   options = ProtocolOptions()
   options.manifest_content_type = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
-  options.accept_mimetypes = str(Accept(accept_mimetypes))
+  options.accept_mimetypes = accept_mimetypes

   # Attempt to push a new repository.
   manifest_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
@@ -640,15 +685,18 @@ def test_invalid_blob_reference(manifest_protocol, basic_images, liveserver_sess
               expected_failure=Failures.INVALID_BLOB)


-def test_delete_tag(pusher, puller, basic_images, liveserver_session,
+def test_delete_tag(pusher, puller, basic_images, different_images, liveserver_session,
                     app_reloader):
   """ Test: Push a repository, delete a tag, and attempt to pull. """
   credentials = ('devtable', 'password')

   # Push the tags.
-  result = pusher.push(liveserver_session, 'devtable', 'newrepo', ['one', 'two'],
+  result = pusher.push(liveserver_session, 'devtable', 'newrepo', 'one',
                        basic_images, credentials=credentials)

+  pusher.push(liveserver_session, 'devtable', 'newrepo', 'two',
+              different_images, credentials=credentials)
+
   # Delete tag `one` by digest or tag.
   pusher.delete(liveserver_session, 'devtable', 'newrepo',
                 result.manifests['one'].digest if result.manifests else 'one',

@@ -660,7 +708,7 @@ def test_delete_tag(pusher, puller, basic_images, liveserver_session,
               expected_failure=Failures.UNKNOWN_TAG)

   # Pull tag `two` to verify it works.
-  puller.pull(liveserver_session, 'devtable', 'newrepo', 'two', basic_images,
+  puller.pull(liveserver_session, 'devtable', 'newrepo', 'two', different_images,
               credentials=credentials)

@@ -1086,7 +1134,7 @@ EXPECTED_ACI_MANIFEST = {
     "eventHandlers": [],
     "ports": [],
     "annotations": [
-      {"name": "created", "value": ""},
+      {"name": "created", "value": "2018-04-03T18:37:09.284840891Z"},
       {"name": "homepage", "value": "http://localhost:5000/devtable/newrepo:latest"},
       {"name": "quay.io/derived-image",
        "value": "035333848582cdb72d2bac4a0809bc7eed9d88004cfb3463562013fce53c2499"},

@@ -1151,7 +1199,8 @@ def test_blob_mounting(push_user, push_namespace, push_repo, mount_repo_name, ex
   options = ProtocolOptions()
   options.scopes = ['repository:devtable/newrepo:push,pull',
                     'repository:%s:pull' % (mount_repo_name)]
-  options.mount_blobs = {image.id: mount_repo_name for image in basic_images}
+  options.mount_blobs = {'sha256:' + hashlib.sha256(image.bytes).hexdigest(): mount_repo_name
+                         for image in basic_images}

   manifest_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
                          credentials=('devtable', 'password'),
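
With blobs now addressed by digest rather than by V1 image ID, the mount map is keyed by the same digests the registry uses; the resulting cross-repository mount issued by `_push_blobs` has this shape (a sketch of the request only):

# POST with mount/from parameters; 201 Created indicates the blob was mounted
# from mount_repo_name without re-uploading its bytes.
session.post('/v2/devtable/newrepo/blobs/uploads/',
             params={'mount': blob_digest, 'from': mount_repo_name},
             headers=headers)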
@@ -1330,8 +1379,8 @@ def test_push_tag_existing_image(v1_protocol, puller, basic_images, liveserver_s
   credentials = ('devtable', 'password')

   # Push a new repository.
-  result = v1_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
-                            credentials=credentials)
+  v1_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
+                   credentials=credentials)

   # Push the same image/manifest to another tag in the repository.
   v1_protocol.tag(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images[-1],

@@ -1340,3 +1389,320 @@ def test_push_tag_existing_image(v1_protocol, puller, basic_images, liveserver_s
   # Pull the repository to verify.
   puller.pull(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images,
               credentials=credentials)
+
+
+@pytest.mark.parametrize('schema_version', [
+  1,
+  2,
+])
+@pytest.mark.parametrize('is_amd', [
+  True,
+  False
+])
+def test_push_pull_manifest_list_back_compat(v22_protocol, legacy_puller, basic_images,
+                                             different_images, liveserver_session, app_reloader,
+                                             schema_version, data_model, is_amd):
+  """ Test: Push a new tag with a manifest list containing two manifests, one (possibly) legacy
+      and one not, and, if there is a legacy manifest, ensure it can be pulled.
+  """
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifests that will go in the list.
+  blobs = {}
+
+  signed = v22_protocol.build_schema1('devtable', 'newrepo', 'latest', basic_images, blobs, options)
+  first_manifest = signed.unsigned()
+  if schema_version == 2:
+    first_manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+
+  second_manifest = v22_protocol.build_schema2(different_images, blobs, options)
+
+  # Create and push the manifest list.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(first_manifest, 'amd64' if is_amd else 'something', 'linux')
+  builder.add_manifest(second_manifest, 'arm', 'linux')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [first_manifest, second_manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Pull the tag and ensure we (don't) get back the basic images, since they are(n't) part of the
+  # amd64+linux manifest.
+  legacy_puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
+                     credentials=credentials,
+                     expected_failure=Failures.UNKNOWN_TAG if not is_amd else None)
+
+
+@pytest.mark.parametrize('schema_version', [
+  1,
+  2,
+])
+def test_push_pull_manifest_list(v22_protocol, basic_images, different_images, liveserver_session,
+                                 app_reloader, schema_version, data_model):
+  """ Test: Push a new tag with a manifest list containing two manifests, one (possibly) legacy
+      and one not, and pull it.
+  """
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifests that will go in the list.
+  blobs = {}
+
+  signed = v22_protocol.build_schema1('devtable', 'newrepo', 'latest', basic_images, blobs, options)
+  first_manifest = signed.unsigned()
+  if schema_version == 2:
+    first_manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+
+  second_manifest = v22_protocol.build_schema2(different_images, blobs, options)
+
+  # Create and push the manifest list.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(first_manifest, 'amd64', 'linux')
+  builder.add_manifest(second_manifest, 'arm', 'linux')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [first_manifest, second_manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Pull and verify the manifest list.
+  v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         credentials=credentials, options=options)
+
+
+def test_push_pull_manifest_remote_layers(v22_protocol, legacy_puller, liveserver_session,
+                                          app_reloader, remote_images, data_model):
+  """ Test: Push a new tag with a manifest which contains at least one remote layer, and then
+      pull that manifest back.
+  """
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+
+  # Push a new repository.
+  v22_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', remote_images,
+                    credentials=credentials)
+
+  # Pull the repository to verify.
+  v22_protocol.pull(liveserver_session, 'devtable', 'newrepo', 'latest', remote_images,
+                    credentials=credentials)
+
+  # Ensure that the image cannot be pulled by a legacy protocol.
+  legacy_puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', remote_images,
+                     credentials=credentials, expected_failure=Failures.UNKNOWN_TAG)
+
+
+def test_push_manifest_list_missing_manifest(v22_protocol, basic_images, liveserver_session,
+                                             app_reloader, data_model):
+  """ Test: Attempt to push a new tag with a manifest list containing an invalid manifest.
+  """
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifests that will go in the list.
+  blobs = {}
+  manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+
+  # Create and push the manifest list, but without the manifest itself.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(manifest, 'amd64', 'linux')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [], blobs,
+                         credentials=credentials, options=options,
+                         expected_failure=Failures.INVALID_MANIFEST)
+
+
+def test_push_pull_manifest_list_again(v22_protocol, basic_images, different_images,
+                                       liveserver_session, app_reloader, data_model):
+  """ Test: Push a new tag with a manifest list containing two manifests, push it again, and pull
+      it.
+  """
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifests that will go in the list.
+  blobs = {}
+
+  first_manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+  second_manifest = v22_protocol.build_schema2(different_images, blobs, options)
+
+  # Create and push the manifest list.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(first_manifest, 'amd64', 'linux')
+  builder.add_manifest(second_manifest, 'arm', 'linux')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [first_manifest, second_manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Push the manifest list again. This should more or less no-op.
+  options.skip_head_checks = True
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [first_manifest, second_manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Pull and verify the manifest list.
+  v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         credentials=credentials, options=options)
+
+
+def test_push_pull_manifest_list_duplicate_manifest(v22_protocol, basic_images, liveserver_session,
+                                                    app_reloader, data_model):
+  """ Test: Push a manifest list that contains the same child manifest twice.
+  """
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifest that will go in the list.
+  blobs = {}
+  manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+
+  # Create and push the manifest list.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(manifest, 'amd64', 'linux')
+  builder.add_manifest(manifest, 'amd32', 'linux')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Pull and verify the manifest list.
+  v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         credentials=credentials, options=options)
+
+
+def test_squashed_images_empty_layer(pusher, images_with_empty_layer, liveserver_session,
+                                     liveserver, registry_server_executor, app_reloader):
+  """ Test: Pulling of squashed images for a manifest with empty layers. """
+  credentials = ('devtable', 'password')
+
+  # Push an image to download.
+  pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', images_with_empty_layer,
+              credentials=credentials)
+
+  # Pull the squashed version.
+  response = liveserver_session.get('/c1/squash/devtable/newrepo/latest', auth=credentials)
+  assert response.status_code == 200
+
+  tar = tarfile.open(fileobj=StringIO(response.content))
+
+  # Verify the squashed image.
+  expected_image_id = 'cdc6d6c0d07d2cbacfc579e49ce0c256c5084b9b2b16c1b1b0c45f26a12a4ba5'
+  expected_names = ['repositories',
+                    expected_image_id,
+                    '%s/json' % expected_image_id,
+                    '%s/VERSION' % expected_image_id,
+                    '%s/layer.tar' % expected_image_id]
+
+  assert tar.getnames() == expected_names
+
+
+def test_squashed_image_unsupported(v22_protocol, basic_images, liveserver_session, liveserver,
+                                    app_reloader, data_model):
+  """ Test: Attempting to pull a squashed image for a manifest list without an amd64+linux entry.
+  """
+  credentials = ('devtable', 'password')
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifest that will go in the list.
+  blobs = {}
+  manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+
+  # Create and push the manifest list.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(manifest, 'foobar', 'someos')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Attempt to pull the squashed version.
+  response = liveserver_session.get('/c1/squash/devtable/newrepo/latest', auth=credentials)
+  assert response.status_code == 404
+
+
+def test_squashed_image_manifest_list(v22_protocol, basic_images, liveserver_session, liveserver,
+                                      app_reloader, data_model):
+  """ Test: Pull a squashed image for a manifest list with an amd64+linux entry.
+  """
+  credentials = ('devtable', 'password')
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+  options = ProtocolOptions()
+
+  # Build the manifest that will go in the list.
+  blobs = {}
+  manifest = v22_protocol.build_schema2(basic_images, blobs, options)
+
+  # Create and push the manifest list.
+  builder = DockerSchema2ManifestListBuilder()
+  builder.add_manifest(manifest, 'amd64', 'linux')
+  manifestlist = builder.build()
+
+  v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
+                         [manifest], blobs,
+                         credentials=credentials, options=options)
+
+  # Pull the squashed version.
+  response = liveserver_session.get('/c1/squash/devtable/newrepo/latest', auth=credentials)
+  assert response.status_code == 200
+
+  # Verify the squashed image.
+  tar = tarfile.open(fileobj=StringIO(response.content))
+  expected_image_id = 'cdc6d6c0d07d2cbacfc579e49ce0c256c5084b9b2b16c1b1b0c45f26a12a4ba5'
+  expected_names = ['repositories',
+                    expected_image_id,
+                    '%s/json' % expected_image_id,
+                    '%s/VERSION' % expected_image_id,
+                    '%s/layer.tar' % expected_image_id]
+
+  assert tar.getnames() == expected_names
+
+
+def test_verify_schema2(v22_protocol, basic_images, liveserver_session, liveserver,
+                        app_reloader, data_model):
+  """ Test: Ensure that pushing of schema 2 manifests results in a pull of a schema2 manifest. """
+  credentials = ('devtable', 'password')
+  if data_model != 'oci_model':
+    return
+
+  credentials = ('devtable', 'password')
+
+  # Push a new repository.
+  v22_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
+                    credentials=credentials)
+
+  # Pull the repository to verify.
+  result = v22_protocol.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
+                             credentials=credentials)
+  manifest = result.manifests['latest']
+  assert manifest.schema_version == 2
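
The squash endpoint used by these tests resolves a manifest list to its amd64+linux entry before building the flattened tar; a client-side sketch of the behavior the tests assert (assuming the same `liveserver_session` fixture and Python 2's `cStringIO`):

# 200 -> a squashed tar is returned; 404 -> no amd64+linux manifest exists in the list.
response = liveserver_session.get('/c1/squash/devtable/newrepo/latest',
                                  auth=('devtable', 'password'))
if response.status_code == 200:
  tar = tarfile.open(fileobj=StringIO(response.content))
  print(tar.getnames())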
@@ -21,7 +21,8 @@ from cryptography.hazmat.backends import default_backend
 from endpoints.api import api_bp, api
 from endpoints.building import PreparedBuild
 from endpoints.webhooks import webhooks
-from app import app, config_provider, all_queues, dockerfile_build_queue, notification_queue
+from app import (app, config_provider, all_queues, dockerfile_build_queue, notification_queue,
+                 storage)
 from buildtrigger.basehandler import BuildTriggerHandler
 from initdb import setup_database_for_testing, finished_database_for_testing
 from data import database, model, appr_model

@@ -2909,7 +2910,7 @@ class TestListAndDeleteTag(ApiTestCase):
     for i in xrange(1, 9):
       tag_name = "tag" + str(i)
       remaining_tags.add(tag_name)
-      assert registry_model.retarget_tag(repo_ref, tag_name, latest_tag.legacy_image)
+      assert registry_model.retarget_tag(repo_ref, tag_name, latest_tag.legacy_image, storage)

     # Make sure we can iterate over all of them.
     json = self.getJsonResponse(ListRepositoryTags, params=dict(

@@ -12,6 +12,9 @@ class GarbageCollectionWorker(Worker):

   def _garbage_collection_repos(self):
     """ Performs garbage collection on repositories. """
+    # TODO(jschorr): Re-enable once GC is fixed for V22.
+    return
+
     with UseThenDisconnect(app.config):
       model.perform_garbage_collection()

@@ -47,21 +47,56 @@ class BrokenManifest(ManifestInterface):
   def bytes(self):
     return self._payload

-  @property
-  def layers(self):
+  def get_layers(self, content_retriever):
     return None

+  def get_legacy_image_ids(self, cr):
+    return []
+
   @property
   def legacy_image_ids(self):
     return []

-  @property
-  def leaf_layer_v1_image_id(self):
+  def get_leaf_layer_v1_image_id(self, cr):
     return None

   @property
   def blob_digests(self):
     return []

+  @property
+  def local_blob_digests(self):
+    return []
+
+  def child_manifests(self, lookup_manifest_fn):
+    return None
+
+  def get_manifest_labels(self, lookup_config_fn):
+    return {}
+
+  def unsigned(self):
+    return self
+
+  def generate_legacy_layers(self, images_map, lookup_config_fn):
+    return None
+
+  def get_schema1_manifest(self, namespace_name, repo_name, tag_name, lookup_fn):
+    return self
+
   @property
   def schema_version(self):
     return 1

+  @property
+  def layers_compressed_size(self):
+    return None
+
+  @property
+  def is_manifest_list(self):
+    return False
+
+  @property
+  def has_legacy_image(self):
+    return False
+
+  def get_requires_empty_layer_blob(self, content_retriever):
+    return False
+

 class ManifestBackfillWorker(Worker):