Update registry tests to test schema 2 manifest pushes and pulls

Note that tests for manifest *lists* will be added in a follow-up commit.
This commit is contained in:
Joseph Schorr 2018-11-13 17:15:00 +02:00
parent 7b9f56eff3
commit e752a9a73f
5 changed files with 171 additions and 98 deletions

View file

@ -4,6 +4,10 @@ import json
from enum import Enum, unique
from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
from image.docker.schema2.list import DockerSchema2ManifestListBuilder
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from image.docker.schema2.config import DockerSchema2Config
from image.docker.schemas import parse_manifest_from_bytes
from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
PullResult)
@ -56,8 +60,9 @@ class V2Protocol(RegistryProtocol):
},
}
def __init__(self, jwk):
def __init__(self, jwk, schema2=False):
self.jwk = jwk
self.schema2 = schema2
def ping(self, session):
result = session.get('/v2/')
@ -141,55 +146,92 @@ class V2Protocol(RegistryProtocol):
# Build fake manifests.
manifests = {}
blobs = {}
for tag_name in tag_names:
builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
if self.schema2:
builder = DockerSchema2ManifestBuilder()
for image in images:
checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
blobs[checksum] = image.bytes
for image in reversed(images):
checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
# If invalid blob references were requested, just make it up.
if options.manifest_invalid_blob_references:
checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
# If invalid blob references were requested, just make it up.
if options.manifest_invalid_blob_references:
checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
builder.add_layer(checksum, len(image.bytes))
layer_dict = {'id': image.id, 'parent': image.parent_id}
if image.config is not None:
layer_dict['config'] = image.config
config = {
"os": "linux",
"rootfs": {
"type": "layers",
"diff_ids": []
},
"history": [{
'created': '2018-04-03T18:37:09.284840891Z',
'created_by': (('/bin/sh -c #(nop) ENTRYPOINT %s' % image.config['Entrypoint'])
if image.config and image.config.get('Entrypoint')
else '/bin/sh -c #(nop) %s' % image.id),
} for image in images],
}
if image.size is not None:
layer_dict['Size'] = image.size
if images[-1].config:
config['config'] = images[-1].config
builder.add_layer(checksum, json.dumps(layer_dict))
config_json = json.dumps(config)
schema2_config = DockerSchema2Config(config_json)
builder.set_config(schema2_config)
# Build the manifest.
manifests[tag_name] = builder.build(self.jwk)
blobs[schema2_config.digest] = schema2_config.bytes
manifests[tag_name] = builder.build()
else:
builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
# Push the layer data.
checksums = {}
for image in reversed(images):
checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
checksums[image.id] = checksum
for image in reversed(images):
checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
blobs[checksum] = image.bytes
# If invalid blob references were requested, just make it up.
if options.manifest_invalid_blob_references:
checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
layer_dict = {'id': image.id, 'parent': image.parent_id}
if image.config is not None:
layer_dict['config'] = image.config
if image.size is not None:
layer_dict['Size'] = image.size
if image.created is not None:
layer_dict['created'] = image.created
builder.add_layer(checksum, json.dumps(layer_dict))
# Build the manifest.
manifests[tag_name] = builder.build(self.jwk)
# Push the blob data.
for blob_digest, blob_bytes in blobs.iteritems():
if not options.skip_head_checks:
# Layer data should not yet exist.
# Blob data should not yet exist.
self.conduct(session, 'HEAD',
'/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), checksum),
'/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), blob_digest),
expected_status=(404, expected_failure, V2ProtocolSteps.BLOB_HEAD_CHECK),
headers=headers)
# Check for mounting of blobs.
if options.mount_blobs and image.id in options.mount_blobs:
if options.mount_blobs and blob_digest in options.mount_blobs:
self.conduct(session, 'POST',
'/v2/%s/blobs/uploads/' % self.repo_name(namespace, repo_name),
params={
'mount': checksum,
'from': options.mount_blobs[image.id],
'mount': blob_digest,
'from': options.mount_blobs[blob_digest],
},
expected_status=(201, expected_failure, V2ProtocolSteps.MOUNT_BLOB),
headers=headers)
if expected_failure is not None:
return
else:
# Start a new upload of the layer data.
# Start a new upload of the blob data.
response = self.conduct(session, 'POST',
'/v2/%s/blobs/uploads/' % self.repo_name(namespace, repo_name),
expected_status=(202, expected_failure,
@ -206,9 +248,9 @@ class V2Protocol(RegistryProtocol):
# case modifies the port.
location = response.headers['Location'][len('http://localhost:5000'):]
# PATCH the image data into the layer.
# PATCH the data into the blob.
if options.chunks_for_upload is None:
self.conduct(session, 'PATCH', location, data=image.bytes, expected_status=204,
self.conduct(session, 'PATCH', location, data=blob_bytes, expected_status=204,
headers=headers)
else:
# If chunked upload is requested, upload the data as a series of chunks, checking
@ -223,7 +265,7 @@ class V2Protocol(RegistryProtocol):
patch_headers = {'Range': 'bytes=%s-%s' % (start_byte, end_byte)}
patch_headers.update(headers)
contents_chunk = image.bytes[start_byte:end_byte]
contents_chunk = blob_bytes[start_byte:end_byte]
self.conduct(session, 'PATCH', location, data=contents_chunk,
expected_status=expected_code,
headers=patch_headers)
@ -239,7 +281,7 @@ class V2Protocol(RegistryProtocol):
assert response.headers['Range'] == "bytes=0-%s" % end_byte
if options.cancel_blob_upload:
self.conduct(session, 'DELETE', location, params=dict(digest=checksum),
self.conduct(session, 'DELETE', location, params=dict(digest=blob_digest),
expected_status=204, headers=headers)
# Ensure the upload was canceled.
@ -248,24 +290,25 @@ class V2Protocol(RegistryProtocol):
self.conduct(session, 'GET', status_url, expected_status=404, headers=headers)
return
# Finish the layer upload with a PUT.
response = self.conduct(session, 'PUT', location, params=dict(digest=checksum),
# Finish the blob upload with a PUT.
response = self.conduct(session, 'PUT', location, params=dict(digest=blob_digest),
expected_status=201, headers=headers)
assert response.headers['Docker-Content-Digest'] == checksum
assert response.headers['Docker-Content-Digest'] == blob_digest
# Ensure the layer exists now.
# Ensure the blob exists now.
response = self.conduct(session, 'HEAD',
'/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), checksum),
'/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name),
blob_digest),
expected_status=200, headers=headers)
assert response.headers['Docker-Content-Digest'] == checksum
assert response.headers['Content-Length'] == str(len(image.bytes))
assert response.headers['Docker-Content-Digest'] == blob_digest
assert response.headers['Content-Length'] == str(len(blob_bytes))
# And retrieve the layer data.
# And retrieve the blob data.
result = self.conduct(session, 'GET',
'/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), checksum),
'/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), blob_digest),
headers=headers, expected_status=200)
assert result.content == image.bytes
assert result.content == blob_bytes
# Write a manifest for each tag.
for tag_name in tag_names:
@ -274,7 +317,7 @@ class V2Protocol(RegistryProtocol):
# Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
# expect a 202 response for success.
put_code = 404 if options.manifest_invalid_blob_references else 202
manifest_headers = {'Content-Type': 'application/json'}
manifest_headers = {'Content-Type': manifest.media_type}
manifest_headers.update(headers)
if options.manifest_content_type is not None:
@ -287,7 +330,7 @@ class V2Protocol(RegistryProtocol):
expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
headers=manifest_headers)
return PushResult(checksums=checksums, manifests=manifests, headers=headers)
return PushResult(manifests=manifests, headers=headers)
def delete(self, session, namespace, repo_name, tag_names, credentials=None,
@ -335,6 +378,10 @@ class V2Protocol(RegistryProtocol):
'Authorization': 'Bearer ' + token,
}
if self.schema2:
headers['Accept'] = [('application/vnd.docker.distribution.manifest.v2+json', 1),
('application/vnd.docker.distribution.manifest.list.v2+json', 1)]
manifests = {}
image_ids = {}
for tag_name in tag_names:
@ -348,9 +395,9 @@ class V2Protocol(RegistryProtocol):
return None
# Ensure the manifest returned by us is valid.
manifest = DockerSchema1Manifest(response.text)
manifest = parse_manifest_from_bytes(response.text, response.headers['Content-Type'])
manifests[tag_name] = manifest
image_ids[tag_name] = manifest.leaf_layer.v1_metadata.image_id
image_ids[tag_name] = manifest.leaf_layer_v1_image_id
# Verify the layers.
for index, layer in enumerate(manifest.layers):