Merge pull request #565 from coreos-inc/python-registry-v2-speccheck
New tests and small fixes while comparing against the V2 spec
Commit dfe564fe24
6 changed files with 232 additions and 33 deletions
@@ -40,7 +40,11 @@ def get_tag(namespace, repository, tag):
   permission = ReadRepositoryPermission(namespace, repository)

   if permission.can() or model.repository.repository_is_public(namespace, repository):
-    tag_image = model.tag.get_tag_image(namespace, repository, tag)
+    try:
+      tag_image = model.tag.get_tag_image(namespace, repository, tag)
+    except model.DataModelException:
+      abort(404)
+
     resp = make_response('"%s"' % tag_image.docker_image_id)
     resp.headers['Content-Type'] = 'application/json'
     return resp

@@ -30,20 +30,6 @@ class _InvalidRangeHeader(Exception):
   pass


-@v2_bp.route(BLOB_DIGEST_ROUTE, methods=['HEAD'])
-@process_jwt_auth
-@require_repo_read
-@anon_protect
-def check_blob_existence(namespace, repo_name, digest):
-  try:
-    model.image.get_repo_image_by_storage_checksum(namespace, repo_name, digest)
-
-    # The response body must be empty for a successful HEAD request
-    return make_response('')
-  except model.InvalidImageException:
-    raise BlobUnknown()
-
-
 def _base_blob_fetch(namespace, repo_name, digest):
   """ Some work that is common to both GET and HEAD requests. Callers MUST check for proper
       authorization before calling this method.
@@ -55,6 +41,7 @@ def _base_blob_fetch(namespace, repo_name, digest):

   headers = {
     'Docker-Content-Digest': digest,
+    'Content-Length': found.image_size,
   }

   # Add the Accept-Ranges header if the storage engine supports resumable
@@ -76,6 +63,7 @@ def check_blob_exists(namespace, repo_name, digest):

   response = make_response('')
   response.headers.extend(headers)
+  response.headers['Content-Length'] = headers['Content-Length']
   return response

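For context, these two hunks make the blob existence check answer with both Docker-Content-Digest and Content-Length, which is what V2 clients expect from HEAD /v2/<name>/blobs/<digest>. A rough client-side check of that behaviour could look like the sketch below; the registry host, repository, digest and token are placeholder values, and the requests library is assumed.

import requests

# Placeholder values for illustration only.
REGISTRY = 'https://registry.example.com'
REPO = 'devtable/newrepo'
DIGEST = 'sha256:' + '0' * 64
HEADERS = {'Authorization': 'Bearer <jwt token>'}

resp = requests.head('%s/v2/%s/blobs/%s' % (REGISTRY, REPO, DIGEST), headers=HEADERS)

# A known blob should come back 200 with an empty body, the digest echoed in
# Docker-Content-Digest and its size in Content-Length; an unknown digest
# surfaces as a BLOB_UNKNOWN error instead.
assert resp.status_code == 200
assert resp.headers['Docker-Content-Digest'] == DIGEST
assert int(resp.headers['Content-Length']) >= 0
assert resp.content == b''
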
@@ -149,8 +137,9 @@ def fetch_existing_upload(namespace, repo_name, upload_uuid):
   except model.InvalidBlobUpload:
     raise BlobUploadUnknown()

+  # Note: Docker byte ranges are exclusive so we have to add one to the byte count.
   accepted = make_response('', 204)
-  accepted.headers['Range'] = _render_range(found.byte_count)
+  accepted.headers['Range'] = _render_range(found.byte_count + 1)
   accepted.headers['Docker-Upload-UUID'] = upload_uuid
   return accepted

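The _render_range helper itself is not part of this diff, so the sketch below is only an assumption about its shape, inferred from the Range values the registry tests later assert on (bytes=0-<end>). The point of the change above is that Docker treats the upper bound as exclusive, so the caller now passes found.byte_count + 1 rather than the raw count.

def render_range(end_byte):
  # Hypothetical stand-in for _render_range: renders the upload status
  # Range header in the 'bytes=0-<end>' form checked by the tests below.
  return 'bytes=0-%s' % end_byte
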
@@ -305,3 +294,15 @@ def cancel_upload(namespace, repo_name, upload_uuid):
   storage.cancel_chunked_upload({found.location.name}, found.uuid, found.storage_metadata)

   return make_response('', 204)
+
+
+@v2_bp.route('/<namespace>/<repo_name>/blobs/<digest>', methods=['DELETE'])
+@process_jwt_auth
+@require_repo_write
+@anon_protect
+def delete_digest(namespace, repo_name, upload_uuid):
+  # We do not support deleting arbitrary digests, as they break repo images.
+  return make_response('', 501)

@@ -47,7 +47,7 @@ class DigestInvalid(V2RegistryException):
 class ManifestBlobUnknown(V2RegistryException):
   def __init__(self, detail=None):
     super(ManifestBlobUnknown, self).__init__('MANIFEST_BLOB_UNKNOWN',
-                                              'blob unknown to registry',
+                                              'manifest blob unknown to registry',
                                               detail)

@@ -15,7 +15,7 @@ from app import storage, docker_v2_signing_key
 from auth.jwt_auth import process_jwt_auth
 from endpoints.decorators import anon_protect
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write
-from endpoints.v2.errors import (ManifestBlobUnknown, ManifestInvalid, ManifestUnverified,
+from endpoints.v2.errors import (BlobUnknown, ManifestInvalid, ManifestUnverified,
                                  ManifestUnknown, TagInvalid, NameInvalid)
 from endpoints.trackhelper import track_and_log
 from endpoints.notificationhelper import spawn_notification
@@ -102,7 +102,12 @@ class SignedManifest(object):
     """
     for blob_sum_obj, history_obj in reversed(zip(self._parsed[_FS_LAYERS_KEY],
                                                   self._parsed[_HISTORY_KEY])):
-      image_digest = digest_tools.Digest.parse_digest(blob_sum_obj[_BLOB_SUM_KEY])
+      try:
+        image_digest = digest_tools.Digest.parse_digest(blob_sum_obj[_BLOB_SUM_KEY])
+      except digest_tools.InvalidDigestException:
+        raise ManifestInvalid()
+
       metadata_string = history_obj[_V1_COMPAT_KEY]

       v1_metadata = json.loads(metadata_string)
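digest_tools.Digest.parse_digest is internal to the codebase and not shown in this diff; as a rough, hypothetical illustration of the kind of validation it performs on a blobSum value (an '<algorithm>:<hex>' content digest), something like the following would reject the malformed digests that this hunk now converts into ManifestInvalid.

import re

# Illustrative only; the real parser lives in digest_tools.
DIGEST_PATTERN = re.compile(r'^(?P<algorithm>[a-z0-9]+):(?P<hex>[a-f0-9]{32,})$')

def parse_digest(blob_sum):
  match = DIGEST_PATTERN.match(blob_sum)
  if match is None:
    raise ValueError('Invalid digest: %s' % blob_sum)
  return match.group('algorithm'), match.group('hex')

parse_digest('sha256:' + 'a' * 64)      # -> ('sha256', 'aaaa...')
# parse_digest('not-a-digest') would raise ValueError
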
@@ -271,7 +276,7 @@ def _write_manifest(namespace, repo_name, manifest):
       leaf_layer = mdata

     except model.InvalidImageException:
-      raise ManifestBlobUnknown(detail={'missing': digest_str})
+      raise BlobUnknown(detail={'digest': digest_str})

   if leaf_layer is None:
     # The manifest doesn't actually reference any layers!

@@ -1,19 +1,48 @@
 # XXX This code is not yet ready to be run in production, and should remain disabled until such
 # XXX time as this notice is removed.

-from flask import jsonify
+from flask import jsonify, request, url_for

+from app import get_app_url
 from endpoints.v2 import v2_bp, require_repo_read
+from endpoints.v2.errors import NameUnknown
 from auth.jwt_auth import process_jwt_auth
 from endpoints.decorators import anon_protect
 from data import model

+def _add_pagination(query, url):
+  limit = request.args.get('n', None)
+  page = request.args.get('page', 1)
+
+  if limit is None:
+    return None, query
+
+  url = get_app_url() + url
+  query = query.paginate(page, limit)
+  link = url + '?n=%s&last=%s; rel="next"' % (limit, page + 1)
+  return link, query
+
+
 @v2_bp.route('/<namespace>/<repo_name>/tags/list', methods=['GET'])
 @process_jwt_auth
 @require_repo_read
 @anon_protect
 def list_all_tags(namespace, repo_name):
-  return jsonify({
+  repository = model.repository.get_repository(namespace, repo_name)
+  if repository is None:
+    raise NameUnknown()
+
+  query = model.tag.list_repository_tags(namespace, repo_name)
+
+  url = url_for('v2.list_all_tags', namespace=namespace, repo_name=repo_name)
+  link, query = _add_pagination(query, url)
+
+  response = jsonify({
     'name': '{0}/{1}'.format(namespace, repo_name),
-    'tags': [tag.name for tag in model.tag.list_repository_tags(namespace, repo_name)],
+    'tags': [tag.name for tag in query],
   })
+
+  if link is not None:
+    response.headers['Link'] = link
+
+  return response

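The pagination added here follows the V2 tag-listing convention: the client passes n to bound the page size and follows the Link header (rel="next") for the next page, which carries n and last query parameters. A hedged client-side sketch of walking the pages is below; the host, repository and token are placeholders, requests is assumed, and the Link parsing is deliberately simplified.

import requests

REGISTRY = 'https://registry.example.com'   # placeholder host
REPO = 'devtable/newrepo'                   # placeholder repository
HEADERS = {'Authorization': 'Bearer <jwt token>'}

def list_all_tag_names(page_size=1):
  """ Collects tag names a page at a time by following the Link header. """
  tags = []
  url = '%s/v2/%s/tags/list?n=%s' % (REGISTRY, REPO, page_size)
  while url:
    resp = requests.get(url, headers=HEADERS)
    resp.raise_for_status()
    tags.extend(resp.json()['tags'])
    # The endpoint only emits a Link header when another page exists.
    link = resp.headers.get('Link')
    url = link.split(';')[0].strip(' <>') if link else None
  return tags
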
@@ -19,6 +19,7 @@ from endpoints.api import api_bp
 from initdb import wipe_database, initialize_database, populate_database
 from endpoints.csrf import generate_csrf_token
 from tempfile import NamedTemporaryFile
+from jsonschema import validate as validate_schema

 import endpoints.decorated
 import json
@@ -295,8 +296,52 @@ class V1RegistryPullMixin(V1RegistryMixin):


 class V2RegistryMixin(BaseRegistryMixin):
+  MANIFEST_SCHEMA = {
+    'type': 'object',
+    'properties': {
+      'name': {
+        'type': 'string',
+      },
+      'tag': {
+        'type': 'string',
+      },
+      'signatures': {
+        'type': 'array',
+        'itemType': {
+          'type': 'object',
+        },
+      },
+      'fsLayers': {
+        'type': 'array',
+        'itemType': {
+          'type': 'object',
+          'properties': {
+            'blobSum': {
+              'type': 'string',
+            },
+          },
+          'required': 'blobSum',
+        },
+      },
+      'history': {
+        'type': 'array',
+        'itemType': {
+          'type': 'object',
+          'properties': {
+            'v1Compatibility': {
+              'type': 'object',
+            },
+          },
+          'required': ['v1Compatibility'],
+        },
+      },
+    },
+    'required': ['name', 'tag', 'fsLayers', 'history', 'signatures'],
+  }
+
   def v2_ping(self):
-    self.conduct('GET', '/v2/', expected_code=200 if self.jwt else 401, auth='jwt')
+    response = self.conduct('GET', '/v2/', expected_code=200 if self.jwt else 401, auth='jwt')
+    self.assertEquals(response.headers['Docker-Distribution-API-Version'], 'registry/2.0')


   def do_auth(self, username, password, namespace, repository, expected_code=200, scopes=[]):
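validate_schema here is the jsonschema.validate alias imported at the top of the test module. As a self-contained illustration of how that call behaves, the example below uses a deliberately pared-down schema rather than the full MANIFEST_SCHEMA above, with a made-up manifest document.

from jsonschema import ValidationError, validate

# A tiny schema for illustration; the tests use MANIFEST_SCHEMA instead.
TINY_SCHEMA = {
  'type': 'object',
  'properties': {
    'name': {'type': 'string'},
    'tag': {'type': 'string'},
    'fsLayers': {'type': 'array'},
  },
  'required': ['name', 'tag', 'fsLayers'],
}

manifest = {'name': 'devtable/newrepo', 'tag': 'latest', 'fsLayers': []}
validate(manifest, TINY_SCHEMA)   # passes silently

try:
  validate({'name': 'devtable/newrepo'}, TINY_SCHEMA)   # missing required keys
except ValidationError as ve:
  print('rejected: %s' % ve.message)
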
@@ -315,9 +360,12 @@ class V2RegistryMixin(BaseRegistryMixin):
     self.assertIsNotNone(response_json.get('token'))
     self.jwt = response_json['token']
+
+    return response


 class V2RegistryPushMixin(V2RegistryMixin):
-  def do_push(self, namespace, repository, username, password, images):
+  def do_push(self, namespace, repository, username, password, images, tag_name=None,
+              cancel=False, invalid=False):
     # Ping!
     self.v2_ping()

@@ -325,7 +373,7 @@ class V2RegistryPushMixin(V2RegistryMixin):
     self.do_auth(username, password, namespace, repository, scopes=['push', 'pull'])

     # Build a fake manifest.
-    tag_name = 'latest'
+    tag_name = tag_name or 'latest'
     builder = SignedManifestBuilder(namespace, repository, tag_name)
     for image_id, contents in images.iteritems():
       if isinstance(contents, dict):
@@ -334,12 +382,16 @@ class V2RegistryPushMixin(V2RegistryMixin):
         full_contents = contents

       checksum = 'sha256:' + hashlib.sha256(full_contents).hexdigest()
+      if invalid:
+        checksum = 'sha256:' + hashlib.sha256('foobarbaz').hexdigest()
+
       builder.add_layer(checksum, json.dumps({'id': image_id, 'data': contents}))

     # Build the manifest.
     manifest = builder.build(_JWK)

     # Push the image's layers.
+    checksums = {}
     for image_id, contents in images.iteritems():
       chunks = None
       if isinstance(contents, dict):
@@ -357,6 +409,7 @@ class V2RegistryPushMixin(V2RegistryMixin):
       response = self.conduct('POST', '/v2/%s/%s/blobs/uploads/' % (namespace, repository),
                               expected_code=202, auth='jwt')

+      upload_uuid = response.headers['Docker-Upload-UUID']
       location = response.headers['Location'][len(self.get_server_url()):]

       # PATCH the image data into the layer.
@@ -377,17 +430,48 @@ class V2RegistryPushMixin(V2RegistryMixin):
       if expected_code != 204:
         return

+      # Retrieve the upload status at each point.
+      status_url = '/v2/%s/%s/blobs/uploads/%s' % (namespace, repository, upload_uuid)
+      response = self.conduct('GET', status_url, expected_code=204, auth='jwt',
+                              headers=dict(host=self.get_server_url()))
+      self.assertEquals(response.headers['Docker-Upload-UUID'], upload_uuid)
+      self.assertEquals(response.headers['Range'], "bytes=0-%s" % end_byte)
+
+      if cancel:
+        self.conduct('DELETE', location, params=dict(digest=checksum), expected_code=204,
+                     auth='jwt')
+
+        # Ensure the upload was canceled.
+        status_url = '/v2/%s/%s/blobs/uploads/%s' % (namespace, repository, upload_uuid)
+        self.conduct('GET', status_url, expected_code=404, auth='jwt',
+                     headers=dict(host=self.get_server_url()))
+        return
+
       # Finish the layer upload with a PUT.
-      self.conduct('PUT', location, params=dict(digest=checksum), expected_code=201, auth='jwt')
+      response = self.conduct('PUT', location, params=dict(digest=checksum), expected_code=201,
+                              auth='jwt')
+
+      self.assertEquals(response.headers['Docker-Content-Digest'], checksum)
+      checksums[image_id] = checksum
+
+      # Ensure the layer exists now.
+      response = self.conduct('HEAD', '/v2/%s/%s/blobs/%s' % (namespace, repository, checksum),
+                              expected_code=200, auth='jwt')
+      self.assertEquals(response.headers['Docker-Content-Digest'], checksum)
+      self.assertEquals(response.headers['Content-Length'], str(len(full_contents)))

     # Write the manifest.
+    put_code = 404 if invalid else 202
     self.conduct('PUT', '/v2/%s/%s/manifests/%s' % (namespace, repository, tag_name),
-                 data=manifest.bytes, expected_code=202,
+                 data=manifest.bytes, expected_code=put_code,
                  headers={'Content-Type': 'application/json'}, auth='jwt')

+    return checksums, manifest.digest
+

 class V2RegistryPullMixin(V2RegistryMixin):
-  def do_pull(self, namespace, repository, username=None, password='password', expected_code=200):
+  def do_pull(self, namespace, repository, username=None, password='password', expected_code=200,
+              manifest_id=None, expected_manifest_code=200):
     # Ping!
     self.v2_ping()

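Taken together, the push path now exercises the full monolithic upload flow from the V2 spec as the tests drive it: POST to /v2/<name>/blobs/uploads/ opens an upload (202 with Location and Docker-Upload-UUID), PATCH sends the bytes, GET on the upload URL reports status (204 with Range), and the upload is either committed with PUT ?digest=<digest> (201, echoing Docker-Content-Digest) or cancelled with DELETE (204), after which the status URL returns 404. A hedged standalone sketch of that flow against a registry follows; every value is a placeholder and requests is assumed.

import hashlib
import requests

REGISTRY = 'https://registry.example.com'   # placeholder
REPO = 'devtable/newrepo'                   # placeholder
HEADERS = {'Authorization': 'Bearer <jwt token>'}

payload = b'onlyimagehere'
digest = 'sha256:' + hashlib.sha256(payload).hexdigest()

# 1. Open an upload; the registry answers 202 with a Location and an upload UUID.
start = requests.post('%s/v2/%s/blobs/uploads/' % (REGISTRY, REPO), headers=HEADERS)
location = start.headers['Location']        # assumes an absolute Location URL
upload_uuid = start.headers['Docker-Upload-UUID']

# 2. Send the layer bytes.
requests.patch(location, data=payload, headers=HEADERS)

# 3. Check the upload status; a live upload answers 204 with Range and UUID headers.
status = requests.get('%s/v2/%s/blobs/uploads/%s' % (REGISTRY, REPO, upload_uuid),
                      headers=HEADERS)
assert status.status_code == 204

# 4. Commit against the digest (or cancel instead with requests.delete(location, ...)).
done = requests.put(location, params={'digest': digest}, headers=HEADERS)
assert done.headers['Docker-Content-Digest'] == digest
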
@@ -397,11 +481,18 @@ class V2RegistryPullMixin(V2RegistryMixin):
     if expected_code != 200:
       return

-    # Retrieve the manifest for the tag.
-    tag_name = 'latest'
-    response = self.conduct('GET', '/v2/%s/%s/manifests/%s' % (namespace, repository, tag_name),
-                            auth='jwt')
+    # Retrieve the manifest for the tag or digest.
+    manifest_id = manifest_id or 'latest'
+    response = self.conduct('GET', '/v2/%s/%s/manifests/%s' % (namespace, repository, manifest_id),
+                            auth='jwt', expected_code=expected_manifest_code)
+    if expected_manifest_code != 200:
+      return
+
     manifest_data = json.loads(response.text)
+
+    # Ensure the manifest returned by us is valid.
+    validate_schema(manifest_data, V2RegistryMixin.MANIFEST_SCHEMA)
+
     blobs = {}

     for layer in manifest_data['fsLayers']:
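The pull path now accepts either a tag or a content digest as the manifest reference and validates what comes back against MANIFEST_SCHEMA. Both forms hit the same endpoint; a minimal hedged sketch of the client side (placeholders throughout, requests assumed):

import requests

REGISTRY = 'https://registry.example.com'   # placeholder
REPO = 'devtable/newrepo'                   # placeholder
HEADERS = {'Authorization': 'Bearer <jwt token>'}

def fetch_manifest(reference):
  """ reference may be a tag such as 'latest' or a 'sha256:...' digest. """
  resp = requests.get('%s/v2/%s/manifests/%s' % (REGISTRY, REPO, reference),
                      headers=HEADERS)
  resp.raise_for_status()   # an unknown tag or digest surfaces as a 404
  return resp.json()
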
@@ -605,6 +696,47 @@ class V2RegistryTests(V2RegistryPullMixin, V2RegistryPushMixin, RegistryTestsMix
                        RegistryTestCaseMixin, LiveServerTestCase):
   """ Tests for V2 registry. """

+  def test_invalid_push(self):
+    images = {
+      'someid': 'onlyimagehere'
+    }
+
+    self.do_push('devtable', 'newrepo', 'devtable', 'password', images, invalid=True)
+
+  def test_cancel_push(self):
+    images = {
+      'someid': 'onlyimagehere'
+    }
+
+    self.do_push('devtable', 'newrepo', 'devtable', 'password', images, cancel=True)
+
+
+  def test_pull_by_checksum(self):
+    # Add a new repository under the user, so we have a real repository to pull.
+    images = {
+      'someid': 'onlyimagehere'
+    }
+
+    _, digest = self.do_push('devtable', 'newrepo', 'devtable', 'password', images)
+
+    # Attempt to pull by digest.
+    self.do_pull('devtable', 'newrepo', 'devtable', 'password', manifest_id=digest)
+
+
+  def test_pull_invalid_image_tag(self):
+    # Add a new repository under the user, so we have a real repository to pull.
+    images = {
+      'someid': 'onlyimagehere'
+    }
+
+    self.do_push('devtable', 'newrepo', 'devtable', 'password', images)
+    self.clearSession()
+
+    # Attempt to pull the invalid tag.
+    self.do_pull('devtable', 'newrepo', 'devtable', 'password', manifest_id='invalid',
+                 expected_manifest_code=404)
+
+
   def test_partial_upload_below_5mb(self):
     chunksize = 1024 * 1024 * 2
     size = chunksize * 3
@@ -685,6 +817,34 @@ class V2RegistryTests(V2RegistryPullMixin, V2RegistryPushMixin, RegistryTestsMix
     # Attempt to push the chunked upload, which should fail.
     self.do_push('devtable', 'newrepo', 'devtable', 'password', images)

+  def test_multiple_tags(self):
+    # Create the repo.
+    self.do_push('devtable', 'newrepo', 'devtable', 'password', {'someid': 'onlyimagehere'},
+                 tag_name='latest')
+
+    self.do_push('devtable', 'newrepo', 'devtable', 'password', {'anotherid': 'anotherimage'},
+                 tag_name='foobar')
+
+    # Retrieve the tags.
+    response = self.conduct('GET', '/v2/devtable/newrepo/tags/list', auth='jwt', expected_code=200)
+    data = json.loads(response.text)
+    self.assertEquals(data['name'], "devtable/newrepo")
+    self.assertIn('latest', data['tags'])
+    self.assertIn('foobar', data['tags'])
+
+    # Retrieve the tags with pagination.
+    response = self.conduct('GET', '/v2/devtable/newrepo/tags/list', auth='jwt',
+                            params=dict(n=1), expected_code=200)
+
+    data = json.loads(response.text)
+    self.assertEquals(data['name'], "devtable/newrepo")
+    self.assertEquals(len(data['tags']), 1)
+    self.assertIn('latest', data['tags'])
+    self.assertTrue(response.headers['Link'].find('n=1&last=2') > 0)
+
+    # Try to get tags before a repo exists.
+    self.conduct('GET', '/v2/devtable/doesnotexist/tags/list', auth='jwt', expected_code=401)
+

 class V1PushV2PullRegistryTests(V2RegistryPullMixin, V1RegistryPushMixin, RegistryTestsMixin,
                                 RegistryTestCaseMixin, LiveServerTestCase):