initial import for Open Source 🎉
This commit is contained in:
parent
1898c361f3
commit
9c0dd3b722
2048 changed files with 218743 additions and 0 deletions
127
endpoints/v2/test/test_blob.py
Normal file
127
endpoints/v2/test/test_blob.py
Normal file
|
@ -0,0 +1,127 @@
|
|||
import hashlib
|
||||
import pytest
|
||||
|
||||
from mock import patch
|
||||
from flask import url_for
|
||||
from playhouse.test_utils import assert_query_count
|
||||
|
||||
from app import instance_keys, app as realapp
|
||||
from auth.auth_context_type import ValidatedAuthContext
|
||||
from data import model
|
||||
from data.cache import InMemoryDataModelCache
|
||||
from data.database import ImageStorageLocation
|
||||
from endpoints.test.shared import conduct_call
|
||||
from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
|
||||
from test.fixtures import *
|
||||
|
||||
@pytest.mark.parametrize('method, endpoint', [
  ('GET', 'download_blob'),
  ('HEAD', 'check_blob_exists'),
])
def test_blob_caching(method, endpoint, client, app):
  """ Verify that blob GET/HEAD endpoints use the data model cache: after the first
      cached lookup, a repeated request must issue zero database queries.
  """
  # Register a blob under devtable/simple so the endpoint has something to serve.
  blob_digest = 'sha256:' + hashlib.sha256("a").hexdigest()
  storage_location = ImageStorageLocation.get(name='local_us')
  model.blob.store_blob_record_and_temp_link('devtable', 'simple', blob_digest,
                                             storage_location, 1, 10000000)

  call_params = {'repository': 'devtable/simple', 'digest': blob_digest}

  # Build a pull-scoped bearer token for devtable.
  devtable = model.user.get_user('devtable')
  granted_access = [{
    'type': 'repository',
    'name': 'devtable/simple',
    'actions': ['pull'],
  }]
  context, subject = build_context_and_subject(ValidatedAuthContext(user=devtable))
  bearer_token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context,
                                       granted_access, 600, instance_keys)
  auth_headers = {'Authorization': 'Bearer %s' % bearer_token}

  def invoke_endpoint():
    # Single place for the repeated endpoint invocation.
    conduct_call(client, 'v2.' + endpoint, url_for, method, call_params, expected_code=200,
                 headers=auth_headers)

  # Run without caching to make sure the request works. This also preloads some of
  # our global model caches.
  invoke_endpoint()

  with patch('endpoints.v2.blob.model_cache', InMemoryDataModelCache()):
    # First request should make a DB query to retrieve the blob.
    invoke_endpoint()

    # Subsequent requests should use the cached blob.
    with assert_query_count(0):
      invoke_endpoint()
|
||||
|
||||
@pytest.mark.parametrize('mount_digest, source_repo, username, expect_success', [
  # Unknown blob.
  ('sha256:unknown', 'devtable/simple', 'devtable', False),

  # Blob not in repo.
  ('sha256:' + hashlib.sha256("a").hexdigest(), 'devtable/complex', 'devtable', False),

  # Blob in repo.
  ('sha256:' + hashlib.sha256("b").hexdigest(), 'devtable/complex', 'devtable', True),

  # No access to repo.
  ('sha256:' + hashlib.sha256("b").hexdigest(), 'devtable/complex', 'public', False),

  # Public repo.
  ('sha256:' + hashlib.sha256("c").hexdigest(), 'public/publicrepo', 'devtable', True),
])
def test_blob_mounting(mount_digest, source_repo, username, expect_success, client, app):
  """ Exercise cross-repository blob mounting on blob upload.

      A mount succeeds (201 Created) only when the blob exists in the source repo and the
      caller's token grants pull on it; otherwise the registry falls back to starting a
      normal upload (202 Accepted) and the blob must NOT appear in the target repo.
  """
  location = ImageStorageLocation.get(name='local_us')

  # Store and link some blobs.
  digest = 'sha256:' + hashlib.sha256("a").hexdigest()
  model.blob.store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 10000000)

  digest = 'sha256:' + hashlib.sha256("b").hexdigest()
  model.blob.store_blob_record_and_temp_link('devtable', 'complex', digest, location, 1, 10000000)

  digest = 'sha256:' + hashlib.sha256("c").hexdigest()
  model.blob.store_blob_record_and_temp_link('public', 'publicrepo', digest, location, 1, 10000000)

  params = {
    'repository': 'devtable/building',
    'mount': mount_digest,
    'from': source_repo,
  }

  user = model.user.get_user(username)
  access = [{
    'type': 'repository',
    'name': 'devtable/building',
    'actions': ['pull', 'push'],
  }]

  # Grant pull on the source repo only when it lives under the caller's own namespace.
  # (Idiom fix: `startswith` replaces the equivalent but obscure `find(...) == 0`.)
  if source_repo.startswith(username):
    access.append({
      'type': 'repository',
      'name': source_repo,
      'actions': ['pull'],
    })

  context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
  token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
                                instance_keys)

  headers = {
    'Authorization': 'Bearer %s' % token,
  }

  # 201 when the mount succeeds; 202 when a fresh upload is started instead.
  expected_code = 201 if expect_success else 202
  conduct_call(client, 'v2.start_blob_upload', url_for, 'POST', params, expected_code=expected_code,
               headers=headers)

  if expect_success:
    # Ensure the blob now exists under the repo.
    model.blob.get_repo_blob_by_digest('devtable', 'building', mount_digest)
  else:
    with pytest.raises(model.blob.BlobDoesNotExist):
      model.blob.get_repo_blob_by_digest('devtable', 'building', mount_digest)
|
55
endpoints/v2/test/test_manifest.py
Normal file
55
endpoints/v2/test/test_manifest.py
Normal file
|
@ -0,0 +1,55 @@
|
|||
import hashlib
|
||||
import pytest
|
||||
import time
|
||||
|
||||
from mock import patch
|
||||
|
||||
from flask import url_for
|
||||
from playhouse.test_utils import count_queries
|
||||
|
||||
from app import instance_keys, app as realapp
|
||||
from auth.auth_context_type import ValidatedAuthContext
|
||||
from data import model
|
||||
from endpoints.test.shared import conduct_call
|
||||
from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
|
||||
from test.fixtures import *
|
||||
|
||||
def test_e2e_query_count_manifest_norewrite(client, app):
  """ End-to-end guard on database load: re-PUTting an identical manifest by digest must
      stay within the expected query budget (currently 27 queries).
  """
  tag_manifest = model.tag.load_tag_manifest('devtable', 'simple', 'latest')

  params = {'repository': 'devtable/simple', 'manifest_ref': tag_manifest.digest}

  # Build a pull+push scoped bearer token for devtable.
  devtable = model.user.get_user('devtable')
  access = [{
    'type': 'repository',
    'name': 'devtable/simple',
    'actions': ['pull', 'push'],
  }]
  context, subject = build_context_and_subject(ValidatedAuthContext(user=devtable))
  token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
                                instance_keys)
  headers = {'Authorization': 'Bearer %s' % token}

  def push_manifest():
    # Re-PUT the exact same manifest payload by digest.
    conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params, expected_code=202,
                 headers=headers, raw_body=tag_manifest.json_data)

  # Conduct a call to prime the instance key and other caches.
  push_manifest()

  # Necessary in order to have the tag updates not occur in the same second, which is the
  # granularity supported currently.
  frozen_now = time.time()
  with patch('time.time', lambda: frozen_now + 10):
    with count_queries() as counter:
      push_manifest()

  assert counter.count <= 27
|
138
endpoints/v2/test/test_manifest_cornercases.py
Normal file
138
endpoints/v2/test/test_manifest_cornercases.py
Normal file
|
@ -0,0 +1,138 @@
|
|||
import hashlib
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
from app import storage, docker_v2_signing_key
|
||||
from data import model, database
|
||||
from data.registry_model import registry_model
|
||||
from endpoints.v2.manifest import _write_manifest
|
||||
from image.docker.schema1 import DockerSchema1ManifestBuilder
|
||||
|
||||
from test.fixtures import *
|
||||
|
||||
|
||||
# Fixture identifiers shared by the manifest corner-case tests below.
ADMIN_ACCESS_USER = 'devtable'  # Namespace/user with admin rights in the test fixtures.
REPO = 'simple'  # Repository exercised by the GC corner-case test.
FIRST_TAG = 'first'
SECOND_TAG = 'second'
THIRD_TAG = 'third'
|
||||
|
||||
|
||||
@contextmanager
def set_tag_expiration_policy(namespace, expiration_s=0):
  """ Set the tag expiration policy (seconds) for the given namespace around the block.

      NOTE(review): the previous policy is NOT restored on exit — the change outlives
      the `with` block. Confirm callers do not depend on restoration.
  """
  owner = model.user.get_user(namespace)
  model.user.change_user_tag_expiration(owner, expiration_s)
  yield
|
||||
|
||||
|
||||
def _perform_cleanup():
  """ Drop all hidden (temporary) tags, then garbage collect the test repository so any
      unreferenced blobs are actually removed.
  """
  hidden_tags = database.RepositoryTag.hidden == True
  database.RepositoryTag.delete().where(hidden_tags).execute()
  test_repo = model.repository.get_repository(ADMIN_ACCESS_USER, REPO)
  model.gc.garbage_collect_repo(test_repo)
|
||||
|
||||
|
||||
def test_missing_link(initialized_db):
  """ Tests for a corner case that could result in missing a link to a blob referenced by a
      manifest. The test exercises the case as follows:

      1) Push a manifest of a single layer with a Docker ID `FIRST_ID`, pointing
          to blob `FIRST_BLOB`. The database should contain the tag referencing the layer, with
          no changed ID and the blob not being GCed.

      2) Push a manifest of two layers:

          Layer 1: `FIRST_ID` with blob `SECOND_BLOB`: Will result in a new synthesized ID
          Layer 2: `SECOND_ID` with blob `THIRD_BLOB`: Will result in `SECOND_ID` pointing to the
                    `THIRD_BLOB`, with a parent pointing to the new synthesized ID's layer.

      3) Push a manifest of two layers:

          Layer 1: `THIRD_ID` with blob `FOURTH_BLOB`: Will result in a new `THIRD_ID` layer
          Layer 2: `FIRST_ID` with blob `THIRD_BLOB`: Since `FIRST_ID` already points to `SECOND_BLOB`,
                    this will synthesize a new ID. With the current bug, the synthesized ID will match
                    that of `SECOND_ID`, leaving `THIRD_ID` unlinked and therefore, after a GC, missing
                    `FOURTH_BLOB`.
  """
  # Zero-second expiration makes temp tags eligible for GC immediately, so each
  # _perform_cleanup() below fully collects unreferenced data.
  with set_tag_expiration_policy('devtable', 0):
    location_name = storage.preferred_locations[0]
    location = database.ImageStorageLocation.get(name=location_name)

    # Create first blob.
    first_blob_sha = 'sha256:' + hashlib.sha256("FIRST").hexdigest()
    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, first_blob_sha, location, 0, 0, 0)

    # Push the first manifest.
    first_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, FIRST_TAG)
                      .add_layer(first_blob_sha, '{"id": "first"}')
                      .build(docker_v2_signing_key))

    _write_manifest(ADMIN_ACCESS_USER, REPO, FIRST_TAG, first_manifest)

    # Delete all temp tags and perform GC.
    _perform_cleanup()

    # Ensure that the first blob still exists, along with the first tag.
    assert model.blob.get_repo_blob_by_digest(ADMIN_ACCESS_USER, REPO, first_blob_sha) is not None

    repository_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, REPO)
    found_tag = registry_model.get_repo_tag(repository_ref, FIRST_TAG, include_legacy_image=True)
    assert found_tag is not None
    assert found_tag.legacy_image.docker_image_id == 'first'

    # Create the second and third blobs.
    second_blob_sha = 'sha256:' + hashlib.sha256("SECOND").hexdigest()
    third_blob_sha = 'sha256:' + hashlib.sha256("THIRD").hexdigest()

    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, second_blob_sha, location, 0, 0, 0)
    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, third_blob_sha, location, 0, 0, 0)

    # Push the second manifest. Note that layers are added leaf-first: the "second"
    # layer references `third_blob_sha`, while its parent "first" now references
    # `second_blob_sha` (a different blob than in the first manifest).
    second_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, SECOND_TAG)
                       .add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
                       .add_layer(second_blob_sha, '{"id": "first"}')
                       .build(docker_v2_signing_key))

    _write_manifest(ADMIN_ACCESS_USER, REPO, SECOND_TAG, second_manifest)

    # Delete all temp tags and perform GC.
    _perform_cleanup()

    # Ensure that the first and second blobs still exists, along with the second tag.
    assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
    assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
    assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None

    found_tag = registry_model.get_repo_tag(repository_ref, FIRST_TAG, include_legacy_image=True)
    assert found_tag is not None
    assert found_tag.legacy_image.docker_image_id == 'first'

    # Ensure the IDs have changed.
    found_tag = registry_model.get_repo_tag(repository_ref, SECOND_TAG, include_legacy_image=True)
    assert found_tag is not None
    assert found_tag.legacy_image.docker_image_id != 'second'

    # Create the fourth blob.
    fourth_blob_sha = 'sha256:' + hashlib.sha256("FOURTH").hexdigest()
    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, fourth_blob_sha, location, 0, 0, 0)

    # Push the third manifest.
    third_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, THIRD_TAG)
                      .add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
                      .add_layer(fourth_blob_sha, '{"id": "first"}')  # Note the change in BLOB from the second manifest.
                      .build(docker_v2_signing_key))

    _write_manifest(ADMIN_ACCESS_USER, REPO, THIRD_TAG, third_manifest)

    # Delete all temp tags and perform GC.
    _perform_cleanup()

    # Ensure all blobs are present.
    assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
    assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
    assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None
    assert registry_model.get_repo_blob_by_digest(repository_ref, fourth_blob_sha) is not None

    # Ensure new synthesized IDs were created.
    second_tag = registry_model.get_repo_tag(repository_ref, SECOND_TAG, include_legacy_image=True)
    third_tag = registry_model.get_repo_tag(repository_ref, THIRD_TAG, include_legacy_image=True)
    assert second_tag.legacy_image.docker_image_id != third_tag.legacy_image.docker_image_id
|
68
endpoints/v2/test/test_v2_tuf.py
Normal file
68
endpoints/v2/test/test_v2_tuf.py
Normal file
|
@ -0,0 +1,68 @@
|
|||
import pytest
|
||||
import flask
|
||||
|
||||
from flask_principal import Identity, Principal
|
||||
from mock import Mock
|
||||
|
||||
from auth import permissions
|
||||
from endpoints.v2.v2auth import _get_tuf_root
|
||||
from test import testconfig
|
||||
from util.security.registry_jwt import QUAY_TUF_ROOT, SIGNER_TUF_ROOT, DISABLED_TUF_ROOT
|
||||
|
||||
|
||||
|
||||
def admin_identity(namespace, reponame):
  """ Build an Identity carrying admin needs for the given repo and its organization. """
  ident = Identity('admin')
  for need in (permissions._RepositoryNeed(namespace, reponame, 'admin'),
               permissions._OrganizationRepoNeed(namespace, 'admin')):
    ident.provides.add(need)
  return ident
|
||||
|
||||
|
||||
def write_identity(namespace, reponame):
  """ Build an Identity carrying write needs for the given repo and its organization. """
  ident = Identity('writer')
  for need in (permissions._RepositoryNeed(namespace, reponame, 'write'),
               permissions._OrganizationRepoNeed(namespace, 'write')):
    ident.provides.add(need)
  return ident
|
||||
|
||||
|
||||
def read_identity(namespace, reponame):
  """ Build an Identity carrying read needs for the given repo and its organization. """
  ident = Identity('reader')
  for need in (permissions._RepositoryNeed(namespace, reponame, 'read'),
               permissions._OrganizationRepoNeed(namespace, 'read')):
    ident.provides.add(need)
  return ident
|
||||
|
||||
|
||||
def app_with_principal():
  """ Create a Flask app configured from the test config and wired with flask-principal.

      Returns a (app, principal) pair.
  """
  test_app = flask.Flask(__name__)
  test_app.config.from_object(testconfig.TestConfig())
  return test_app, Principal(test_app)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('identity,expected', [
  (Identity('anon'), QUAY_TUF_ROOT),
  (read_identity("namespace", "repo"), QUAY_TUF_ROOT),
  (read_identity("different", "repo"), QUAY_TUF_ROOT),
  (admin_identity("different", "repo"), QUAY_TUF_ROOT),
  (write_identity("different", "repo"), QUAY_TUF_ROOT),
  (admin_identity("namespace", "repo"), SIGNER_TUF_ROOT),
  (write_identity("namespace", "repo"), SIGNER_TUF_ROOT),
])
def test_get_tuf_root(identity, expected):
  """ Only identities with write/admin on the exact repo get the signer TUF root;
      all others fall back to the Quay root.
  """
  flask_app, principal = app_with_principal()
  with flask_app.test_request_context('/'):
    principal.set_identity(identity)
    tuf_root = _get_tuf_root(Mock(), "namespace", "repo")
    assert tuf_root == expected, "should be %s, but was %s" % (expected, tuf_root)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('trust_enabled,tuf_root', [
  (True, QUAY_TUF_ROOT),
  (False, DISABLED_TUF_ROOT),
])
def test_trust_disabled(trust_enabled, tuf_root):
  """ A repository with trust disabled yields the disabled TUF root, regardless of access. """
  flask_app, principal = app_with_principal()
  with flask_app.test_request_context('/'):
    principal.set_identity(read_identity("namespace", "repo"))
    actual_root = _get_tuf_root(Mock(trust_enabled=trust_enabled), "namespace", "repo")
    assert actual_root == tuf_root, "should be %s, but was %s" % (tuf_root, actual_root)
|
150
endpoints/v2/test/test_v2auth.py
Normal file
150
endpoints/v2/test/test_v2auth.py
Normal file
|
@ -0,0 +1,150 @@
|
|||
import base64
|
||||
|
||||
from flask import url_for
|
||||
|
||||
from app import instance_keys, app as original_app
|
||||
from data.model.user import regenerate_robot_token, get_robot_and_metadata, get_user
|
||||
from endpoints.test.shared import conduct_call
|
||||
from util.security.registry_jwt import decode_bearer_token, CLAIM_TUF_ROOTS
|
||||
|
||||
from test.fixtures import *
|
||||
|
||||
|
||||
def get_robot_password(username):
  """ Resolve the token for a robot account named 'parent+shortname'. """
  owner_name, robot_shortname = username.split('+', 1)
  owner = get_user(owner_name)
  _, token, _ = get_robot_and_metadata(robot_shortname, owner)
  return token
|
||||
|
||||
|
||||
@pytest.mark.parametrize('scope, username, password, expected_code, expected_scopes', [
  # Invalid repository.
  ('repository:devtable/simple/foo/bar/baz:pull', 'devtable', 'password', 400, []),

  # Invalid scopes.
  ('some_invalid_scope', 'devtable', 'password', 400, []),

  # Invalid credentials.
  ('repository:devtable/simple:pull', 'devtable', 'invalid', 401, []),

  # Valid credentials.
  ('repository:devtable/simple:pull', 'devtable', 'password', 200,
   ['devtable/simple:pull']),

  ('repository:devtable/simple:push', 'devtable', 'password', 200,
   ['devtable/simple:push']),

  ('repository:devtable/simple:pull,push', 'devtable', 'password', 200,
   ['devtable/simple:push,pull']),

  ('repository:devtable/simple:pull,push,*', 'devtable', 'password', 200,
   ['devtable/simple:push,pull,*']),

  ('repository:buynlarge/orgrepo:pull,push,*', 'devtable', 'password', 200,
   ['buynlarge/orgrepo:push,pull,*']),

  ('', 'devtable', 'password', 200, []),

  # No credentials, non-public repo.
  ('repository:devtable/simple:pull', None, None, 200, ['devtable/simple:']),

  # No credentials, public repo.
  ('repository:public/publicrepo:pull', None, None, 200, ['public/publicrepo:pull']),

  # Reader only.
  ('repository:buynlarge/orgrepo:pull,push,*', 'reader', 'password', 200,
   ['buynlarge/orgrepo:pull']),

  # Unknown repository.
  ('repository:devtable/unknownrepo:pull,push', 'devtable', 'password', 200,
   ['devtable/unknownrepo:push,pull']),

  # Unknown repository in another namespace.
  ('repository:somenamespace/unknownrepo:pull,push', 'devtable', 'password', 200,
   ['somenamespace/unknownrepo:']),

  # Disabled namespace.
  (['repository:devtable/simple:pull,push', 'repository:disabled/complex:pull'],
   'devtable', 'password', 405,
   []),

  # Multiple scopes.
  (['repository:devtable/simple:pull,push', 'repository:devtable/complex:pull'],
   'devtable', 'password', 200,
   ['devtable/simple:push,pull', 'devtable/complex:pull']),

  # Multiple scopes with restricted behavior.
  (['repository:devtable/simple:pull,push', 'repository:public/publicrepo:pull,push'],
   'devtable', 'password', 200,
   ['devtable/simple:push,pull', 'public/publicrepo:pull']),

  (['repository:devtable/simple:pull,push,*', 'repository:public/publicrepo:pull,push,*'],
   'devtable', 'password', 200,
   ['devtable/simple:push,pull,*', 'public/publicrepo:pull']),

  # Read Only State
  ('repository:devtable/readonly:pull,push,*', 'devtable', 'password', 200,
   ['devtable/readonly:pull']),

  # Mirror State as a typical User
  ('repository:devtable/mirrored:pull,push,*', 'devtable', 'password', 200,
   ['devtable/mirrored:pull']),

  # Mirror State as the robot User should have write access
  ('repository:devtable/mirrored:pull,push,*', 'devtable+dtrobot', get_robot_password, 200,
   ['devtable/mirrored:push,pull']),

  # Organization repository, org admin
  ('repository:buynlarge/orgrepo:pull,push,*', 'devtable', 'password', 200,
   ['buynlarge/orgrepo:push,pull,*']),

  # Organization repository, org creator
  ('repository:buynlarge/orgrepo:pull,push,*', 'creator', 'password', 200,
   ['buynlarge/orgrepo:']),

  # Organization repository, org reader
  ('repository:buynlarge/orgrepo:pull,push,*', 'reader', 'password', 200,
   ['buynlarge/orgrepo:pull']),

  # Organization repository, freshuser
  ('repository:buynlarge/orgrepo:pull,push,*', 'freshuser', 'password', 200,
   ['buynlarge/orgrepo:']),
])
def test_generate_registry_jwt(scope, username, password, expected_code, expected_scopes,
                               app, client):
  """ Exercise the v2 token endpoint across scope/credential combinations, checking the
      HTTP status and the granted access encoded in the returned JWT.
  """
  params = {
    'service': original_app.config['SERVER_HOSTNAME'],
    'scope': scope,
  }

  # Robot-account cases pass a callable that resolves the password lazily.
  if callable(password):
    password = password(username)

  headers = {}
  if username and password:
    headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (username, password)))

  resp = conduct_call(client, 'v2.generate_registry_jwt', url_for, 'GET', params, {}, expected_code,
                      headers=headers)
  if expected_code != 200:
    return

  token = resp.json['token']
  decoded = decode_bearer_token(token, instance_keys, original_app.config)
  assert decoded['iss'] == 'quay'
  assert decoded['aud'] == original_app.config['SERVER_HOSTNAME']

  # BUG FIX: the original `assert decoded['sub'] == username if username else '(anonymous)'`
  # parsed as `(decoded['sub'] == username) if username else '(anonymous)'`, so for
  # anonymous cases the assert checked the truthiness of the literal '(anonymous)' and
  # always passed. Parenthesize so the expected subject is chosen before comparison.
  assert decoded['sub'] == (username if username else '(anonymous)')

  # Rebuild the expected `access` claim from the compact 'name:actions' scope strings.
  # (Loop variable renamed so it no longer shadows the `scope` parameter.)
  expected_access = []
  for granted_scope in expected_scopes:
    name, actions_str = granted_scope.split(':')
    actions = actions_str.split(',') if actions_str else []

    expected_access.append({
      'type': 'repository',
      'name': name,
      'actions': actions,
    })

  assert decoded['access'] == expected_access
  assert len(decoded['context'][CLAIM_TUF_ROOTS]) == len(expected_scopes)
|
Reference in a new issue