Merge pull request #3118 from quay/joseph.schorr/QUAY-977/catalog-efficiency

Catalog efficiency improvements
Commit 371f6f8946
Joseph Schorr, 2018-06-20 16:40:37 -04:00, committed via GitHub
18 changed files with 345 additions and 76 deletions

.gitignore (vendored, 2 additions)
View file

@ -26,3 +26,5 @@ build/
.vscode
*.iml
.DS_Store
.pytest_cache/v/cache/lastfailed
.pytest_cache/v/cache/nodeids

View file

@ -1,8 +1,7 @@
import logging
from cachetools import lru_cache
from abc import ABCMeta, abstractmethod
from cachetools import lru_cache
from six import add_metaclass
from app import app
@ -90,6 +89,17 @@ class AuthContext(object):
"""
pass
@property
@abstractmethod
def unique_key(self):
""" Returns a key that is unique to this auth context type and its data. For example, an
instance of the auth context type for the user might be a string of the form
`user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents
for anything besides uniqueness. This is typically used by callers when they'd like to
check cache but not hit the database to get a fully validated auth context.
"""
pass
class ValidatedAuthContext(AuthContext):
""" ValidatedAuthContext represents the loaded, authenticated and validated auth information
@ -213,6 +223,11 @@ class ValidatedAuthContext(AuthContext):
if self.identity:
identity_changed.send(app, identity=self.identity)
@property
def unique_key(self):
signed_dict = self.to_signed_dict()
return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)'))
def to_signed_dict(self):
""" Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
form of signed serialization.
@ -274,6 +289,15 @@ class SignedAuthContext(AuthContext):
self.signed_data = signed_data
self.v1_dict_format = v1_dict_format
@property
def unique_key(self):
if self.v1_dict_format:
# Since V1 data format is verbose, just use the validated version to get the key.
return self._get_validated().unique_key
signed_dict = self.signed_data
return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)'))
@classmethod
def build_from_signed_dict(cls, dict_data, v1_dict_format=False):
if not v1_dict_format:
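The new `unique_key` property gives callers an identifier for an auth context that is unique and cheap to compute, without requiring a database round trip to fully validate the context. A minimal sketch of the intended usage pattern, assuming a hypothetical per-request memoization helper (the helper name and module-level dictionary below are illustrative, not part of this change):

# Hypothetical sketch: memoize an expensive per-context computation using the
# opaque unique_key, so repeated calls for the same caller skip the database.
_per_context_results = {}

def memoize_for_context(auth_context, compute):
    key = auth_context.unique_key if auth_context else '(anon)'
    if key not in _per_context_results:
        _per_context_results[key] = compute()
    return _per_context_results[key]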

View file

@ -26,6 +26,7 @@ def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, ini
assert not validated.is_anonymous
assert validated.entity_kind == kind
assert validated.unique_key
signed = SignedAuthContext.build_from_signed_dict(validated.to_signed_dict(),
v1_dict_format=v1_dict_format)
@ -36,6 +37,7 @@ def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, ini
assert signed.description == validated.description
assert signed.credential_username == validated.credential_username
assert signed.analytics_id_and_public_metadata() == validated.analytics_id_and_public_metadata()
assert signed.unique_key == validated.unique_key
assert signed.is_anonymous == validated.is_anonymous
assert signed.authed_user == validated.authed_user

View file

@ -4,5 +4,13 @@ class CacheKey(namedtuple('CacheKey', ['key', 'expiration'])):
""" Defines a key into the data model cache. """
pass
def for_repository_blob(namespace_name, repo_name, digest):
""" Returns a cache key for a blob in a repository. """
return CacheKey('repository_blob__%s_%s_%s' % (namespace_name, repo_name, digest), '60s')
def for_catalog_page(auth_context_key, start_id, limit):
""" Returns a cache key for a single page of a catalog lookup for an authed context. """
params = (auth_context_key or '(anon)', start_id or 0, limit or 0)
return CacheKey('catalog_page__%s_%s_%s' % params, '60s')
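A `CacheKey` produced here is consumed through the data model cache's `retrieve(key, loader)` call, as the catalog endpoint later in this commit does with `model_cache.retrieve(catalog_cache_key, _load_catalog)`. A minimal sketch of that pattern, assuming a `model_cache` object with that interface (the wrapper function below is illustrative):

from data.cache import cache_key

def cached_catalog_page(model_cache, auth_context, start_id, limit, load_page):
    # Anonymous callers share the '(anon)' keyspace; authenticated callers are
    # isolated by the opaque unique_key of their auth context.
    context_key = auth_context.unique_key if auth_context else None
    key = cache_key.for_catalog_page(context_key, start_id, limit)

    # retrieve() returns the cached value when present and unexpired (60s for
    # catalog pages); otherwise it invokes the loader and stores its result.
    return model_cache.retrieve(key, load_page)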

View file

@ -69,9 +69,9 @@ def _lookup_team_roles():
return {role.name:role for role in TeamRole.select()}
def filter_to_repos_for_user(query, username=None, namespace=None, repo_kind='image',
def filter_to_repos_for_user(query, user_id=None, namespace=None, repo_kind='image',
include_public=True, start_id=None):
if not include_public and not username:
if not include_public and not user_id:
return Repository.select().where(Repository.id == '-1')
# Filter on the type of repository.
@ -85,32 +85,28 @@ def filter_to_repos_for_user(query, username=None, namespace=None, repo_kind='im
if start_id is not None:
query = query.where(Repository.id >= start_id)
# Add a namespace filter if necessary.
if namespace:
query = query.where(Namespace.username == namespace)
# Build a set of queries that, when unioned together, return the full set of visible repositories
# for the filters specified.
queries = []
where_clause = (True)
if namespace:
where_clause = (Namespace.username == namespace)
if include_public:
queries.append(query
.clone()
.where(Repository.visibility == get_public_repo_visibility(), where_clause))
.where(Repository.visibility == get_public_repo_visibility()))
if username:
UserThroughTeam = User.alias()
Org = User.alias()
if user_id is not None:
AdminTeam = Team.alias()
AdminTeamMember = TeamMember.alias()
AdminUser = User.alias()
# Add repositories in which the user has permission.
queries.append(query
.clone()
.switch(RepositoryPermission)
.join(User)
.where(User.username == username, where_clause))
.where(RepositoryPermission.user == user_id))
# Add repositories in which the user is a member of a team that has permission.
queries.append(query
@ -118,20 +114,16 @@ def filter_to_repos_for_user(query, username=None, namespace=None, repo_kind='im
.switch(RepositoryPermission)
.join(Team)
.join(TeamMember)
.join(UserThroughTeam, on=(UserThroughTeam.id == TeamMember.user))
.where(UserThroughTeam.username == username, where_clause))
.where(TeamMember.user == user_id))
# Add repositories under namespaces in which the user is the org admin.
queries.append(query
.clone()
.switch(Repository)
.join(Org, on=(Repository.namespace_user == Org.id))
.join(AdminTeam, on=(Org.id == AdminTeam.organization))
.where(AdminTeam.role == _lookup_team_role('admin'))
.switch(AdminTeam)
.join(AdminTeam, on=(Repository.namespace_user == AdminTeam.organization))
.join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
.join(AdminUser, on=(AdminTeamMember.user == AdminUser.id))
.where(AdminUser.username == username, where_clause))
.where(AdminTeam.role == _lookup_team_role('admin'))
.where(AdminTeamMember.user == user_id))
return reduce(lambda l, r: l | r, queries)
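The rewrite keeps the same union-of-branches shape (public repositories, direct permissions, team permissions, and org-admin namespaces, OR'd together via peewee's `|` operator) but compares each branch against the integer `user_id` instead of joining through `User` on the username. A toy peewee example, with stand-in models rather than the real ones from `data.database`, showing why the id comparison drops the `User` join from each branch:

from peewee import SqliteDatabase, Model, CharField, ForeignKeyField

db = SqliteDatabase(':memory:')

class BaseModel(Model):
    class Meta:
        database = db

class User(BaseModel):
    username = CharField(unique=True)

class Repository(BaseModel):
    name = CharField()

class RepositoryPermission(BaseModel):
    user = ForeignKeyField(User, null=True)
    repository = ForeignKeyField(Repository)

# Before: resolve the username inside the query, which costs a join against
# the User table in every unioned branch.
def repos_by_username(username):
    return (Repository
            .select()
            .join(RepositoryPermission)
            .join(User)
            .where(User.username == username))

# After: resolve the username to an id once, up front, then compare the
# foreign-key column directly; peewee compares the stored integer column, so
# the User table is not joined at all.
def repos_by_user_id(user_id):
    return (Repository
            .select()
            .join(RepositoryPermission)
            .where(RepositoryPermission.user == user_id))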

View file

@ -12,12 +12,18 @@ from data.model import (DataModelException, db_transaction, _basequery, storage,
InvalidImageException)
from data.database import (Image, Repository, ImageStoragePlacement, Namespace, ImageStorage,
ImageStorageLocation, RepositoryPermission, DerivedStorageForImage,
ImageStorageTransformation)
ImageStorageTransformation, User)
from util.canonicaljson import canonicalize
logger = logging.getLogger(__name__)
def _namespace_id_for_username(username):
try:
return User.get(username=username).id
except User.DoesNotExist:
return None
def get_image_with_storage(docker_image_id, storage_uuid):
""" Returns the image with the given docker image ID and storage uuid or None if none.
@ -273,7 +279,8 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
.where(ImageStorage.uploading == False,
Image.docker_image_id == docker_image_id))
existing_image_query = _basequery.filter_to_repos_for_user(existing_image_query, username)
existing_image_query = _basequery.filter_to_repos_for_user(existing_image_query,
_namespace_id_for_username(username))
# If there is an existing image, we try to translate its ancestry and copy its storage.
new_image = None

View file

@ -403,11 +403,17 @@ def get_visible_repositories(username, namespace=None, kind_filter='image', incl
Namespace.username, Repository.visibility, Repository.kind)
.switch(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id)))
user_id = None
if username:
# Note: We only need the permissions table if we will filter based on a user's permissions.
query = query.switch(Repository).distinct().join(RepositoryPermission, JOIN_LEFT_OUTER)
found_namespace = _get_namespace_user(username)
if not found_namespace:
return Repository.select(Repository.id.alias('rid')).where(Repository.id == -1)
query = _basequery.filter_to_repos_for_user(query, username, namespace, kind_filter,
user_id = found_namespace.id
query = _basequery.filter_to_repos_for_user(query, user_id, namespace, kind_filter,
include_public, start_id=start_id)
if limit is not None:
@ -434,6 +440,13 @@ def get_app_search(lookup, search_fields=None, username=None, limit=50):
offset=0, limit=limit)
def _get_namespace_user(username):
try:
return User.get(username=username)
except User.DoesNotExist:
return None
def get_filtered_matching_repositories(lookup_value, filter_username=None, repo_kind='image',
offset=0, limit=25, search_fields=None):
""" Returns an iterator of all repositories matching the given lookup value, with optional
@ -451,8 +464,12 @@ def get_filtered_matching_repositories(lookup_value, filter_username=None, repo_
# Add a filter to the iterator, if necessary.
if filter_username is not None:
iterator = _filter_repositories_visible_to_username(unfiltered_query, filter_username, limit,
repo_kind)
filter_user = _get_namespace_user(filter_username)
if filter_user is None:
return []
iterator = _filter_repositories_visible_to_user(unfiltered_query, filter_user.id, limit,
repo_kind)
if offset > 0:
take(offset, iterator)
@ -462,7 +479,7 @@ def get_filtered_matching_repositories(lookup_value, filter_username=None, repo_
return list(unfiltered_query.offset(offset).limit(limit))
def _filter_repositories_visible_to_username(unfiltered_query, filter_username, limit, repo_kind):
def _filter_repositories_visible_to_user(unfiltered_query, filter_user_id, limit, repo_kind):
encountered = set()
chunk_count = limit * 2
unfiltered_page = 0
@ -484,11 +501,13 @@ def _filter_repositories_visible_to_username(unfiltered_query, filter_username,
encountered.update(new_unfiltered_ids)
# Filter the repositories found to only those visible to the current user.
query = (Repository.select(Repository, Namespace).distinct()
query = (Repository
.select(Repository, Namespace)
.distinct()
.join(Namespace, on=(Namespace.id == Repository.namespace_user)).switch(Repository)
.join(RepositoryPermission).where(Repository.id << list(new_unfiltered_ids)))
filtered = _basequery.filter_to_repos_for_user(query, filter_username, repo_kind=repo_kind)
filtered = _basequery.filter_to_repos_for_user(query, filter_user_id, repo_kind=repo_kind)
# Sort the filtered repositories by their initial order.
all_filtered_repos = list(filtered)
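`_filter_repositories_visible_to_user` walks the unfiltered search results in chunks of `limit * 2`, runs one visibility-filter query per chunk, and stops once the caller's limit is met. A compressed sketch of that strategy, with illustrative helper names (the real code operates on peewee queries rather than plain id lists):

def filter_in_chunks(candidate_ids, visible_subset, limit, chunk_size=None):
    # visible_subset(ids) is assumed to return, in a single query, the subset
    # of the given ids that the user is allowed to see.
    chunk_size = chunk_size or limit * 2
    yielded = 0
    for start in range(0, len(candidate_ids), chunk_size):
        chunk = candidate_ids[start:start + chunk_size]
        for repo_id in visible_subset(chunk):
            yield repo_id
            yielded += 1
            if yielded >= limit:
                return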

View file

@ -0,0 +1,104 @@
import pytest
from peewee import JOIN_LEFT_OUTER
from playhouse.test_utils import assert_query_count
from data.database import Repository, RepositoryPermission, TeamMember, Namespace
from data.model._basequery import filter_to_repos_for_user
from data.model.organization import get_admin_users
from data.model.user import get_namespace_user
from util.names import parse_robot_username
from test.fixtures import *
def _is_team_member(team, user):
return user.id in [member.user_id for member in
TeamMember.select().where(TeamMember.team == team)]
def _get_visible_repositories_for_user(user, repo_kind='image', include_public=False,
namespace=None):
""" Returns all repositories directly visible to the given user, by either repo permission,
or the user being the admin of a namespace.
"""
for repo in Repository.select():
if repo_kind is not None and repo.kind.name != repo_kind:
continue
if namespace is not None and repo.namespace_user.username != namespace:
continue
if include_public and repo.visibility.name == 'public':
yield repo
continue
# Direct repo permission.
try:
RepositoryPermission.get(repository=repo, user=user)
yield repo
continue
except RepositoryPermission.DoesNotExist:
pass
# Team permission.
found_in_team = False
for perm in RepositoryPermission.select().where(RepositoryPermission.repository == repo):
if perm.team and _is_team_member(perm.team, user):
found_in_team = True
break
if found_in_team:
yield repo
continue
# Org namespace admin permission.
if user in get_admin_users(repo.namespace_user):
yield repo
continue
@pytest.mark.parametrize('username', [
'devtable',
'devtable+dtrobot',
'public',
'reader',
])
@pytest.mark.parametrize('include_public', [
True,
False
])
@pytest.mark.parametrize('filter_to_namespace', [
True,
False
])
@pytest.mark.parametrize('repo_kind', [
None,
'image',
'application',
])
def test_filter_repositories(username, include_public, filter_to_namespace, repo_kind,
initialized_db):
namespace = username if filter_to_namespace else None
if '+' in username and filter_to_namespace:
namespace, _ = parse_robot_username(username)
user = get_namespace_user(username)
query = (Repository
.select()
.distinct()
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(Repository)
.join(RepositoryPermission, JOIN_LEFT_OUTER))
with assert_query_count(1):
found = list(filter_to_repos_for_user(query, user.id,
namespace=namespace,
include_public=include_public,
repo_kind=repo_kind))
expected = list(_get_visible_repositories_for_user(user,
repo_kind=repo_kind,
namespace=namespace,
include_public=include_public))
assert len(found) == len(expected)
assert {r.id for r in found} == {r.id for r in expected}

View file

@ -668,6 +668,9 @@ def invalidate_all_sessions(user):
user.save()
def get_matching_user_namespaces(namespace_prefix, username, limit=10):
namespace_user = get_namespace_user(username)
namespace_user_id = namespace_user.id if namespace_user is not None else None
namespace_search = prefix_search(Namespace.username, namespace_prefix)
base_query = (Namespace
.select()
@ -676,7 +679,7 @@ def get_matching_user_namespaces(namespace_prefix, username, limit=10):
.join(RepositoryPermission, JOIN_LEFT_OUTER)
.where(namespace_search))
return _basequery.filter_to_repos_for_user(base_query, username).limit(limit)
return _basequery.filter_to_repos_for_user(base_query, namespace_user_id).limit(limit)
def get_matching_users(username_prefix, robot_namespace=None, organization=None, limit=20,
exact_matches_only=False):

View file

@ -2,7 +2,6 @@ import pytest
from playhouse.test_utils import assert_query_count
from data.model import _basequery
from endpoints.api.search import ConductRepositorySearch, ConductSearch
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
@ -17,7 +16,7 @@ from test.fixtures import *
def test_repository_search(query, client):
with client_with_identity('devtable', client) as cl:
params = {'query': query}
with assert_query_count(6):
with assert_query_count(7):
result = conduct_api_call(cl, ConductRepositorySearch, 'GET', params, None, 200).json
assert result['start_index'] == 0
assert result['page'] == 1
@ -32,6 +31,6 @@ def test_repository_search(query, client):
def test_search_query_count(query, client):
with client_with_identity('devtable', client) as cl:
params = {'query': query}
with assert_query_count(8):
with assert_query_count(10):
result = conduct_api_call(cl, ConductSearch, 'GET', params, None, 200).json
assert len(result['results'])

View file

@ -40,10 +40,10 @@ def handle_registry_v2_exception(error):
return response
_MAX_RESULTS_PER_PAGE = app.config.get('V2_PAGINATION_SIZE', 50)
_MAX_RESULTS_PER_PAGE = app.config.get('V2_PAGINATION_SIZE', 100)
def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
def paginate(start_id_kwarg_name='start_id', limit_kwarg_name='limit',
callback_kwarg_name='pagination_callback'):
"""
Decorates a handler adding a parsed pagination token and a callback to encode a response token.
@ -61,17 +61,16 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
next_page_token = request.args.get('next_page', request.args.get('last', None))
# Decrypt the next page token, if any.
offset = 0
start_id = None
page_info = decrypt_page_token(next_page_token)
if page_info is not None:
# Note: we use offset here instead of ID >= n because one of the V2 queries is a UNION.
offset = page_info.get('offset', 0)
start_id = page_info.get('start_id', None)
def callback(num_results, response):
if num_results < limit:
def callback(results, response):
if len(results) <= limit:
return
next_page_token = encrypt_page_token({'offset': limit + offset})
next_page_token = encrypt_page_token({'start_id': max([obj.id for obj in results])})
link_url = os.path.join(get_app_url(), url_for(request.endpoint, **request.view_args))
link_param = urlencode({'n': limit, 'next_page': next_page_token})
@ -79,12 +78,10 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
response.headers['Link'] = link
kwargs[limit_kwarg_name] = limit
kwargs[offset_kwarg_name] = offset
kwargs[start_id_kwarg_name] = start_id
kwargs[callback_kwarg_name] = callback
return func(*args, **kwargs)
return wrapped
return wrapper
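The pagination token now carries a `start_id` (keyset pagination over the primary key) rather than an `offset`, and the callback emits a next-page link only when the handler fetched more rows than the page size. A self-contained sketch of that round trip; `_encode_token`/`_decode_token` below are plain base64 stand-ins for the real `encrypt_page_token`/`decrypt_page_token`, and `rows_after` is an assumed data accessor:

import json
from base64 import urlsafe_b64decode, urlsafe_b64encode

def _encode_token(payload):
    return urlsafe_b64encode(json.dumps(payload).encode('utf-8')).decode('ascii')

def _decode_token(token):
    if not token:
        return None
    return json.loads(urlsafe_b64decode(token.encode('ascii')).decode('utf-8'))

def page_of(rows_after, limit, next_page_token=None):
    """ rows_after(start_id, count) must return up to `count` dicts with an 'id'
        key, ordered by id ascending and restricted to id >= start_id. """
    page_info = _decode_token(next_page_token) or {}
    start_id = page_info.get('start_id')

    # Fetch one extra row so a full page unambiguously signals another page.
    rows = rows_after(start_id, limit + 1)

    next_token = None
    if len(rows) > limit:
        # The next page starts at the highest id seen, mirroring the callback
        # above, which encodes max(obj.id for obj in results); the >= start_id
        # filter then makes that row the first result of the next page.
        next_token = _encode_token({'start_id': max(row['id'] for row in rows)})
    return rows[:limit], next_token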

View file

@ -2,10 +2,13 @@ import features
from flask import jsonify
from auth.auth_context import get_authenticated_user
from app import model_cache
from auth.auth_context import get_authenticated_user, get_authenticated_context
from auth.registry_jwt_auth import process_registry_jwt_auth
from data.cache import cache_key
from endpoints.decorators import anon_protect
from endpoints.v2 import v2_bp, paginate
from endpoints.v2.models_interface import Repository
from endpoints.v2.models_pre_oci import data_model as model
@ -13,20 +16,28 @@ from endpoints.v2.models_pre_oci import data_model as model
@process_registry_jwt_auth()
@anon_protect
@paginate()
def catalog_search(limit, offset, pagination_callback):
include_public = bool(features.PUBLIC_CATALOG)
if not include_public and not get_authenticated_user():
return jsonify({'repositories': []})
def catalog_search(start_id, limit, pagination_callback):
def _load_catalog():
include_public = bool(features.PUBLIC_CATALOG)
if not include_public and not get_authenticated_user():
return []
username = get_authenticated_user().username if get_authenticated_user() else None
if username and not get_authenticated_user().enabled:
return jsonify({'repositories': []})
username = get_authenticated_user().username if get_authenticated_user() else None
if username and not get_authenticated_user().enabled:
return []
repos = model.get_visible_repositories(username, start_id, limit, include_public=include_public)
return [repo._asdict() for repo in repos]
context_key = get_authenticated_context().unique_key if get_authenticated_context() else None
catalog_cache_key = cache_key.for_catalog_page(context_key, start_id, limit)
visible_repositories = [Repository(**repo_dict) for repo_dict
in model_cache.retrieve(catalog_cache_key, _load_catalog)]
visible_repositories = model.get_visible_repositories(username, limit + 1, offset,
include_public=include_public)
response = jsonify({
'repositories': ['%s/%s' % (repo.namespace_name, repo.name)
for repo in visible_repositories][0:limit],})
for repo in visible_repositories][0:limit],
})
pagination_callback(len(visible_repositories), response)
pagination_callback(visible_repositories, response)
return response
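Because the cached catalog page has to survive serialization in the data model cache, the loader returns plain dicts (`repo._asdict()`) and the handler rebuilds `Repository` namedtuples from them on the way out. A small sketch of that round trip; the field list here is illustrative rather than the full `Repository` definition from `endpoints.v2.models_interface`:

from collections import namedtuple

Repository = namedtuple('Repository', ['id', 'name', 'namespace_name', 'description'])

def to_cacheable(repos):
    # namedtuples become JSON-friendly dicts before being stored in the cache.
    return [repo._asdict() for repo in repos]

def from_cacheable(cached):
    # Rebuilding the namedtuples restores attribute access (repo.namespace_name)
    # for the response-building code; dict keys must match the fields exactly.
    return [Repository(**repo_dict) for repo_dict in cached]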

View file

@ -26,7 +26,7 @@ class ManifestJSON(namedtuple('ManifestJSON', ['digest', 'json', 'media_type']))
"""
class Tag(namedtuple('Tag', ['name', 'repository'])):
class Tag(namedtuple('Tag', ['id', 'name', 'repository'])):
"""
Tag represents a user-facing alias for referencing a set of Manifests.
"""
@ -167,14 +167,14 @@ class DockerRegistryV2DataInterface(object):
pass
@abstractmethod
def repository_tags(self, namespace_name, repo_name, limit, offset):
def repository_tags(self, namespace_name, repo_name, start_id, limit):
"""
Returns the active tags under the repository with the given name and namespace.
"""
pass
@abstractmethod
def get_visible_repositories(self, username, limit, offset):
def get_visible_repositories(self, username, start_id, limit):
"""
Returns the repositories visible to the user with the given username, if any.
"""

View file

@ -60,7 +60,7 @@ class PreOCIModel(DockerRegistryV2DataInterface):
def delete_manifest_by_digest(self, namespace_name, repo_name, digest):
def _tag_view(tag):
return Tag(name=tag.name, repository=RepositoryReference(
return Tag(id=tag.id, name=tag.name, repository=RepositoryReference(
id=tag.repository_id,
name=repo_name,
namespace_name=namespace_name,))
@ -118,24 +118,32 @@ class PreOCIModel(DockerRegistryV2DataInterface):
repository.id, tag_name, leaf_layer_docker_id, manifest_digest, manifest_bytes)
return newly_created
def repository_tags(self, namespace_name, repo_name, limit, offset):
def repository_tags(self, namespace_name, repo_name, start_id, limit):
def _tag_view(tag):
return Tag(name=tag.name, repository=RepositoryReference(
return Tag(id=tag.id, name=tag.name, repository=RepositoryReference(
id=tag.repository_id,
name=repo_name,
namespace_name=namespace_name,))
tags_query = model.tag.list_repository_tags(namespace_name, repo_name)
tags_query = tags_query.limit(limit).offset(offset)
tags_query = (tags_query
.order_by(database.RepositoryTag.id)
.limit(limit + 1))
if start_id is not None:
tags_query = tags_query.where(database.RepositoryTag.id >= start_id)
return [_tag_view(tag) for tag in tags_query]
def get_visible_repositories(self, username, limit, offset, include_public=None):
def get_visible_repositories(self, username, start_id, limit, include_public=None):
if include_public is None:
include_public = (username is None)
query = model.repository.get_visible_repositories(username, kind_filter='image',
include_public=include_public)
query = query.limit(limit).offset(offset)
query = model.repository.get_visible_repositories(username,
kind_filter='image',
include_public=include_public,
start_id=start_id,
limit=limit + 1)
return [_repository_for_repo(repo) for repo in query]
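Both `repository_tags` and `get_visible_repositories` now page by primary key: order by id, constrain to `id >= start_id`, and fetch one row beyond the requested page so the endpoint can tell whether a next page exists. The same shape, factored into a hypothetical helper for illustration:

def keyset_page(query, id_field, start_id, limit):
    # Over-fetch by one so the caller can detect a following page without a
    # separate COUNT query; the caller slices back down to `limit` rows.
    query = query.order_by(id_field).limit(limit + 1)
    if start_id is not None:
        query = query.where(id_field >= start_id)
    return query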
def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name,
@ -295,7 +303,7 @@ def _docker_v1_metadata(namespace_name, repo_name, repo_image):
def _repository_for_repo(repo):
""" Returns a Repository object representing the Pre-OCI data model repo instance given. """
return Repository(
id=repo.id,
id=repo.id or repo.rid,
name=repo.name,
namespace_name=repo.namespace_user.username,
description=repo.description,

View file

@ -12,11 +12,11 @@ from endpoints.v2.models_pre_oci import data_model as model
@require_repo_read
@anon_protect
@paginate()
def list_all_tags(namespace_name, repo_name, limit, offset, pagination_callback):
tags = model.repository_tags(namespace_name, repo_name, limit, offset)
def list_all_tags(namespace_name, repo_name, start_id, limit, pagination_callback):
tags = list(model.repository_tags(namespace_name, repo_name, start_id, limit))
response = jsonify({
'name': '{0}/{1}'.format(namespace_name, repo_name),
'tags': [tag.name for tag in tags],})
'tags': [tag.name for tag in tags][0:limit],})
pagination_callback(len(tags), response)
pagination_callback(tags, response)
return response

View file

@ -347,8 +347,50 @@ class V2Protocol(RegistryProtocol):
return PullResult(manifests=manifests, image_ids=image_ids)
def tags(self, session, namespace, repo_name, page_size=2, credentials=None, options=None,
expected_failure=None):
options = options or ProtocolOptions()
scopes = options.scopes or ['repository:%s:pull' % self.repo_name(namespace, repo_name)]
# Ping!
self.ping(session)
# Perform auth and retrieve a token.
headers = {}
if credentials is not None:
token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
expected_failure=expected_failure)
if token is None:
return None
headers = {
'Authorization': 'Bearer ' + token,
}
results = []
url = '/v2/%s/tags/list' % (self.repo_name(namespace, repo_name))
params = {}
if page_size is not None:
params['n'] = page_size
while True:
response = self.conduct(session, 'GET', url, headers=headers, params=params)
data = response.json()
assert len(data['tags']) <= page_size
results.extend(data['tags'])
if not response.headers.get('Link'):
return results
link_url = response.headers['Link']
v2_index = link_url.find('/v2/')
url = link_url[v2_index:]
return results
def catalog(self, session, page_size=2, credentials=None, options=None, expected_failure=None,
namespace=None, repo_name=None):
namespace=None, repo_name=None, bearer_token=None):
options = options or ProtocolOptions()
scopes = options.scopes or []
@ -367,6 +409,11 @@ class V2Protocol(RegistryProtocol):
'Authorization': 'Bearer ' + token,
}
if bearer_token is not None:
headers = {
'Authorization': 'Bearer ' + bearer_token,
}
results = []
url = '/v2/_catalog'
params = {}

View file

@ -16,6 +16,7 @@ from test.registry.protocol_fixtures import *
from test.registry.protocols import Failures, Image, layer_bytes_for_contents, ProtocolOptions
from app import instance_keys
from data.model.tag import list_repository_tags
from util.security.registry_jwt import decode_bearer_header
from util.timedeltastring import convert_to_timedelta
@ -718,6 +719,51 @@ def test_catalog(public_catalog, credentials, expected_repos, page_size, v2_prot
assert set(expected_repos).issubset(set(results))
def test_catalog_caching(v2_protocol, basic_images, liveserver_session, app_reloader,
liveserver, registry_server_executor):
""" Test: Calling the catalog after initially pulled will result in the catalog being cached. """
credentials = ('devtable', 'password')
# Conduct the initial catalog call to prime the cache.
results = v2_protocol.catalog(liveserver_session, credentials=credentials,
namespace='devtable', repo_name='simple')
token, _ = v2_protocol.auth(liveserver_session, credentials, 'devtable', 'simple')
# Disconnect the server from the database.
registry_server_executor.on(liveserver).break_database()
# Call the catalog again, which should now be cached.
cached_results = v2_protocol.catalog(liveserver_session, bearer_token=token)
assert len(cached_results) == len(results)
assert set(cached_results) == set(results)
@pytest.mark.parametrize('username, namespace, repository', [
('devtable', 'devtable', 'simple'),
('devtable', 'devtable', 'gargantuan'),
('public', 'public', 'publicrepo'),
('devtable', 'buynlarge', 'orgrepo'),
])
@pytest.mark.parametrize('page_size', [
1,
2,
10,
50,
100,
])
def test_tags(username, namespace, repository, page_size, v2_protocol, liveserver_session,
app_reloader, liveserver, registry_server_executor):
""" Test: Retrieving results from the V2 catalog. """
credentials = (username, 'password')
results = v2_protocol.tags(liveserver_session, page_size=page_size, credentials=credentials,
namespace=namespace, repo_name=repository)
expected_tags = [tag.name for tag in list_repository_tags(namespace, repository)]
assert len(results) == len(expected_tags)
assert set(results) == set(expected_tags)
def test_pull_torrent(pusher, basic_images, liveserver_session, liveserver,
registry_server_executor, app_reloader):
""" Test: Retrieve a torrent for pulling the image via the Quay CLI. """

View file

@ -1858,7 +1858,7 @@ class TestListRepos(ApiTestCase):
self.login(ADMIN_ACCESS_USER)
# Queries: Base + the list query + the popularity and last modified queries + full perms load
with assert_query_count(BASE_LOGGEDIN_QUERY_COUNT + 4):
with assert_query_count(BASE_LOGGEDIN_QUERY_COUNT + 5):
json = self.getJsonResponse(RepositoryList, params=dict(namespace=ORGANIZATION, public=False,
last_modified=True, popularity=True))