Compare commits

This repository has been archived on 2020-03-24. You can view files and clone it, but cannot push or open issues or pull requests.

15 Commits

Author  SHA1  Message  Date

thomasmckay  c898b54605
  Merge pull request #32 from thomasmckay/20-non-root
  PROJQUAY-20 - Dockerfile.osbs for non-root container
  2019-11-21 11:38:19 -05:00

Tom McKay  45e8df1def
  PROJQUAY-20 - Dockerfile.osbs for non-root container
  2019-11-21 09:09:18 -05:00

Alec Merdler  73bab07baf
  Merge pull request #21 from alecmerdler/opensearch
  OpenSearch
  2019-11-20 14:14:51 -05:00

thomasmckay  f4836f7916
  Merge pull request #25 from thomasmckay/12-mirror-api
  PROJQUAY-12 - remove mirror/rules API
  2019-11-20 13:44:34 -05:00

alecmerdler  68fa29a36e
  add OpenSearch capabilities for Chrome omnibar search
  2019-11-20 13:06:21 -05:00

Jake Moshenko  04fbaa8890
  Merge pull request #26 from tparikh/fix-openshift-deployment
  PROJQUAY-11 - update progressDeadlineSeconds value for openshift deployment
  2019-11-20 09:51:54 -05:00

Tom McKay  36d0aa4fbb
  PROJQUAY-12 - remove mirror/rules API
  2019-11-20 09:30:52 -05:00

Tejas Parikh  d0d68816f9
  update QUAY_APP_DEPLOYMENT_PROGRESS_DEADLINE_SECONDS value for openshift deployment
  Signed-off-by: Tejas Parikh <tparikh@redhat.com>
  2019-11-19 16:08:27 -05:00

Jimmy Zelinskie  f915352138
  Merge pull request #24 from tparikh/quayio-osd-deployment
  added OpenShift template for Quay app
  2019-11-19 14:17:01 -05:00

Tejas Parikh  f386e7acce
  updated memory limits and requests
  Signed-off-by: Tejas Parikh <tparikh@redhat.com>
  2019-11-18 15:10:12 -05:00

Tejas Parikh  c975807e50
  added OpenShift template for Quay app
  Signed-off-by: Tejas Parikh <tparikh@redhat.com>
  2019-11-18 12:57:07 -05:00

Joseph Schorr  23c5120790
  Merge pull request #20 from josephschorr/fix-alembic-no-migration
  Fix the alembic version returned if there is no active data migration
  2019-11-15 13:29:02 -06:00

Joseph Schorr  9834bddae6
  Fix the alembic version returned if there is no active data migration
  2019-11-15 13:12:20 -05:00

Joseph Schorr  2e3457ae76
  Merge pull request #17 from josephschorr/fix-encrypted-token-migration
  Fix the encrypted token migration issue encountered on HEAD
  2019-11-14 15:58:20 -06:00

Joseph Schorr  a54fb1b23a
  Fix the encrypted token migration issue encountered on HEAD
  This change ensures there is better messaging around the encrypted token migration, including a new phase to use for new installations, and fixes an issue encountered when running database migrations for new installations
  2019-11-14 14:50:47 -05:00
19 changed files with 633 additions and 231 deletions

Dockerfile.osbs (new file, +142 lines)
View File

@@ -0,0 +1,142 @@
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"
ENV PYTHON_VERSION=2.7 \
PATH=$HOME/.local/bin/:$PATH \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
LC_ALL=en_US.UTF-8 \
LANG=en_US.UTF-8 \
PIP_NO_CACHE_DIR=off
ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."
RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR
RUN INSTALL_PKGS="\
python27 \
python27-python-pip \
rh-nginx112 rh-nginx112-nginx \
openldap \
scl-utils \
gcc-c++ git \
openldap-devel \
gpgme-devel \
dnsmasq \
memcached \
openssl \
skopeo \
" && \
yum install -y yum-utils && \
yum-config-manager --quiet --disable "*" >/dev/null && \
yum-config-manager --quiet --enable \
rhel-7-server-rpms \
rhel-server-rhscl-7-rpms \
rhel-7-server-optional-rpms \
rhel-7-server-extras-rpms \
--save >/dev/null && \
yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
yum -y update && \
yum -y clean all
COPY . .
RUN scl enable python27 "\
pip install --upgrade setuptools pip && \
pip install -r requirements.txt --no-cache && \
pip freeze && \
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries \
"
RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm -f piplist.txt pipinfo.txt
# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
yum install -y nodejs && \
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
yum install -y yarn && \
yarn install --ignore-engines && \
yarn build && \
yarn build-config-app
# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
chmod +x /usr/local/bin/jwtproxy
# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
chmod +x /usr/local/bin/prometheus-aggregator
# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
RUN ln -s $QUAYCONF /conf && \
mkdir /var/log/nginx && \
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stdout /var/log/nginx/error.log && \
chmod -R a+rwx /var/log/nginx
# Cleanup
RUN UNINSTALL_PKGS="\
gcc-c++ git \
openldap-devel \
gpgme-devel \
optipng \
kernel-headers \
" && \
yum remove -y $UNINSTALL_PKGS && \
yum clean all && \
rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
EXPOSE 8080 8443 7443
RUN chgrp -R 0 $QUAYDIR && \
chmod -R g=u $QUAYDIR
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
chmod g=u /etc/passwd
RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
# Allow TLS certs to be created and installed as non-root user
RUN chgrp -R 0 /etc/pki/ca-trust/extracted && \
chmod -R g=u /etc/pki/ca-trust/extracted && \
chgrp -R 0 /etc/pki/ca-trust/source/anchors && \
chmod -R g=u /etc/pki/ca-trust/source/anchors && \
chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi && \
chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi
VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
USER 1001
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]
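
The license gate in the middle of this Dockerfile is the subtle part: docutils (GPLv3, pulled in via botocore/s3transfer) is removed by hand, and the build then fails if any remaining dependency reports a GPL license that is not LGPL. Below is a rough standalone Python equivalent for auditing an environment locally; the helper name is ours, and it reads the License: field from pip show rather than grepping the full output:

import subprocess

def find_gpl_licensed(exclude=('docutils',)):
    """Return installed packages whose pip metadata lists GPL (but not LGPL)."""
    # Capture pip output into a variable instead of piping straight to grep,
    # mirroring the Dockerfile's workaround for https://github.com/pypa/pip/pull/3304
    frozen = subprocess.check_output(['pip', '--disable-pip-version-check', 'freeze']).decode()
    names = [line.split('==')[0] for line in frozen.splitlines()
             if line and not line.startswith('-e') and line.split('==')[0] not in exclude]
    offenders = []
    for name in names:
        info = subprocess.check_output(['pip', '--disable-pip-version-check', 'show', name]).decode()
        license_lines = [l for l in info.splitlines() if l.startswith('License:')]
        if any('GPL' in l and 'LGPL' not in l for l in license_lines):
            offenders.append(name)
    return offenders

if __name__ == '__main__':
    bad = find_gpl_licensed()
    assert not bad, 'GPL-licensed dependencies detected: %s' % bad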

View File

@@ -10,6 +10,9 @@ else:
 elif v3_upgrade_mode == 'production-transition':
   print '481623ba00ba'
 elif v3_upgrade_mode == 'post-oci-rollout' or v3_upgrade_mode == 'post-oci-roll-back-compat' or v3_upgrade_mode == 'complete':
-  print ActiveDataMigration.alembic_migration_revision
+  if ActiveDataMigration is not None:
+    print ActiveDataMigration.alembic_migration_revision
+  else:
+    print 'head'
 else:
   raise Exception('Unknown V3_UPGRADE_MODE: %s' % v3_upgrade_mode)

View File

@@ -378,7 +378,7 @@ def configure(config_object, testing=False):
                                                          real_for_update))
   db_concat_func.initialize(SCHEME_SPECIALIZED_CONCAT.get(parsed_write_uri.drivername,
                                                           function_concat))
-  db_encrypter.initialize(FieldEncrypter(config_object['DATABASE_SECRET_KEY']))
+  db_encrypter.initialize(FieldEncrypter(config_object.get('DATABASE_SECRET_KEY')))

   read_replicas = config_object.get('DB_READ_REPLICAS', None)
   is_read_only = config_object.get('REGISTRY_STATE', 'normal') == 'readonly'

View File

@@ -59,11 +59,15 @@ class FieldEncrypter(object):
       and the application.
   """
   def __init__(self, secret_key, version='v0'):
-    self._secret_key = convert_secret_key(secret_key)
+    # NOTE: secret_key will be None when the system is being first initialized, so we allow that
+    # case here, but make sure to assert that it is *not* None below if any encryption is actually
+    # needed.
+    self._secret_key = convert_secret_key(secret_key) if secret_key is not None else None
     self._encryption_version = _VERSIONS[version]

   def encrypt_value(self, value, field_max_length=None):
     """ Encrypts the value using the current version of encryption. """
+    assert self._secret_key is not None
     encrypted_value = self._encryption_version.encrypt(self._secret_key, value, field_max_length)
     return '%s%s%s' % (self._encryption_version.prefix, _SEPARATOR, encrypted_value)
@@ -71,6 +75,7 @@ class FieldEncrypter(object):
     """ Decrypts the value, returning it. If the value cannot be decrypted
         raises a DecryptionFailureException.
     """
+    assert self._secret_key is not None
     if _SEPARATOR not in value:
       raise DecryptionFailureException('Invalid encrypted value')
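
A short usage sketch of the patched behavior: constructing with secret_key=None is now legal during first-time setup, while any actual encryption still trips the assertion. This assumes FieldEncrypter is importable from this module and that decrypt_value is the counterpart method whose def line sits just above the second hunk; the secret string is an example:

from data.encryption import FieldEncrypter

encrypter = FieldEncrypter(None)        # ok during initial setup, before DATABASE_SECRET_KEY exists
try:
    encrypter.encrypt_value('my-token')
except AssertionError:
    pass                                # encryption genuinely requires a configured secret key

encrypter = FieldEncrypter('an-example-secret-key')   # normal, fully configured path
encrypted = encrypter.encrypt_value('my-token')
assert encrypter.decrypt_value(encrypted) == 'my-token'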

View File

@@ -2,7 +2,7 @@ import json
 import logging
 import uuid

-from abc import ABCMeta, abstractmethod
+from abc import ABCMeta, abstractmethod, abstractproperty
 from datetime import datetime

 from six import add_metaclass
@@ -92,6 +92,10 @@ class MigrationTester(object):
   """
   TestDataType = DataTypes

+  @abstractproperty
+  def is_testing(self):
+    """ Returns whether we are currently under a migration test. """
+
   @abstractmethod
   def populate_table(self, table_name, fields):
     """ Called to populate a table with the given fields filled in with testing data. """
@@ -107,6 +111,10 @@ class NoopTester(MigrationTester):
 class PopulateTestDataTester(MigrationTester):
+  @property
+  def is_testing(self):
+    return True
+
   def populate_table(self, table_name, fields):
     columns = {field_name: field_type() for field_name, field_type in fields}
     field_name_vars = [':' + field_name for field_name, _ in fields]
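
A compact sketch of the new contract. PopulateTestDataTester returning True comes from the hunk above; NoopTester returning False is our assumption, since its body is outside the hunk:

from abc import ABCMeta, abstractproperty
from six import add_metaclass

@add_metaclass(ABCMeta)
class MigrationTester(object):
    @abstractproperty
    def is_testing(self):
        """ Returns whether we are currently under a migration test. """

class NoopTester(MigrationTester):
    @property
    def is_testing(self):
        return False          # assumed: real deployments skip test-only paths

class PopulateTestDataTester(MigrationTester):
    @property
    def is_testing(self):
        return True

assert PopulateTestDataTester().is_testing and not NoopTester().is_testing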

View File

@@ -80,11 +80,13 @@ def upgrade(tables, tester, progress_reporter):
   op.add_column('repomirrorconfig', sa.Column('external_reference', sa.Text(), nullable=True))

-  for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_reference >> None)):
-    repo = '%s/%s/%s' % (repo_mirror.external_registry, repo_mirror.external_namespace, repo_mirror.external_repository)
-    logger.info('migrating %s' % repo)
-    repo_mirror.external_reference = repo
-    repo_mirror.save()
+  from app import app
+  if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
+    for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_reference >> None)):
+      repo = '%s/%s/%s' % (repo_mirror.external_registry, repo_mirror.external_namespace, repo_mirror.external_repository)
+      logger.info('migrating %s' % repo)
+      repo_mirror.external_reference = repo
+      repo_mirror.save()

   op.drop_column('repomirrorconfig', 'external_registry')
   op.drop_column('repomirrorconfig', 'external_namespace')
@@ -109,14 +111,16 @@ def downgrade(tables, tester, progress_reporter):
   op.add_column('repomirrorconfig', sa.Column('external_namespace', sa.String(length=255), nullable=True))
   op.add_column('repomirrorconfig', sa.Column('external_repository', sa.String(length=255), nullable=True))

-  logger.info('Restoring columns from external_reference')
-  for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_registry >> None)):
-    logger.info('Restoring %s' % repo_mirror.external_reference)
-    parts = repo_mirror.external_reference.split('/', 2)
-    repo_mirror.external_registry = parts[0] if len(parts) >= 1 else 'DOWNGRADE-FAILED'
-    repo_mirror.external_namespace = parts[1] if len(parts) >= 2 else 'DOWNGRADE-FAILED'
-    repo_mirror.external_repository = parts[2] if len(parts) >= 3 else 'DOWNGRADE-FAILED'
-    repo_mirror.save()
+  from app import app
+  if app.config.get('SETUP_COMPLETE', False):
+    logger.info('Restoring columns from external_reference')
+    for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_registry >> None)):
+      logger.info('Restoring %s' % repo_mirror.external_reference)
+      parts = repo_mirror.external_reference.split('/', 2)
+      repo_mirror.external_registry = parts[0] if len(parts) >= 1 else 'DOWNGRADE-FAILED'
+      repo_mirror.external_namespace = parts[1] if len(parts) >= 2 else 'DOWNGRADE-FAILED'
+      repo_mirror.external_repository = parts[2] if len(parts) >= 3 else 'DOWNGRADE-FAILED'
+      repo_mirror.save()

   op.drop_column('repomirrorconfig', 'external_reference')
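
Distilled, the pattern is: schema operations always run, while the data pass runs only once the install is configured or a migration test is driving it. A runnable toy version; every name below is a stand-in:

class FakeApp(object):
    config = {'SETUP_COMPLETE': False}

class FakeTester(object):
    is_testing = False

def upgrade(app, tester):
    steps = ['add external_reference column']          # schema change always happens
    if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
        steps.append('backfill external_reference')    # data pass is conditional
    steps.append('drop legacy columns')
    return steps

print(upgrade(FakeApp(), FakeTester()))
# -> ['add external_reference column', 'drop legacy columns'] on a fresh install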

View File

@@ -98,157 +98,159 @@ class OAuthApplication(BaseModel):
 def upgrade(tables, tester, progress_reporter):
   op = ProgressWrapper(original_op, progress_reporter)
-  # Empty all access token names to fix the bug where we put the wrong name and code
-  # in for some tokens.
-  AccessToken.update(token_name=None).where(AccessToken.token_name >> None).execute()
+  from app import app
+  if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
+    # Empty all access token names to fix the bug where we put the wrong name and code
+    # in for some tokens.
+    AccessToken.update(token_name=None).where(AccessToken.token_name >> None).execute()

-  # AccessToken.
-  logger.info('Backfilling encrypted credentials for access tokens')
-  for access_token in _iterate(AccessToken, ((AccessToken.token_name >> None) |
-                                             (AccessToken.token_name == ''))):
-    logger.info('Backfilling encrypted credentials for access token %s', access_token.id)
-    assert access_token.code is not None
-    assert access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
-    assert access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]
+    # AccessToken.
+    logger.info('Backfilling encrypted credentials for access tokens')
+    for access_token in _iterate(AccessToken, ((AccessToken.token_name >> None) |
+                                               (AccessToken.token_name == ''))):
+      logger.info('Backfilling encrypted credentials for access token %s', access_token.id)
+      assert access_token.code is not None
+      assert access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
+      assert access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]

-    token_name = access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
-    token_code = _decrypted(access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:])
+      token_name = access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
+      token_code = _decrypted(access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:])

-    (AccessToken
-     .update(token_name=token_name, token_code=token_code)
-     .where(AccessToken.id == access_token.id, AccessToken.code == access_token.code)
-     .execute())
+      (AccessToken
+       .update(token_name=token_name, token_code=token_code)
+       .where(AccessToken.id == access_token.id, AccessToken.code == access_token.code)
+       .execute())

-  assert AccessToken.select().where(AccessToken.token_name >> None).count() == 0
+    assert AccessToken.select().where(AccessToken.token_name >> None).count() == 0

-  # Robots.
-  logger.info('Backfilling encrypted credentials for robots')
-  while True:
-    has_row = False
-    query = (User
-             .select()
-             .join(RobotAccountToken, JOIN.LEFT_OUTER)
-             .where(User.robot == True, RobotAccountToken.id >> None)
-             .limit(BATCH_SIZE))
+    # Robots.
+    logger.info('Backfilling encrypted credentials for robots')
+    while True:
+      has_row = False
+      query = (User
+               .select()
+               .join(RobotAccountToken, JOIN.LEFT_OUTER)
+               .where(User.robot == True, RobotAccountToken.id >> None)
+               .limit(BATCH_SIZE))

-    for robot_user in query:
-      logger.info('Backfilling encrypted credentials for robot %s', robot_user.id)
-      has_row = True
-      try:
-        RobotAccountToken.create(robot_account=robot_user,
-                                 token=_decrypted(robot_user.email),
-                                 fully_migrated=False)
-      except IntegrityError:
+      for robot_user in query:
+        logger.info('Backfilling encrypted credentials for robot %s', robot_user.id)
+        has_row = True
+        try:
+          RobotAccountToken.create(robot_account=robot_user,
+                                   token=_decrypted(robot_user.email),
+                                   fully_migrated=False)
+        except IntegrityError:
          break

-    if not has_row:
-      break
+      if not has_row:
+        break

-  # RepositoryBuildTrigger
-  logger.info('Backfilling encrypted credentials for repo build triggers')
-  for repo_build_trigger in _iterate(RepositoryBuildTrigger,
-                                     (RepositoryBuildTrigger.fully_migrated == False)):
-    logger.info('Backfilling encrypted credentials for repo build trigger %s',
-                repo_build_trigger.id)
+    # RepositoryBuildTrigger
+    logger.info('Backfilling encrypted credentials for repo build triggers')
+    for repo_build_trigger in _iterate(RepositoryBuildTrigger,
+                                       (RepositoryBuildTrigger.fully_migrated == False)):
+      logger.info('Backfilling encrypted credentials for repo build trigger %s',
+                  repo_build_trigger.id)

-    (RepositoryBuildTrigger
-     .update(secure_auth_token=_decrypted(repo_build_trigger.auth_token),
-             secure_private_key=_decrypted(repo_build_trigger.private_key),
-             fully_migrated=True)
-     .where(RepositoryBuildTrigger.id == repo_build_trigger.id,
-            RepositoryBuildTrigger.uuid == repo_build_trigger.uuid)
-     .execute())
+      (RepositoryBuildTrigger
+       .update(secure_auth_token=_decrypted(repo_build_trigger.auth_token),
+               secure_private_key=_decrypted(repo_build_trigger.private_key),
+               fully_migrated=True)
+       .where(RepositoryBuildTrigger.id == repo_build_trigger.id,
+              RepositoryBuildTrigger.uuid == repo_build_trigger.uuid)
+       .execute())

-  assert (RepositoryBuildTrigger
-          .select()
-          .where(RepositoryBuildTrigger.fully_migrated == False)
-          .count()) == 0
+    assert (RepositoryBuildTrigger
+            .select()
+            .where(RepositoryBuildTrigger.fully_migrated == False)
+            .count()) == 0

-  # AppSpecificAuthToken
-  logger.info('Backfilling encrypted credentials for app specific auth tokens')
-  for token in _iterate(AppSpecificAuthToken, ((AppSpecificAuthToken.token_name >> None) |
-                                               (AppSpecificAuthToken.token_name == '') |
-                                               (AppSpecificAuthToken.token_secret >> None))):
-    logger.info('Backfilling encrypted credentials for app specific auth %s',
-                token.id)
-    assert token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:]
+    # AppSpecificAuthToken
+    logger.info('Backfilling encrypted credentials for app specific auth tokens')
+    for token in _iterate(AppSpecificAuthToken, ((AppSpecificAuthToken.token_name >> None) |
+                                                 (AppSpecificAuthToken.token_name == '') |
+                                                 (AppSpecificAuthToken.token_secret >> None))):
+      logger.info('Backfilling encrypted credentials for app specific auth %s',
+                  token.id)
+      assert token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:]

-    token_name = token.token_code[:AST_TOKEN_NAME_PREFIX_LENGTH]
-    token_secret = _decrypted(token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:])
-    assert token_name
-    assert token_secret
+      token_name = token.token_code[:AST_TOKEN_NAME_PREFIX_LENGTH]
+      token_secret = _decrypted(token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:])
+      assert token_name
+      assert token_secret

-    (AppSpecificAuthToken
-     .update(token_name=token_name,
-             token_secret=token_secret)
-     .where(AppSpecificAuthToken.id == token.id,
-            AppSpecificAuthToken.token_code == token.token_code)
-     .execute())
+      (AppSpecificAuthToken
+       .update(token_name=token_name,
+               token_secret=token_secret)
+       .where(AppSpecificAuthToken.id == token.id,
+              AppSpecificAuthToken.token_code == token.token_code)
+       .execute())

-  assert (AppSpecificAuthToken
-          .select()
-          .where(AppSpecificAuthToken.token_name >> None)
-          .count()) == 0
+    assert (AppSpecificAuthToken
+            .select()
+            .where(AppSpecificAuthToken.token_name >> None)
+            .count()) == 0

-  # OAuthAccessToken
-  logger.info('Backfilling credentials for OAuth access tokens')
-  for token in _iterate(OAuthAccessToken, ((OAuthAccessToken.token_name >> None) |
-                                           (OAuthAccessToken.token_name == ''))):
-    logger.info('Backfilling credentials for OAuth access token %s', token.id)
-    token_name = token.access_token[:OAUTH_ACCESS_TOKEN_PREFIX_LENGTH]
-    token_code = Credential.from_string(token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:])
-    assert token_name
-    assert token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:]
+    # OAuthAccessToken
+    logger.info('Backfilling credentials for OAuth access tokens')
+    for token in _iterate(OAuthAccessToken, ((OAuthAccessToken.token_name >> None) |
+                                             (OAuthAccessToken.token_name == ''))):
+      logger.info('Backfilling credentials for OAuth access token %s', token.id)
+      token_name = token.access_token[:OAUTH_ACCESS_TOKEN_PREFIX_LENGTH]
+      token_code = Credential.from_string(token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:])
+      assert token_name
+      assert token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:]

-    (OAuthAccessToken
-     .update(token_name=token_name,
-             token_code=token_code)
-     .where(OAuthAccessToken.id == token.id,
-            OAuthAccessToken.access_token == token.access_token)
-     .execute())
+      (OAuthAccessToken
+       .update(token_name=token_name,
+               token_code=token_code)
+       .where(OAuthAccessToken.id == token.id,
+              OAuthAccessToken.access_token == token.access_token)
+       .execute())

-  assert (OAuthAccessToken
-          .select()
-          .where(OAuthAccessToken.token_name >> None)
-          .count()) == 0
+    assert (OAuthAccessToken
+            .select()
+            .where(OAuthAccessToken.token_name >> None)
+            .count()) == 0

-  # OAuthAuthorizationCode
-  logger.info('Backfilling credentials for OAuth auth code')
-  for code in _iterate(OAuthAuthorizationCode, ((OAuthAuthorizationCode.code_name >> None) |
-                                                (OAuthAuthorizationCode.code_name == ''))):
-    logger.info('Backfilling credentials for OAuth auth code %s', code.id)
-    user_code = code.code or random_string_generator(AUTHORIZATION_CODE_PREFIX_LENGTH * 2)()
-    code_name = user_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
-    code_credential = Credential.from_string(user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:])
-    assert code_name
-    assert user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
+    # OAuthAuthorizationCode
+    logger.info('Backfilling credentials for OAuth auth code')
+    for code in _iterate(OAuthAuthorizationCode, ((OAuthAuthorizationCode.code_name >> None) |
+                                                  (OAuthAuthorizationCode.code_name == ''))):
+      logger.info('Backfilling credentials for OAuth auth code %s', code.id)
+      user_code = code.code or random_string_generator(AUTHORIZATION_CODE_PREFIX_LENGTH * 2)()
+      code_name = user_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
+      code_credential = Credential.from_string(user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:])
+      assert code_name
+      assert user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]

-    (OAuthAuthorizationCode
-     .update(code_name=code_name, code_credential=code_credential)
-     .where(OAuthAuthorizationCode.id == code.id)
-     .execute())
+      (OAuthAuthorizationCode
+       .update(code_name=code_name, code_credential=code_credential)
+       .where(OAuthAuthorizationCode.id == code.id)
+       .execute())

-  assert (OAuthAuthorizationCode
-          .select()
-          .where(OAuthAuthorizationCode.code_name >> None)
-          .count()) == 0
+    assert (OAuthAuthorizationCode
+            .select()
+            .where(OAuthAuthorizationCode.code_name >> None)
+            .count()) == 0

-  # OAuthApplication
-  logger.info('Backfilling secret for OAuth applications')
-  for app in _iterate(OAuthApplication, OAuthApplication.fully_migrated == False):
-    logger.info('Backfilling secret for OAuth application %s', app.id)
-    client_secret = app.client_secret or str(uuid.uuid4())
-    secure_client_secret = _decrypted(client_secret)
+    # OAuthApplication
+    logger.info('Backfilling secret for OAuth applications')
+    for app in _iterate(OAuthApplication, OAuthApplication.fully_migrated == False):
+      logger.info('Backfilling secret for OAuth application %s', app.id)
+      client_secret = app.client_secret or str(uuid.uuid4())
+      secure_client_secret = _decrypted(client_secret)

-    (OAuthApplication
-     .update(secure_client_secret=secure_client_secret, fully_migrated=True)
-     .where(OAuthApplication.id == app.id, OAuthApplication.fully_migrated == False)
-     .execute())
+      (OAuthApplication
+       .update(secure_client_secret=secure_client_secret, fully_migrated=True)
+       .where(OAuthApplication.id == app.id, OAuthApplication.fully_migrated == False)
+       .execute())

-  assert (OAuthApplication
-          .select()
-          .where(OAuthApplication.fully_migrated == False)
-          .count()) == 0
+    assert (OAuthApplication
+            .select()
+            .where(OAuthApplication.fully_migrated == False)
+            .count()) == 0

   # Adjust existing fields to be nullable.
   op.alter_column('accesstoken', 'code', nullable=True, existing_type=sa.String(length=255))
@@ -271,10 +273,6 @@ def upgrade(tables, tester, progress_reporter):
 def downgrade(tables, tester, progress_reporter):
   op = ProgressWrapper(original_op, progress_reporter)
-  op.alter_column('accesstoken', 'code', nullable=False, existing_type=sa.String(length=255))
-  op.alter_column('oauthaccesstoken', 'access_token', nullable=False, existing_type=sa.String(length=255))
-  op.alter_column('oauthauthorizationcode', 'code', nullable=False, existing_type=sa.String(length=255))
-  op.alter_column('appspecificauthtoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
   op.alter_column('accesstoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
   op.alter_column('accesstoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
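
For reference, each backfill loop above splits one legacy combined credential into a plaintext lookup name and an encrypted secret. Illustrative only; the real prefix-length constants live alongside this migration:

ACCESS_TOKEN_NAME_PREFIX_LENGTH = 32                          # assumed value for the sketch

legacy_code = 'N' * ACCESS_TOKEN_NAME_PREFIX_LENGTH + 'S' * 32
token_name = legacy_code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]    # kept queryable in plaintext
token_secret = legacy_code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]  # wrapped via _decrypted()/Credential
assert token_name and token_secret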

View File

@@ -39,22 +39,24 @@ def upgrade(tables, tester, progress_reporter):
   # ### end Alembic commands ###

   # Overwrite all plaintext robot credentials.
-  while True:
-    try:
-      robot_account_token = RobotAccountToken.get(fully_migrated=False)
-      robot_account = robot_account_token.robot_account
+  from app import app
+  if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
+    while True:
+      try:
+        robot_account_token = RobotAccountToken.get(fully_migrated=False)
+        robot_account = robot_account_token.robot_account

-      robot_account.email = str(uuid.uuid4())
-      robot_account.save()
+        robot_account.email = str(uuid.uuid4())
+        robot_account.save()

-      federated_login = FederatedLogin.get(user=robot_account)
-      federated_login.service_ident = 'robot:%s' % robot_account.id
-      federated_login.save()
+        federated_login = FederatedLogin.get(user=robot_account)
+        federated_login.service_ident = 'robot:%s' % robot_account.id
+        federated_login.save()

-      robot_account_token.fully_migrated = True
-      robot_account_token.save()
-    except RobotAccountToken.DoesNotExist:
-      break
+        robot_account_token.fully_migrated = True
+        robot_account_token.save()
+      except RobotAccountToken.DoesNotExist:
+        break

 def downgrade(tables, tester, progress_reporter):

View File

@@ -30,21 +30,35 @@ class NullDataMigration(DataMigration):
 class DefinedDataMigration(DataMigration):
   def __init__(self, name, env_var, phases):
     assert phases
     self.name = name
     self.phases = {phase.name: phase for phase in phases}

+    # Add a synthetic phase for new installations that skips the entire migration.
+    self.phases['new-installation'] = phases[-1]._replace(name='new-installation',
+                                                          alembic_revision='head')
+
     phase_name = os.getenv(env_var)
     if phase_name is None:
-      msg = 'Missing env var `%s` for data migration `%s`' % (env_var, self.name)
+      msg = 'Missing env var `%s` for data migration `%s`. %s' % (env_var, self.name,
+                                                                  self._error_suffix)
       raise Exception(msg)

     current_phase = self.phases.get(phase_name)
     if current_phase is None:
-      msg = 'Unknown phase `%s` for data migration `%s`' % (phase_name, self.name)
+      msg = 'Unknown phase `%s` for data migration `%s`. %s' % (phase_name, self.name,
+                                                                self._error_suffix)
       raise Exception(msg)

     self.current_phase = current_phase

+  @property
+  def _error_suffix(self):
+    message = 'Available values for this migration: %s. ' % (self.phases.keys())
+    message += 'If this is a new installation, please use `new-installation`.'
+    return message
+
   @property
   def alembic_migration_revision(self):
     assert self.current_phase
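
With the synthetic phase in place, phase selection reduces to a dict lookup whose 'new-installation' entry resolves to alembic's head revision. A self-contained sketch; the phase name, revision hash, and env var below are illustrative, not Quay's actual values:

import os
from collections import namedtuple

MigrationPhase = namedtuple('MigrationPhase', ['name', 'alembic_revision'])

phases = [MigrationPhase('backfill-then-read-only', 'c059b952ed76')]   # illustrative
table = {p.name: p for p in phases}
table['new-installation'] = phases[-1]._replace(name='new-installation',
                                                alembic_revision='head')

phase_name = os.getenv('DATA_MIGRATION_PHASE', 'new-installation')     # illustrative env var
if phase_name not in table:
    raise Exception('Unknown phase `%s`. Available values for this migration: %s. '
                    'If this is a new installation, please use `new-installation`.'
                    % (phase_name, list(table)))
print(table[phase_name].alembic_revision)   # -> 'head' for a fresh install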

View File

@@ -466,17 +466,20 @@ def set_mirroring_robot(repository, robot):
 # -------------------- Mirroring Rules --------------------------#

+def validate_rule(rule_type, rule_value):
+  if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
+    raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
+
+  if not rule_value or not isinstance(rule_value, list) or len(rule_value) < 1:
+    raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV must be a list with at least one rule')
+
+
 def create_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV, left_child=None, right_child=None):
   """
   Create a new Rule for mirroring a Repository
   """
-  if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
-    raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
-
-  if not isinstance(rule_value, list) or len(rule_value) < 1:
-    raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV must be a list with at least one rule')
+  validate_rule(rule_type, rule_value)

   rule_kwargs = {
     'repository': repository,
@@ -509,11 +512,18 @@ def get_root_rule(repository):
   return None


-def change_rule_value(rule, value):
+def change_rule(repository, rule_type, rule_value):
   """
   Update the value of an existing rule.
   """
+  validate_rule(rule_type, rule_value)
+
+  mirrorRule = get_root_rule(repository)
+  if not mirrorRule:
+    raise ValidationError('validation failed: rule not found')
+
   query = (RepoMirrorRule
-           .update(rule_value=value)
-           .where(RepoMirrorRule.id == rule.id))
+           .update(rule_value=rule_value)
+           .where(RepoMirrorRule.id == mirrorRule.id))

   return query.execute()
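
A standalone restatement of the consolidated check, runnable without the model layer (the enum member is replaced by a string stand-in):

class ValidationError(Exception):
    pass

TAG_GLOB_CSV = 'TAG_GLOB_CSV'   # stand-in for RepoMirrorRuleType.TAG_GLOB_CSV

def validate_rule(rule_type, rule_value):
    if rule_type != TAG_GLOB_CSV:
        raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
    if not rule_value or not isinstance(rule_value, list) or len(rule_value) < 1:
        raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV '
                              'must be a list with at least one rule')

validate_rule(TAG_GLOB_CSV, ['3.1', '3.1*'])   # passes
for bad in (None, [], 'latest', 42):           # all rejected
    try:
        validate_rule(TAG_GLOB_CSV, bad)
        raise AssertionError('should have failed for %r' % bad)
    except ValidationError:
        pass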

View File

@@ -0,0 +1,219 @@
---
apiVersion: v1
kind: Template
metadata:
  name: quay
objects:
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: Role
  metadata:
    name: ${{NAME}}
  rules:
  - apiGroups:
    - ""
    resources:
    - secrets
    verbs:
    - get
    - patch
    - update
  - apiGroups:
    - ""
    resources:
    - namespaces
    verbs:
    - get
  - apiGroups:
    - extensions
    - apps
    resources:
    - deployments
    verbs:
    - get
    - list
    - patch
    - update
    - watch
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: RoleBinding
  metadata:
    name: ${{NAME}}
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: Role
    name: ${{NAME}}
  subjects:
  - kind: ServiceAccount
    name: default
- apiVersion: v1
  kind: Service
  metadata:
    name: quay-clusterip-service
  spec:
    type: ClusterIP
    ports:
    - protocol: TCP
      name: clusterip
      port: ${{CLUSTERIP_SERVICE_PORT}}
      targetPort: ${{CLUSTERIP_SERVICE_TARGET_PORT}}
    selector:
      ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
- apiVersion: v1
  kind: Service
  metadata:
    name: quay-loadbalancer-service
  spec:
    ports:
    - name: loadbalancer
      protocol: TCP
      port: ${{LOADBALANCER_SERVICE_PORT}}
      targetPort: ${{LOADBALANCER_SERVICE_TARGET_PORT}}
    loadBalancerIP:
    type: LoadBalancer
    selector:
      ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
- apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: quay-app
    labels:
      ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
  spec:
    replicas: ${{QUAY_APP_DEPLOYMENT_REPLICAS}}
    minReadySeconds: ${{QUAY_APP_DEPLOYMENT_MIN_READY_SECONDS}}
    progressDeadlineSeconds: ${{QUAY_APP_DEPLOYMENT_PROGRESS_DEADLINE_SECONDS}}
    revisionHistoryLimit: ${{QUAY_APP_DEPLOYMENT_REVISION_HISTORY_LIMITS}}
    strategy:
      type: ${{QUAY_APP_DEPLOYMENT_STRATEGY_TYPE}}
      rollingUpdate:
        maxUnavailable: ${{QUAY_APP_DEPLOYMENT_MAX_UNAVAILABLE}}
        maxSurge: ${{QUAY_APP_DEPLOYMENT_MAX_SURGE}}
    selector:
      matchLabels:
        ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
    template:
      metadata:
        labels:
          ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
      spec:
        volumes:
        - name: configvolume
          secret:
            secretName: ${{QUAY_APP_CONFIG_SECRET}}
        containers:
        - name: quay-app
          image: ${IMAGE}:${IMAGE_TAG}
          imagePullPolicy: Always
          ports:
          - containerPort: 8443
          volumeMounts:
          - name: configvolume
            readOnly: false
            mountPath: /conf/stack
          livenessProbe:
            httpGet:
              path: /health/instance
              port: 8443
            initialDelaySeconds: ${{QUAY_APP_LIVENESS_PROBE_INITIAL_DELAY_SECONDS}}
            periodSeconds: ${{QUAY_APP_LIVENESS_PROBE_PERIOD_SECONDS}}
            timeoutSeconds: ${{QUAY_APP_LIVENESS_PROBE_TIMEOUT_SECONDS}}
          readinessProbe:
            httpGet:
              path: /health/endtoend
              port: 8443
            initialDelaySeconds: ${{QUAY_APP_READINESS_PROBE_INITIAL_DELAY_SECONDS}}
            periodSeconds: ${{QUAY_APP_READINESS_PROBE_PERIOD_SECONDS}}
            timeoutSeconds: ${{QUAY_APP_READINESS_PROBE_TIMEOUT_SECONDS}}
          resources:
            limits:
              cpu: ${{QUAY_APP_CPU_LIMIT}}
              memory: ${{QUAY_APP_MEMORY_LIMIT}}
            requests:
              cpu: ${{QUAY_APP_CPU_REQUEST}}
              memory: ${{QUAY_APP_MEMORY_REQUEST}}
parameters:
- name: NAME
  value: "quay"
  displayName: name
  description: Defaults to quay.
- name: IMAGE
  value: "quay.io/app-sre/quay"
  displayName: quay image
  description: quay docker image. Defaults to quay.io/app-sre/quay.
- name: IMAGE_TAG
  value: "latest"
  displayName: quay version
  description: quay version which defaults to latest
- name: CLUSTERIP_SERVICE_PORT
  value: "443"
  displayName: clusterip service port
- name: CLUSTERIP_SERVICE_TARGET_PORT
  value: "8443"
  displayName: clusterip service target port
- name: QUAY_APP_COMPONENT_LABEL_KEY
  value: "quay-component"
  displayName: quay app selector label
- name: QUAY_APP_COMPONENT_LABEL_VALUE
  value: "app"
  displayName: quay app selector label value
- name: LOADBALANCER_SERVICE_PORT
  value: "443"
  displayName: loadbalancer service port
- name: LOADBALANCER_SERVICE_TARGET_PORT
  value: "8443"
  displayName: loadbalancer service target port
- name: QUAY_APP_CONFIG_SECRET
  value: "quay-config-secret"
  displayName: quay app config secret
- name: QUAY_APP_DEPLOYMENT_REPLICAS
  value: "1"
  displayName: quay app deployment replicas
- name: QUAY_APP_MEMORY_REQUEST
  value: "4096Mi"
  displayName: "quay app memory request"
- name: QUAY_APP_CPU_REQUEST
  value: "1"
  displayName: "quay app CPU request"
- name: QUAY_APP_MEMORY_LIMIT
  value: "4096Mi"
  displayName: "quay app memory limit"
- name: QUAY_APP_CPU_LIMIT
  value: "1"
  displayName: "quay app CPU limit"
- name: QUAY_APP_DEPLOYMENT_MIN_READY_SECONDS
  value: "0"
  displayName: quay app deployment min ready seconds
- name: QUAY_APP_DEPLOYMENT_PROGRESS_DEADLINE_SECONDS
  value: "600"
  displayName: quay app deployment progress deadline seconds
- name: QUAY_APP_DEPLOYMENT_REVISION_HISTORY_LIMITS
  value: "10"
  displayName: quay app deployment revision history limits
- name: QUAY_APP_DEPLOYMENT_STRATEGY_TYPE
  value: "RollingUpdate"
  displayName: quay app deployment strategy
- name: QUAY_APP_DEPLOYMENT_MAX_SURGE
  value: "1"
  displayName: quay app deployment max surge
- name: QUAY_APP_DEPLOYMENT_MAX_UNAVAILABLE
  value: "0"
  displayName: quay app deployment max unavailable
- name: QUAY_APP_LIVENESS_PROBE_INITIAL_DELAY_SECONDS
  value: "15"
  displayName: quay app liveness probe initial delay seconds
- name: QUAY_APP_LIVENESS_PROBE_PERIOD_SECONDS
  value: "30"
  displayName: quay app liveness probe period seconds
- name: QUAY_APP_LIVENESS_PROBE_TIMEOUT_SECONDS
  value: "10"
  displayName: quay app liveness probe timeout
- name: QUAY_APP_READINESS_PROBE_INITIAL_DELAY_SECONDS
  value: "15"
  displayName: quay app readiness probe initial delay seconds
- name: QUAY_APP_READINESS_PROBE_PERIOD_SECONDS
  value: "30"
  displayName: quay app readiness probe period seconds
- name: QUAY_APP_READINESS_PROBE_TIMEOUT_SECONDS
  value: "10"
  displayName: quay app readiness probe timeout
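
A quick way to sanity-check this template is to render it locally through the OpenShift CLI. A sketch via subprocess, assuming oc is on PATH, the template is saved as quay-template.yaml, and the parameter values are examples. The ${{PARAM}} form asks OpenShift to substitute the value unquoted, so numeric fields such as replicas arrive as integers rather than strings:

import subprocess

rendered = subprocess.check_output([
    'oc', 'process', '--local', '-f', 'quay-template.yaml', '-o', 'yaml',
    '-p', 'IMAGE_TAG=v3.2.0',
    '-p', 'QUAY_APP_DEPLOYMENT_PROGRESS_DEADLINE_SECONDS=600',
])
print(rendered.decode())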

View File

@@ -11,6 +11,7 @@ import features
 from auth.auth_context import get_authenticated_user
 from data import model
+from data.database import RepoMirrorRuleType
 from endpoints.api import (RepositoryParamResource, nickname, path_param, require_repo_admin,
                            resource, validate_json_request, define_json_response, show_if,
                            format_date)
@@ -53,13 +54,14 @@ common_properties = {
     'type': 'object',
     'description': 'Tag mirror rule',
     'required': [
-      'rule_type',
+      'rule_kind',
       'rule_value'
     ],
     'properties': {
-      'rule_type': {
+      'rule_kind': {
         'type': 'string',
-        'description': 'Rule type must be "TAG_GLOB_CSV"'
+        'description': 'The kind of rule type',
+        'enum': ['tag_glob_csv'],
       },
       'rule_value': {
         'type': 'array',
@@ -231,7 +233,7 @@ class RepoMirrorResource(RepositoryParamResource):
       'sync_retries_remaining': mirror.sync_retries_remaining,
       'sync_status': mirror.sync_status.name,
       'root_rule': {
-        'rule_type': 'TAG_GLOB_CSV',
+        'rule_kind': 'tag_glob_csv',
         'rule_value': rules
       },
       'robot_username': robot,
@@ -368,6 +370,14 @@ class RepoMirrorResource(RepositoryParamResource):
     if model.repo_mirror.change_external_registry_config(repo, updates):
       track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='no_proxy', to=proxy_values['no_proxy'])

+    if 'root_rule' in values:
+      if values['root_rule']['rule_kind'] != "tag_glob_csv":
+        raise ValidationError('validation failed: rule_kind must be "tag_glob_csv"')
+
+      if model.repo_mirror.change_rule(repo, RepoMirrorRuleType.TAG_GLOB_CSV, values['root_rule']['rule_value']):
+        track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="mirror_rule", to=values['root_rule']['rule_value'])
+
     return '', 201

   def _setup_robot_for_mirroring(self, namespace_name, repo_name, robot_username):
@@ -423,45 +433,3 @@ class RepoMirrorResource(RepositoryParamResource):
     if username is None:
       return None

     return username.decrypt()
-
-
-@resource('/v1/repository/<apirepopath:repository>/mirror/rules')
-@show_if(features.REPO_MIRROR)
-class ManageRepoMirrorRule(RepositoryParamResource):
-  """
-  Operations to manage a single Repository Mirroring Rule.
-  TODO: At the moment, we are only dealing with a single rule associated with the mirror.
-  This should change to update the rule and address it using its UUID.
-  """
-  schemas = {
-    'MirrorRule': {
-      'type': 'object',
-      'description': 'A rule used to define how a repository is mirrored.',
-      'required': ['root_rule'],
-      'properties': {
-        'root_rule': common_properties['root_rule']
-      }
-    }
-  }
-
-  @require_repo_admin
-  @nickname('changeRepoMirrorRule')
-  @validate_json_request('MirrorRule')
-  def put(self, namespace_name, repository_name):
-    """
-    Update an existing RepoMirrorRule
-    """
-    repo = model.repository.get_repository(namespace_name, repository_name)
-    if not repo:
-      raise NotFound()
-
-    rule = model.repo_mirror.get_root_rule(repo)
-    if not rule:
-      return {'detail': 'The rule appears to be missing.'}, 400
-
-    data = request.get_json()
-    if model.repo_mirror.change_rule_value(rule, data['root_rule']['rule_value']):
-      track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="mirror_rule", to=data['root_rule']['rule_value'])
-      return 200
-    else:
-      return {'detail': 'Unable to update rule.'}, 400
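
With ManageRepoMirrorRule gone, clients update the root rule through the existing mirror-config resource using the renamed field. An illustrative payload; the commented endpoint path reflects how these /v1 resources are conventionally mounted under /api and is our assumption:

import json

payload = {
    'root_rule': {
        'rule_kind': 'tag_glob_csv',     # formerly 'rule_type': 'TAG_GLOB_CSV'
        'rule_value': ['3.1', '3.1*'],
    },
}
# e.g. PUT /api/v1/repository/<namespace>/<repo>/mirror with this JSON body
print(json.dumps(payload))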

View File

@@ -58,7 +58,7 @@ def test_create_mirror_sets_permissions(existing_robot_permission, expected_perm
     'sync_interval': 100,
     'sync_start_date': '2019-08-20T17:51:00Z',
     'root_rule': {
-      'rule_type': 'TAG_GLOB_CSV',
+      'rule_kind': 'tag_glob_csv',
       'rule_value': ['latest','foo', 'bar']
     },
     'robot_username': 'devtable+newmirrorbot',
@@ -155,6 +155,11 @@ def test_get_mirror(client):
   ('verify_tls', None, 400),
   ('verify_tls', 'abc', 400),
+  ('root_rule', {'rule_kind': 'tag_glob_csv', 'rule_value': ['3.1', '3.1*']}, 201),
+  ('root_rule', {'rule_kind': 'tag_glob_csv'}, 400),
+  ('root_rule', {'rule_kind': 'tag_glob_csv', 'rule_value': []}, 400),
+  ('root_rule', {'rule_kind': 'incorrect', 'rule_value': ['3.1', '3.1*']}, 400),
 ])
 def test_change_config(key, value, expected_status, client):
   """ Verify that changing each attribute works as expected. """

View File

@@ -1417,11 +1417,6 @@ SECURITY_TESTS = [
   (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
   (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
   (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
-  (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, None, 401),
-  (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
-  (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
-  (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
 ]
@pytest.mark.parametrize('resource,method,params,body,identity,expected', SECURITY_TESTS)

View File

@@ -83,6 +83,16 @@ def not_found_error_display(e = None):
   resp.status_code = 404
   return resp

+@web.route('/opensearch.xml')
+def opensearch():
+  template = render_template('opensearch.xml',
+                             baseurl=get_app_url(),
+                             registry_title=app.config.get('REGISTRY_TITLE', 'Quay'))
+  resp = make_response(template)
+  resp.headers['Content-Type'] = 'application/xml'
+  return resp
+
 @web.route('/organization/<path:path>', methods=['GET'])
 @web.route('/organization/<path:path>/', methods=['GET'])
 @no_cache
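
A self-contained Flask sketch of the same endpoint, substituting render_template_string and example values for Quay's template loader and config:

from flask import Flask, make_response, render_template_string

app = Flask(__name__)

OPENSEARCH_XML = '''<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/">
  <ShortName>{{ registry_title }}</ShortName>
  <Url type="text/html" method="get" template="{{ baseurl }}/search?q={searchTerms}"/>
</OpenSearchDescription>'''

@app.route('/opensearch.xml')
def opensearch():
    resp = make_response(render_template_string(
        OPENSEARCH_XML, baseurl='https://quay.example.com', registry_title='Quay'))
    resp.headers['Content-Type'] = 'application/xml'
    return resp

with app.test_client() as client:
    r = client.get('/opensearch.xml')
    assert r.headers['Content-Type'].startswith('application/xml')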

View File

@@ -70,7 +70,7 @@ angular.module('quay').directive('repoPanelMirror', function () {
         };
       }

-      vm.tags = resp.root_rule.rule_value || []; // TODO: Use RepoMirrorRule-specific endpoint
+      vm.tags = resp.root_rule.rule_value || [];

       // TODO: These are not consistently provided by the API. Correct that in the API.
       vm.verifyTLS = resp.external_registry_config.verify_tls;
@@ -356,8 +356,16 @@ angular.module('quay').directive('repoPanelMirror', function () {
      * Update Tag-Rules
      */
     vm.changeTagRules = function(data, callback) {
-      let csv = data.values.rule_value;
-      let patterns = csv.split(',');
+      let csv = data.values.rule_value,
+          patterns;
+
+      // If already an array then the data has not changed
+      if (Array.isArray(csv)) {
+        patterns = csv;
+      } else {
+        patterns = csv.split(',');
+      }

       patterns.map(s => s.trim()); // Trim excess whitespace
       patterns = Array.from(new Set(patterns)); // De-duplicate
@@ -370,7 +378,7 @@ angular.module('quay').directive('repoPanelMirror', function () {
       data = {
         'root_rule': {
-          'rule_type': 'TAG_GLOB_CSV',
+          'rule_kind': "tag_glob_csv",
           'rule_value': patterns
         }
       }
@@ -378,7 +386,7 @@ angular.module('quay').directive('repoPanelMirror', function () {
       let displayError = ApiService.errorDisplay('Could not change Tag Rules', callback);

       ApiService
-        .changeRepoMirrorRule(data, params)
+        .changeRepoMirrorConfig(data, params)
         .then(function(resp) {
           vm.getMirror();
           callback(true);
@@ -452,7 +460,7 @@ angular.module('quay').directive('repoPanelMirror', function () {
           }
         },
         'root_rule': {
-          'rule_type': 'TAG_GLOB_CSV',
+          'rule_kind': "tag_glob_csv",
           'rule_value': patterns
         }
       }
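
The normalization above, restated in Python for clarity. One aside: Array.prototype.map returns a new array, so the patterns.map(s => s.trim()) line in the directive discards its result; this sketch applies the trim for real:

def normalize_tag_patterns(rule_value):
    """Accept a list (unchanged form data) or a CSV string, trim whitespace,
    and de-duplicate while preserving order (insertion-ordered dicts, Python 3.7+)."""
    patterns = rule_value if isinstance(rule_value, list) else rule_value.split(',')
    patterns = [p.strip() for p in patterns]
    return list(dict.fromkeys(patterns))

assert normalize_tag_patterns('latest, 3.1*,latest') == ['latest', '3.1*']
assert normalize_tag_patterns(['latest', '3.1*']) == ['latest', '3.1*']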

View File

@@ -131,6 +131,8 @@ b._i.push([a,e,d])};b.__SV=1.2}})(document,window.mixpanel||[]);
     mixpanel.init("{{ mixpanel_key }}", { track_pageview : false, debug: {{ is_debug }} });</script><!-- end Mixpanel -->
 {% endif %}

+<link rel="search" type="application/opensearchdescription+xml" title="{{ config_set['REGISTRY_TITLE'] }}" href="/opensearch.xml" />
+
 </head>
 <body ng-class="pageClass + ' ' + (user.anonymous ? 'anon' : 'signedin')" class="co-img-bg-network">
   <div id="co-l-footer-wrapper">

templates/opensearch.xml (new file, +9 lines)
View File

@@ -0,0 +1,9 @@
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>{{ registry_title }}</ShortName>
<Description>Find public container repositories on {{ registry_title }}</Description>
<InputEncoding>UTF-8</InputEncoding>
<AdultContent>false</AdultContent>
<Language>en-us</Language>
<Image width="16" height="16" type="image/x-icon">//static/img/quay_favicon.png</Image>
<Url type="text/html" method="get" template="{{ baseurl }}/search?q={searchTerms}"/>
</OpenSearchDescription>

View File

@@ -2246,7 +2246,7 @@ def test_repository_states(state, use_robot, create_mirror, robot_exists, expect
     'sync_interval': 1000,
     'sync_start_date': '2020-01-01T00:00:00Z',
     'root_rule': {
-      'rule_type': 'TAG_GLOB_CSV',
+      'rule_kind': "tag_glob_csv",
       'rule_value': ['latest', '1.3*', 'foo']
     },
     'robot_username': robot_full_name,