Merge branch 'master' into delete-setup-page
commit cd6b0a6f46

29 changed files with 550 additions and 142 deletions
@@ -5,6 +5,8 @@ from app import app
 from cachetools import lru_cache
 from notifications import spawn_notification
 from data import model
+from data.registry_model import registry_model
+from data.registry_model.datatypes import RepositoryReference
 from data.database import UseThenDisconnect
 from util.imagetree import ImageTree
 from util.morecollections import AttrDict

@@ -27,7 +29,7 @@ class BuildJob(object):
       self.build_notifier = BuildJobNotifier(self.build_uuid)
     except ValueError:
       raise BuildJobLoadException(
         'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
       )

   @property

@@ -95,70 +97,24 @@ class BuildJob(object):

   def determine_cached_tag(self, base_image_id=None, cache_comments=None):
     """ Returns the tag to pull to prime the cache or None if none. """
-    cached_tag = None
-    if base_image_id and cache_comments:
-      cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments)
-
-    if not cached_tag:
-      cached_tag = self._determine_cached_tag_by_tag()
-
+    cached_tag = self._determine_cached_tag_by_tag()
     logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments)

     return cached_tag

-  def _determine_cached_tag_by_comments(self, base_image_id, cache_commands):
-    """ Determines the tag to use for priming the cache for this build job, by matching commands
-        starting at the given base_image_id. This mimics the Docker cache checking, so it should,
-        in theory, provide "perfect" caching.
-    """
-    with UseThenDisconnect(app.config):
-      # Lookup the base image in the repository. If it doesn't exist, nothing more to do.
-      repo_build = self.repo_build
-      repo_namespace = repo_build.repository.namespace_user.username
-      repo_name = repo_build.repository.name
-
-      base_image = model.image.get_image(repo_build.repository, base_image_id)
-      if base_image is None:
-        return None
-
-      # Build an in-memory tree of the full heirarchy of images in the repository.
-      all_images = model.image.get_repository_images_without_placements(repo_build.repository,
-                                                                        with_ancestor=base_image)
-
-      all_tags = model.tag.list_repository_tags(repo_namespace, repo_name)
-      tree = ImageTree(all_images, all_tags, base_filter=base_image.id)
-
-      # Find a path in the tree, starting at the base image, that matches the cache comments
-      # or some subset thereof.
-      def checker(step, image):
-        if step >= len(cache_commands):
-          return False
-
-        full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step]
-        logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command)
-
-        return image.command == full_command
-
-      path = tree.find_longest_path(base_image.id, checker)
-      if not path:
-        return None
-
-      # Find any tag associated with the last image in the path.
-      return tree.tag_containing_image(path[-1])
-
-
   def _determine_cached_tag_by_tag(self):
     """ Determines the cached tag by looking for one of the tags being built, and seeing if it
        exists in the repository. This is a fallback for when no comment information is available.
     """
     with UseThenDisconnect(app.config):
       tags = self.build_config.get('docker_tags', ['latest'])
-      repository = self.repo_build.repository
-      existing_tags = model.tag.list_repository_tags(repository.namespace_user.username,
-                                                     repository.name)
-      cached_tags = set(tags) & set([tag.name for tag in existing_tags])
-      if cached_tags:
-        return list(cached_tags)[0]
+      repository = RepositoryReference.for_repo_obj(self.repo_build.repository)
+      matching_tag = registry_model.find_matching_tag(repository, tags)
+      if matching_tag is not None:
+        return matching_tag.name
+
+      most_recent_tag = registry_model.get_most_recent_tag(repository)
+      if most_recent_tag is not None:
+        return most_recent_tag.name

       return None
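
Note: the cache lookup above no longer walks the image hierarchy; it asks the new registry model for a matching or most recent tag. A minimal sketch of the flow, assuming `repo` is a data.database.Repository row (the variable names here are illustrative, not part of the commit):

    from data.registry_model import registry_model
    from data.registry_model.datatypes import RepositoryReference

    repository = RepositoryReference.for_repo_obj(repo)

    # Prefer a tag with one of the names being built; fall back to the newest tag.
    cached = (registry_model.find_matching_tag(repository, ['latest', 'prod'])
              or registry_model.get_most_recent_tag(repository))
    cached_tag = cached.name if cached is not None else None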

conf/init/02_get_kube_certs.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+import json
+import os
+import base64
+
+from requests import Request, Session
+
+QUAYPATH = os.environ.get('QUAYPATH', '.')
+KUBE_EXTRA_CA_CERTDIR = os.environ.get('KUBE_EXTRA_CA_CERTDIR', '%s/conf/kube_extra_certs' % QUAYPATH)
+
+KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
+port = os.environ.get('KUBERNETES_SERVICE_PORT')
+if port:
+  KUBERNETES_API_HOST += ':' + port
+
+SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
+
+QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
+QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')
+EXTRA_CA_DIRECTORY_PREFIX = 'extra_ca_certs_'
+
+
+def _lookup_secret(service_token):
+  secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
+  response = _execute_k8s_api(service_token, 'GET', secret_url)
+  if response.status_code != 200:
+    raise Exception('Cannot get the config secret')
+  return json.loads(response.text)
+
+def _execute_k8s_api(service_account_token, method, relative_url, data=None, api_prefix='api/v1', content_type='application/json'):
+  headers = {
+    'Authorization': 'Bearer ' + service_account_token
+  }
+
+  if data:
+    headers['Content-Type'] = content_type
+
+  data = json.dumps(data) if data else None
+  session = Session()
+  url = 'https://%s/%s/%s' % (KUBERNETES_API_HOST, api_prefix, relative_url)
+
+  request = Request(method, url, data=data, headers=headers)
+  return session.send(request.prepare(), verify=False, timeout=2)
+
+def is_extra_cert(key):
+  return key.find(EXTRA_CA_DIRECTORY_PREFIX) == 0
+
+def main():
+  # Load the service account token from the local store.
+  if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
+    raise Exception('Cannot load Kubernetes service account token')
+
+  with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
+    service_token = f.read()
+
+  secret_data = _lookup_secret(service_token).get('data', {})
+  cert_keys = filter(is_extra_cert, secret_data.keys())
+
+  for cert_key in cert_keys:
+    if not os.path.exists(KUBE_EXTRA_CA_CERTDIR):
+      os.mkdir(KUBE_EXTRA_CA_CERTDIR)
+
+    cert_value = base64.b64decode(secret_data[cert_key])
+    cert_filename = cert_key.replace(EXTRA_CA_DIRECTORY_PREFIX, '')
+    print "Found an extra cert %s in config-secret, copying to kube ca dir"
+
+    with open(os.path.join(KUBE_EXTRA_CA_CERTDIR, cert_filename), 'w') as f:
+      f.write(cert_value)
+
+
+if __name__ == '__main__':
+  main()
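
Note: because a Kubernetes secret is a flat key/value map, each extra CA certificate lives in the secret under a prefixed key, and this script expands it back into a file. A sketch of the mapping it performs, with hypothetical key names:

    secret_data = {'extra_ca_certs_internal-ca.crt': '<base64 cert>',
                   'config.yaml': '<base64 config>'}
    cert_keys = filter(is_extra_cert, secret_data.keys())
    # -> ['extra_ca_certs_internal-ca.crt'], written to
    #    $KUBE_EXTRA_CA_CERTDIR/internal-ca.crt with the prefix stripped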

conf/init/02_get_kube_certs.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+QUAYDIR=${QUAYDIR:-"/"}
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd $QUAYDIR
+
+if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
+    echo "Running on kubernetes, attempting to retrieve extra certs from secret"
+    venv/bin/python $QUAYCONF/init/02_get_kube_certs.py
+fi

@@ -3,6 +3,12 @@ set -e
 QUAYPATH=${QUAYPATH:-"."}
 QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf/stack"}
 QUAYCONFIG=${QUAYCONFIG:-"$QUAYCONF/stack"}
+CERTDIR=${QUAYCONFIG/extra_ca_certs}
+
+# If we're running under kube, the previous script (02_get_kube_certs.sh) will put the certs in a different location
+if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
+    CERTDIR=${KUBE_EXTRA_CA_CERTDIR:-"$QUAYPATH/conf/kube_extra_certs"}
+fi
 
 cd ${QUAYDIR:-"/quay-registry"}
 
@@ -13,25 +19,25 @@ then
 fi
 
 # Add extra trusted certificates (as a directory)
-if [ -d $QUAYCONFIG/extra_ca_certs ]; then
-    if test "$(ls -A "$QUAYCONFIG/extra_ca_certs")"; then
-        echo "Installing extra certificates found in $QUAYCONFIG/extra_ca_certs directory"
-        cp $QUAYCONFIG/extra_ca_certs/* /usr/local/share/ca-certificates/
-        cat $QUAYCONFIG/extra_ca_certs/* >> venv/lib/python2.7/site-packages/requests/cacert.pem
-        cat $QUAYCONFIG/extra_ca_certs/* >> venv/lib/python2.7/site-packages/certifi/cacert.pem
+if [ -d $CERTDIR ]; then
+    if test "$(ls -A "$CERTDIR")"; then
+        echo "Installing extra certificates found in $CERTDIR directory"
+        cp $CERTDIR/* /usr/local/share/ca-certificates/
+        cat $CERTDIR/* >> venv/lib/python2.7/site-packages/requests/cacert.pem
+        cat $CERTDIR/* >> venv/lib/python2.7/site-packages/certifi/cacert.pem
     fi
 fi
 
 # Add extra trusted certificates (as a file)
-if [ -f $QUAYCONFIG/extra_ca_certs ]; then
-    echo "Installing extra certificates found in $QUAYCONFIG/extra_ca_certs file"
-    csplit -z -f /usr/local/share/ca-certificates/extra-ca- $QUAYCONFIG/extra_ca_certs '/-----BEGIN CERTIFICATE-----/' '{*}'
-    cat $QUAYCONFIG/extra_ca_certs >> venv/lib/python2.7/site-packages/requests/cacert.pem
-    cat $QUAYCONFIG/extra_ca_certs >> venv/lib/python2.7/site-packages/certifi/cacert.pem
+if [ -f $CERTDIR ]; then
+    echo "Installing extra certificates found in $CERTDIR file"
+    csplit -z -f /usr/local/share/ca-certificates/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}'
+    cat $CERTDIR >> venv/lib/python2.7/site-packages/requests/cacert.pem
+    cat $CERTDIR >> venv/lib/python2.7/site-packages/certifi/cacert.pem
 fi
 
 # Add extra trusted certificates (prefixed)
-for f in $(find $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*")
+for f in $(find $CERTDIR/ -maxdepth 1 -type f -name "extra_ca*")
 do
     echo "Installing extra cert $f"
     cp "$f" /usr/local/share/ca-certificates/

@@ -10,6 +10,9 @@ proxy_redirect off;
 
 proxy_set_header Transfer-Encoding $http_transfer_encoding;
 
+# The DB migrations sometimes take a while, so increase timeout so we don't report an error
+proxy_read_timeout 300s;
+
 location / {
   proxy_pass http://web_app_server;
 }

@@ -150,10 +150,11 @@ def kubernetes_only(f):
 nickname = partial(add_method_metadata, 'nickname')
 
 
 
 import config_app.config_endpoints.api
 import config_app.config_endpoints.api.discovery
+import config_app.config_endpoints.api.kubeconfig
 import config_app.config_endpoints.api.suconfig
 import config_app.config_endpoints.api.superuser
-import config_app.config_endpoints.api.user
 import config_app.config_endpoints.api.tar_config_loader
+import config_app.config_endpoints.api.user

config_app/config_endpoints/api/kubeconfig.py (new file, 69 lines)
@@ -0,0 +1,69 @@
+from flask import request
+
+from data.database import configure
+
+from config_app.c_app import app, config_provider
+from config_app.config_endpoints.api import resource, ApiResource, nickname, kubernetes_only
+from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
+
+
+@resource('/v1/kubernetes/deployments/')
+class SuperUserKubernetesDeployment(ApiResource):
+  """ Resource for the getting the status of Quay Enterprise deployments and cycling them """
+  schemas = {
+    'ValidateDeploymentNames': {
+      'type': 'object',
+      'description': 'Validates deployment names for cycling',
+      'required': [
+        'deploymentNames'
+      ],
+      'properties': {
+        'deploymentNames': {
+          'type': 'array',
+          'description': 'The names of the deployments to cycle'
+        },
+      },
+    }
+  }
+
+  @kubernetes_only
+  @nickname('scGetNumDeployments')
+  def get(self):
+    return KubernetesAccessorSingleton.get_instance().get_qe_deployments()
+
+  @kubernetes_only
+  @nickname('scCycleQEDeployments')
+  def put(self):
+    deployment_names = request.get_json()['deploymentNames']
+    return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments(deployment_names)
+
+
+@resource('/v1/superuser/config/kubernetes')
+class SuperUserKubernetesConfiguration(ApiResource):
+  """ Resource for saving the config files to kubernetes secrets. """
+
+  @kubernetes_only
+  @nickname('scDeployConfiguration')
+  def post(self):
+    return config_provider.save_configuration_to_kubernetes()
+
+
+@resource('/v1/kubernetes/config/populate')
+class KubernetesConfigurationPopulator(ApiResource):
+  """ Resource for populating the local configuration from the cluster's kubernetes secrets. """
+
+  @kubernetes_only
+  @nickname('scKubePopulateConfig')
+  def post(self):
+    # Get a clean transient directory to write the config into
+    config_provider.new_config_dir()
+    KubernetesAccessorSingleton.get_instance().save_secret_to_directory(config_provider.get_config_dir_path())
+
+    # We update the db configuration to connect to their specified one
+    # (Note, even if this DB isn't valid, it won't affect much in the config app, since we'll report an error,
+    # and all of the options create a new clean dir, so we'll never pollute configs)
+    combined = dict(**app.config)
+    combined.update(config_provider.get_config())
+    configure(combined)
+
+    return 200
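
Note: a sketch of how a client could drive the new cycling endpoint; the host, port, path prefix, and authentication here are assumptions, not part of the commit:

    import requests

    # Body shape matches the ValidateDeploymentNames schema above.
    resp = requests.put('http://localhost:8080/api/v1/kubernetes/deployments/',
                        json={'deploymentNames': ['quay-enterprise-app']})
    resp.raise_for_status()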

@@ -3,11 +3,9 @@ import logging
 from flask import abort, request
 
 from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model
-from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request, \
-  kubernetes_only
+from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request
 from config_app.c_app import (app, config_provider, superusers, ip_resolver,
                               instance_keys, INIT_SCRIPTS_LOCATION)
-from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
 
 from data.database import configure
 from data.runmigration import run_alembic_migration

@@ -270,47 +268,6 @@ class SuperUserConfigValidate(ApiResource):
     return validate_service_for_config(service, validator_context)
 
 
-@resource('/v1/kubernetes/deployments/')
-class SuperUserKubernetesDeployment(ApiResource):
-  """ Resource for the getting the status of Quay Enterprise deployments and cycling them """
-  schemas = {
-    'ValidateDeploymentNames': {
-      'type': 'object',
-      'description': 'Validates deployment names for cycling',
-      'required': [
-        'deploymentNames'
-      ],
-      'properties': {
-        'deploymentNames': {
-          'type': 'array',
-          'description': 'The names of the deployments to cycle'
-        },
-      },
-    }
-  }
-
-  @kubernetes_only
-  @nickname('scGetNumDeployments')
-  def get(self):
-    return KubernetesAccessorSingleton.get_instance().get_qe_deployments()
-
-  @kubernetes_only
-  @nickname('scCycleQEDeployments')
-  def put(self):
-    deployment_names = request.get_json()['deploymentNames']
-    return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments(deployment_names)
-
-
-@resource('/v1/superuser/config/kubernetes')
-class SuperUserKubernetesConfiguration(ApiResource):
-  """ Resource for saving the config files to kubernetes secrets. """
-
-  @kubernetes_only
-  @nickname('scDeployConfiguration')
-  def post(self):
-    return config_provider.save_configuration_to_kubernetes()
-
-
 @resource('/v1/superuser/config/file/<filename>')
 class SuperUserConfigFile(ApiResource):
   """ Resource for fetching the status of config files and overriding them. """

@@ -55,8 +55,8 @@ class SuperUserCustomCertificate(ApiResource):
 
     # Call the update script with config dir location to install the certificate immediately.
     if not app.config['TESTING']:
-      if subprocess.call([os.path.join(INIT_SCRIPTS_LOCATION, 'certs_install.sh')],
-                         env={ 'QUAYCONFIG': config_provider.get_config_dir_path() }) != 0:
+      cert_dir = os.path.join(config_provider.get_config_dir_path(), EXTRA_CA_DIRECTORY)
+      if subprocess.call([os.path.join(INIT_SCRIPTS_LOCATION, 'certs_install.sh')], env={ 'CERTDIR': cert_dir }) != 0:
         raise Exception('Could not install certificates')
 
     return '', 204

@@ -1,8 +1,12 @@
 import os
+import base64
+
 from backports.tempfile import TemporaryDirectory
 
 from config_app.config_util.config.fileprovider import FileConfigProvider
 from config_app.config_util.k8saccessor import KubernetesAccessorSingleton
+from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX
+
 
 
 class TransientDirectoryProvider(FileConfigProvider):

@@ -38,10 +42,25 @@ class TransientDirectoryProvider(FileConfigProvider):
     return self.config_volume
 
   def save_configuration_to_kubernetes(self):
-    config_path = self.get_config_dir_path()
-
-    for name in os.listdir(config_path):
+    data = {}
+
+    # Kubernetes secrets don't have sub-directories, so for the extra_ca_certs dir
+    # we have to put the extra certs in with a prefix, and then one of our init scripts
+    # (02_get_kube_certs.sh) will expand the prefixed certs into the equivalent directory
+    # so that they'll be installed correctly on startup by the certs_install script
+    certs_dir = os.path.join(self.config_volume, EXTRA_CA_DIRECTORY)
+    if os.path.exists(certs_dir):
+      for extra_cert in os.listdir(certs_dir):
+        with open(os.path.join(certs_dir, extra_cert)) as f:
+          data[EXTRA_CA_DIRECTORY_PREFIX + extra_cert] = base64.b64encode(f.read())
+
+
+    for name in os.listdir(self.config_volume):
       file_path = os.path.join(self.config_volume, name)
-      KubernetesAccessorSingleton.get_instance().save_file_as_secret(name, file_path)
+      if not os.path.isdir(file_path):
+        with open(file_path) as f:
+          data[name] = base64.b64encode(f.read())
+
+    KubernetesAccessorSingleton.get_instance().replace_qe_secret(data)
 
     return 200
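
Note: the provider now builds one flat payload instead of uploading file-by-file. A sketch of the dict handed to replace_qe_secret, with hypothetical file names:

    import base64

    data = {
      'config.yaml': base64.b64encode('FEATURE_FOO: true\n'),
      # extra CA certs are flattened under the prefix:
      'extra_ca_certs_internal-ca.crt': base64.b64encode('-----BEGIN CERTIFICATE-----...'),
    }
    # KubernetesAccessorSingleton.get_instance().replace_qe_secret(data)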

@@ -2,8 +2,10 @@ import logging
 import json
 import base64
 import datetime
+import os
 
 from requests import Request, Session
+from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX
 
 from config_app.config_util.k8sconfig import KubernetesConfig

@@ -35,10 +37,64 @@ class KubernetesAccessorSingleton(object):
 
     return cls._instance
 
-  def save_file_as_secret(self, name, file_path):
-    with open(file_path) as f:
-      value = f.read()
-      self._update_secret_file(name, value)
+  def save_secret_to_directory(self, dir_path):
+    """
+    Saves all files in the kubernetes secret to a local directory.
+    Assumes the directory is empty.
+    """
+    secret = self._lookup_secret()
+
+    secret_data = secret.get('data', {})
+
+    # Make the `extra_ca_certs` dir to ensure we can populate extra certs
+    extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY)
+    os.mkdir(extra_ca_dir_path)
+
+    for secret_filename, data in secret_data.iteritems():
+      write_path = os.path.join(dir_path, secret_filename)
+
+      if EXTRA_CA_DIRECTORY_PREFIX in secret_filename:
+        write_path = os.path.join(extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, ''))
+
+      with open(write_path, 'w') as f:
+        f.write(base64.b64decode(data))
+
+    return 200
+
+  def save_file_as_secret(self, name, file_pointer):
+    value = file_pointer.read()
+    self._update_secret_file(name, value)
+
+  def replace_qe_secret(self, new_secret_data):
+    """
+    Removes the old config and replaces it with the new_secret_data as one action
+    """
+    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
+    # as an error, as it seems to be a common issue.
+    namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace)
+    response = self._execute_k8s_api('GET', namespace_url)
+    if response.status_code // 100 != 2:
+      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace
+      raise Exception(msg)
+
+    # Check if the secret exists. If not, then we create an empty secret and then update the file
+    # inside.
+    secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret)
+    secret = self._lookup_secret()
+    if secret is None:
+      self._assert_success(self._execute_k8s_api('POST', secret_url, {
+        "kind": "Secret",
+        "apiVersion": "v1",
+        "metadata": {
+          "name": self.kube_config.qe_config_secret
+        },
+        "data": {}
+      }))
+
+    # Update the secret to reflect the file change.
+    secret['data'] = new_secret_data
+
+    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))
+
   def get_qe_deployments(self):
     """"
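
Note: save_secret_to_directory is the inverse of the provider's flattening above, so the secret now round-trips. A sketch, with a hypothetical target directory:

    accessor = KubernetesAccessorSingleton.get_instance()

    # Secret -> files; prefixed cert keys are expanded into extra_ca_certs/.
    accessor.save_secret_to_directory('/tmp/quay-config')

    # Files -> secret; replace_qe_secret swaps in the whole data map at once.
    accessor.replace_qe_secret({'config.yaml': '<base64>'})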

@@ -15,7 +15,6 @@ export class ConfigSetupAppComponent {
     : 'choice'
     | 'setup'
     | 'load'
-    | 'populate'
     | 'download'
     | 'deploy';
 

@@ -45,7 +44,15 @@ export class ConfigSetupAppComponent {
   }
 
   private choosePopulate(): void {
-    this.state = 'populate';
+    this.apiService.scKubePopulateConfig()
+      .then(() => {
+        this.state = 'setup';
+      })
+      .catch(err => {
+        this.apiService.errorDisplay(
+          `Could not populate the configuration from your cluster. Please report this error: ${JSON.stringify(err)}`
+        )()
+      })
   }
 
   private configLoaded(): void {

@@ -24,7 +24,7 @@ export class KubeDeployModalComponent {
     this.state = 'loadingDeployments';
 
     ApiService.scGetNumDeployments().then(resp => {
-      this.deploymentsStatus = resp.items.map(dep => ({ name: dep.metadata.name, numPods: dep.status.replicas }));
+      this.deploymentsStatus = resp.items.map(dep => ({ name: dep.metadata.name, numPods: dep.spec.replicas }));
       this.state = 'readyToDeploy';
     }).catch(err => {
       this.state = 'error';

@@ -37,7 +37,7 @@ export class KubeDeployModalComponent {
 
   deployConfiguration(): void {
     this.ApiService.scDeployConfiguration().then(() => {
-      const deploymentNames: string[]= this.deploymentsStatus.map(dep => dep.name);
+      const deploymentNames: string[] = this.deploymentsStatus.map(dep => dep.name);
 
       this.ApiService.scCycleQEDeployments({ deploymentNames }).then(() => {
         this.state = 'deployed'

@@ -46,7 +46,6 @@ export class KubeDeployModalComponent {
         this.errorMessage = `Could cycle the deployments with the new configuration. Error: ${err.toString()}`;
       })
     }).catch(err => {
-      console.log(err)
       this.state = 'error';
       this.errorMessage = `Could not deploy the configuration. Error: ${err.toString()}`;
     })

@@ -209,19 +209,19 @@
 </div>
 
 <!-- Footer: SUPERUSER_ERROR -->
-<div class="modal-footer alert alert-warning"
+<div class="modal-footer alert alert-danger"
      ng-show="isStep(currentStep, States.SUPERUSER_ERROR)">
   {{ errors.SuperuserCreationError }}
 </div>
 
 <!-- Footer: DB_SETUP_ERROR -->
-<div class="modal-footer alert alert-warning"
+<div class="modal-footer alert alert-danger"
      ng-show="isStep(currentStep, States.DB_SETUP_ERROR)">
-  Database Setup Failed. Please report this to support: {{ errors.DatabaseSetupError }}
+  Database Setup Failed: {{ errors.DatabaseSetupError }}
 </div>
 
 <!-- Footer: DB_ERROR -->
-<div class="modal-footer alert alert-warning" ng-show="isStep(currentStep, States.DB_ERROR)">
+<div class="modal-footer alert alert-danger" ng-show="isStep(currentStep, States.DB_ERROR)">
   Database Validation Issue: {{ errors.DatabaseValidationError }}
 </div>
 

@@ -28,6 +28,21 @@
   text-decoration: none;
 }
 
+.quay-config-app .alert-danger {
+  padding: 25px;
+  display: flex;
+}
+
+.quay-config-app .alert-danger:before {
+  content: "\f071";
+  font-family: Font Awesome\ 5 Free;
+  font-weight: 900;
+  font-size: 30px;
+  padding-right: 15px;
+  color: #c53c3f;
+  text-align: center;
+}
+
 /* Overrides for fixing old quay styles*/
 
 

@@ -1435,7 +1435,7 @@ class TagManifest(BaseModel):
 class TagManifestToManifest(BaseModel):
   """ NOTE: Only used for the duration of the migrations. """
   tag_manifest = ForeignKeyField(TagManifest, index=True, unique=True)
-  manifest = ForeignKeyField(Manifest, index=True, unique=True)
+  manifest = ForeignKeyField(Manifest, index=True)
   broken = BooleanField(index=True, default=False)
 
 
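
Note: dropping unique=True on `manifest` is what lets two old-style tag manifests that share a digest map to a single new-style row. A sketch, using the repeat-digest test later in this diff:

    map_row1 = TagManifestToManifest.get(tag_manifest=manifest_1)
    map_row2 = TagManifestToManifest.get(tag_manifest=manifest_2)
    assert map_row1.manifest == map_row2.manifest  # same Manifest, two tag manifests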

@@ -0,0 +1,43 @@
+"""Remove unique from TagManifestToManifest
+
+Revision ID: 13411de1c0ff
+Revises: 654e6df88b71
+Create Date: 2018-08-19 23:30:24.969549
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '13411de1c0ff'
+down_revision = '654e6df88b71'
+
+from alembic import op
+import sqlalchemy as sa
+
+def upgrade(tables, tester):
+  # Note: Because of a restriction in MySQL, we cannot simply remove the index and re-add
+  # it without the unique=False, nor can we simply alter the index. To make it work, we'd have to
+  # remove the primary key on the field, so instead we simply drop the table entirely and
+  # recreate it with the modified index. The backfill will re-fill this in.
+  op.drop_table('tagmanifesttomanifest')
+
+  op.create_table('tagmanifesttomanifest',
+                  sa.Column('id', sa.Integer(), nullable=False),
+                  sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
+                  sa.Column('manifest_id', sa.Integer(), nullable=False),
+                  sa.Column('broken', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+                  sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifesttomanifest_manifest_id_manifest')),
+                  sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifesttomanifest_tag_manifest_id_tagmanifest')),
+                  sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifesttomanifest'))
+                  )
+  op.create_index('tagmanifesttomanifest_broken', 'tagmanifesttomanifest', ['broken'], unique=False)
+  op.create_index('tagmanifesttomanifest_manifest_id', 'tagmanifesttomanifest', ['manifest_id'], unique=False)
+  op.create_index('tagmanifesttomanifest_tag_manifest_id', 'tagmanifesttomanifest', ['tag_manifest_id'], unique=True)
+
+  tester.populate_table('tagmanifesttomanifest', [
+    ('manifest_id', tester.TestDataType.Foreign('manifest')),
+    ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
+  ])
+
+
+def downgrade(tables, tester):
+  pass

@@ -721,3 +721,30 @@ def change_tag_expiration(tag, expiration_date):
           .execute())
 
   return (tag.lifetime_end_ts, result > 0)
+
+
+def find_matching_tag(repo_id, tag_names):
+  """ Finds the most recently pushed alive tag in the repository with one of the given names,
+      if any.
+  """
+  try:
+    return (_tag_alive(RepositoryTag
+                       .select()
+                       .where(RepositoryTag.repository == repo_id,
+                              RepositoryTag.name << list(tag_names))
+                       .order_by(RepositoryTag.lifetime_start_ts.desc()))
+            .get())
+  except RepositoryTag.DoesNotExist:
+    return None
+
+
+def get_most_recent_tag(repo_id):
+  """ Returns the most recently pushed alive tag in the repository, or None if none. """
+  try:
+    return (_tag_alive(RepositoryTag
+                       .select()
+                       .where(RepositoryTag.repository == repo_id)
+                       .order_by(RepositoryTag.lifetime_start_ts.desc()))
+            .get())
+  except RepositoryTag.DoesNotExist:
+    return None
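
Note: `<<` is peewee's IN operator, and _tag_alive filters out expired tags, so each helper returns the newest live tag that qualifies. A sketch of calling them directly, assuming a repository id `repo_id`:

    from data.model.tag import find_matching_tag, get_most_recent_tag

    tag = find_matching_tag(repo_id, ['latest', 'prod'])  # newest live tag with one of these names, or None
    newest = get_most_recent_tag(repo_id)                 # newest live tag of any name, or None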

data/registry_model/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
+from data.registry_model.registry_pre_oci_model import pre_oci_model
+
+registry_model = pre_oci_model

data/registry_model/datatypes.py (new file, 20 lines)
@@ -0,0 +1,20 @@
+from collections import namedtuple
+
+class RepositoryReference(object):
+  """ RepositoryReference is a reference to a repository, passed to registry interface methods. """
+  def __init__(self, repo_id):
+    self.repo_id = repo_id
+
+  @classmethod
+  def for_repo_obj(cls, repo_obj):
+    return RepositoryReference(repo_obj.id)
+
+
+class Tag(namedtuple('Tag', ['id', 'name'])):
+  """ Tag represents a tag in a repository, which points to a manifest or image. """
+  @classmethod
+  def for_repository_tag(cls, repository_tag):
+    if repository_tag is None:
+      return None
+
+    return Tag(id=repository_tag.id, name=repository_tag.name)
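
Note: since Tag subclasses a namedtuple, instances are immutable values with field access. A quick sketch:

    t = Tag(id=1, name='latest')
    assert t.name == 'latest'
    assert t == Tag(1, 'latest')  # value equality, handy in tests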

data/registry_model/interface.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+@add_metaclass(ABCMeta)
+class RegistryDataInterface(object):
+  """ Interface for code to work with the registry data model. The registry data model consists
+      of all tables that store registry-specific information, such as Manifests, Blobs, Images,
+      and Labels.
+  """
+
+  @abstractmethod
+  def find_matching_tag(self, repository_ref, tag_names):
+    """ Finds an alive tag in the repository matching one of the given tag names and returns it
+        or None if none.
+    """
+
+  @abstractmethod
+  def get_most_recent_tag(self, repository_ref):
+    """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
+        None.
+    """
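
Note: with ABCMeta, a concrete model must implement both abstract methods or it cannot be instantiated. A sketch:

    class BrokenModel(RegistryDataInterface):
      def find_matching_tag(self, repository_ref, tag_names):
        return None
      # get_most_recent_tag omitted

    BrokenModel()  # TypeError: Can't instantiate abstract class BrokenModel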

data/registry_model/registry_pre_oci_model.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+from data import model
+from data.registry_model.interface import RegistryDataInterface
+from data.registry_model.datatypes import Tag
+
+
+class PreOCIModel(RegistryDataInterface):
+  """
+  PreOCIModel implements the data model for the registry API using a database schema
+  before it was changed to support the OCI specification.
+  """
+
+  def find_matching_tag(self, repository_ref, tag_names):
+    """ Finds an alive tag in the repository matching one of the given tag names and returns it
+        or None if none.
+    """
+    found_tag = model.tag.find_matching_tag(repository_ref.repo_id, tag_names)
+    return Tag.for_repository_tag(found_tag)
+
+  def get_most_recent_tag(self, repository_ref):
+    """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
+        None.
+    """
+    found_tag = model.tag.get_most_recent_tag(repository_ref.repo_id)
+    return Tag.for_repository_tag(found_tag)
+
+
+pre_oci_model = PreOCIModel()

data/registry_model/test/test_pre_oci_model.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+import pytest
+
+from data import model
+from data.registry_model.registry_pre_oci_model import PreOCIModel
+from data.registry_model.datatypes import RepositoryReference
+from test.fixtures import *
+
+@pytest.fixture()
+def pre_oci_model(initialized_db):
+  return PreOCIModel()
+
+
+@pytest.mark.parametrize('names, expected', [
+  (['unknown'], None),
+  (['latest'], {'latest'}),
+  (['latest', 'prod'], {'latest', 'prod'}),
+  (['latest', 'prod', 'another'], {'latest', 'prod'}),
+  (['foo', 'prod'], {'prod'}),
+])
+def test_find_matching_tag(names, expected, pre_oci_model):
+  repo = model.repository.get_repository('devtable', 'simple')
+  repository_ref = RepositoryReference.for_repo_obj(repo)
+  found = pre_oci_model.find_matching_tag(repository_ref, names)
+  if expected is None:
+    assert found is None
+  else:
+    assert found.name in expected
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name, expected', [
+  ('devtable', 'simple', {'latest'}),
+  ('buynlarge', 'orgrepo', {'latest', 'prod'}),
+])
+def test_get_most_recent_tag(repo_namespace, repo_name, expected, pre_oci_model):
+  repo = model.repository.get_repository(repo_namespace, repo_name)
+  repository_ref = RepositoryReference.for_repo_obj(repo)
+  found = pre_oci_model.get_most_recent_tag(repository_ref)
+  if expected is None:
+    assert found is None
+  else:
+    assert found.name in expected

@@ -15,6 +15,7 @@ from auth.permissions import QuayDeferredPermissionUser
 from config import frontend_visible_config
 from external_libraries import get_external_javascript, get_external_css
 from endpoints.common_models_pre_oci import pre_oci_model as model
+from util.config.provider.k8sprovider import QE_NAMESPACE
 from util.secscan import PRIORITY_LEVELS
 from util.saas.useranalytics import build_error_callback
 from util.timedeltastring import convert_to_timedelta

@@ -143,7 +144,7 @@ def render_page_template(name, route_data=None, **kwargs):
                              preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
                              version_number=version_number,
                              current_year=datetime.datetime.now().year,
-                             is_kubernetes=IS_KUBERNETES,
+                             kubernetes_namespace=IS_KUBERNETES and QE_NAMESPACE,
                              **kwargs)
 
   resp = make_response(contents)
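
Note: `IS_KUBERNETES and QE_NAMESPACE` hands the template either False or the namespace string, so the frontend can branch on one value. A sketch of the truthiness, assuming IS_KUBERNETES is derived from the KUBERNETES_SERVICE_HOST env var as elsewhere in this commit:

    kubernetes_namespace = IS_KUBERNETES and QE_NAMESPACE
    # not on kube:  False             -> window.__kubernetes_namespace is falsy
    # on kube:      'quay-enterprise' -> the error message can name the namespace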

@@ -59,9 +59,9 @@
           "<a href='https://coreos.com/docs/enterprise-registry/initial-setup/'>" +
           "Read the Setup Guide</a>";
 
-      if (window.__is_kubernetes) {
+      if (window.__kubernetes_namespace) {
         title = "Configuration Secret Missing";
-        message = "It looks like the Quay Enterprise secret is not present in this Kubernetes namespace." +
+        message = `It looks like the Quay Enterprise secret is not present in the namespace <code>${window.__kubernetes_namespace}.</code>` +
                   "<br>Please double-check that the secret exists, or " +
                   "<a href='https://coreos.com/docs/enterprise-registry/initial-setup/'>" +
                   "refer to the Setup Guide</a>";

@@ -35,7 +35,7 @@
     window.__auth_scopes = {{ scope_set|tojson|safe }};
     window.__vuln_priority = {{ vuln_priority_set|tojson|safe }}
     window.__token = '{{ csrf_token() }}';
-    window.__is_kubernetes = {{ is_kubernetes|tojson|safe }};
+    window.__kubernetes_namespace = {{ kubernetes_namespace|tojson|safe }};
 
     {% if error_code %}
       window.__error_code = {{ error_code }};

@@ -41,6 +41,7 @@ CONFIG_FILENAMES = (SSL_FILENAMES + DB_SSL_FILENAMES + JWT_FILENAMES + ACI_CERT_
                     LDAP_FILENAMES)
 CONFIG_FILE_SUFFIXES = ['-cloudfront-signing-key.pem']
 EXTRA_CA_DIRECTORY = 'extra_ca_certs'
+EXTRA_CA_DIRECTORY_PREFIX = 'extra_ca_certs_'
 
 VALIDATORS = {
   DatabaseValidator.name: DatabaseValidator.validate,

@@ -5,7 +5,8 @@ from peewee import JOIN, fn, IntegrityError
 
 from app import app
 from data.database import (UseThenDisconnect, TagManifest, TagManifestToManifest, Image,
-                           db_transaction)
+                           Manifest, db_transaction)
+from data.model import DataModelException
 from data.model.image import get_parent_images
 from data.model.tag import populate_manifest
 from data.model.blob import get_repo_blob_by_digest, BlobDoesNotExist

@@ -129,7 +130,16 @@ def backfill_manifest(tag_manifest):
   repository = tag_manifest.tag.repository
 
   image_storage_id_map = {root_image.storage.content_checksum: root_image.storage.id}
-  parent_images = get_parent_images(repository.namespace_user.username, repository.name, root_image)
+
+  try:
+    parent_images = get_parent_images(repository.namespace_user.username, repository.name,
+                                      root_image)
+  except DataModelException:
+    logger.exception('Exception when trying to load parent images for manifest `%s`',
+                     tag_manifest.id)
+    parent_images = {}
+    is_broken = True
 
   for parent_image in parent_images:
     image_storage_id_map[parent_image.storage.content_checksum] = parent_image.storage.id

@@ -158,9 +168,22 @@ def backfill_manifest(tag_manifest):
   except TagManifest.DoesNotExist:
     return True
 
-  # Create the new-style rows for the manifest.
-  manifest_row = populate_manifest(tag_manifest.tag.repository, manifest,
-                                   tag_manifest.tag.image, storage_ids)
+  # Ensure it wasn't already created.
+  if lookup_map_row(tag_manifest):
+    return False
+
+  # Check for a pre-existing manifest matching the digest in the repository. This can happen
+  # if we've already created the manifest row (typically for tag reversion).
+  try:
+    manifest_row = Manifest.get(digest=manifest.digest, repository=tag_manifest.tag.repository)
+  except Manifest.DoesNotExist:
+    # Create the new-style rows for the manifest.
+    try:
+      manifest_row = populate_manifest(tag_manifest.tag.repository, manifest,
+                                       tag_manifest.tag.image, storage_ids)
+    except IntegrityError:
+      # Pre-empted.
+      return False
 
   # Create the mapping row. If we find another was created for this tag manifest in the
   # meantime, then we've been preempted.

@@ -138,3 +138,33 @@ def test_manifestbackfillworker_mislinked_invalid_manifest(clear_rows, initializ
 
   manifest_blobs = list(ManifestBlob.select().where(ManifestBlob.manifest == manifest_row))
   assert len(manifest_blobs) == 0
+
+
+def test_manifestbackfillworker_repeat_digest(clear_rows, initialized_db):
+  """ Tests that a manifest with a shared digest will be properly linked. """
+  # Delete existing tag manifest so we can reuse the tag.
+  TagManifestLabel.delete().execute()
+  TagManifest.delete().execute()
+
+  repo = model.repository.get_repository('devtable', 'gargantuan')
+  tag_v30 = model.tag.get_active_tag('devtable', 'gargantuan', 'v3.0')
+  tag_v50 = model.tag.get_active_tag('devtable', 'gargantuan', 'v5.0')
+
+  # Build a manifest and assign it to both tags (this is allowed in the old model).
+  builder = DockerSchema1ManifestBuilder('devtable', 'gargantuan', 'sometag')
+  builder.add_layer('sha256:deadbeef', '{"id": "foo"}')
+  manifest = builder.build(docker_v2_signing_key)
+
+  manifest_1 = TagManifest.create(json_data=manifest.bytes, digest=manifest.digest,
+                                  tag=tag_v30)
+  manifest_2 = TagManifest.create(json_data=manifest.bytes, digest=manifest.digest,
+                                  tag=tag_v50)
+
+  # Backfill "both" manifests and ensure both are pointed to by a single resulting row.
+  assert backfill_manifest(manifest_1)
+  assert backfill_manifest(manifest_2)
+
+  map_row1 = TagManifestToManifest.get(tag_manifest=manifest_1)
+  map_row2 = TagManifestToManifest.get(tag_manifest=manifest_2)
+
+  assert map_row1.manifest == map_row2.manifest