Merge remote-tracking branch 'upstream/master' into python-registry-v2

This commit is contained in:
Jake Moshenko 2015-10-26 14:44:16 -04:00
commit 2c10d28afc
13 changed files with 57 additions and 36 deletions

View file

@@ -6,7 +6,7 @@ ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root ENV HOME /root
# Install the dependencies. # Install the dependencies.
RUN apt-get update # 22OCT2015 RUN apt-get update # 23OCT2015
# New ubuntu packages should be added as their own apt-get install lines below the existing install commands # New ubuntu packages should be added as their own apt-get install lines below the existing install commands
RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev

View file

@@ -121,6 +121,8 @@ class EC2Executor(BuilderExecutor):
block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping() block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping()
block_devices['/dev/xvda'] = ssd_root_ebs block_devices['/dev/xvda'] = ssd_root_ebs
interfaces = None
if self.executor_config.get('EC2_VPC_SUBNET_ID', None) is not None:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'], subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'],
groups=self.executor_config['EC2_SECURITY_GROUP_IDS'], groups=self.executor_config['EC2_SECURITY_GROUP_IDS'],

View file

@@ -18,7 +18,11 @@ class RedisBuildLogs(object):
PHASE = 'phase' PHASE = 'phase'
def __init__(self, redis_config): def __init__(self, redis_config):
self._redis = redis.StrictRedis(socket_connect_timeout=5, **redis_config) args = dict(redis_config)
args.update({'socket_connect_timeout': 5})
self._redis_config = redis_config
self._redis = redis.StrictRedis(**args)
@staticmethod @staticmethod
def _logs_key(build_id): def _logs_key(build_id):
@@ -94,12 +98,16 @@ class RedisBuildLogs(object):
def check_health(self): def check_health(self):
try: try:
if not self._redis.ping() == True: args = dict(self._redis_config)
args.update({'socket_connect_timeout': 1, 'socket_timeout': 1})
connection = redis.StrictRedis(**args)
if not connection.ping() == True:
return False return False
# Ensure we can write and read a key. # Ensure we can write and read a key.
self._redis.set(self._health_key(), time.time()) connection.set(self._health_key(), time.time())
self._redis.get(self._health_key()) connection.get(self._health_key())
return True return True
except redis.ConnectionError: except redis.ConnectionError:

View file

@@ -199,6 +199,7 @@ def _find_or_link_image(existing_image, repo_obj, username, translations, prefer
command=existing_image.command, command=existing_image.command,
created=existing_image.created, created=existing_image.created,
comment=existing_image.comment, comment=existing_image.comment,
v1_json_metadata=existing_image.v1_json_metadata,
aggregate_size=existing_image.aggregate_size) aggregate_size=existing_image.aggregate_size)

View file

@@ -22,12 +22,14 @@ verbs = Blueprint('verbs', __name__)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json, def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json, repo_image):
image_list):
store = Storage(app) store = Storage(app)
def get_image_json(image): # For performance reasons, we load the full image list here, cache it, then disconnect from
return json.loads(image.v1_json_metadata) # the database.
with database.UseThenDisconnect(app.config):
image_list = list(model.image.get_parent_images(namespace, repository, repo_image))
image_list.append(repo_image)
def get_next_image(): def get_next_image():
for current_image in image_list: for current_image in image_list:
@@ -113,7 +115,7 @@ def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None):
abort(404) abort(404)
# Lookup the tag's image and storage. # Lookup the tag's image and storage.
repo_image = model.image.get_repo_image(namespace, repository, tag_image.docker_image_id) repo_image = model.image.get_repo_image_extended(namespace, repository, tag_image.docker_image_id)
if not repo_image: if not repo_image:
abort(404) abort(404)
@@ -199,7 +201,7 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=
# Create a queue process to generate the data. The queue files will read from the process # Create a queue process to generate the data. The queue files will read from the process
# and send the results to the client and storage. # and send the results to the client and storage.
args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list) args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, repo_image)
queue_process = QueueProcess(_open_stream, queue_process = QueueProcess(_open_stream,
8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
args, finished=_cleanup) args, finished=_cleanup)

View file

@@ -10,16 +10,17 @@ def get_healthchecker(app, config_provider):
class HealthCheck(object): class HealthCheck(object):
def __init__(self, app, config_provider): def __init__(self, app, config_provider, instance_skips=None):
self.app = app self.app = app
self.config_provider = config_provider self.config_provider = config_provider
self.instance_skips = instance_skips or []
def check_instance(self): def check_instance(self):
""" """
Conducts a check on this specific instance, returning a dict representing the HealthCheck Conducts a check on this specific instance, returning a dict representing the HealthCheck
output and a number indicating the health check response code. output and a number indicating the health check response code.
""" """
service_statuses = check_all_services(self.app) service_statuses = check_all_services(self.app, self.instance_skips)
return self.get_instance_health(service_statuses) return self.get_instance_health(service_statuses)
def check_endtoend(self): def check_endtoend(self):
@@ -27,7 +28,7 @@ class HealthCheck(object):
Conducts a check on all services, returning a dict representing the HealthCheck Conducts a check on all services, returning a dict representing the HealthCheck
output and a number indicating the health check response code. output and a number indicating the health check response code.
""" """
service_statuses = check_all_services(self.app) service_statuses = check_all_services(self.app, [])
return self.calculate_overall_health(service_statuses) return self.calculate_overall_health(service_statuses)
def get_instance_health(self, service_statuses): def get_instance_health(self, service_statuses):
@@ -80,7 +81,7 @@ class LocalHealthCheck(HealthCheck):
class ProductionHealthCheck(HealthCheck): class ProductionHealthCheck(HealthCheck):
def __init__(self, app, config_provider, access_key, secret_key, db_instance='quay'): def __init__(self, app, config_provider, access_key, secret_key, db_instance='quay'):
super(ProductionHealthCheck, self).__init__(app, config_provider) super(ProductionHealthCheck, self).__init__(app, config_provider, ['redis'])
self.access_key = access_key self.access_key = access_key
self.secret_key = secret_key self.secret_key = secret_key
self.db_instance = db_instance self.db_instance = db_instance
@@ -92,7 +93,7 @@ class ProductionHealthCheck(HealthCheck):
def get_instance_health(self, service_statuses): def get_instance_health(self, service_statuses):
# Note: We skip the redis check because if redis is down, we don't want ELB taking the # Note: We skip the redis check because if redis is down, we don't want ELB taking the
# machines out of service. Redis is not considered a high availability-required service. # machines out of service. Redis is not considered a high availability-required service.
skip = ['redis'] skip = []
notes = [] notes = []
# If the database is marked as unhealthy, check the status of RDS directly. If RDS is # If the database is marked as unhealthy, check the status of RDS directly. If RDS is

View file

@@ -39,10 +39,13 @@ _SERVICES = {
'redis': _check_redis 'redis': _check_redis
} }
def check_all_services(app): def check_all_services(app, skip):
""" Returns a dictionary containing the status of all the services defined. """ """ Returns a dictionary containing the status of all the services defined. """
status = {} status = {}
for name in _SERVICES: for name in _SERVICES:
if name in skip:
continue
status[name] = _SERVICES[name](app) status[name] = _SERVICES[name](app)
return status return status

View file

@@ -36,9 +36,9 @@ git+https://github.com/DevTable/aniso8601-fake.git
git+https://github.com/DevTable/anunidecode.git git+https://github.com/DevTable/anunidecode.git
git+https://github.com/DevTable/pygithub.git git+https://github.com/DevTable/pygithub.git
git+https://github.com/DevTable/container-cloud-config.git git+https://github.com/DevTable/container-cloud-config.git
git+https://github.com/coreos/mockldap.git
git+https://github.com/coreos/py-bitbucket.git git+https://github.com/coreos/py-bitbucket.git
git+https://github.com/coreos/pyapi-gitlab.git@timeout git+https://github.com/coreos/pyapi-gitlab.git@timeout
git+https://github.com/coreos/mockldap.git
git+https://github.com/coreos/resumablehashlib.git git+https://github.com/coreos/resumablehashlib.git
git+https://github.com/DevTable/python-etcd.git@sslfix git+https://github.com/DevTable/python-etcd.git@sslfix
gipc gipc
@@ -54,5 +54,9 @@ Flask-Testing
pyjwt pyjwt
toposort toposort
pyjwkest pyjwkest
<<<<<<< HEAD
rfc3987 rfc3987
jsonpath-rw jsonpath-rw
=======
jsonpath-rw
>>>>>>> upstream/master

View file

@@ -100,6 +100,6 @@ git+https://github.com/coreos/mockldap.git
git+https://github.com/coreos/py-bitbucket.git git+https://github.com/coreos/py-bitbucket.git
git+https://github.com/coreos/pyapi-gitlab.git@timeout git+https://github.com/coreos/pyapi-gitlab.git@timeout
git+https://github.com/coreos/resumablehashlib.git git+https://github.com/coreos/resumablehashlib.git
git+https://github.com/coreos/mockldap.git git+https://github.com/coreos/resumablehashlib.git
git+https://github.com/DevTable/python-etcd.git@sslfix git+https://github.com/DevTable/python-etcd.git@sslfix
git+https://github.com/NateFerrero/oauth2lib.git git+https://github.com/NateFerrero/oauth2lib.git

Binary file not shown.

View file

@@ -224,7 +224,7 @@ class TestEphemeral(unittest.TestCase):
@async_test @async_test
def test_change_worker(self): def test_change_worker(self):
# Send a signal to the callback that a worker key has been changed # Send a signal to the callback that a worker key has been changed
set_result = Mock(sepc=etcd.EtcdResult) set_result = Mock(spec=etcd.EtcdResult)
set_result.action = 'set' set_result.action = 'set'
set_result.key = self.mock_job_key set_result.key = self.mock_job_key

View file

@@ -14,8 +14,8 @@ KUBERNETES_API_HOST = 'kubernetes.default.svc.cluster.local'
SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token' SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
ER_NAMESPACE = 'quay' QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
ER_CONFIG_SECRET = 'quay-config-secret' QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')
class KubernetesConfigProvider(FileConfigProvider): class KubernetesConfigProvider(FileConfigProvider):
""" Implementation of the config provider that reads and writes configuration """ Implementation of the config provider that reads and writes configuration
@@ -67,12 +67,12 @@ class KubernetesConfigProvider(FileConfigProvider):
"kind": "Secret", "kind": "Secret",
"apiVersion": "v1", "apiVersion": "v1",
"metadata": { "metadata": {
"name": ER_CONFIG_SECRET "name": QE_CONFIG_SECRET
}, },
"data": secret_data "data": secret_data
} }
secret_url = 'namespaces/%s/secrets/%s' % (ER_NAMESPACE, ER_CONFIG_SECRET) secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
secret = self._lookup_secret() secret = self._lookup_secret()
if not secret: if not secret:
self._assert_success(self._execute_k8s_api('POST', secret_url, data)) self._assert_success(self._execute_k8s_api('POST', secret_url, data))
@@ -86,7 +86,7 @@ class KubernetesConfigProvider(FileConfigProvider):
def _lookup_secret(self): def _lookup_secret(self):
secret_url = 'namespaces/%s/secrets/%s' % (ER_NAMESPACE, ER_CONFIG_SECRET) secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
response = self._execute_k8s_api('GET', secret_url) response = self._execute_k8s_api('GET', secret_url)
if response.status_code != 200: if response.status_code != 200:
return None return None