Phase 4 of the namespace-to-user migration: actually remove the column from the DB and remove the dependence on serialized namespaces in the workers and queues
parent 2c5cc7990f
commit e8b3d1cc4a

17 changed files with 273 additions and 123 deletions
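At a glance, the change to what gets serialized onto the queues looks like this. The payloads below are illustrative only: the key names come from the worker code in this diff, but the values are made up and the enqueue sites are not part of this commit.

# Illustrative queue payloads only; key names come from the workers in this
# diff, values are hypothetical, and the enqueue side is not shown here.

# Diffs queue item, before: the namespace travels as a serialized string.
old_diff_job = {'image_id': 'abc123', 'namespace': 'devtable', 'repository': 'simple'}

# Diffs queue item, after: only the namespace user's id is serialized; the
# worker resolves it with model.get_namespace_by_user_id() when it runs.
new_diff_job = {'image_id': 'abc123', 'namespace_user_id': 42, 'repository': 'simple'}

# Dockerfile build queue item, after: the build row is looked up by its uuid
# alone, so namespace and repository no longer need to be serialized at all.
new_build_job = {'build_uuid': 'deadbeef-0000-...', 'pull_credentials': None}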
@@ -1,30 +1,28 @@
 import logging
-import argparse

 from app import image_diff_queue
-from data.model import DataModelException
+from data import model
 from endpoints.registry import process_image_changes
 from workers.worker import Worker


-root_logger = logging.getLogger('')
-root_logger.setLevel(logging.DEBUG)
-
-FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
-formatter = logging.Formatter(FORMAT)
-
 logger = logging.getLogger(__name__)


 class DiffsWorker(Worker):
   def process_queue_item(self, job_details):
     image_id = job_details['image_id']
-    namespace = job_details['namespace']
     repository = job_details['repository']

+    # TODO switch to the namespace_user_id branch only once existing jobs have all gone through
+    if 'namespace_user_id' in job_details:
+      namespace = model.get_namespace_by_user_id(job_details['namespace_user_id'])
+    else:
+      namespace = job_details['namespace']
+
     try:
       process_image_changes(namespace, repository, image_id)
-    except DataModelException:
+    except model.DataModelException:
       # This exception is unrecoverable, and the item should continue and be
       # marked as complete.
       msg = ('Image does not exist in database \'%s\' for repo \'%s/\'%s\'' %
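The DiffsWorker above keeps a transitional read path: new queue items carry namespace_user_id, while items enqueued before this deploy still carry namespace. The enqueue code and the exact image_diff_queue.put() signature are not part of this diff; the following is only a rough sketch of what the producing side might look like once it serializes the user id.

import json

# Hypothetical enqueue-side counterpart: new jobs carry namespace_user_id so
# the worker no longer has to trust a serialized namespace string. The put()
# call and its signature are assumptions, not code from this commit.
def enqueue_diff_job(repository, image_id):
  image_diff_queue.put(json.dumps({
    'image_id': image_id,
    'namespace_user_id': repository.namespace_user.id,
    'repository': repository.name,
  }))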
@@ -38,7 +38,7 @@ TIMEOUT_PERIOD_MINUTES = 20
 CACHE_EXPIRATION_PERIOD_HOURS = 24
 NO_TAGS = ['<none>:<none>']
 RESERVATION_TIME = (TIMEOUT_PERIOD_MINUTES + 5) * 60
-DOCKER_BASE_URL = None  # Set this if you want to use a different docker URL/socket.
+DOCKER_BASE_URL = os.environ.get('DOCKER_HOST', None)


 def matches_system_error(status_str):
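DOCKER_BASE_URL is now taken from the environment rather than hard-coded. As a usage note (assumed behaviour of the standard docker-py Client, not code from this commit): when DOCKER_HOST is unset, base_url stays None and docker-py falls back to its default local socket.

import os
from docker import Client  # docker-py

# Assumed behaviour, not part of this diff: a DOCKER_HOST value such as
# 'tcp://127.0.0.1:4243' points the worker at a remote daemon; when unset,
# base_url=None makes docker-py use its default unix socket.
DOCKER_BASE_URL = os.environ.get('DOCKER_HOST', None)
client = Client(base_url=DOCKER_BASE_URL)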
@@ -130,8 +130,8 @@ class DockerfileBuildContext(object):
     # Note: We have two different clients here because we (potentially) login
     # with both, but with different credentials that we do not want shared between
     # the build and push operations.
-    self._push_cl = StreamingDockerClient(timeout=1200, base_url = DOCKER_BASE_URL)
-    self._build_cl = StreamingDockerClient(timeout=1200, base_url = DOCKER_BASE_URL)
+    self._push_cl = StreamingDockerClient(timeout=1200, base_url=DOCKER_BASE_URL)
+    self._build_cl = StreamingDockerClient(timeout=1200, base_url=DOCKER_BASE_URL)

     dockerfile_path = os.path.join(self._build_dir, dockerfile_subdir,
                                    'Dockerfile')
@@ -223,20 +223,6 @@ class DockerfileBuildContext(object):
       raise RuntimeError(message)

   def pull(self):
-    # Login with the specified credentials (if any).
-    if self._pull_credentials:
-      logger.debug('Logging in with pull credentials: %s@%s',
-                   self._pull_credentials['username'], self._pull_credentials['registry'])
-
-      self._build_logger('Pulling base image: %s' % image_and_tag, log_data = {
-        'phasestep': 'login',
-        'username': self._pull_credentials['username'],
-        'registry': self._pull_credentials['registry']
-      })
-
-      self._build_cl.login(self._pull_credentials['username'], self._pull_credentials['password'],
-                           registry=self._pull_credentials['registry'], reauth=True)
-
     # Pull the image, in case it was updated since the last build
     image_and_tag_tuple = self._parsed_dockerfile.get_image_and_tag()
     if image_and_tag_tuple is None or image_and_tag_tuple[0] is None:
@@ -245,10 +231,24 @@ class DockerfileBuildContext(object):

     image_and_tag = ':'.join(image_and_tag_tuple)

-    self._build_logger('Pulling base image: %s' % image_and_tag, log_data = {
-      'phasestep': 'pull',
-      'repo_url': image_and_tag
-    })
+    # Login with the specified credentials (if any).
+    if self._pull_credentials:
+      logger.debug('Logging in with pull credentials: %s@%s',
+                   self._pull_credentials['username'], self._pull_credentials['registry'])
+
+      self._build_logger('Pulling base image: %s' % image_and_tag, log_data={
+        'phasestep': 'login',
+        'username': self._pull_credentials['username'],
+        'registry': self._pull_credentials['registry']
+      })
+
+      self._build_cl.login(self._pull_credentials['username'], self._pull_credentials['password'],
+                           registry=self._pull_credentials['registry'], reauth=True)
+    else:
+      self._build_logger('Pulling base image: %s' % image_and_tag, log_data={
+        'phasestep': 'pull',
+        'repo_url': image_and_tag
+      })

     pull_status = self._build_cl.pull(image_and_tag, stream=True)

@@ -279,7 +279,7 @@ class DockerfileBuildContext(object):
          if key in status:
            fully_unwrapped = status[key]
            break


        if not fully_unwrapped:
          logger.debug('Status dict did not have any extractable keys and was: %s', status)
      elif isinstance(status, basestring):
@@ -289,7 +289,7 @@ class DockerfileBuildContext(object):

       # Check for system errors when building.
       if matches_system_error(status_str):
         raise WorkerUnhealthyException(status_str)

       logger.debug('Status: %s', status_str)
       step_increment = re.search(r'Step ([0-9]+) :', status_str)
@@ -481,8 +481,8 @@ class DockerfileBuildWorker(Worker):
   def watchdog(self):
     logger.debug('Running build watchdog code.')
     try:
-      docker_cl = Client(base_url = DOCKER_BASE_URL)
+      docker_cl = Client(base_url=DOCKER_BASE_URL)

       # Iterate the running containers and kill ones that have been running more than 20 minutes
       for container in docker_cl.containers():
         start_time = datetime.fromtimestamp(container['Created'])
@@ -502,9 +502,7 @@ class DockerfileBuildWorker(Worker):
     # Make sure we have more information for debugging problems
     sentry.client.user_context(job_details)

-    repository_build = model.get_repository_build(job_details['namespace'],
-                                                  job_details['repository'],
-                                                  job_details['build_uuid'])
+    repository_build = model.get_repository_build(job_details['build_uuid'])

     pull_credentials = job_details.get('pull_credentials', None)

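With the namespace and repository arguments gone, the build job only needs the build's uuid. model.get_repository_build itself is not part of this diff; below is a minimal sketch of a uuid-only lookup, assuming a peewee RepositoryBuild model with a unique uuid column.

# Hypothetical sketch of the uuid-only lookup (the real model code lives in
# data/model and is not shown here); DataModelException is the error type the
# workers already catch.
def get_repository_build(build_uuid):
  try:
    return RepositoryBuild.get(RepositoryBuild.uuid == build_uuid)
  except RepositoryBuild.DoesNotExist:
    raise DataModelException('Invalid repository build: %s' % build_uuid)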
@@ -513,15 +511,21 @@ class DockerfileBuildWorker(Worker):
     resource_url = user_files.get_file_url(repository_build.resource_key, requires_cors=False)
     tag_names = job_config['docker_tags']
     build_subdir = job_config['build_subdir']
-    repo = job_config['repository']
+
+    # TODO remove the top branch when there are no more jobs with a repository config
+    if 'repository' in job_config:
+      repo = job_config['repository']
+    else:
+      repo = '%s/%s/%s' % (job_config['registry'],
+                           repository_build.repository.namespace_user.username,
+                           repository_build.repository.name)

     access_token = repository_build.access_token.code

-    log_appender = partial(build_logs.append_log_message,
-                           repository_build.uuid)
+    log_appender = partial(build_logs.append_log_message, repository_build.uuid)

     # Lookup and save the version of docker being used.
-    docker_cl = Client(base_url = DOCKER_BASE_URL)
+    docker_cl = Client(base_url=DOCKER_BASE_URL)
     docker_version = docker_cl.version().get('Version', '')
     dash = docker_version.find('-')

@@ -529,14 +533,13 @@ class DockerfileBuildWorker(Worker):
     if dash > 0:
       docker_version = docker_version[:dash]

-    log_appender('initializing', build_logs.PHASE, log_data = {
+    log_appender('initializing', build_logs.PHASE, log_data={
       'docker_version': docker_version
     })

     log_appender('Docker version: %s' % docker_version)

-    start_msg = ('Starting job with resource url: %s repo: %s' % (resource_url,
-                                                                  repo))
+    start_msg = ('Starting job with resource url: %s repo: %s' % (resource_url, repo))
     logger.debug(start_msg)

     docker_resource = requests.get(resource_url, stream=True)
@@ -592,7 +595,7 @@ class DockerfileBuildWorker(Worker):
       cur_message = ex.message or 'Error while unpacking build package'
       log_appender(cur_message, build_logs.ERROR)
       spawn_failure(cur_message, event_data)
       raise JobException(cur_message)

     # Start the build process.
     try:
@@ -637,14 +640,14 @@ class DockerfileBuildWorker(Worker):

       # Spawn a notification that the build has completed.
       spawn_notification(repository_build.repository, 'build_success', event_data,
                          subpage='build?current=%s' % repository_build.uuid,
                          pathargs=['build', repository_build.uuid])

     except WorkerUnhealthyException as exc:
       # Spawn a notification that the build has failed.
       log_appender('Worker has become unhealthy. Will retry shortly.', build_logs.ERROR)
       spawn_failure(exc.message, event_data)

       # Raise the exception to the queue.
       raise exc

@@ -1,7 +1,4 @@
 import logging
-import argparse
-import requests
-import json

 from app import notification_queue
 from workers.worker import Worker
@@ -12,11 +9,6 @@ from workers.worker import JobException

 from data import model

-root_logger = logging.getLogger('')
-root_logger.setLevel(logging.DEBUG)
-
-FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
-formatter = logging.Formatter(FORMAT)

 logger = logging.getLogger(__name__)

@@ -24,10 +16,8 @@ logger = logging.getLogger(__name__)
 class NotificationWorker(Worker):
   def process_queue_item(self, job_details):
     notification_uuid = job_details['notification_uuid'];
-    repo_namespace = job_details['repository_namespace']
-    repo_name = job_details['repository_name']

-    notification = model.get_repo_notification(repo_namespace, repo_name, notification_uuid)
+    notification = model.get_repo_notification(notification_uuid)
     if not notification:
       # Probably deleted.
       return