From 6601e83285c840dec783347be7c997a1d0f3b2f1 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 11 Dec 2014 18:03:40 +0200 Subject: [PATCH 01/62] When speaking to version 0.2-beta of the build worker, properly lookup the cached commands and see if we have a matching image/tag in the repository --- buildman/component/buildcomponent.py | 81 ++++++++++++++++------------ buildman/jobutil/buildjob.py | 54 +++++++++++++++++-- buildman/jobutil/workererror.py | 5 ++ data/model/legacy.py | 20 ++++++- 4 files changed, 123 insertions(+), 37 deletions(-) diff --git a/buildman/component/buildcomponent.py b/buildman/component/buildcomponent.py index d518d3453..ca56e6926 100644 --- a/buildman/component/buildcomponent.py +++ b/buildman/component/buildcomponent.py @@ -20,7 +20,7 @@ HEARTBEAT_DELTA = datetime.timedelta(seconds=30) HEARTBEAT_TIMEOUT = 10 INITIAL_TIMEOUT = 25 -SUPPORTED_WORKER_VERSIONS = ['0.1-beta'] +SUPPORTED_WORKER_VERSIONS = ['0.1-beta', '0.2-beta'] logger = logging.getLogger(__name__) @@ -46,6 +46,7 @@ class BuildComponent(BaseComponent): self._current_job = None self._build_status = None self._image_info = None + self._worker_version = None BaseComponent.__init__(self, config, **kwargs) @@ -55,6 +56,7 @@ class BuildComponent(BaseComponent): def onJoin(self, details): logger.debug('Registering methods and listeners for component %s', self.builder_realm) yield From(self.register(self._on_ready, u'io.quay.buildworker.ready')) + yield From(self.register(self._check_cache, u'io.quay.buildworker.checkcache')) yield From(self.register(self._ping, u'io.quay.buildworker.ping')) yield From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat')) yield From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage')) @@ -73,42 +75,45 @@ class BuildComponent(BaseComponent): self._set_status(ComponentStatus.BUILDING) - # Retrieve the job's buildpack. 
+ base_image_information = {} + build_config = build_job.build_config() + + # Retrieve the job's buildpack url. buildpack_url = self.user_files.get_file_url(build_job.repo_build().resource_key, requires_cors=False) - logger.debug('Retreiving build package: %s', buildpack_url) - buildpack = None - try: - buildpack = BuildPackage.from_url(buildpack_url) - except BuildPackageException as bpe: - self._build_failure('Could not retrieve build package', bpe) - return + # TODO(jschorr): Remove this block andthe buildpack package once we move everyone over + # to version 0.2 or higher + if self._worker_version == '0.1-beta': + logger.debug('Retreiving build package: %s', buildpack_url) + buildpack = None + try: + buildpack = BuildPackage.from_url(buildpack_url) + except BuildPackageException as bpe: + self._build_failure('Could not retrieve build package', bpe) + return - # Extract the base image information from the Dockerfile. - parsed_dockerfile = None - logger.debug('Parsing dockerfile') + # Extract the base image information from the Dockerfile. 
+ parsed_dockerfile = None + logger.debug('Parsing dockerfile') - build_config = build_job.build_config() - try: - parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir')) - except BuildPackageException as bpe: - self._build_failure('Could not find Dockerfile in build package', bpe) - return + try: + parsed_dockerfile = buildpack.parse_dockerfile(build_config.get('build_subdir')) + except BuildPackageException as bpe: + self._build_failure('Could not find Dockerfile in build package', bpe) + return - image_and_tag_tuple = parsed_dockerfile.get_image_and_tag() - if image_and_tag_tuple is None or image_and_tag_tuple[0] is None: - self._build_failure('Missing FROM line in Dockerfile') - return + image_and_tag_tuple = parsed_dockerfile.get_image_and_tag() + if image_and_tag_tuple is None or image_and_tag_tuple[0] is None: + self._build_failure('Missing FROM line in Dockerfile') + return - base_image_information = { - 'repository': image_and_tag_tuple[0], - 'tag': image_and_tag_tuple[1] - } + base_image_information['repository'] = image_and_tag_tuple[0] + base_image_information['tag'] = image_and_tag_tuple[1] - # Extract the number of steps from the Dockerfile. - with self._build_status as status_dict: - status_dict['total_commands'] = len(parsed_dockerfile.commands) + # Extract the number of steps from the Dockerfile. + with self._build_status as status_dict: + status_dict['total_commands'] = len(parsed_dockerfile.commands) # Add the pull robot information, if any. if build_config.get('pull_credentials') is not None: @@ -128,20 +133,20 @@ class BuildComponent(BaseComponent): # push_token: The token to use to push the built image. # tag_names: The name(s) of the tag(s) for the newly built image. # base_image: The image name and credentials to use to conduct the base image pull. - # repository: The repository to pull. - # tag: The tag to pull. 
+ # repository: The repository to pull (DEPRECATED) + # tag: The tag to pull (DEPRECATED) # username: The username for pulling the base image (if any). # password: The password for pulling the base image (if any). build_arguments = { 'build_package': buildpack_url, 'sub_directory': build_config.get('build_subdir', ''), 'repository': repository_name, - 'registry': self.server_hostname, + 'registry': '10.0.2.2:5000' or self.server_hostname, 'pull_token': build_job.repo_build().access_token.code, 'push_token': build_job.repo_build().access_token.code, 'tag_names': build_config.get('docker_tags', ['latest']), 'base_image': base_image_information, - 'cached_tag': build_job.determine_cached_tag() or '' + 'cached_tag': build_job.determine_cached_tag() or '' # Remove after V0.1-beta is deprecated } # Invoke the build. @@ -283,6 +288,15 @@ class BuildComponent(BaseComponent): """ Ping pong. """ return 'pong' + def _check_cache(self, cache_commands, base_image_name, base_image_tag, base_image_id): + with self._build_status as status_dict: + status_dict['total_commands'] = len(cache_commands) + 1 + + logger.debug('Checking cache on realm %s. Base image: %s:%s (%s)', self.builder_realm, + base_image_name, base_image_tag, base_image_id) + + return self._current_job.determine_cached_tag(base_image_id, cache_commands) or '' + def _on_ready(self, token, version): if not version in SUPPORTED_WORKER_VERSIONS: logger.warning('Build component (token "%s") is running an out-of-date version: %s', version) @@ -296,6 +310,7 @@ class BuildComponent(BaseComponent): logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token, token) return False + self._worker_version = version self._set_status(ComponentStatus.RUNNING) # Start the heartbeat check and updating loop. 
diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index 6ec02a830..63d544790 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -31,10 +31,58 @@ class BuildJob(object): 'Could not parse repository build job config with ID %s' % self._job_details['build_uuid'] ) - def determine_cached_tag(self): + def determine_cached_tag(self, base_image_id=None, cache_comments=None): """ Returns the tag to pull to prime the cache or None if none. """ - # TODO(jschorr): Change this to use the more complicated caching rules, once we have caching - # be a pull of things besides the constructed tags. + cached_tag = None + if base_image_id and cache_comments: + cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments) + + if not cached_tag: + cached_tag = self._determine_cached_tag_by_tag() + + return cached_tag + + def _determine_cached_tag_by_comments(self, base_image_id, cache_commands): + """ Determines the tag to use for priming the cache for this build job, by matching commands + starting at the given base_image_id. This mimics the Docker cache checking, so it should, + in theory, provide "perfect" caching. + """ + # Lookup the base image in the repository. If it doesn't exist, nothing more to do. + repo_namespace = self._repo_build.repository.namespace_user.username + repo_name = self._repo_build.repository.name + + repository = model.get_repository(repo_namespace, repo_name) + if repository is None: + # Should never happen, but just to be sure. + return None + + current_image = model.get_image(repository, base_image_id) + if current_image is None: + return None + + # For each cache comment, find a child image that matches the command. + for cache_command in cache_commands: + print current_image.docker_image_id + + current_image = model.find_child_image(repository, current_image, cache_command) + if current_image is None: + return None + + # Find a tag associated with the image, if any. 
+ # TODO(jschorr): We should just return the image ID instead of a parent tag, OR we should + # make this more efficient. + for tag in model.list_repository_tags(repo_namespace, repo_name): + tag_image = tag.image + ancestor_index = '/%s/' % current_image.id + if ancestor_index in tag_image.ancestors: + return tag.name + + return None + + def _determine_cached_tag_by_tag(self): + """ Determines the cached tag by looking for one of the tags being built, and seeing if it + exists in the repository. This is a fallback for when no comment information is available. + """ tags = self._build_config.get('docker_tags', ['latest']) existing_tags = model.list_repository_tags(self._repo_build.repository.namespace_user.username, self._repo_build.repository.name) diff --git a/buildman/jobutil/workererror.py b/buildman/jobutil/workererror.py index 8271976e4..580d46f4d 100644 --- a/buildman/jobutil/workererror.py +++ b/buildman/jobutil/workererror.py @@ -57,6 +57,11 @@ class WorkerError(object): 'io.quay.builder.missingorinvalidargument': { 'message': 'Missing required arguments for builder', 'is_internal': True + }, + + 'io.quay.builder.cachelookupissue': { + 'message': 'Error checking for a cached tag', + 'is_internal': True } } diff --git a/data/model/legacy.py b/data/model/legacy.py index a5c779871..225b5bf2e 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -1089,6 +1089,25 @@ def get_repository(namespace_name, repository_name): return None +def get_image(repo, dockerfile_id): + try: + return Image.get(Image.docker_image_id == dockerfile_id, Image.repository == repo) + except Image.DoesNotExist: + return None + + +def find_child_image(repo, parent_image, command): + try: + return (Image.select() + .join(ImageStorage) + .switch(Image) + .where(Image.ancestors % '%/' + parent_image.id + '/%', + ImageStorage.command == command) + .get()) + except Image.DoesNotExist: + return None + + def get_repo_image(namespace_name, repository_name, docker_image_id): def 
limit_to_image_id(query): return query.where(Image.docker_image_id == docker_image_id).limit(1) @@ -1645,7 +1664,6 @@ def get_tag_image(namespace_name, repository_name, tag_name): else: return images[0] - def get_image_by_id(namespace_name, repository_name, docker_image_id): image = get_repo_image_extended(namespace_name, repository_name, docker_image_id) if not image: From 00299ca60ffcdc77b64fb388fcb2d30613ae09ab Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 11 Dec 2014 18:17:15 +0200 Subject: [PATCH 02/62] We need to make sure to use the *full* command --- buildman/jobutil/buildjob.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index 63d544790..d5514a958 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -62,9 +62,8 @@ class BuildJob(object): # For each cache comment, find a child image that matches the command. for cache_command in cache_commands: - print current_image.docker_image_id - - current_image = model.find_child_image(repository, current_image, cache_command) + full_command = '["/bin/sh", "-c", "%s"]' % cache_command + current_image = model.find_child_image(repository, current_image, full_command) if current_image is None: return None From 1f9f4ef26b394f405d1bee44789c75507dc4aa7e Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 22 Dec 2014 15:13:23 -0500 Subject: [PATCH 03/62] - Switch font to Source Sans Pro, like CoreUpdate - Add support for the new cor-tabs - Add support for title-based layouts - Switch super user layout to the new tabs UI in prep for adding setup support --- external_libraries.py | 4 +- static/css/quay.css | 84 +++++++- static/directives/cor-tab-content.html | 1 + static/directives/cor-tab-panel.html | 3 + static/directives/cor-tab.html | 11 + static/directives/cor-tabs.html | 1 + static/directives/cor-title-content.html | 3 + static/directives/cor-title-link.html | 1 + static/directives/cor-title.html 
| 2 + static/js/app.js | 44 ++-- static/js/controllers.js | 2 +- static/js/core-ui.js | 104 ++++++++++ static/partials/super-user.html | 252 ++++++++++------------- templates/base.html | 3 +- 14 files changed, 345 insertions(+), 170 deletions(-) create mode 100644 static/directives/cor-tab-content.html create mode 100644 static/directives/cor-tab-panel.html create mode 100644 static/directives/cor-tab.html create mode 100644 static/directives/cor-tabs.html create mode 100644 static/directives/cor-title-content.html create mode 100644 static/directives/cor-title-link.html create mode 100644 static/directives/cor-title.html create mode 100644 static/js/core-ui.js diff --git a/external_libraries.py b/external_libraries.py index febf9abb1..7e071424c 100644 --- a/external_libraries.py +++ b/external_libraries.py @@ -18,9 +18,9 @@ EXTERNAL_JS = [ ] EXTERNAL_CSS = [ - 'netdna.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.css', + 'netdna.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.css', 'netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.no-icons.min.css', - 'fonts.googleapis.com/css?family=Droid+Sans:400,700', + 'fonts.googleapis.com/css?family=Source+Sans+Pro:400,700', ] EXTERNAL_FONTS = [ diff --git a/static/css/quay.css b/static/css/quay.css index 4df625503..e57d7f872 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -1,5 +1,5 @@ * { - font-family: 'Droid Sans', sans-serif; + font-family: 'Source Sans Pro', sans-serif; margin: 0; } @@ -116,6 +116,88 @@ box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); } +.co-fx-text-shadow { + text-shadow: rgba(0, 0, 0, 1) 1px 1px 2px; +} + +.co-nav-title { + height: 70px; + margin-top: -22px; +} + +.co-nav-title .co-nav-title-content { + color: white; + text-align: center; +} + +.co-tab-container { + padding: 0px; +} + +.co-tabs { + margin: 0px; + padding: 0px; + width: 82px; + background-color: #e8f1f6; + border-right: 1px solid #DDE7ED; + + display: table-cell; + float: none; + vertical-align: top; +} + 
+.co-tab-content { + width: 100%; + display: table-cell; + float: none; + padding: 10px; +} + +.co-tabs li { + list-style: none; + display: block; + border-bottom: 1px solid #DDE7ED; +} + + +.co-tabs li.active { + background-color: white; + border-right: 1px solid white; + margin-right: -1px; +} + +.co-tabs li a { + display: block; + width: 82px; + height: 82px; + line-height: 82px; + text-align: center; + font-size: 36px; + color: gray; +} + +.co-tabs li.active a { + color: black; +} + + +.co-main-content-panel { + margin-bottom: 20px; + background-color: #fff; + border: 1px solid transparent; + padding: 10px; + + -webkit-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -moz-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -ms-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -o-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); +} + +.co-tab-panel { + padding: 0px; +} + .main-panel { margin-bottom: 20px; background-color: #fff; diff --git a/static/directives/cor-tab-content.html b/static/directives/cor-tab-content.html new file mode 100644 index 000000000..997ae5af1 --- /dev/null +++ b/static/directives/cor-tab-content.html @@ -0,0 +1 @@ +
\ No newline at end of file diff --git a/static/directives/cor-tab-panel.html b/static/directives/cor-tab-panel.html new file mode 100644 index 000000000..57f9dfa1c --- /dev/null +++ b/static/directives/cor-tab-panel.html @@ -0,0 +1,3 @@ +
+
+
\ No newline at end of file diff --git a/static/directives/cor-tab.html b/static/directives/cor-tab.html new file mode 100644 index 000000000..f22d3bdac --- /dev/null +++ b/static/directives/cor-tab.html @@ -0,0 +1,11 @@ +
  • + + + +
  • \ No newline at end of file diff --git a/static/directives/cor-tabs.html b/static/directives/cor-tabs.html new file mode 100644 index 000000000..1a965932e --- /dev/null +++ b/static/directives/cor-tabs.html @@ -0,0 +1 @@ +
      \ No newline at end of file diff --git a/static/directives/cor-title-content.html b/static/directives/cor-title-content.html new file mode 100644 index 000000000..6acbe47b3 --- /dev/null +++ b/static/directives/cor-title-content.html @@ -0,0 +1,3 @@ +
      +

      +
      \ No newline at end of file diff --git a/static/directives/cor-title-link.html b/static/directives/cor-title-link.html new file mode 100644 index 000000000..396a1f447 --- /dev/null +++ b/static/directives/cor-title-link.html @@ -0,0 +1 @@ +
      \ No newline at end of file diff --git a/static/directives/cor-title.html b/static/directives/cor-title.html new file mode 100644 index 000000000..63cfd322c --- /dev/null +++ b/static/directives/cor-title.html @@ -0,0 +1,2 @@ +
      + diff --git a/static/js/app.js b/static/js/app.js index 43fa2f5d4..0c0c173e7 100644 --- a/static/js/app.js +++ b/static/js/app.js @@ -126,7 +126,7 @@ function getMarkedDown(string) { quayDependencies = ['ngRoute', 'chieffancypants.loadingBar', 'angular-tour', 'restangular', 'angularMoment', 'mgcrea.ngStrap', 'ngCookies', 'ngSanitize', 'angular-md5', 'pasvaz.bindonce', 'ansiToHtml', - 'ngAnimate']; + 'ngAnimate', 'core-ui']; if (window.__config && window.__config.MIXPANEL_KEY) { quayDependencies.push('angulartics'); @@ -2226,7 +2226,7 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading when('/user/', {title: 'Account Settings', description:'Account settings for ' + title, templateUrl: '/static/partials/user-admin.html', reloadOnSearch: false, controller: UserAdminCtrl}). when('/superuser/', {title: 'Superuser Admin Panel', description:'Admin panel for ' + title, templateUrl: '/static/partials/super-user.html', - reloadOnSearch: false, controller: SuperUserAdminCtrl}). + reloadOnSearch: false, controller: SuperUserAdminCtrl, newLayout: true}). when('/guide/', {title: 'Guide', description:'Guide to using private docker repositories on ' + title, templateUrl: '/static/partials/guide.html', controller: GuideCtrl}). 
@@ -6690,6 +6690,7 @@ quayApp.directive('ngBlur', function() { }; }); + quayApp.directive("filePresent", [function () { return { restrict: 'A', @@ -6763,7 +6764,6 @@ quayApp.run(['$location', '$rootScope', 'Restangular', 'UserService', 'PlanServi var changeTab = function(activeTab, opt_timeout) { var checkCount = 0; - $timeout(function() { if (checkCount > 5) { return; } checkCount++; @@ -6827,6 +6827,8 @@ quayApp.run(['$location', '$rootScope', 'Restangular', 'UserService', 'PlanServi $rootScope.pageClass = current.$$route.pageClass; } + $rootScope.newLayout = !!current.$$route.newLayout; + if (current.$$route.description) { $rootScope.description = current.$$route.description; } else { @@ -6842,26 +6844,28 @@ quayApp.run(['$location', '$rootScope', 'Restangular', 'UserService', 'PlanServi // Setup deep linking of tabs. This will change the search field of the URL whenever a tab // is changed in the UI. - $('a[data-toggle="tab"]').on('shown.bs.tab', function (e) { - var tabName = e.target.getAttribute('data-target').substr(1); - $rootScope.$apply(function() { - var isDefaultTab = $('a[data-toggle="tab"]')[0] == e.target; - var newSearch = $.extend($location.search(), {}); - if (isDefaultTab) { - delete newSearch['tab']; - } else { - newSearch['tab'] = tabName; - } + $timeout(function() { + $('a[data-toggle="tab"]').on('shown.bs.tab', function (e) { + var tabName = e.target.getAttribute('data-target').substr(1); + $rootScope.$apply(function() { + var isDefaultTab = $('a[data-toggle="tab"]')[0] == e.target; + var newSearch = $.extend($location.search(), {}); + if (isDefaultTab) { + delete newSearch['tab']; + } else { + newSearch['tab'] = tabName; + } - $location.search(newSearch); + $location.search(newSearch); + }); + + e.preventDefault(); }); - e.preventDefault(); - }); - - if (activeTab) { - changeTab(activeTab); - } + if (activeTab) { + changeTab(activeTab); + } + }, 100); // 100ms to make sure angular has rendered. 
}); var initallyChecked = false; diff --git a/static/js/controllers.js b/static/js/controllers.js index 9bea2ebdb..b8f6ccb11 100644 --- a/static/js/controllers.js +++ b/static/js/controllers.js @@ -2831,7 +2831,7 @@ function SuperUserAdminCtrl($scope, ApiService, Features, UserService) { }, ApiService.errorDisplay('Cannot load system usage. Please contact support.')) } - $scope.loadLogs = function() { + $scope.loadUsageLogs = function() { $scope.logsCounter++; }; diff --git a/static/js/core-ui.js b/static/js/core-ui.js new file mode 100644 index 000000000..64ffc6f68 --- /dev/null +++ b/static/js/core-ui.js @@ -0,0 +1,104 @@ +angular.module("core-ui", []) + + .directive('corTitle', function() { + var directiveDefinitionObject = { + priority: 1, + templateUrl: '/static/directives/cor-title.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corTitleContent', function() { + var directiveDefinitionObject = { + priority: 1, + templateUrl: '/static/directives/cor-title-content.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corTitleLink', function() { + var directiveDefinitionObject = { + priority: 1, + templateUrl: '/static/directives/cor-title-link.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corTabPanel', function() { + var directiveDefinitionObject = { + priority: 1, + templateUrl: '/static/directives/cor-tab-panel.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corTabContent', function() { 
+ var directiveDefinitionObject = { + priority: 2, + templateUrl: '/static/directives/cor-tab-content.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corTabs', function() { + var directiveDefinitionObject = { + priority: 3, + templateUrl: '/static/directives/cor-tabs.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corTab', function() { + var directiveDefinitionObject = { + priority: 4, + templateUrl: '/static/directives/cor-tab.html', + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'tabActive': '@tabActive', + 'tabTitle': '@tabTitle', + 'tabTarget': '@tabTarget', + 'tabInit': '&tabInit' + }, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }); \ No newline at end of file diff --git a/static/partials/super-user.html b/static/partials/super-user.html index 9b34cf159..6a49e6eed 100644 --- a/static/partials/super-user.html +++ b/static/partials/super-user.html @@ -1,157 +1,120 @@ -
      -
      - This panel provides administrator access to super users of this installation of the registry. Super users can be managed in the configuration for this installation. +
      +
      + + Enterprise Registry Setup
      -
      - -
      - -
      +
      +
      + + + + + + + + + + + +
      - -
      -
      - -
      -
      +
      + +
      +
      +
      + + +
      +
      +
      + + +
      + You have deployed more repositories than your plan allows. Please + upgrade your subscription by contacting CoreOS Sales.
      - -
      -
      -
      - - -
      - You have deployed more repositories than your plan allows. Please - upgrade your subscription by contacting CoreOS Sales. -
      - -
      - You are at your current plan's number of allowed repositories. It might be time to think about - upgrading your subscription by contacting CoreOS Sales. -
      - -
      - You are nearing the number of allowed deployed repositories. It might be time to think about - upgrading your subscription by contacting CoreOS Sales. -
      +
      + You are at your current plan's number of allowed repositories. It might be time to think about + upgrading your subscription by contacting CoreOS Sales.
      - -
      - -
      -
      - - +
      + You are nearing the number of allowed deployed repositories. It might be time to think about + upgrading your subscription by contacting CoreOS Sales. +
      +
      + + +
      +
      +
      + {{ usersError }} +
      +
      +
      +
      + Showing {{(users | filter:search | limitTo:100).length}} of + {{(users | filter:search).length}} matching users
      - -
      - - +
      +
      - - - - -
      - - - - - - - - - - - - -
      UsernameE-mail addressTemporary Password
      {{ created_user.username }}{{ created_user.email }}{{ created_user.password }}
      -
      - -
      -
      -
      - {{ usersError }} -
      -
      -
      -
      - Showing {{(users | filter:search | limitTo:100).length}} of - {{(users | filter:search).length}} matching users -
      -
      - -
      -
      - - - - - - - - - - - - - -
      UsernameE-mail address
      - - {{ current_user.username }} - - {{ current_user.email }} - - - -
      - -
      -
      -
      -
      -
      + + + + + + + + + + + +
      UsernameE-mail address
      + + {{ current_user.username }} + + {{ current_user.email }} + + + +
      +
      +
      +
      +
      - + +
      + diff --git a/templates/base.html b/templates/base.html index 9c4b0fed0..fe4d5f110 100644 --- a/templates/base.html +++ b/templates/base.html @@ -96,10 +96,9 @@ mixpanel.init("{{ mixpanel_key }}", { track_pageview : false, debug: {{ is_debug @@ -562,17 +436,34 @@ - - -
      - -
      + +
      +
      + Dockerfile Build Support +
      +
      +
      + If enabled, users can submit Dockerfiles to be built and pushed by the Enterprise Registry. +
      + +
      + + +
      + +
      + Note: Build workers are required for this feature. + See Adding Build Workers for instructions on how to setup build workers. +
      +
      +
      + -
      +
      Github (Enterprise) Build Triggers
      @@ -631,12 +522,90 @@ - - -
      - -
      + + +
      + +
      + + + +
      \ No newline at end of file diff --git a/static/directives/cor-floating-bottom-bar.html b/static/directives/cor-floating-bottom-bar.html new file mode 100644 index 000000000..2e5337fd2 --- /dev/null +++ b/static/directives/cor-floating-bottom-bar.html @@ -0,0 +1,3 @@ +
      + +
      \ No newline at end of file diff --git a/static/js/core-config-setup.js b/static/js/core-config-setup.js index 1aff16ea9..d0d2bd8e6 100644 --- a/static/js/core-config-setup.js +++ b/static/js/core-config-setup.js @@ -10,8 +10,137 @@ angular.module("core-config-setup", ['angularFileUpload']) 'isActive': '=isActive' }, controller: function($rootScope, $scope, $element, $timeout, ApiService) { + $scope.SERVICES = [ + {'id': 'redis', 'title': 'Redis'}, + + {'id': 'registry-storage', 'title': 'Registry Storage'}, + + {'id': 'ssl', 'title': 'SSL certificate and key', 'condition': function(config) { + return config.PREFERRED_URL_SCHEME == 'https'; + }}, + + {'id': 'ldap', 'title': 'LDAP Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'LDAP'; + }}, + + {'id': 'mail', 'title': 'E-mail Support', 'condition': function(config) { + return config.FEATURE_MAILING; + }}, + + {'id': 'github-login', 'title': 'Github (Enterprise) Authentication', 'condition': function(config) { + return config.FEATURE_GITHUB_LOGIN; + }} + ]; + + $scope.STORAGE_CONFIG_FIELDS = { + 'LocalStorage': [ + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/some/directory', 'kind': 'text'} + ], + + 'S3Storage': [ + {'name': 's3_access_key', 'title': 'AWS Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text'}, + {'name': 's3_secret_key', 'title': 'AWS Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, + {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ], + + 'GoogleCloudStorage': [ + {'name': 'access_key', 'title': 'Cloud Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text'}, + {'name': 'secret_key', 'title': 'Cloud Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, + {'name': 'bucket_name', 'title': 'GCS Bucket', 'placeholder': 'my-cool-bucket', 'kind': 
'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ], + + 'RadosGWStorage': [ + {'name': 'hostname', 'title': 'Rados Server Hostname', 'placeholder': 'my.rados.hostname', 'kind': 'text'}, + {'name': 'is_secure', 'title': 'Is Secure', 'placeholder': 'Require SSL', 'kind': 'bool'}, + {'name': 'access_key', 'title': 'Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text', 'help_url': 'http://ceph.com/docs/master/radosgw/admin/'}, + {'name': 'secret_key', 'title': 'Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, + {'name': 'bucket_name', 'title': 'Bucket Name', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ] + }; + $scope.config = null; - $scope.mapped = {}; + $scope.mapped = { + '$hasChanges': false + }; + + $scope.validating = null; + $scope.savingConfiguration = false; + + $scope.getServices = function(config) { + var services = []; + if (!config) { return services; } + + for (var i = 0; i < $scope.SERVICES.length; ++i) { + var service = $scope.SERVICES[i]; + if (!service.condition || service.condition(config)) { + services.push({ + 'service': service, + 'status': 'validating' + }); + } + } + + return services; + }; + + $scope.validationStatus = function(serviceInfos) { + if (!serviceInfos) { return 'validating'; } + + var hasError = false; + for (var i = 0; i < serviceInfos.length; ++i) { + if (serviceInfos[i].status == 'validating') { + return 'validating'; + } + if (serviceInfos[i].status == 'error') { + hasError = true; + } + } + + return hasError ? 'failed' : 'success'; + }; + + $scope.validateService = function(serviceInfo) { + var params = { + 'service': serviceInfo.service.id + }; + + ApiService.scValidateConfig({'config': $scope.config}, params).then(function(resp) { + serviceInfo.status = resp.status ? 
'success' : 'error'; + serviceInfo.errorMessage = $.trim(resp.reason || ''); + }, ApiService.errorDisplay('Could not validate configuration. Please report this error.')); + }; + + $scope.validateAndSave = function() { + $scope.savingConfiguration = false; + $scope.validating = $scope.getServices($scope.config); + + $('#validateAndSaveModal').modal({ + keyboard: false, + backdrop: 'static' + }); + + for (var i = 0; i < $scope.validating.length; ++i) { + var serviceInfo = $scope.validating[i]; + $scope.validateService(serviceInfo); + } + }; + + $scope.saveConfiguration = function() { + $scope.savingConfiguration = true; + + var data = { + 'config': $scope.config, + 'hostname': window.location.host + }; + + ApiService.scUpdateConfig(data).then(function(resp) { + $scope.savingConfiguration = false; + $scope.mapped.$hasChanges = false + }, ApiService.errorDisplay('Could not save configuration. Please report this error.')); + }; var githubSelector = function(key) { return function(value) { @@ -36,8 +165,8 @@ angular.module("core-config-setup", ['angularFileUpload']) var current = config; for (var i = 0; i < parts.length; ++i) { var part = parts[i]; - if (!config[part]) { return null; } - current = config[part]; + if (!current[part]) { return null; } + current = current[part]; } return current; }; @@ -86,7 +215,36 @@ angular.module("core-config-setup", ['angularFileUpload']) $scope.$watch('mapped.redis.port', redisSetter('port')); $scope.$watch('mapped.redis.password', redisSetter('password')); + // Add a watch to remove any fields not allowed by the current storage configuration. + // We have to do this otherwise extra fields (which are not allowed) can end up in the + // configuration. + $scope.$watch('config.DISTRIBUTED_STORAGE_CONFIG.local[0]', function(value) { + // Remove any fields not associated with the current kind. 
+ if (!value || !$scope.STORAGE_CONFIG_FIELDS[value] + || !$scope.config.DISTRIBUTED_STORAGE_CONFIG + || !$scope.config.DISTRIBUTED_STORAGE_CONFIG.local + || !$scope.config.DISTRIBUTED_STORAGE_CONFIG.local[1]) { return; } + + var allowedFields = $scope.STORAGE_CONFIG_FIELDS[value]; + var configObject = $scope.config.DISTRIBUTED_STORAGE_CONFIG.local[1]; + + for (var fieldName in configObject) { + if (!configObject.hasOwnProperty(fieldName)) { + continue; + } + + var isValidField = $.grep(allowedFields, function(field) { + return field.name == fieldName; + }).length > 0; + + if (!isValidField) { + delete configObject[fieldName]; + } + } + }); + $scope.$watch('config', function(value) { + $scope.mapped['$hasChanges'] = true; }, true); $scope.$watch('isActive', function(value) { @@ -95,6 +253,7 @@ angular.module("core-config-setup", ['angularFileUpload']) ApiService.scGetConfig().then(function(resp) { $scope.config = resp['config']; initializeMappedLogic($scope.config); + $scope.mapped['$hasChanges'] = false; }); }); } @@ -376,9 +535,7 @@ angular.module("core-config-setup", ['angularFileUpload']) 'binding': '=binding' }, controller: function($scope, $element) { - $scope.$watch('items', function(items) { - if (!items) { return; } - + var padItems = function(items) { // Remove the last item if both it and the second to last items are empty. if (items.length > 1 && !items[items.length - 2].value && !items[items.length - 1].value) { items.splice(items.length - 1, 1); @@ -386,14 +543,45 @@ angular.module("core-config-setup", ['angularFileUpload']) } // If the last item is non-empty, add a new item. 
- if (items[items.length - 1].value) { + if (items.length == 0 || items[items.length - 1].value) { items.push({'value': ''}); + return; } + }; + + $scope.itemHash = null; + $scope.$watch('items', function(items) { + if (!items) { return; } + padItems(items); + + var itemHash = ''; + var binding = []; + for (var i = 0; i < items.length; ++i) { + var item = items[i]; + if (item.value && (URI(item.value).host() || URI(item.value).path())) { + binding.push(item.value); + itemHash += item.value; + } + } + + $scope.itemHash = itemHash; + $scope.binding = binding; }, true); $scope.$watch('binding', function(binding) { - $scope.items = []; - $scope.items.push({'value': ''}); + if (!binding) { return; } + + var current = binding; + var items = []; + var itemHash = ''; + for (var i = 0; i < current.length; ++i) { + items.push({'value': current[i]}) + itemHash += current[i]; + } + + if ($scope.itemHash != itemHash) { + $scope.items = items; + } }); } }; @@ -416,6 +604,7 @@ angular.module("core-config-setup", ['angularFileUpload']) $scope.value = null; var updateBinding = function() { + if ($scope.value == null) { return; } var value = $scope.value || ''; switch ($scope.kind) { diff --git a/static/js/core-ui.js b/static/js/core-ui.js index 2cd547f4d..9a42fd8dc 100644 --- a/static/js/core-ui.js +++ b/static/js/core-ui.js @@ -175,6 +175,49 @@ angular.module("core-ui", []) return directiveDefinitionObject; }) + .directive('corFloatingBottomBar', function() { + var directiveDefinitionObject = { + priority: 3, + templateUrl: '/static/directives/cor-floating-bottom-bar.html', + replace: true, + transclude: true, + restrict: 'C', + scope: {}, + controller: function($rootScope, $scope, $element, $timeout, $interval) { + var handler = function() { + $element.removeClass('floating'); + $element.css('width', $element[0].parentNode.clientWidth + 'px'); + + var windowHeight = $(window).height(); + var rect = $element[0].getBoundingClientRect(); + if (rect.bottom > windowHeight) { + 
$element.addClass('floating'); + } + }; + + $(window).on("scroll", handler); + $(window).on("resize", handler); + + var previousHeight = $element[0].parentNode.clientHeight; + var stop = $interval(function() { + var currentHeight = $element[0].parentNode.clientWidth; + if (previousHeight != currentHeight) { + currentHeight = previousHeight; + handler(); + } + }, 100); + + $scope.$on('$destroy', function() { + $(window).off("resize", handler); + $(window).off("scroll", handler); + $internval.stop(stop); + }); + } + }; + return directiveDefinitionObject; + + }) + .directive('corTab', function() { var directiveDefinitionObject = { priority: 4, diff --git a/storage/__init__.py b/storage/__init__.py index 4d1134d4b..7893343c2 100644 --- a/storage/__init__.py +++ b/storage/__init__.py @@ -11,6 +11,14 @@ STORAGE_DRIVER_CLASSES = { 'RadosGWStorage': RadosGWStorage, } +def get_storage_driver(storage_params): + """ Returns a storage driver class for the given storage configuration + (a pair of string name and a dict of parameters). 
""" + driver = storage_params[0] + parameters = storage_params[1] + driver_class = STORAGE_DRIVER_CLASSES.get(driver, FakeStorage) + return driver_class(**parameters) + class Storage(object): def __init__(self, app=None): @@ -23,12 +31,7 @@ class Storage(object): def init_app(self, app): storages = {} for location, storage_params in app.config.get('DISTRIBUTED_STORAGE_CONFIG').items(): - driver = storage_params[0] - parameters = storage_params[1] - - driver_class = STORAGE_DRIVER_CLASSES.get(driver, FakeStorage) - storage = driver_class(**parameters) - storages[location] = storage + storages[location] = get_storage_driver(storage_params) preference = app.config.get('DISTRIBUTED_STORAGE_PREFERENCE', None) if not preference: diff --git a/util/config/__init__.py b/util/config/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/util/configutil.py b/util/config/configutil.py similarity index 100% rename from util/configutil.py rename to util/config/configutil.py diff --git a/util/config/validator.py b/util/config/validator.py new file mode 100644 index 000000000..16211c254 --- /dev/null +++ b/util/config/validator.py @@ -0,0 +1,122 @@ +import redis +import os +import json +import ldap + +from data.users import LDAPConnection +from flask import Flask +from flask.ext.mail import Mail, Message +from data.database import validate_database_url, User +from storage import get_storage_driver +from app import app, OVERRIDE_CONFIG_DIRECTORY +from auth.auth_context import get_authenticated_user +from util.oauth import GoogleOAuthConfig, GithubOAuthConfig + +SSL_FILENAMES = ['ssl.cert', 'ssl.key'] + +def validate_service_for_config(service, config): + """ Attempts to validate the configuration for the given service. 
""" + if not service in _VALIDATORS: + return { + 'status': False + } + + try: + _VALIDATORS[service](config) + return { + 'status': True + } + except Exception as ex: + return { + 'status': False, + 'reason': str(ex) + } + +def _validate_database(config): + """ Validates connecting to the database. """ + validate_database_url(config['DB_URI']) + +def _validate_redis(config): + """ Validates connecting to redis. """ + redis_config = config['BUILDLOGS_REDIS'] + client = redis.StrictRedis(socket_connect_timeout=5, **redis_config) + client.ping() + +def _validate_registry_storage(config): + """ Validates registry storage. """ + parameters = config.get('DISTRIBUTED_STORAGE_CONFIG', {}).get('local', ['LocalStorage', {}]) + try: + driver = get_storage_driver(parameters) + except TypeError: + raise Exception('Missing required storage configuration parameter(s)') + + # Put and remove a temporary file. + driver.put_content('_verify', 'testing 123') + driver.remove('_verify') + +def _validate_mailing(config): + """ Validates sending email. """ + test_app = Flask("mail-test-app") + test_app.config.update(config) + test_app.config.update({ + 'MAIL_FAIL_SILENTLY': False, + 'TESTING': False + }) + + test_mail = Mail(test_app) + test_msg = Message("Test e-mail from %s" % app.config['REGISTRY_TITLE']) + test_msg.add_recipient(get_authenticated_user().email) + test_mail.send(test_msg) + +def _validate_github_login(config): + """ Validates the OAuth credentials and API endpoint for Github Login. """ + client = app.config['HTTPCLIENT'] + oauth = GithubOAuthConfig(config, 'GITHUB_LOGIN_CONFIG') + endpoint = oauth.authorize_endpoint() + # TODO: this + + +def _validate_ssl(config): + """ Validates the SSL configuration (if enabled). 
""" + if config.get('PREFERRED_URL_SCHEME', 'http') != 'https': + return + + for filename in SSL_FILENAMES: + if not os.path.exists(os.path.join(OVERRIDE_CONFIG_DIRECTORY, filename)): + raise Exception('Missing required SSL file: %s' % filename) + + +def _validate_ldap(config): + """ Validates the LDAP connection. """ + if config.get('AUTHENTICATION_TYPE', 'Database') != 'LDAP': + return + + # Note: raises ldap.INVALID_CREDENTIALS on failure + admin_dn = config.get('LDAP_ADMIN_DN') + admin_passwd = config.get('LDAP_ADMIN_PASSWD') + + if not admin_dn: + raise Exception('Missing Admin DN for LDAP configuration') + + if not admin_passwd: + raise Exception('Missing Admin Password for LDAP configuration') + + ldap_uri = config.get('LDAP_URI', 'ldap://localhost') + + try: + with LDAPConnection(ldap_uri, admin_dn, admin_passwd): + pass + except ldap.LDAPError as ex: + values = ex.args[0] if ex.args else {} + raise Exception(values.get('desc', 'Unknown error')) + + +_VALIDATORS = { + 'database': _validate_database, + 'redis': _validate_redis, + 'registry-storage': _validate_registry_storage, + 'mail': _validate_mailing, + 'github-login': _validate_github_login, + 'ssl': _validate_ssl, + 'ldap': _validate_ldap, +} \ No newline at end of file diff --git a/util/oauth.py b/util/oauth.py index e0d38d395..65af5cd13 100644 --- a/util/oauth.py +++ b/util/oauth.py @@ -1,9 +1,9 @@ import urlparse class OAuthConfig(object): - def __init__(self, app, key_name): + def __init__(self, config, key_name): self.key_name = key_name - self.config = app.config.get(key_name) or {} + self.config = config.get(key_name) or {} def service_name(self): raise NotImplementedError @@ -23,6 +23,9 @@ class OAuthConfig(object): def client_secret(self): return self.config.get('CLIENT_SECRET') + def basic_scope(self): + raise NotImplementedError + def _get_url(self, endpoint, *args): for arg in args: endpoint = urlparse.urljoin(endpoint, arg) @@ -31,8 +34,8 @@ class OAuthConfig(object): class 
GithubOAuthConfig(OAuthConfig): - def __init__(self, app, key_name): - super(GithubOAuthConfig, self).__init__(app, key_name) + def __init__(self, config, key_name): + super(GithubOAuthConfig, self).__init__(config, key_name) def service_name(self): return 'GitHub' @@ -43,6 +46,9 @@ class GithubOAuthConfig(OAuthConfig): endpoint = endpoint + '/' return endpoint + def basic_scope(self): + return 'user:email' + def authorize_endpoint(self): return self._get_url(self._endpoint(), '/login/oauth/authorize') + '?' @@ -73,12 +79,15 @@ class GithubOAuthConfig(OAuthConfig): class GoogleOAuthConfig(OAuthConfig): - def __init__(self, app, key_name): - super(GoogleOAuthConfig, self).__init__(app, key_name) + def __init__(self, config, key_name): + super(GoogleOAuthConfig, self).__init__(config, key_name) def service_name(self): return 'Google' + def basic_scope(self): + return 'openid email' + def authorize_endpoint(self): return 'https://accounts.google.com/o/oauth2/auth?response_type=code&' From 508bc10a586eff26736fff5da9279ea85f0d6596 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 7 Jan 2015 16:31:16 -0500 Subject: [PATCH 15/62] Fix broken test due to the permissions change --- test/test_api_security.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_api_security.py b/test/test_api_security.py index 97ec3950d..a6714aa28 100644 --- a/test/test_api_security.py +++ b/test/test_api_security.py @@ -3576,7 +3576,7 @@ class TestSuperUserLogs(ApiTestCase): self._set_url(SuperUserLogs) def test_get_anonymous(self): - self._run_test('GET', 403, None, None) + self._run_test('GET', 401, None, None) def test_get_freshuser(self): self._run_test('GET', 403, 'freshuser', None) From f125efa8cabdbf1dee5c22bfccba8f791aec86f7 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 7 Jan 2015 16:42:09 -0500 Subject: [PATCH 16/62] Fix broken check --- endpoints/api/suconfig.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index cc5910ae6..c9c280b5d 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -58,7 +58,7 @@ class SuperUserRegistryStatus(ApiResource): @internal_only @show_if(features.SUPER_USERS) @hide_if(features.BILLING) # Make sure it is never allowed in prod. -class SuperUserGetConfig(ApiResource): +class SuperUserConfig(ApiResource): """ Resource for fetching and updating the current configuration, if any. """ schemas = { 'UpdateConfig': { @@ -143,7 +143,7 @@ class SuperUserConfigFile(ApiResource): @nickname('scUpdateConfigFile') def post(self, filename): """ Updates the configuration file with the given name. """ - if not filename in CONFIG_FILE_WHITELIST: + if not filename in SSL_FILENAMES: abort(404) if SuperUserPermission().can(): From 575d4c50622dca8de9934e4ff6591409ab622a81 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 7 Jan 2015 16:50:08 -0500 Subject: [PATCH 17/62] Fix file uploading --- static/js/core-config-setup.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/static/js/core-config-setup.js b/static/js/core-config-setup.js index d0d2bd8e6..a57e74b37 100644 --- a/static/js/core-config-setup.js +++ b/static/js/core-config-setup.js @@ -442,9 +442,9 @@ angular.module("core-config-setup", ['angularFileUpload']) $scope.uploadProgress = 0; $scope.upload = $upload.upload({ - url: '/api/v1/superuser/config/file', + url: '/api/v1/superuser/config/file/' + $scope.filename, method: 'POST', - data: { filename: $scope.filename }, + data: {'_csrf_token': window.__token}, file: files[0], }).progress(function(evt) { $scope.uploadProgress = parseInt(100.0 * evt.loaded / evt.total); From 7933bd44fd66b5e0a3c190fc8b351c5c12153983 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 8 Jan 2015 12:53:36 -0500 Subject: [PATCH 18/62] Add tests for the new super user config API and make sure both super user API endpoint sets are all guarded against being used in production 
--- endpoints/api/__init__.py | 17 ++++ endpoints/api/suconfig.py | 33 ++++--- endpoints/api/superuser.py | 12 ++- test/test_suconfig_api.py | 177 +++++++++++++++++++++++++++++++++++++ 4 files changed, 224 insertions(+), 15 deletions(-) create mode 100644 test/test_suconfig_api.py diff --git a/endpoints/api/__init__.py b/endpoints/api/__init__.py index f2c7bc663..377834002 100644 --- a/endpoints/api/__init__.py +++ b/endpoints/api/__init__.py @@ -280,6 +280,23 @@ require_user_read = require_user_permission(UserReadPermission, scopes.READ_USER require_user_admin = require_user_permission(UserAdminPermission, None) require_fresh_user_admin = require_user_permission(UserAdminPermission, None) + +def verify_not_prod(func): + @add_method_metadata('enterprise_only', True) + @wraps(func) + def wrapped(*args, **kwargs): + # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail. + # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be + # absolutely sure. + if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0: + logger.error('!!! 
Super user method called IN PRODUCTION !!!') + raise NotFound() + + return func(*args, **kwargs) + + return wrapped + + def require_fresh_login(func): @add_method_metadata('requires_fresh_login', True) @wraps(func) diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index c9c280b5d..25b91380d 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -4,7 +4,7 @@ import json from flask import abort from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if, hide_if, - require_fresh_login, request, validate_json_request) + require_fresh_login, request, validate_json_request, verify_not_prod) from endpoints.common import common_login from app import app, OVERRIDE_CONFIG_YAML_FILENAME, OVERRIDE_CONFIG_DIRECTORY @@ -21,43 +21,45 @@ import features logger = logging.getLogger(__name__) def database_is_valid(): + """ Returns whether the database, as configured, is valid. """ try: User.select().limit(1) return True except: return False - def database_has_users(): + """ Returns whether the database has any users defined. """ return bool(list(User.select().limit(1))) +def config_file_exists(): + """ Returns whether a configuration file exists. """ + return os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME) + @resource('/v1/superuser/registrystatus') @internal_only @show_if(features.SUPER_USERS) -@hide_if(features.BILLING) # Make sure it is never allowed in prod. class SuperUserRegistryStatus(ApiResource): """ Resource for determining the status of the registry, such as if config exists, if a database is configured, and if it has any defined users. """ @nickname('scRegistryStatus') + @verify_not_prod def get(self): """ Returns whether a valid configuration, database and users exist. 
""" - current_user = get_authenticated_user() - return { 'dir_exists': os.path.exists(OVERRIDE_CONFIG_DIRECTORY), 'file_exists': os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME), 'is_testing': app.config['TESTING'], 'valid_db': database_is_valid(), - 'ready': current_user and current_user.username in app.config['SUPER_USERS'] + 'ready': not app.config['TESTING'] and file_exists and bool(app.config['SUPER_USERS']) } @resource('/v1/superuser/config') @internal_only @show_if(features.SUPER_USERS) -@hide_if(features.BILLING) # Make sure it is never allowed in prod. class SuperUserConfig(ApiResource): """ Resource for fetching and updating the current configuration, if any. """ schemas = { @@ -81,6 +83,7 @@ class SuperUserConfig(ApiResource): } @require_fresh_login + @verify_not_prod @nickname('scGetConfig') def get(self): """ Returns the currently defined configuration, if any. """ @@ -98,12 +101,13 @@ class SuperUserConfig(ApiResource): abort(403) @nickname('scUpdateConfig') + @verify_not_prod @validate_json_request('UpdateConfig') def put(self): """ Updates the config.yaml file. """ # Note: This method is called to set the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. - if not os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME) or SuperUserPermission().can(): + if not config_file_exists() or SuperUserPermission().can(): config_object = request.get_json()['config'] hostname = request.get_json()['hostname'] @@ -124,10 +128,10 @@ class SuperUserConfig(ApiResource): @resource('/v1/superuser/config/file/') @internal_only @show_if(features.SUPER_USERS) -@hide_if(features.BILLING) # Make sure it is never allowed in prod. class SuperUserConfigFile(ApiResource): """ Resource for fetching the status of config files and overriding them. """ @nickname('scConfigFileExists') + @verify_not_prod def get(self, filename): """ Returns whether the configuration file with the given name exists. 
""" if not filename in SSL_FILENAMES: @@ -141,6 +145,7 @@ class SuperUserConfigFile(ApiResource): abort(403) @nickname('scUpdateConfigFile') + @verify_not_prod def post(self, filename): """ Updates the configuration file with the given name. """ if not filename in SSL_FILENAMES: @@ -149,7 +154,7 @@ class SuperUserConfigFile(ApiResource): if SuperUserPermission().can(): uploaded_file = request.files['file'] if not uploaded_file: - abort(404) + abort(400) uploaded_file.save(os.path.join(OVERRIDE_CONFIG_DIRECTORY, filename)) return { @@ -162,7 +167,6 @@ class SuperUserConfigFile(ApiResource): @resource('/v1/superuser/config/createsuperuser') @internal_only @show_if(features.SUPER_USERS) -@hide_if(features.BILLING) # Make sure it is never allowed in prod. class SuperUserCreateInitialSuperUser(ApiResource): """ Resource for creating the initial super user. """ schemas = { @@ -193,6 +197,7 @@ class SuperUserCreateInitialSuperUser(ApiResource): } @nickname('scCreateInitialSuperuser') + @verify_not_prod @validate_json_request('CreateSuperUser') def post(self): """ Creates the initial super user, updates the underlying configuration and @@ -204,7 +209,7 @@ class SuperUserCreateInitialSuperUser(ApiResource): # # We do this special security check because at the point this method is called, the database # is clean but does not (yet) have any super users for our permissions code to check against. - if os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME) and not database_has_users(): + if config_file_exists() and not database_has_users(): data = request.get_json() username = data['username'] password = data['password'] @@ -231,7 +236,6 @@ class SuperUserCreateInitialSuperUser(ApiResource): @resource('/v1/superuser/config/validate/') @internal_only @show_if(features.SUPER_USERS) -@hide_if(features.BILLING) # Make sure it is never allowed in prod. class SuperUserConfigValidate(ApiResource): """ Resource for validating a block of configuration against an external service. 
""" schemas = { @@ -251,13 +255,14 @@ class SuperUserConfigValidate(ApiResource): } @nickname('scValidateConfig') + @verify_not_prod @validate_json_request('ValidateConfig') def post(self, service): """ Validates the given config for the given service. """ # Note: This method is called to validate the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. Note that # this is also safe since this method does not access any information not given in the request. - if not os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME) or SuperUserPermission().can(): + if not config_file_exists() or SuperUserPermission().can(): config = request.get_json()['config'] return validate_service_for_config(service, config) diff --git a/endpoints/api/superuser.py b/endpoints/api/superuser.py index 7e337c3b7..c8717bd7b 100644 --- a/endpoints/api/superuser.py +++ b/endpoints/api/superuser.py @@ -10,7 +10,7 @@ from flask import request from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error, log_action, internal_only, NotFound, require_user_admin, format_date, InvalidToken, require_scope, format_date, hide_if, show_if, parse_args, - query_param, abort, require_fresh_login, path_param) + query_param, abort, require_fresh_login, path_param, verify_not_prod) from endpoints.api.logs import get_logs @@ -38,6 +38,7 @@ def get_services(): class SuperUserGetLogsForService(ApiResource): """ Resource for fetching the kinds of system logs in the system. """ @require_fresh_login + @verify_not_prod @nickname('getSystemLogs') def get(self, service): """ Returns the logs for the specific service. """ @@ -65,6 +66,7 @@ class SuperUserGetLogsForService(ApiResource): class SuperUserSystemLogServices(ApiResource): """ Resource for fetching the kinds of system logs in the system. 
""" @require_fresh_login + @verify_not_prod @nickname('listSystemLogServices') def get(self): """ List the system logs for the current system. """ @@ -83,6 +85,7 @@ class SuperUserSystemLogServices(ApiResource): class SuperUserLogs(ApiResource): """ Resource for fetching all logs in the system. """ @require_fresh_login + @verify_not_prod @nickname('listAllLogs') @parse_args @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str) @@ -115,6 +118,7 @@ def user_view(user): class UsageInformation(ApiResource): """ Resource for returning the usage information for enterprise customers. """ @require_fresh_login + @verify_not_prod @nickname('getSystemUsage') def get(self): """ Returns the number of repository handles currently held. """ @@ -153,6 +157,7 @@ class SuperUserList(ApiResource): } @require_fresh_login + @verify_not_prod @nickname('listAllUsers') def get(self): """ Returns a list of all users in the system. """ @@ -166,6 +171,7 @@ class SuperUserList(ApiResource): @require_fresh_login + @verify_not_prod @nickname('createInstallUser') @validate_json_request('CreateInstallUser') def post(self): @@ -203,6 +209,7 @@ class SuperUserList(ApiResource): class SuperUserSendRecoveryEmail(ApiResource): """ Resource for sending a recovery user on behalf of a user. """ @require_fresh_login + @verify_not_prod @nickname('sendInstallUserRecoveryEmail') def post(self, username): if SuperUserPermission().can(): @@ -247,6 +254,7 @@ class SuperUserManagement(ApiResource): } @require_fresh_login + @verify_not_prod @nickname('getInstallUser') def get(self, username): """ Returns information about the specified user. """ @@ -260,6 +268,7 @@ class SuperUserManagement(ApiResource): abort(403) @require_fresh_login + @verify_not_prod @nickname('deleteInstallUser') def delete(self, username): """ Deletes the specified user. 
""" @@ -277,6 +286,7 @@ class SuperUserManagement(ApiResource): abort(403) @require_fresh_login + @verify_not_prod @nickname('changeInstallUser') @validate_json_request('UpdateUser') def put(self, username): diff --git a/test/test_suconfig_api.py b/test/test_suconfig_api.py new file mode 100644 index 000000000..44234d7d6 --- /dev/null +++ b/test/test_suconfig_api.py @@ -0,0 +1,177 @@ +from test.test_api_usage import ApiTestCase, READ_ACCESS_USER, ADMIN_ACCESS_USER +from endpoints.api.suconfig import (SuperUserRegistryStatus, SuperUserConfig, SuperUserConfigFile, + SuperUserCreateInitialSuperUser, SuperUserConfigValidate) +from app import OVERRIDE_CONFIG_YAML_FILENAME +from data.database import User + +import unittest +import os + +class TestSuperUserRegistryStatus(ApiTestCase): + def test_registry_status(self): + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertTrue(json['is_testing']) + self.assertTrue(json['valid_db']) + self.assertFalse(json['file_exists']) + self.assertFalse(json['ready']) + + +class TestSuperUserConfigFile(ApiTestCase): + def test_get_non_superuser(self): + # No user. + self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + + # Non-superuser. + self.login(READ_ACCESS_USER) + self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + + def test_get_superuser_invalid_filename(self): + self.login(ADMIN_ACCESS_USER) + self.getResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) + + def test_get_superuser(self): + self.login(ADMIN_ACCESS_USER) + result = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) + self.assertFalse(result['exists']) + + def test_post_non_superuser(self): + # No user. + self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + + # Non-superuser. 
+ self.login(READ_ACCESS_USER) + self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + + def test_post_superuser_invalid_filename(self): + self.login(ADMIN_ACCESS_USER) + self.postResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) + + def test_post_superuser(self): + self.login(ADMIN_ACCESS_USER) + self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) + + +class TestSuperUserCreateInitialSuperUser(ApiTestCase): + def test_no_config_file(self): + # If there is no config.yaml, then this method should security fail. + data = dict(username='cooluser', password='password', email='fake@example.com') + self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + + def test_config_file_with_db_users(self): + try: + # Write some config. + self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) + + # If there is a config.yaml, but existing DB users exist, then this method should security + # fail. + data = dict(username='cooluser', password='password', email='fake@example.com') + self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + finally: + os.remove(OVERRIDE_CONFIG_YAML_FILENAME) + + def test_config_file_with_no_db_users(self): + try: + # Write some config. + self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) + + # Delete all the users in the DB. + for user in list(User.select()): + user.delete_instance(recursive=True) + + # This method should now succeed. + data = dict(username='cooluser', password='password', email='fake@example.com') + result = self.postJsonResponse(SuperUserCreateInitialSuperUser, data=data) + self.assertTrue(result['status']) + + # Verify the superuser was created. + User.get(User.username == 'cooluser') + + # Verify the superuser was placed into the config. 
+ result = self.getJsonResponse(SuperUserConfig) + self.assertEquals(['cooluser'], result['config']['SUPER_USERS']) + + finally: + os.remove(OVERRIDE_CONFIG_YAML_FILENAME) + + +class TestSuperUserConfigValidate(ApiTestCase): + def test_nonsuperuser_noconfig(self): + self.login(ADMIN_ACCESS_USER) + result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), + data=dict(config={})) + + self.assertFalse(result['status']) + + + def test_nonsuperuser_config(self): + try: + # The validate config call works if there is no config.yaml OR the user is a superuser. + # Add a config, and verify it breaks when unauthenticated. + json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) + self.assertTrue(json['exists']) + + self.postResponse(SuperUserConfigValidate, params=dict(service='someservice'), + data=dict(config={}), + expected_code=403) + + # Now login as a superuser. + self.login(ADMIN_ACCESS_USER) + result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), + data=dict(config={})) + + self.assertFalse(result['status']) + finally: + os.remove(OVERRIDE_CONFIG_YAML_FILENAME) + + +class TestSuperUserConfig(ApiTestCase): + def test_get_non_superuser(self): + # No user. + self.getResponse(SuperUserConfig, expected_code=401) + + # Non-superuser. + self.login(READ_ACCESS_USER) + self.getResponse(SuperUserConfig, expected_code=403) + + def test_get_superuser(self): + self.login(ADMIN_ACCESS_USER) + json = self.getJsonResponse(SuperUserConfig) + + # Note: We expect the config to be none because a config.yaml should never be checked into + # the directory. + self.assertIsNone(json['config']) + + def test_put(self): + try: + # The update config call works if there is no config.yaml OR the user is a superuser. First + # try writing it without a superuser present. 
+ json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) + self.assertTrue(json['exists']) + + # Verify the config.yaml file exists. + self.assertTrue(os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME)) + + # Try writing it again. This should now fail, since the config.yaml exists. + self.putResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz'), expected_code=403) + + # Login as a non-superuser. + self.login(READ_ACCESS_USER) + + # Try writing it again. This should fail. + self.putResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz'), expected_code=403) + + # Login as a superuser. + self.login(ADMIN_ACCESS_USER) + + # This should succeed. + json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz')) + self.assertTrue(json['exists']) + + json = self.getJsonResponse(SuperUserConfig) + self.assertIsNotNone(json['config']) + + finally: + os.remove(OVERRIDE_CONFIG_YAML_FILENAME) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From 5e0ce4eea9a58d00b920fc0da0065beca1ab25ad Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 8 Jan 2015 13:26:24 -0500 Subject: [PATCH 19/62] Add validation of github to the config tool --- endpoints/api/suconfig.py | 2 +- .../directives/config/config-setup-tool.html | 16 +++++--- static/js/core-config-setup.js | 10 ++++- util/config/validator.py | 36 ++++++++++++++--- util/oauth.py | 40 ++++++++++++++----- 5 files changed, 80 insertions(+), 24 deletions(-) diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index 25b91380d..d403b617b 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -53,7 +53,7 @@ class SuperUserRegistryStatus(ApiResource): 'file_exists': os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME), 'is_testing': app.config['TESTING'], 'valid_db': database_is_valid(), - 'ready': not app.config['TESTING'] and file_exists and bool(app.config['SUPER_USERS']) + 'ready': not app.config['TESTING'] and 
config_file_exists() and bool(app.config['SUPER_USERS']) } diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index 3480a1104..63f9701e5 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -369,11 +369,13 @@ Github Endpoint: - +
      - The Github Enterprise endpoint. + The Github Enterprise endpoint. Must start with http:// or https://.
      @@ -499,11 +501,13 @@ Github Endpoint: - +
      - The Github Enterprise endpoint. + The Github Enterprise endpoint. Must start with http:// or https://.
      diff --git a/static/js/core-config-setup.js b/static/js/core-config-setup.js index a57e74b37..6950ebe68 100644 --- a/static/js/core-config-setup.js +++ b/static/js/core-config-setup.js @@ -29,6 +29,10 @@ angular.module("core-config-setup", ['angularFileUpload']) {'id': 'github-login', 'title': 'Github (Enterprise) Authentication', 'condition': function(config) { return config.FEATURE_GITHUB_LOGIN; + }}, + + {'id': 'github-trigger', 'title': 'Github (Enterprise) Build Triggers', 'condition': function(config) { + return config.FEATURE_GITHUB_BUILD; }} ]; @@ -151,8 +155,10 @@ angular.module("core-config-setup", ['angularFileUpload']) } if (value == 'enterprise') { - $scope.config[key]['GITHUB_ENDPOINT'] = ''; - $scope.config[key]['API_ENDPOINT'] = ''; + if ($scope.config[key]['GITHUB_ENDPOINT'] == 'https://github.com/') { + $scope.config[key]['GITHUB_ENDPOINT'] = ''; + } + delete $scope.config[key]['API_ENDPOINT']; } else if (value == 'hosted') { $scope.config[key]['GITHUB_ENDPOINT'] = 'https://github.com/'; $scope.config[key]['API_ENDPOINT'] = 'https://api.github.com/'; diff --git a/util/config/validator.py b/util/config/validator.py index 16211c254..7be8657df 100644 --- a/util/config/validator.py +++ b/util/config/validator.py @@ -32,16 +32,19 @@ def validate_service_for_config(service, config): 'reason': str(ex) } + def _validate_database(config): """ Validates connecting to the database. """ validate_database_url(config['DB_URI']) + def _validate_redis(config): """ Validates connecting to redis. """ redis_config = config['BUILDLOGS_REDIS'] client = redis.StrictRedis(socket_connect_timeout=5, **redis_config) client.ping() + def _validate_registry_storage(config): """ Validates registry storage. 
""" parameters = config.get('DISTRIBUTED_STORAGE_CONFIG', {}).get('local', ['LocalStorage', {}]) @@ -54,6 +57,7 @@ def _validate_registry_storage(config): driver.put_content('_verify', 'testing 123') driver.remove('_verify') + def _validate_mailing(config): """ Validates sending email. """ test_app = Flask("mail-test-app") @@ -68,12 +72,31 @@ def _validate_mailing(config): test_msg.add_recipient(get_authenticated_user().email) test_mail.send(test_msg) -def _validate_github_login(config): - """ Validates the OAuth credentials and API endpoint for Github Login. """ + +def _validate_github(config_key): + return lambda config: _validate_github_with_key(config_key, config) + + +def _validate_github_with_key(config_key, config): + """ Validates the OAuth credentials and API endpoint for a Github service. """ + endpoint = config[config_key].get('GITHUB_ENDPOINT') + if not endpoint: + raise Exception('Missing Github Endpoint') + + if endpoint.find('http://') != 0 and endpoint.find('https://') != 0: + raise Exception('Github Endpoint must start with http:// or https://') + + if not config[config_key].get('CLIENT_ID'): + raise Exception('Missing Client ID') + + if not config[config_key].get('CLIENT_SECRET'): + raise Exception('Missing Client Secret') + client = app.config['HTTPCLIENT'] - oauth = GithubOAuthConfig(config, 'GITHUB_LOGIN_CONFIG') - endpoint = oauth.authorize_endpoint() - # TODO: this + oauth = GithubOAuthConfig(config, config_key) + result = oauth.validate_client_id_and_secret(client) + if not result: + raise Exception('Invalid client id or client secret') def _validate_ssl(config): @@ -116,7 +139,8 @@ _VALIDATORS = { 'redis': _validate_redis, 'registry-storage': _validate_registry_storage, 'mail': _validate_mailing, - 'github-login': _validate_github_login, + 'github-login': _validate_github('GITHUB_LOGIN_CONFIG'), + 'github-trigger': _validate_github('GITHUB_TRIGGER_CONFIG'), 'ssl': _validate_ssl, 'ldap': _validate_ldap, } \ No newline at end of file diff 
--git a/util/oauth.py b/util/oauth.py index 65af5cd13..2405423ea 100644 --- a/util/oauth.py +++ b/util/oauth.py @@ -17,15 +17,15 @@ class OAuthConfig(object): def login_endpoint(self): raise NotImplementedError + def validate_client_id_and_secret(self, http_client): + raise NotImplementedError + def client_id(self): return self.config.get('CLIENT_ID') def client_secret(self): return self.config.get('CLIENT_SECRET') - def basic_scope(self): - raise NotImplementedError - def _get_url(self, endpoint, *args): for arg in args: endpoint = urlparse.urljoin(endpoint, arg) @@ -46,9 +46,6 @@ class GithubOAuthConfig(OAuthConfig): endpoint = endpoint + '/' return endpoint - def basic_scope(self): - return 'user:email' - def authorize_endpoint(self): return self._get_url(self._endpoint(), '/login/oauth/authorize') + '?' @@ -69,6 +66,30 @@ class GithubOAuthConfig(OAuthConfig): api_endpoint = self._api_endpoint() return self._get_url(api_endpoint, 'user/emails') + def validate_client_id_and_secret(self, http_client): + # First: Verify that the github endpoint is actually Github by checking for the + # X-GitHub-Request-Id here. + api_endpoint = self._api_endpoint() + result = http_client.get(api_endpoint, auth=(self.client_id(), self.client_secret())) + if not 'X-GitHub-Request-Id' in result.headers: + raise Exception('Endpoint is not a Github (Enterprise) installation') + + # Next: Verify the client ID and secret. + # Note: The following code is a hack until such time as Github officially adds an API endpoint + # for verifying a {client_id, client_secret} pair. That being said, this hack was given to us + # *by a Github Engineer*, so I think it is okay for the time being :) + # + # TODO(jschorr): Replace with the real API call once added. 
+ # + # Hitting the endpoint applications/{client_id}/tokens/foo will result in the following + # behavior IF the client_id is given as the HTTP username and the client_secret as the HTTP + # password: + # - If the {client_id, client_secret} pair is invalid in some way, we get a 401 error. + # - If the pair is valid, then we get a 404 because the 'foo' token does not exist. + validate_endpoint = self._get_url(api_endpoint, 'applications/%s/tokens/foo' % self.client_id()) + result = http_client.get(validate_endpoint, auth=(self.client_id(), self.client_secret())) + return result.status_code == 404 + def get_public_config(self): return { 'CLIENT_ID': self.client_id(), @@ -85,9 +106,6 @@ class GoogleOAuthConfig(OAuthConfig): def service_name(self): return 'Google' - def basic_scope(self): - return 'openid email' - def authorize_endpoint(self): return 'https://accounts.google.com/o/oauth2/auth?response_type=code&' @@ -97,6 +115,10 @@ class GoogleOAuthConfig(OAuthConfig): def user_endpoint(self): return 'https://www.googleapis.com/oauth2/v1/userinfo' + def validate_client_id_and_secret(self, http_client): + # No validation supported at this time. 
+ return None + def get_public_config(self): return { 'CLIENT_ID': self.client_id(), From 5ac2c4970ae2e2bceee09c290ffc8e8fe8e6a55f Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 8 Jan 2015 13:56:17 -0500 Subject: [PATCH 20/62] Add Google auth validation and fix the case where no config is specified at all for Google auth or Github auth --- static/js/core-config-setup.js | 4 ++++ util/config/validator.py | 30 +++++++++++++++++++++++++++--- util/oauth.py | 22 ++++++++++++++++++---- 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/static/js/core-config-setup.js b/static/js/core-config-setup.js index 6950ebe68..78483ab5d 100644 --- a/static/js/core-config-setup.js +++ b/static/js/core-config-setup.js @@ -31,6 +31,10 @@ angular.module("core-config-setup", ['angularFileUpload']) return config.FEATURE_GITHUB_LOGIN; }}, + {'id': 'google-login', 'title': 'Google Authentication', 'condition': function(config) { + return config.FEATURE_GOOGLE_LOGIN; + }}, + {'id': 'github-trigger', 'title': 'Github (Enterprise) Build Triggers', 'condition': function(config) { return config.FEATURE_GITHUB_BUILD; }} diff --git a/util/config/validator.py b/util/config/validator.py index 7be8657df..990c69c36 100644 --- a/util/config/validator.py +++ b/util/config/validator.py @@ -79,17 +79,21 @@ def _validate_github(config_key): def _validate_github_with_key(config_key, config): """ Validates the OAuth credentials and API endpoint for a Github service. 
""" - endpoint = config[config_key].get('GITHUB_ENDPOINT') + github_config = config.get(config_key) + if not github_config: + raise Exception('Missing Github client id and client secret') + + endpoint = github_config.get('GITHUB_ENDPOINT') if not endpoint: raise Exception('Missing Github Endpoint') if endpoint.find('http://') != 0 and endpoint.find('https://') != 0: raise Exception('Github Endpoint must start with http:// or https://') - if not config[config_key].get('CLIENT_ID'): + if not github_config.get('CLIENT_ID'): raise Exception('Missing Client ID') - if not config[config_key].get('CLIENT_SECRET'): + if not github_config.get('CLIENT_SECRET'): raise Exception('Missing Client Secret') client = app.config['HTTPCLIENT'] @@ -99,6 +103,25 @@ def _validate_github_with_key(config_key, config): raise Exception('Invalid client id or client secret') +def _validate_google_login(config): + """ Validates the Google Login client ID and secret. """ + google_login_config = config.get('GOOGLE_LOGIN_CONFIG') + if not google_login_config: + raise Exception('Missing client ID and client secret') + + if not google_login_config.get('CLIENT_ID'): + raise Exception('Missing Client ID') + + if not google_login_config.get('CLIENT_SECRET'): + raise Exception('Missing Client Secret') + + client = app.config['HTTPCLIENT'] + oauth = GoogleOAuthConfig(config, 'GOOGLE_LOGIN_CONFIG') + result = oauth.validate_client_id_and_secret(client) + if not result: + raise Exception('Invalid client id or client secret') + + def _validate_ssl(config): """ Validates the SSL configuration (if enabled). 
""" if config.get('PREFERRED_URL_SCHEME', 'http') != 'https': @@ -141,6 +164,7 @@ _VALIDATORS = { 'mail': _validate_mailing, 'github-login': _validate_github('GITHUB_LOGIN_CONFIG'), 'github-trigger': _validate_github('GITHUB_TRIGGER_CONFIG'), + 'google-login': _validate_google_login, 'ssl': _validate_ssl, 'ldap': _validate_ldap, } \ No newline at end of file diff --git a/util/oauth.py b/util/oauth.py index 2405423ea..ede8823aa 100644 --- a/util/oauth.py +++ b/util/oauth.py @@ -70,7 +70,7 @@ class GithubOAuthConfig(OAuthConfig): # First: Verify that the github endpoint is actually Github by checking for the # X-GitHub-Request-Id here. api_endpoint = self._api_endpoint() - result = http_client.get(api_endpoint, auth=(self.client_id(), self.client_secret())) + result = http_client.get(api_endpoint, auth=(self.client_id(), self.client_secret()), timeout=5) if not 'X-GitHub-Request-Id' in result.headers: raise Exception('Endpoint is not a Github (Enterprise) installation') @@ -87,7 +87,8 @@ class GithubOAuthConfig(OAuthConfig): # - If the {client_id, client_secret} pair is invalid in some way, we get a 401 error. # - If the pair is valid, then we get a 404 because the 'foo' token does not exists. validate_endpoint = self._get_url(api_endpoint, 'applications/%s/tokens/foo' % self.client_id()) - result = http_client.get(validate_endpoint, auth=(self.client_id(), self.client_secret())) + result = http_client.get(validate_endpoint, auth=(self.client_id(), self.client_secret()), + timeout=5) return result.status_code == 404 def get_public_config(self): @@ -116,8 +117,21 @@ class GoogleOAuthConfig(OAuthConfig): return 'https://www.googleapis.com/oauth2/v1/userinfo' def validate_client_id_and_secret(self, http_client): - # No validation supported at this time. - return None + # To verify the Google client ID and secret, we hit the + # https://www.googleapis.com/oauth2/v3/token endpoint with an invalid request. 
If the client + ID or secret are invalid, we get returned a 401 Unauthorized. Otherwise, we get returned + another response code. + url = 'https://www.googleapis.com/oauth2/v3/token' + data = { + 'code': 'fakecode', + 'client_id': self.client_id(), + 'client_secret': self.client_secret(), + 'grant_type': 'authorization_code', + 'redirect_uri': 'http://example.com' + } + + result = http_client.post(url, data=data, timeout=5) + return result.status_code != 401 def get_public_config(self): return { From bfd273d16f48a23c98c8e678ad74da42bef7795b Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 8 Jan 2015 15:27:49 -0500 Subject: [PATCH 21/62] - Make validation a bit nicer: - Add timeout to the DB validation - Make DB validation exception handling a bit nicer - Move the DB validation error message - Fix bug around RADOS config default for Is Secure - Allow hiding of the validation box --- data/database.py | 4 ++- static/css/quay.css | 5 ++++ .../directives/config/config-setup-tool.html | 9 +++++- static/js/core-config-setup.js | 14 +++++++++ static/partials/super-user.html | 30 +++++++++---------- util/config/validator.py | 14 +++++++-- 6 files changed, 56 insertions(+), 20 deletions(-) diff --git a/data/database.py b/data/database.py index ab23b3d7d..1953b4a14 100644 --- a/data/database.py +++ b/data/database.py @@ -71,7 +71,9 @@ db_random_func = CallableProxy() def validate_database_url(url): - driver = _db_from_url(url, {}) + driver = _db_from_url(url, { + 'connect_timeout': 5 + }) driver.connect() driver.close() diff --git a/static/css/quay.css b/static/css/quay.css index 9d032549a..0ae6bf47b 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -4923,4 +4923,9 @@ i.slack-icon { border: 1px solid #eee; vertical-align: middle; padding: 4px; +} + +.modal-footer.alert { + text-align: left; + margin-bottom: -16px; } \ No newline at end of file diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html 
index 63f9701e5..bd39b3f60 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -546,7 +546,7 @@ + - +
      + +
      diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index bd39b3f60..62311d78d 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -1,6 +1,8 @@
      +
      +
      @@ -78,7 +80,7 @@ A valid SSL certificate and private key files are required to use this option.
      - +
      Certificate: @@ -206,7 +208,7 @@ - +
      SMTP Server: @@ -249,7 +251,7 @@ - +
      Username: @@ -299,7 +301,7 @@
      - +
      @@ -356,7 +358,7 @@ -
      LDAP URI:
      +
      - +
      Github: @@ -366,7 +368,7 @@
      Github Endpoint: Enable Google Authentication - +
      OAuth Client ID: @@ -465,7 +467,7 @@ -
      +
      Github (Enterprise) Build Triggers
      @@ -488,7 +490,7 @@
      - +
      - + @@ -213,7 +215,8 @@ @@ -235,8 +238,8 @@ @@ -216,7 +217,8 @@ @@ -377,7 +379,7 @@ + pattern="{{ GITHUB_REGEX }}">
      The Github Enterprise endpoint. Must start with http:// or https://. @@ -509,7 +511,7 @@ + pattern="{{ GITHUB_REGEX }}">
      The Github Enterprise endpoint. Must start with http:// or https://. diff --git a/static/directives/config/config-string-field.html b/static/directives/config/config-string-field.html index 5cbbe8a35..7714fd541 100644 --- a/static/directives/config/config-string-field.html +++ b/static/directives/config/config-string-field.html @@ -3,5 +3,8 @@ +
      + {{ errorMessage }} +
      diff --git a/static/js/core-config-setup.js b/static/js/core-config-setup.js index e6a79d4b6..0b1f42e62 100644 --- a/static/js/core-config-setup.js +++ b/static/js/core-config-setup.js @@ -10,6 +10,9 @@ angular.module("core-config-setup", ['angularFileUpload']) 'isActive': '=isActive' }, controller: function($rootScope, $scope, $element, $timeout, ApiService) { + $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9\.]+(:[0-9]+)?$'; + $scope.GITHUB_REGEX = '^https?://([a-zA-Z0-9]+\.?\/?)+$'; + $scope.SERVICES = [ {'id': 'redis', 'title': 'Redis'}, @@ -69,6 +72,14 @@ angular.module("core-config-setup", ['angularFileUpload']) ] }; + $scope.validateHostname = function(hostname) { + if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) { + return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.' + } + + return null; + }; + $scope.config = null; $scope.mapped = { '$hasChanges': false @@ -719,7 +730,8 @@ angular.module("core-config-setup", ['angularFileUpload']) 'binding': '=binding', 'placeholder': '@placeholder', 'pattern': '@pattern', - 'defaultValue': '@defaultValue' + 'defaultValue': '@defaultValue', + 'validator': '&validator' }, controller: function($scope, $element) { $scope.getRegexp = function(pattern) { @@ -733,6 +745,8 @@ angular.module("core-config-setup", ['angularFileUpload']) if (!binding && $scope.defaultValue) { $scope.binding = $scope.defaultValue; } + + $scope.errorMessage = $scope.validator({'value': binding || ''}); }); } }; From 53e5fc6265bb86d0245812593c05f594ce00171f Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 16 Jan 2015 16:10:40 -0500 Subject: [PATCH 30/62] Have the config setup tool automatically prepare the S3 or GCS storage with CORS config --- storage/basestorage.py | 4 ++++ storage/cloud.py | 44 ++++++++++++++++++++++++++++++++++++++++ util/config/validator.py | 19 ++++++++++++----- 3 files changed, 62 insertions(+), 5 deletions(-) diff --git 
a/storage/basestorage.py b/storage/basestorage.py index 332a5d2ca..da297fcf1 100644 --- a/storage/basestorage.py +++ b/storage/basestorage.py @@ -54,6 +54,10 @@ class BaseStorage(StoragePaths): # Set the IO buffer to 64kB buffer_size = 64 * 1024 + def setup(self): + """ Called to perform any storage system setup. """ + pass + def get_direct_download_url(self, path, expires_in=60, requires_cors=False): return None diff --git a/storage/cloud.py b/storage/cloud.py index 06dd8a2a9..91dadfb3e 100644 --- a/storage/cloud.py +++ b/storage/cloud.py @@ -77,6 +77,13 @@ class _CloudStorage(BaseStorage): return path[1:] return path + def get_cloud_conn(self): + self._initialize_cloud_conn() + return self._cloud_conn + + def get_cloud_bucket(self): + return self._cloud_bucket + def get_content(self, path): self._initialize_cloud_conn() path = self._init_path(path) @@ -221,6 +228,25 @@ class S3Storage(_CloudStorage): connect_kwargs, upload_params, storage_path, s3_access_key, s3_secret_key, s3_bucket) + def setup(self): + self.get_cloud_bucket().set_cors_xml(""" + + + * + GET + 3000 + Authorization + + + * + PUT + 3000 + Content-Type + x-amz-acl + origin + + """) + class GoogleCloudStorage(_CloudStorage): def __init__(self, storage_path, access_key, secret_key, bucket_name): @@ -230,6 +256,24 @@ class GoogleCloudStorage(_CloudStorage): connect_kwargs, upload_params, storage_path, access_key, secret_key, bucket_name) + def setup(self): + self.get_cloud_bucket().set_cors_xml(""" + + + + * + + + GET + PUT + + + Content-Type + + 3000 + + """) + def stream_write(self, path, fp, content_type=None, content_encoding=None): # Minimum size of upload part size on S3 is 5MB self._initialize_cloud_conn() diff --git a/util/config/validator.py b/util/config/validator.py index ec6d6f708..da533ec01 100644 --- a/util/config/validator.py +++ b/util/config/validator.py @@ -15,6 +15,13 @@ from util.oauth import GoogleOAuthConfig, GithubOAuthConfig SSL_FILENAMES = ['ssl.cert', 'ssl.key'] +def 
get_storage_provider(config): + parameters = config.get('DISTRIBUTED_STORAGE_CONFIG', {}).get('local', ['LocalStorage', {}]) + try: + return get_storage_driver(parameters) + except TypeError: + raise Exception('Missing required storage configuration parameter(s)') + def validate_service_for_config(service, config): """ Attempts to validate the configuration for the given service. """ if not service in _VALIDATORS: @@ -57,16 +64,18 @@ def _validate_redis(config): def _validate_registry_storage(config): """ Validates registry storage. """ - parameters = config.get('DISTRIBUTED_STORAGE_CONFIG', {}).get('local', ['LocalStorage', {}]) - try: - driver = get_storage_driver(parameters) - except TypeError: - raise Exception('Missing required storage configuration parameter(s)') + driver = get_storage_provider(config) # Put and remove a temporary file. driver.put_content('_verify', 'testing 123') driver.remove('_verify') + # Run setup on the driver if the read/write succeeded. + try: + driver.setup() + except Exception as ex: + raise Exception('Could not prepare storage: %s' % str(ex)) + def _validate_mailing(config): """ Validates sending email. """ From da4bcbbee0dda7a74a6e7beec8bc6a32220e4a39 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 16 Jan 2015 21:02:12 -0500 Subject: [PATCH 31/62] Add help text on the username and password fields --- static/partials/super-user.html | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/static/partials/super-user.html b/static/partials/super-user.html index 78e8c6f51..847a0ed23 100644 --- a/static/partials/super-user.html +++ b/static/partials/super-user.html @@ -281,8 +281,9 @@
      - + +
      Minimum 4 characters in length
      @@ -295,6 +296,7 @@ +
      Minimum 8 characters in length
      From 28d319ad2653fd11213970d2e7a938b001306539 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 20 Jan 2015 12:43:11 -0500 Subject: [PATCH 32/62] Add an in-memory superusermanager, which stores the current list of superusers in a process-shared Value. We do this because in the ER, when we add a new superuser, we need to ensure that ALL workers have their lists updated (otherwise we get the behavior that some workers validate the new permission and others do not). --- app.py | 2 ++ auth/permissions.py | 5 ++--- endpoints/api/suconfig.py | 6 +++--- endpoints/api/superuser.py | 10 ++++----- static/partials/super-user.html | 2 +- util/config/superusermanager.py | 38 +++++++++++++++++++++++++++++++++ 6 files changed, 51 insertions(+), 12 deletions(-) create mode 100644 util/config/superusermanager.py diff --git a/app.py b/app.py index b1281b760..6ce4fbf90 100644 --- a/app.py +++ b/app.py @@ -29,6 +29,7 @@ from util.names import urn_generator from util.oauth import GoogleOAuthConfig, GithubOAuthConfig from util.queuemetrics import QueueMetrics from util.config.configutil import generate_secret_key +from util.config.superusermanager import SuperUserManager app = Flask(__name__) @@ -105,6 +106,7 @@ build_logs = BuildLogs(app) queue_metrics = QueueMetrics(app) authentication = UserAuthentication(app) userevents = UserEventsBuilderModule(app) +superusers = SuperUserManager(app) github_login = GithubOAuthConfig(app.config, 'GITHUB_LOGIN_CONFIG') github_trigger = GithubOAuthConfig(app.config, 'GITHUB_TRIGGER_CONFIG') diff --git a/auth/permissions.py b/auth/permissions.py index efa135155..47a41c257 100644 --- a/auth/permissions.py +++ b/auth/permissions.py @@ -7,7 +7,7 @@ from functools import partial import scopes from data import model -from app import app +from app import app, superusers logger = logging.getLogger(__name__) @@ -94,8 +94,7 @@ class QuayDeferredPermissionUser(Identity): return super(QuayDeferredPermissionUser, self).can(permission) # Add the 
superuser need, if applicable. - if (user_object.username is not None and - user_object.username in app.config.get('SUPER_USERS', [])): + if superusers.is_superuser(user_object.username): self.provides.add(_SuperUserNeed()) # Add the user specific permissions, only for non-oauth permission diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index 50b3635cf..05efb4cd7 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -7,7 +7,7 @@ from endpoints.api import (ApiResource, nickname, resource, internal_only, show_ require_fresh_login, request, validate_json_request, verify_not_prod) from endpoints.common import common_login -from app import app, CONFIG_PROVIDER +from app import app, CONFIG_PROVIDER, superusers from data import model from auth.permissions import SuperUserPermission from auth.auth_context import get_authenticated_user @@ -49,7 +49,7 @@ class SuperUserRegistryStatus(ApiResource): 'file_exists': file_exists, 'is_testing': app.config['TESTING'], 'valid_db': database_is_valid(), - 'ready': not app.config['TESTING'] and file_exists and bool(app.config['SUPER_USERS']) + 'ready': not app.config['TESTING'] and file_exists and superusers.has_superusers() } @@ -215,7 +215,7 @@ class SuperUserCreateInitialSuperUser(ApiResource): CONFIG_PROVIDER.save_yaml(config_object) # Update the in-memory config for the new superuser. - app.config['SUPER_USERS'] = [username] + superusers.register_superuser(username) # Conduct login with that user. 
common_login(superuser) diff --git a/endpoints/api/superuser.py b/endpoints/api/superuser.py index c8717bd7b..a391b3130 100644 --- a/endpoints/api/superuser.py +++ b/endpoints/api/superuser.py @@ -4,7 +4,7 @@ import json import os from random import SystemRandom -from app import app, avatar +from app import app, avatar, superusers from flask import request from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error, @@ -109,7 +109,7 @@ def user_view(user): 'email': user.email, 'verified': user.verified, 'avatar': avatar.compute_hash(user.email, name=user.username), - 'super_user': user.username in app.config['SUPER_USERS'] + 'super_user': superusers.is_superuser(user.username) } @resource('/v1/superuser/usage/') @@ -217,7 +217,7 @@ class SuperUserSendRecoveryEmail(ApiResource): if not user or user.organization or user.robot: abort(404) - if username in app.config['SUPER_USERS']: + if superusers.is_superuser(username): abort(403) code = model.create_reset_password_email_code(user.email) @@ -277,7 +277,7 @@ class SuperUserManagement(ApiResource): if not user or user.organization or user.robot: abort(404) - if username in app.config['SUPER_USERS']: + if superusers.is_superuser(username): abort(403) model.delete_user(user) @@ -296,7 +296,7 @@ class SuperUserManagement(ApiResource): if not user or user.organization or user.robot: abort(404) - if username in app.config['SUPER_USERS']: + if superusers.is_superuser(username): abort(403) user_data = request.get_json() diff --git a/static/partials/super-user.html b/static/partials/super-user.html index 847a0ed23..7800c9b1e 100644 --- a/static/partials/super-user.html +++ b/static/partials/super-user.html @@ -376,7 +376,7 @@
      Github: @@ -498,7 +500,7 @@
      Github Endpoint: +
      +
      diff --git a/static/js/core-config-setup.js b/static/js/core-config-setup.js index d2669bb12..e6a79d4b6 100644 --- a/static/js/core-config-setup.js +++ b/static/js/core-config-setup.js @@ -127,6 +127,16 @@ angular.module("core-config-setup", ['angularFileUpload']) }, ApiService.errorDisplay('Could not validate configuration. Please report this error.')); }; + $scope.checkValidateAndSave = function() { + if ($scope.configform.$valid) { + $scope.validateAndSave(); + return; + } + + $element.find("input.ng-invalid:first")[0].scrollIntoView(); + $element.find("input.ng-invalid:first").focus(); + }; + $scope.validateAndSave = function() { $scope.savingConfiguration = false; $scope.validating = $scope.getServices($scope.config); @@ -593,9 +603,7 @@ angular.module("core-config-setup", ['angularFileUpload']) }, true); $scope.$watch('binding', function(binding) { - if (!binding) { return; } - - var current = binding; + var current = binding || []; var items = []; var itemHash = ''; for (var i = 0; i < current.length; ++i) { From 65989ac1fbdd62d68d8ab91687ba7fda73fbf856 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 8 Jan 2015 16:06:20 -0500 Subject: [PATCH 23/62] Fix bug where deleting the server hostname removed the entire form --- static/directives/config/config-setup-tool.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index 62311d78d..9ad2ab8cd 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -1,6 +1,6 @@
      -
      +
      From c0c27648eaf227c0f541dfe09f7ba9acf12b1d15 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 8 Jan 2015 16:16:22 -0500 Subject: [PATCH 24/62] Clarify where the configuration is saved --- static/directives/config/config-setup-tool.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index 9ad2ab8cd..391ba0ce8 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -573,7 +573,8 @@ Configuration Changes Saved
      -

      Your configuration changes have been saved and will be applied the next time the container is restarted.

      +

      Your configuration changes have been saved to config.yaml in the mounted config + volume and will be applied the next time the container is restarted.

      From 6d604a656a8bccbe41e6bb49a6620ebb25b21575 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 9 Jan 2015 16:23:31 -0500 Subject: [PATCH 25/62] Move config handling into a provider class to make testing much easier --- app.py | 64 +++----- endpoints/api/suconfig.py | 43 +++--- static/css/quay.css | 1 + .../directives/config/config-file-field.html | 4 +- test/test_suconfig_api.py | 44 +++--- util/config/configutil.py | 29 ---- util/config/provider.py | 139 ++++++++++++++++++ util/config/validator.py | 4 +- 8 files changed, 207 insertions(+), 121 deletions(-) create mode 100644 util/config/provider.py diff --git a/app.py b/app.py index f00cc23db..b1281b760 100644 --- a/app.py +++ b/app.py @@ -2,7 +2,7 @@ import logging import os import json -from flask import Flask as BaseFlask, Config as BaseConfig, request, Request +from flask import Flask, Config, request, Request from flask.ext.principal import Principal from flask.ext.login import LoginManager from flask.ext.mail import Mail @@ -14,48 +14,33 @@ from data import model from data import database from data.userfiles import Userfiles from data.users import UserAuthentication -from util.analytics import Analytics -from util.exceptionlog import Sentry -from util.queuemetrics import QueueMetrics -from util.names import urn_generator -from util.oauth import GoogleOAuthConfig, GithubOAuthConfig -from util.config.configutil import import_yaml, generate_secret_key -from data.billing import Billing from data.buildlogs import BuildLogs from data.archivedlogs import LogArchive from data.queue import WorkQueue from data.userevent import UserEventsBuilderModule + from avatars.avatars import Avatar - -class Config(BaseConfig): - """ Flask config enhanced with a `from_yamlfile` method """ - - def from_yamlfile(self, config_file): - import_yaml(self, config_file) - -class Flask(BaseFlask): - """ Extends the Flask class to implement our custom Config class. 
""" - - def make_config(self, instance_relative=False): - root_path = self.instance_path if instance_relative else self.root_path - return Config(root_path, self.default_config) - - -OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/' -OVERRIDE_CONFIG_YAML_FILENAME = OVERRIDE_CONFIG_DIRECTORY + 'config.yaml' -OVERRIDE_CONFIG_PY_FILENAME = OVERRIDE_CONFIG_DIRECTORY + 'config.py' - -OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' -LICENSE_FILENAME = OVERRIDE_CONFIG_DIRECTORY + 'license.enc' +from util.analytics import Analytics +from data.billing import Billing +from util.config.provider import FileConfigProvider, TestConfigProvider +from util.exceptionlog import Sentry +from util.names import urn_generator +from util.oauth import GoogleOAuthConfig, GithubOAuthConfig +from util.queuemetrics import QueueMetrics +from util.config.configutil import generate_secret_key app = Flask(__name__) logger = logging.getLogger(__name__) profile = logging.getLogger('profile') +CONFIG_PROVIDER = FileConfigProvider('conf/stack/', 'config.yaml', 'config.py') +# Instantiate the default configuration (for test or for normal operation). 
if 'TEST' in os.environ: + CONFIG_PROVIDER = TestConfigProvider() + from test.testconfig import TestConfig logger.debug('Loading test config.') app.config.from_object(TestConfig()) @@ -63,20 +48,17 @@ else: from config import DefaultConfig logger.debug('Loading default config.') app.config.from_object(DefaultConfig()) - - if os.path.exists(OVERRIDE_CONFIG_PY_FILENAME): - logger.debug('Applying config file: %s', OVERRIDE_CONFIG_PY_FILENAME) - app.config.from_pyfile(OVERRIDE_CONFIG_PY_FILENAME) - - if os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME): - logger.debug('Applying config file: %s', OVERRIDE_CONFIG_YAML_FILENAME) - app.config.from_yamlfile(OVERRIDE_CONFIG_YAML_FILENAME) - - environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) - app.config.update(environ_config) - app.teardown_request(database.close_db_filter) +# Load the override config via the provider. +CONFIG_PROVIDER.update_app_config(app.config) + +# Update any configuration found in the override environment variable. 
+OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' + +environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) +app.config.update(environ_config) + class RequestWithId(Request): request_gen = staticmethod(urn_generator(['request'])) diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index d403b617b..50b3635cf 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -3,17 +3,16 @@ import os import json from flask import abort -from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if, hide_if, +from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if, require_fresh_login, request, validate_json_request, verify_not_prod) from endpoints.common import common_login -from app import app, OVERRIDE_CONFIG_YAML_FILENAME, OVERRIDE_CONFIG_DIRECTORY +from app import app, CONFIG_PROVIDER from data import model from auth.permissions import SuperUserPermission from auth.auth_context import get_authenticated_user from data.database import User -from util.config.configutil import (import_yaml, export_yaml, add_enterprise_config_defaults, - set_config_value) +from util.config.configutil import add_enterprise_config_defaults from util.config.validator import validate_service_for_config, SSL_FILENAMES import features @@ -32,10 +31,6 @@ def database_has_users(): """ Returns whether the database has any users defined. """ return bool(list(User.select().limit(1))) -def config_file_exists(): - """ Returns whether a configuration file exists. """ - return os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME) - @resource('/v1/superuser/registrystatus') @internal_only @@ -48,12 +43,13 @@ class SuperUserRegistryStatus(ApiResource): @verify_not_prod def get(self): """ Returns whether a valid configuration, database and users exist. 
""" + file_exists = CONFIG_PROVIDER.yaml_exists() return { - 'dir_exists': os.path.exists(OVERRIDE_CONFIG_DIRECTORY), - 'file_exists': os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME), + 'dir_exists': CONFIG_PROVIDER.volume_exists(), + 'file_exists': file_exists, 'is_testing': app.config['TESTING'], 'valid_db': database_is_valid(), - 'ready': not app.config['TESTING'] and config_file_exists() and bool(app.config['SUPER_USERS']) + 'ready': not app.config['TESTING'] and file_exists and bool(app.config['SUPER_USERS']) } @@ -88,12 +84,7 @@ class SuperUserConfig(ApiResource): def get(self): """ Returns the currently defined configuration, if any. """ if SuperUserPermission().can(): - config_object = {} - try: - import_yaml(config_object, OVERRIDE_CONFIG_YAML_FILENAME) - except Exception: - config_object = None - + config_object = CONFIG_PROVIDER.get_yaml() return { 'config': config_object } @@ -107,7 +98,7 @@ class SuperUserConfig(ApiResource): """ Updates the config.yaml file. """ # Note: This method is called to set the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. - if not config_file_exists() or SuperUserPermission().can(): + if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can(): config_object = request.get_json()['config'] hostname = request.get_json()['hostname'] @@ -115,7 +106,7 @@ class SuperUserConfig(ApiResource): add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname) # Write the configuration changes to the YAML file. 
- export_yaml(config_object, OVERRIDE_CONFIG_YAML_FILENAME) + CONFIG_PROVIDER.save_yaml(config_object) return { 'exists': True, @@ -139,7 +130,7 @@ class SuperUserConfigFile(ApiResource): if SuperUserPermission().can(): return { - 'exists': os.path.exists(os.path.join(OVERRIDE_CONFIG_DIRECTORY, filename)) + 'exists': CONFIG_PROVIDER.volume_file_exists(filename) } abort(403) @@ -156,7 +147,7 @@ class SuperUserConfigFile(ApiResource): if not uploaded_file: abort(400) - uploaded_file.save(os.path.join(OVERRIDE_CONFIG_DIRECTORY, filename)) + CONFIG_PROVIDER.save_volume_file(filename, uploaded_file) return { 'status': True } @@ -209,7 +200,7 @@ class SuperUserCreateInitialSuperUser(ApiResource): # # We do this special security check because at the point this method is called, the database # is clean but does not (yet) have any super users for our permissions code to check against. - if config_file_exists() and not database_has_users(): + if CONFIG_PROVIDER.yaml_exists() and not database_has_users(): data = request.get_json() username = data['username'] password = data['password'] @@ -219,7 +210,11 @@ class SuperUserCreateInitialSuperUser(ApiResource): superuser = model.create_user(username, password, email, auto_verify=True) # Add the user to the config. - set_config_value(OVERRIDE_CONFIG_YAML_FILENAME, 'SUPER_USERS', [username]) + config_object = CONFIG_PROVIDER.get_yaml() + config_object['SUPER_USERS'] = [username] + CONFIG_PROVIDER.save_yaml(config_object) + + # Update the in-memory config for the new superuser. app.config['SUPER_USERS'] = [username] # Conduct login with that user. @@ -262,7 +257,7 @@ class SuperUserConfigValidate(ApiResource): # Note: This method is called to validate the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. Note that # this is also safe since this method does not access any information not given in the request. 
- if not config_file_exists() or SuperUserPermission().can(): + if not CONFIG_PROVIDER.yaml_exists() or SuperUserPermission().can(): config = request.get_json()['config'] return validate_service_for_config(service, config) diff --git a/static/css/quay.css b/static/css/quay.css index 0ae6bf47b..08d8361af 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -4923,6 +4923,7 @@ i.slack-icon { border: 1px solid #eee; vertical-align: middle; padding: 4px; + max-width: 150px; } .modal-footer.alert { diff --git a/static/directives/config/config-file-field.html b/static/directives/config/config-file-field.html index a819c7300..7e4710905 100644 --- a/static/directives/config/config-file-field.html +++ b/static/directives/config/config-file-field.html @@ -2,9 +2,7 @@ {{ filename }} {{ filename }} not found in mounted config directory: - - - + Uploading file as {{ filename }}... {{ uploadProgress }}% diff --git a/test/test_suconfig_api.py b/test/test_suconfig_api.py index 44234d7d6..9cf72f573 100644 --- a/test/test_suconfig_api.py +++ b/test/test_suconfig_api.py @@ -1,19 +1,28 @@ from test.test_api_usage import ApiTestCase, READ_ACCESS_USER, ADMIN_ACCESS_USER from endpoints.api.suconfig import (SuperUserRegistryStatus, SuperUserConfig, SuperUserConfigFile, SuperUserCreateInitialSuperUser, SuperUserConfigValidate) -from app import OVERRIDE_CONFIG_YAML_FILENAME +from app import CONFIG_PROVIDER from data.database import User import unittest -import os + + +class ConfigForTesting(object): + def __enter__(self): + CONFIG_PROVIDER.reset_for_test() + return CONFIG_PROVIDER + + def __exit__(self, type, value, traceback): + pass class TestSuperUserRegistryStatus(ApiTestCase): def test_registry_status(self): - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertTrue(json['is_testing']) - self.assertTrue(json['valid_db']) - self.assertFalse(json['file_exists']) - self.assertFalse(json['ready']) + with ConfigForTesting(): + json = 
self.getJsonResponse(SuperUserRegistryStatus) + self.assertTrue(json['is_testing']) + self.assertTrue(json['valid_db']) + self.assertFalse(json['file_exists']) + self.assertFalse(json['ready']) class TestSuperUserConfigFile(ApiTestCase): @@ -58,7 +67,7 @@ class TestSuperUserCreateInitialSuperUser(ApiTestCase): self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) def test_config_file_with_db_users(self): - try: + with ConfigForTesting(): # Write some config. self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) @@ -66,11 +75,9 @@ class TestSuperUserCreateInitialSuperUser(ApiTestCase): # fail. data = dict(username='cooluser', password='password', email='fake@example.com') self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) - finally: - os.remove(OVERRIDE_CONFIG_YAML_FILENAME) def test_config_file_with_no_db_users(self): - try: + with ConfigForTesting(): # Write some config. self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) @@ -90,9 +97,6 @@ class TestSuperUserCreateInitialSuperUser(ApiTestCase): result = self.getJsonResponse(SuperUserConfig) self.assertEquals(['cooluser'], result['config']['SUPER_USERS']) - finally: - os.remove(OVERRIDE_CONFIG_YAML_FILENAME) - class TestSuperUserConfigValidate(ApiTestCase): def test_nonsuperuser_noconfig(self): @@ -104,7 +108,7 @@ class TestSuperUserConfigValidate(ApiTestCase): def test_nonsuperuser_config(self): - try: + with ConfigForTesting(): # The validate config call works if there is no config.yaml OR the user is a superuser. # Add a config, and verify it breaks when unauthenticated. 
json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) @@ -120,8 +124,6 @@ class TestSuperUserConfigValidate(ApiTestCase): data=dict(config={})) self.assertFalse(result['status']) - finally: - os.remove(OVERRIDE_CONFIG_YAML_FILENAME) class TestSuperUserConfig(ApiTestCase): @@ -142,14 +144,14 @@ class TestSuperUserConfig(ApiTestCase): self.assertIsNone(json['config']) def test_put(self): - try: + with ConfigForTesting() as config: # The update config call works if there is no config.yaml OR the user is a superuser. First # try writing it without a superuser present. json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) self.assertTrue(json['exists']) - # Verify the config.yaml file exists. - self.assertTrue(os.path.exists(OVERRIDE_CONFIG_YAML_FILENAME)) + # Verify the config file exists. + self.assertTrue(config.yaml_exists()) # Try writing it again. This should now fail, since the config.yaml exists. self.putResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz'), expected_code=403) @@ -170,8 +172,6 @@ class TestSuperUserConfig(ApiTestCase): json = self.getJsonResponse(SuperUserConfig) self.assertIsNotNone(json['config']) - finally: - os.remove(OVERRIDE_CONFIG_YAML_FILENAME) if __name__ == '__main__': unittest.main() \ No newline at end of file diff --git a/util/config/configutil.py b/util/config/configutil.py index 981a9150c..7a31390c9 100644 --- a/util/config/configutil.py +++ b/util/config/configutil.py @@ -7,35 +7,6 @@ def generate_secret_key(): return str(cryptogen.getrandbits(256)) -def import_yaml(config_obj, config_file): - with open(config_file) as f: - c = yaml.safe_load(f) - if not c: - logger.debug('Empty YAML config file') - return - - if isinstance(c, str): - raise Exception('Invalid YAML config file: ' + str(c)) - - for key in c.iterkeys(): - if key.isupper(): - config_obj[key] = c[key] - - -def export_yaml(config_obj, config_file): - with open(config_file, 'w') as f: - 
f.write(yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True)) - - -def set_config_value(config_file, config_key, value): - """ Loads the configuration from the given YAML config file, sets the given key to - the given value, and then writes it back out to the given YAML config file. """ - config_obj = {} - import_yaml(config_obj, config_file) - config_obj[config_key] = value - export_yaml(config_obj, config_file) - - def add_enterprise_config_defaults(config_obj, current_secret_key, hostname): """ Adds/Sets the config defaults for enterprise registry config. """ # These have to be false. diff --git a/util/config/provider.py b/util/config/provider.py new file mode 100644 index 000000000..c4ddbc97c --- /dev/null +++ b/util/config/provider.py @@ -0,0 +1,139 @@ +import os +import yaml +import logging +import json + +logger = logging.getLogger(__name__) + +def _import_yaml(config_obj, config_file): + with open(config_file) as f: + c = yaml.safe_load(f) + if not c: + logger.debug('Empty YAML config file') + return + + if isinstance(c, str): + raise Exception('Invalid YAML config file: ' + str(c)) + + for key in c.iterkeys(): + if key.isupper(): + config_obj[key] = c[key] + + return config_obj + + +def _export_yaml(config_obj, config_file): + with open(config_file, 'w') as f: + f.write(yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True)) + + +class BaseProvider(object): + """ A configuration provider helps to load, save, and handle config override in the application. + """ + + def update_app_config(self, app_config): + """ Updates the given application config object with the loaded override config. """ + raise NotImplementedError + + def get_yaml(self): + """ Returns the contents of the YAML config override file, or None if none. """ + raise NotImplementedError + + def save_yaml(self, config_object): + """ Updates the contents of the YAML config override file to those given. 
""" + raise NotImplementedError + + def yaml_exists(self): + """ Returns true if a YAML config override file exists in the config volume. """ + raise NotImplementedError + + def volume_exists(self): + """ Returns whether the config override volume exists. """ + raise NotImplementedError + + def volume_file_exists(self, filename): + """ Returns whether the file with the given name exists under the config override volume. """ + raise NotImplementedError + + def save_volume_file(self, filename, flask_file): + """ Saves the given flask file to the config override volume, with the given + filename. + """ + raise NotImplementedError + + +class FileConfigProvider(BaseProvider): + """ Implementation of the config provider that reads the data from the file system. """ + def __init__(self, config_volume, yaml_filename, py_filename): + self.config_volume = config_volume + self.yaml_filename = yaml_filename + self.py_filename = py_filename + + self.yaml_path = os.path.join(config_volume, yaml_filename) + self.py_path = os.path.join(config_volume, py_filename) + + def update_app_config(self, app_config): + if os.path.exists(self.py_path): + logger.debug('Applying config file: %s', self.py_path) + app_config.from_pyfile(self.py_path) + + if os.path.exists(self.yaml_path): + logger.debug('Applying config file: %s', self.yaml_path) + _import_yaml(app_config, self.yaml_path) + + def get_yaml(self): + if not os.path.exists(self.yaml_path): + return None + + config_obj = {} + _import_yaml(config_obj, self.yaml_path) + return config_obj + + def save_yaml(self, config_obj): + _export_yaml(config_obj, self.yaml_path) + + def yaml_exists(self): + return self.volume_file_exists(self.yaml_filename) + + def volume_exists(self): + return os.path.exists(self.config_volume) + + def volume_file_exists(self, filename): + return os.path.exists(os.path.join(self.config_volume, filename)) + + def save_volume_file(self, filename, flask_file): + flask_file.save(os.path.join(self.config_volume, 
filename)) + + +class TestConfigProvider(BaseProvider): + """ Implementation of the config provider for testing. Everything is kept in-memory instead on + the real file system. """ + def __init__(self): + self.files = {} + + def update_app_config(self, app_config): + pass + + def get_yaml(self): + if not 'config.yaml' in self.files: + return None + + return json.loads(self.files.get('config.yaml', '{}')) + + def save_yaml(self, config_obj): + self.files['config.yaml'] = json.dumps(config_obj) + + def yaml_exists(self): + return 'config.yaml' in self.files + + def volume_exists(self): + return True + + def volume_file_exists(self, filename): + return filename in self.files + + def save_volume_file(self, filename, flask_file): + self.files[filename] = '' + + def reset_for_test(self): + self.files = {} diff --git a/util/config/validator.py b/util/config/validator.py index 969f26fed..ec6d6f708 100644 --- a/util/config/validator.py +++ b/util/config/validator.py @@ -9,7 +9,7 @@ from flask import Flask from flask.ext.mail import Mail, Message from data.database import validate_database_url, User from storage import get_storage_driver -from app import app, OVERRIDE_CONFIG_DIRECTORY +from app import app, CONFIG_PROVIDER from auth.auth_context import get_authenticated_user from util.oauth import GoogleOAuthConfig, GithubOAuthConfig @@ -138,7 +138,7 @@ def _validate_ssl(config): return for filename in SSL_FILENAMES: - if not os.path.exists(os.path.join(OVERRIDE_CONFIG_DIRECTORY, filename)): + if not CONFIG_PROVIDER.volume_file_exists(filename): raise Exception('Missing required SSL file: %s' % filename) From 0d2c42ad0310aad68672f3e146a9d1508b80295f Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 9 Jan 2015 17:11:51 -0500 Subject: [PATCH 26/62] Fix tests --- config.py | 4 +- test/test_suconfig_api.py | 86 ++++++++++++++++++++++----------------- util/config/provider.py | 4 +- 3 files changed, 53 insertions(+), 41 deletions(-) diff --git a/config.py b/config.py index 
2748b77ad..e6f06a60e 100644 --- a/config.py +++ b/config.py @@ -194,6 +194,4 @@ class DefaultConfig(object): SYSTEM_SERVICES_PATH = "conf/init/" # Services that should not be shown in the logs view. - SYSTEM_SERVICE_BLACKLIST = ['tutumdocker', 'dockerfilebuild'] - - DEBUGGING = True \ No newline at end of file + SYSTEM_SERVICE_BLACKLIST = ['tutumdocker', 'dockerfilebuild'] \ No newline at end of file diff --git a/test/test_suconfig_api.py b/test/test_suconfig_api.py index 9cf72f573..a8f74483b 100644 --- a/test/test_suconfig_api.py +++ b/test/test_suconfig_api.py @@ -8,12 +8,14 @@ import unittest class ConfigForTesting(object): + def __enter__(self): CONFIG_PROVIDER.reset_for_test() return CONFIG_PROVIDER def __exit__(self, type, value, traceback): - pass + CONFIG_PROVIDER.reset_for_test() + class TestSuperUserRegistryStatus(ApiTestCase): def test_registry_status(self): @@ -27,44 +29,51 @@ class TestSuperUserRegistryStatus(ApiTestCase): class TestSuperUserConfigFile(ApiTestCase): def test_get_non_superuser(self): - # No user. - self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + with ConfigForTesting(): + # No user. + self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) - # Non-superuser. - self.login(READ_ACCESS_USER) - self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + # Non-superuser. 
+ self.login(READ_ACCESS_USER) + self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) def test_get_superuser_invalid_filename(self): - self.login(ADMIN_ACCESS_USER) - self.getResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) + with ConfigForTesting(): + self.login(ADMIN_ACCESS_USER) + self.getResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) def test_get_superuser(self): - self.login(ADMIN_ACCESS_USER) - result = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) - self.assertFalse(result['exists']) + with ConfigForTesting(): + self.login(ADMIN_ACCESS_USER) + result = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) + self.assertFalse(result['exists']) def test_post_non_superuser(self): - # No user. - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + with ConfigForTesting(): + # No user. + self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) - # Non-superuser. - self.login(READ_ACCESS_USER) - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) + # Non-superuser. 
+ self.login(READ_ACCESS_USER) + self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) def test_post_superuser_invalid_filename(self): - self.login(ADMIN_ACCESS_USER) - self.postResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) + with ConfigForTesting(): + self.login(ADMIN_ACCESS_USER) + self.postResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) def test_post_superuser(self): - self.login(ADMIN_ACCESS_USER) - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) + with ConfigForTesting(): + self.login(ADMIN_ACCESS_USER) + self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) class TestSuperUserCreateInitialSuperUser(ApiTestCase): def test_no_config_file(self): - # If there is no config.yaml, then this method should security fail. - data = dict(username='cooluser', password='password', email='fake@example.com') - self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + with ConfigForTesting(): + # If there is no config.yaml, then this method should security fail. 
+ data = dict(username='cooluser', password='password', email='fake@example.com') + self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) def test_config_file_with_db_users(self): with ConfigForTesting(): @@ -100,11 +109,12 @@ class TestSuperUserCreateInitialSuperUser(ApiTestCase): class TestSuperUserConfigValidate(ApiTestCase): def test_nonsuperuser_noconfig(self): - self.login(ADMIN_ACCESS_USER) - result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), - data=dict(config={})) + with ConfigForTesting(): + self.login(ADMIN_ACCESS_USER) + result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), + data=dict(config={})) - self.assertFalse(result['status']) + self.assertFalse(result['status']) def test_nonsuperuser_config(self): @@ -128,20 +138,22 @@ class TestSuperUserConfigValidate(ApiTestCase): class TestSuperUserConfig(ApiTestCase): def test_get_non_superuser(self): - # No user. - self.getResponse(SuperUserConfig, expected_code=401) + with ConfigForTesting(): + # No user. + self.getResponse(SuperUserConfig, expected_code=401) - # Non-superuser. - self.login(READ_ACCESS_USER) - self.getResponse(SuperUserConfig, expected_code=403) + # Non-superuser. + self.login(READ_ACCESS_USER) + self.getResponse(SuperUserConfig, expected_code=403) def test_get_superuser(self): - self.login(ADMIN_ACCESS_USER) - json = self.getJsonResponse(SuperUserConfig) + with ConfigForTesting(): + self.login(ADMIN_ACCESS_USER) + json = self.getJsonResponse(SuperUserConfig) - # Note: We expect the config to be none because a config.yaml should never be checked into - # the directory. - self.assertIsNone(json['config']) + # Note: We expect the config to be none because a config.yaml should never be checked into + # the directory. 
+ self.assertIsNone(json['config']) def test_put(self): with ConfigForTesting() as config: diff --git a/util/config/provider.py b/util/config/provider.py index c4ddbc97c..7d135d40f 100644 --- a/util/config/provider.py +++ b/util/config/provider.py @@ -110,9 +110,10 @@ class TestConfigProvider(BaseProvider): the real file system. """ def __init__(self): self.files = {} + self._config = None def update_app_config(self, app_config): - pass + self._config = app_config def get_yaml(self): if not 'config.yaml' in self.files: @@ -136,4 +137,5 @@ class TestConfigProvider(BaseProvider): self.files[filename] = '' def reset_for_test(self): + self._config['SUPER_USERS'] = ['devtable'] self.files = {} From 511c607bbb25a1fc3dfa93287b41cfabe4a7c34e Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 13 Jan 2015 14:33:29 -0500 Subject: [PATCH 27/62] Check for 502s as well --- static/js/controllers.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/static/js/controllers.js b/static/js/controllers.js index e0b04e319..171aa3cac 100644 --- a/static/js/controllers.js +++ b/static/js/controllers.js @@ -3038,7 +3038,7 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, $scope.checkContainerStatus = function() { var errorHandler = function(resp) { - if (resp.status == 404 && $scope.configStep == 'valid-database') { + if ((resp.status == 404 || resp.status == 502) && $scope.configStep == 'valid-database') { // Container has not yet come back up, so we schedule another check. 
$scope.waitForValidConfig(); return; From cc453e7d1090ebece70f9217a5f5c013e8e35efe Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 14 Jan 2015 17:04:02 -0500 Subject: [PATCH 28/62] Fix some issues around validation in the config forms --- static/css/core-ui.css | 8 +------- static/directives/config/config-setup-tool.html | 17 ++++++++++------- static/js/controllers.js | 8 ++++++-- static/partials/super-user.html | 7 ++++++- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/static/css/core-ui.css b/static/css/core-ui.css index 570492b4b..02ffbb34a 100644 --- a/static/css/core-ui.css +++ b/static/css/core-ui.css @@ -278,13 +278,6 @@ padding: 6px; } -.config-setup-tool-element label { - padding-left: 10px; - padding-right: 10px; - margin: 4px; - cursor: pointer; -} - .config-file-field-element input { display: inline-block; width: 78px; @@ -302,6 +295,7 @@ .co-checkbox label { position: relative; padding-left: 28px; + cursor: pointer; } .co-checkbox label:before { diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index 391ba0ce8..a2c3c0fad 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -61,7 +61,8 @@

      Server Hostname: + placeholder="Hostname (and optional port if non-standard)" + pattern="^[a-zA-Z-0-9\.]+(:[0-9]+)?$">
      The HTTP host (and optionally the port number if a non-standard HTTP/HTTPS port) of the location where the registry will be accessible on the network @@ -119,7 +120,8 @@
      Redis Hostname: + placeholder="The redis server hostname" + pattern="^[a-zA-Z-0-9\.]+(:[0-9]+)?$">
      SMTP Server: + placeholder="SMTP server for sending e-mail" + pattern="^[a-zA-Z-0-9\.]+(:[0-9]+)?$">
      Mail Sender: - +
      E-mail address from which all e-mails are sent. If not specified, support@quay.io will be used. @@ -374,7 +377,7 @@ + pattern="^https?://([a-zA-Z0-9]+\.?\/?)+$">
      The Github Enterprise endpoint. Must start with http:// or https://. @@ -506,7 +509,7 @@ + pattern="^https?://([a-zA-Z0-9]+\.?\/?)+$">
      The Github Enterprise endpoint. Must start with http:// or https://. diff --git a/static/js/controllers.js b/static/js/controllers.js index 171aa3cac..f4d5f2349 100644 --- a/static/js/controllers.js +++ b/static/js/controllers.js @@ -2924,7 +2924,7 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, $scope.creatingUser = false; $scope.newUser = {}; $scope.createdUser = resp; - $scope.loadUsers(); + $scope.loadUsersInternal(); }, errorHandler) }; @@ -3028,12 +3028,16 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, }; $scope.createSuperUser = function() { + $scope.createSuperuserIssue = null; $scope.configStep = 'creating-superuser'; ApiService.scCreateInitialSuperuser($scope.superUser, null).then(function(resp) { UserService.load(); $('#createSuperuserModal').modal('hide'); $scope.checkContainerStatus(); - }, ApiService.errorDisplay('Could not create superuser')); + }, function(resp) { + $scope.configStep = 'create-superuser'; + $scope.createSuperuserIssue = ApiService.getErrorMessage(resp, 'Could not create superuser'); + }); }; $scope.checkContainerStatus = function() { diff --git a/static/partials/super-user.html b/static/partials/super-user.html index 46cf248a4..78e8c6f51 100644 --- a/static/partials/super-user.html +++ b/static/partials/super-user.html @@ -292,7 +292,9 @@
      - +
      @@ -302,6 +304,9 @@
      +
      + pattern="{{ HOSTNAME_REGEX }}">
      The HTTP host (and optionally the port number if a non-standard HTTP/HTTPS port) of the location where the registry will be accessible on the network @@ -121,7 +121,8 @@
      + pattern="{{ HOSTNAME_REGEX }}" + validator="validateHostname(value)">>
      + pattern="{{ HOSTNAME_REGEX }}" + validator="validateHostname(value)">>
      Database Server: + placeholder="dbserverhost">
      The server (and optionally, custom port) where the database lives
      diff --git a/util/config/superusermanager.py b/util/config/superusermanager.py new file mode 100644 index 000000000..5930da9cf --- /dev/null +++ b/util/config/superusermanager.py @@ -0,0 +1,38 @@ +from multiprocessing.sharedctypes import Value, Array +from util.validation import MAX_LENGTH + +class SuperUserManager(object): + """ In-memory helper class for quickly accessing (and updating) the valid + set of super users. This class communicates across processes to ensure + that the shared set is always the same. + """ + + def __init__(self, app): + usernames = app.config.get('SUPER_USERS', []) + usernames_str = ','.join(usernames) + + self._max_length = len(usernames_str) + MAX_LENGTH + 1 + self._array = Array('c', self._max_length, lock=True) + self._array.value = usernames_str + + def is_superuser(self, username): + """ Returns if the given username represents a super user. """ + usernames = self._array.value.split(',') + return username in usernames + + def register_superuser(self, username): + """ Registers a new username as a super user for the duration of the container. + Note that this does *not* change any underlying config files. + """ + usernames = self._array.value.split(',') + usernames.append(username) + new_string = ','.join(usernames) + + if len(new_string) <= self._max_length: + self._array.value = new_string + else: + raise Exception('Maximum superuser count reached. Please report this to support.') + + def has_superusers(self): + """ Returns whether there are any superusers defined. 
""" + return bool(self._array.value) From c8229b9c8aca6513454ed13aa6b148d5b9791866 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 23 Jan 2015 17:19:15 -0500 Subject: [PATCH 33/62] Implement new step-by-step setup --- data/migrations/env.py | 3 +- data/runmigration.py | 20 ++ endpoints/api/suconfig.py | 115 +++++- endpoints/web.py | 8 + static/css/core-ui.css | 286 ++++++++++++++- .../directives/config/config-setup-tool.html | 83 ++--- static/directives/cor-loader-inline.html | 5 + static/directives/cor-loader.html | 5 + static/directives/cor-step-bar.html | 3 + static/directives/cor-step.html | 6 + static/js/app.js | 12 +- static/js/controllers.js | 340 ------------------ static/js/controllers/setup.js | 281 +++++++++++++++ static/js/controllers/superuser.js | 205 +++++++++++ static/js/core-config-setup.js | 11 +- static/js/core-ui.js | 93 ++++- static/partials/setup.html | 289 +++++++++++++++ static/partials/super-user.html | 203 +---------- test/test_suconfig_api.py | 5 +- util/config/provider.py | 19 + 20 files changed, 1393 insertions(+), 599 deletions(-) create mode 100644 data/runmigration.py create mode 100644 static/directives/cor-loader-inline.html create mode 100644 static/directives/cor-loader.html create mode 100644 static/directives/cor-step-bar.html create mode 100644 static/directives/cor-step.html create mode 100644 static/js/controllers/setup.js create mode 100644 static/js/controllers/superuser.js create mode 100644 static/partials/setup.html diff --git a/data/migrations/env.py b/data/migrations/env.py index 3b2df5186..108c4c496 100644 --- a/data/migrations/env.py +++ b/data/migrations/env.py @@ -18,7 +18,8 @@ config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI'])) # Interpret the config file for Python logging. # This line sets up loggers basically. 
-fileConfig(config.config_file_name) +if config.config_file_name: + fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support diff --git a/data/runmigration.py b/data/runmigration.py new file mode 100644 index 000000000..b06cf861d --- /dev/null +++ b/data/runmigration.py @@ -0,0 +1,20 @@ +import logging + +from alembic.config import Config +from alembic.script import ScriptDirectory +from alembic.environment import EnvironmentContext +from alembic.migration import __name__ as migration_name + +def run_alembic_migration(log_handler=None): + if log_handler: + logging.getLogger(migration_name).addHandler(log_handler) + + config = Config() + config.set_main_option("script_location", "data:migrations") + script = ScriptDirectory.from_config(config) + + def fn(rev, context): + return script._upgrade_revs('head', rev) + + with EnvironmentContext(config, script, fn=fn, destination_rev='head'): + script.run_env() \ No newline at end of file diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index 05efb4cd7..daaba41ce 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -1,19 +1,22 @@ import logging import os import json +import signal -from flask import abort +from flask import abort, Response from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if, require_fresh_login, request, validate_json_request, verify_not_prod) from endpoints.common import common_login from app import app, CONFIG_PROVIDER, superusers from data import model +from data.database import configure from auth.permissions import SuperUserPermission from auth.auth_context import get_authenticated_user from data.database import User from util.config.configutil import add_enterprise_config_defaults from util.config.validator import validate_service_for_config, SSL_FILENAMES +from data.runmigration import run_alembic_migration import features @@ -21,12 +24,16 @@ logger = logging.getLogger(__name__) def 
database_is_valid(): """ Returns whether the database, as configured, is valid. """ + if app.config['TESTING']: + return False + try: - User.select().limit(1) + list(User.select().limit(1)) return True except: return False + def database_has_users(): """ Returns whether the database has any users defined. """ return bool(list(User.select().limit(1))) @@ -42,17 +49,107 @@ class SuperUserRegistryStatus(ApiResource): @nickname('scRegistryStatus') @verify_not_prod def get(self): - """ Returns whether a valid configuration, database and users exist. """ - file_exists = CONFIG_PROVIDER.yaml_exists() + """ Returns the status of the registry. """ + # If there is no conf/stack volume, then report that status. + if not CONFIG_PROVIDER.volume_exists(): + return { + 'status': 'missing-config-dir' + } + + # If there is no config file, we need to setup the database. + if not CONFIG_PROVIDER.yaml_exists(): + return { + 'status': 'config-db' + } + + # If the database isn't yet valid, then we need to set it up. + if not database_is_valid(): + return { + 'status': 'setup-db' + } + + # If we have SETUP_COMPLETE, then we're ready to go! 
+ if app.config.get('SETUP_COMPLETE', False): + return { + 'requires_restart': CONFIG_PROVIDER.requires_restart(app.config), + 'status': 'ready' + } + return { - 'dir_exists': CONFIG_PROVIDER.volume_exists(), - 'file_exists': file_exists, - 'is_testing': app.config['TESTING'], - 'valid_db': database_is_valid(), - 'ready': not app.config['TESTING'] and file_exists and superusers.has_superusers() + 'status': 'create-superuser' if not database_has_users() else 'config' } +class _AlembicLogHandler(logging.Handler): + def __init__(self): + super(_AlembicLogHandler, self).__init__() + self.records = [] + + def emit(self, record): + self.records.append({ + 'level': record.levelname, + 'message': record.getMessage() + }) + +@resource('/v1/superuser/setupdb') +@internal_only +@show_if(features.SUPER_USERS) +class SuperUserSetupDatabase(ApiResource): + """ Resource for invoking alembic to setup the database. """ + @verify_not_prod + @nickname('scSetupDatabase') + def get(self): + """ Invokes the alembic upgrade process. """ + # Note: This method is called after the database configured is saved, but before the + # database has any tables. Therefore, we only allow it to be run in that unique case. + if CONFIG_PROVIDER.yaml_exists() and not database_is_valid(): + # Note: We need to reconfigure the database here as the config has changed. + combined = dict(**app.config) + combined.update(CONFIG_PROVIDER.get_yaml()) + + configure(combined) + app.config['DB_URI'] = combined['DB_URI'] + + log_handler = _AlembicLogHandler() + + try: + run_alembic_migration(log_handler) + except Exception as ex: + return { + 'error': str(ex) + } + + return { + 'logs': log_handler.records + } + + abort(403) + + + +@resource('/v1/superuser/shutdown') +@internal_only +@show_if(features.SUPER_USERS) +class SuperUserShutdown(ApiResource): + """ Resource for sending a shutdown signal to the container. 
""" + + @verify_not_prod + @nickname('scShutdownContainer') + def post(self): + """ Sends a signal to the phusion init system to shut down the container. """ + # Note: This method is called to set the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. + if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can(): + # Note: We skip if debugging locally. + if app.config.get('DEBUGGING') == True: + return {} + + os.kill(1, signal.SIGINT) + return {} + + abort(403) + + @resource('/v1/superuser/config') @internal_only @show_if(features.SUPER_USERS) diff --git a/endpoints/web.py b/endpoints/web.py index cf4c94bc7..6a1d4f076 100644 --- a/endpoints/web.py +++ b/endpoints/web.py @@ -98,6 +98,7 @@ def organizations(): def user(): return index('') + @web.route('/superuser/') @no_cache @route_show_if(features.SUPER_USERS) @@ -105,6 +106,13 @@ def superuser(): return index('') +@web.route('/setup/') +@no_cache +@route_show_if(features.SUPER_USERS) +def setup(): + return index('') + + @web.route('/signin/') @no_cache def signin(redirect=None): diff --git a/static/css/core-ui.css b/static/css/core-ui.css index 02ffbb34a..1a4e34816 100644 --- a/static/css/core-ui.css +++ b/static/css/core-ui.css @@ -366,10 +366,6 @@ bottom: 0px; } -.config-setup-tool .cor-floating-bottom-bar { - text-align: right; -} - .config-setup-tool .cor-floating-bottom-bar button i.fa { margin-right: 6px; } @@ -418,3 +414,285 @@ font-family: Consolas, "Lucida Console", Monaco, monospace; font-size: 12px; } + +.co-m-loader, .co-m-inline-loader { + min-width: 28px; } + +.co-m-loader { + display: block; + position: absolute; + left: 50%; + top: 50%; + margin: -11px 0 0 -13px; } + +.co-m-inline-loader { + display: inline-block; + cursor: default; } + .co-m-inline-loader:hover { + text-decoration: none; } + +.co-m-loader-dot__one, .co-m-loader-dot__two, .co-m-loader-dot__three { + -webkit-border-radius: 
3px; + -moz-border-radius: 3px; + -ms-border-radius: 3px; + -o-border-radius: 3px; + border-radius: 3px; + animation-fill-mode: both; + -webkit-animation-fill-mode: both; + -moz-animation-fill-mode: both; + -ms-animation-fill-mode: both; + -o-animation-fill-mode: both; + animation-name: bouncedelay; + animation-duration: 1s; + animation-timing-function: ease-in-out; + animation-delay: 0; + animation-direction: normal; + animation-iteration-count: infinite; + animation-fill-mode: forwards; + animation-play-state: running; + -webkit-animation-name: bouncedelay; + -webkit-animation-duration: 1s; + -webkit-animation-timing-function: ease-in-out; + -webkit-animation-delay: 0; + -webkit-animation-direction: normal; + -webkit-animation-iteration-count: infinite; + -webkit-animation-fill-mode: forwards; + -webkit-animation-play-state: running; + -moz-animation-name: bouncedelay; + -moz-animation-duration: 1s; + -moz-animation-timing-function: ease-in-out; + -moz-animation-delay: 0; + -moz-animation-direction: normal; + -moz-animation-iteration-count: infinite; + -moz-animation-fill-mode: forwards; + -moz-animation-play-state: running; + display: inline-block; + height: 6px; + width: 6px; + background: #419eda; + border-radius: 100%; + display: inline-block; } + +.co-m-loader-dot__one { + animation-delay: -0.32s; + -webkit-animation-delay: -0.32s; + -moz-animation-delay: -0.32s; + -ms-animation-delay: -0.32s; + -o-animation-delay: -0.32s; } + +.co-m-loader-dot__two { + animation-delay: -0.16s; + -webkit-animation-delay: -0.16s; + -moz-animation-delay: -0.16s; + -ms-animation-delay: -0.16s; + -o-animation-delay: -0.16s; } + +@-webkit-keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: 
scale(1, 1); + transform: scale(1, 1); } } + +@-moz-keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: scale(1, 1); + transform: scale(1, 1); } } + +@-ms-keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: scale(1, 1); + transform: scale(1, 1); } } + +@keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: scale(1, 1); + transform: scale(1, 1); } } + +.co-dialog .modal-body { + padding: 10px; + min-height: 100px; +} + +.co-dialog .modal-content { + border-radius: 0px; +} + +.co-dialog.fatal-error .modal-content { + padding-left: 175px; +} + +.co-dialog.fatal-error .alert-icon-container-container { + position: absolute; + top: -36px; + left: -175px; + bottom: 20px; +} + +.co-dialog.fatal-error .alert-icon-container { + height: 100%; + display: table; +} + +.co-dialog.fatal-error .alert-icon { + display: table-cell; + vertical-align: middle; + border-right: 1px solid #eee; + margin-right: 20px; +} + +.co-dialog.fatal-error .alert-icon:before { + content: "\f071"; + font-family: FontAwesome; + font-size: 60px; + padding-left: 50px; + padding-right: 50px; + color: #c53c3f; + text-align: center; +} + + 
+.co-dialog .modal-header .cor-step-bar { + float: right; +} + +.co-dialog .modal-footer.working { + text-align: left; +} + +.co-dialog .modal-footer.working .cor-loader-inline { + margin-right: 10px; +} + +.co-dialog .modal-footer .left-align { + float: left; + vertical-align: middle; + font-size: 16px; + margin-top: 8px; +} + +.co-dialog .modal-footer .left-align i.fa-warning { + color: #ffba35; + display: inline-block; + margin-right: 6px; +} + +.co-dialog .modal-footer .left-align i.fa-check { + color: green; + display: inline-block; + margin-right: 6px; +} + +.co-step-bar .co-step-element { + cursor: default; + display: inline-block; + width: 28px; + height: 28px; + + position: relative; + color: #ddd; + + text-align: center; + line-height: 24px; + font-size: 16px; +} + +.co-step-bar .co-step-element.text { + margin-left: 24px; + background: white; +} + +.co-step-bar .co-step-element.icon { + margin-left: 22px; +} + +.co-step-bar .co-step-element:first-child { + margin-left: 0px; +} + +.co-step-bar .co-step-element.active { + color: #53a3d9; +} + +.co-step-bar .co-step-element:first-child:before { + display: none; +} + +.co-step-bar .co-step-element:before { + content: ""; + position: absolute; + top: 12px; + width: 14px; + border-top: 2px solid #ddd; +} + +.co-step-bar .co-step-element.icon:before { + left: -20px; +} + +.co-step-bar .co-step-element.text:before { + left: -22px; +} + +.co-step-bar .co-step-element.active:before { + border-top: 2px solid #53a3d9; +} + + +.co-step-bar .co-step-element.text { + border-radius: 100%; + border: 2px solid #ddd; +} + +.co-step-bar .co-step-element.text.active { + border: 2px solid #53a3d9; +} + +@media screen and (min-width: 900px) { + .co-dialog .modal-dialog { + width: 800px; + } +} + +.co-alert .co-step-bar { + float: right; + margin-top: 6px; +} \ No newline at end of file diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index 2c8397ecb..6b40f1fd5 
100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -553,42 +553,15 @@ -
      - - - - - - diff --git a/test/test_suconfig_api.py b/test/test_suconfig_api.py index a8f74483b..ca05d8705 100644 --- a/test/test_suconfig_api.py +++ b/test/test_suconfig_api.py @@ -21,10 +21,7 @@ class TestSuperUserRegistryStatus(ApiTestCase): def test_registry_status(self): with ConfigForTesting(): json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertTrue(json['is_testing']) - self.assertTrue(json['valid_db']) - self.assertFalse(json['file_exists']) - self.assertFalse(json['ready']) + self.assertEquals('config-db', json['status']) class TestSuperUserConfigFile(ApiTestCase): diff --git a/util/config/provider.py b/util/config/provider.py index 7d135d40f..24380eab0 100644 --- a/util/config/provider.py +++ b/util/config/provider.py @@ -61,6 +61,12 @@ class BaseProvider(object): """ raise NotImplementedError + def requires_restart(self, app_config): + """ If true, the configuration loaded into memory for the app does not match that on disk, + indicating that this container requires a restart. + """ + raise NotImplementedError + class FileConfigProvider(BaseProvider): """ Implementation of the config provider that reads the data from the file system. """ @@ -104,6 +110,16 @@ class FileConfigProvider(BaseProvider): def save_volume_file(self, filename, flask_file): flask_file.save(os.path.join(self.config_volume, filename)) + def requires_restart(self, app_config): + file_config = self.get_yaml() + if not file_config: + return False + + for key in file_config: + if app_config.get(key) != file_config[key]: + return True + + return False class TestConfigProvider(BaseProvider): """ Implementation of the config provider for testing. 
Everything is kept in-memory instead on @@ -136,6 +152,9 @@ class TestConfigProvider(BaseProvider): def save_volume_file(self, filename, flask_file): self.files[filename] = '' + def requires_restart(self, app_config): + return False + def reset_for_test(self): self._config['SUPER_USERS'] = ['devtable'] self.files = {} From c88d97cf8b3bc502c0ef988ad452e563b96007c2 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 26 Jan 2015 12:19:45 -0500 Subject: [PATCH 34/62] Fix typo --- static/partials/about.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/static/partials/about.html b/static/partials/about.html index d154795ac..7440efc09 100644 --- a/static/partials/about.html +++ b/static/partials/about.html @@ -40,7 +40,7 @@

      Our Story

      -

      Quay.io was originally created out of necessesity when we wanted to use Docker containers with our original IDE product. We were using Docker containers to host and isolate server processes invoked on behalf of our users and often running their code. We started by building the Docker image dynamically whenever we spun up a new host node. The image was monolithic. It was too large, took too long to build, and was hard to manage conflicts. It was everything that Docker wasn't supposed to be. When we decided to split it up into pre-built images and host them somewhere, we noticed that there wasn't a good place to host images securely. Determined to scratch our own itch, we built Quay.io, and officially launched it as an aside in our presentation to the Docker New York City Meetup on October 2nd, 2013.

      +

      Quay.io was originally created out of necessity when we wanted to use Docker containers with our original IDE product. We were using Docker containers to host and isolate server processes invoked on behalf of our users and often running their code. We started by building the Docker image dynamically whenever we spun up a new host node. The image was monolithic. It was too large, took too long to build, and was hard to manage conflicts. It was everything that Docker wasn't supposed to be. When we decided to split it up into pre-built images and host them somewhere, we noticed that there wasn't a good place to host images securely. Determined to scratch our own itch, we built Quay.io, and officially launched it as an aside in our presentation to the Docker New York City Meetup on October 2nd, 2013.

      After launch, our customers demanded that Quay.io become our main focus. They rely on us to make sure they can store and distribute their Docker images, and we understand that solemn responsibility. Our customers have been fantastic with giving us great feedback and suggestions.

      In August, 2014, Quay.io joined CoreOS to provide registry support for the enterprise. As ever, we are working as hard as we can to deliver on the promise and execute our vision of what a top notch Docker registry should be.

      From 6a0158d36161628e7711b963b3b3f91b416fd58f Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Mon, 26 Jan 2015 13:46:57 -0500 Subject: [PATCH 35/62] Show a warning in the superuser panel if a container restart is required, and provide a button to do so. This change also moves the restart and monitoring code into a service --- static/css/quay.css | 21 ++++++++ static/js/app.js | 81 +++++++++++++++++++++++++++--- static/js/controllers/setup.js | 44 +++++----------- static/js/controllers/superuser.js | 26 ++++++++-- static/partials/super-user.html | 39 ++++++++++++-- 5 files changed, 164 insertions(+), 47 deletions(-) diff --git a/static/css/quay.css b/static/css/quay.css index 4140f8170..e46fb2aa6 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -4955,3 +4955,24 @@ i.slack-icon { margin-bottom: 20px; padding-left: 22px; } + +.restart-required { + position: relative; + padding-left: 54px; +} + +.restart-required button { + float: right; + margin-top: 4px; +} + +.restart-required button i.fa { + margin-right: 6px; +} + +.restart-required i.fa-warning { + position: absolute; + top: 24px; + left: 16px; + font-size: 28px; +} diff --git a/static/js/app.js b/static/js/app.js index b5c346f1d..380324f8d 100644 --- a/static/js/app.js +++ b/static/js/app.js @@ -1047,12 +1047,35 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading } }; + var freshLoginInProgress = []; + var reject = function(msg) { + for (var i = 0; i < freshLoginInProgress.length; ++i) { + freshLoginInProgress[i].deferred.reject({'data': {'message': msg}}); + } + freshLoginInProgress = []; + }; + + var retry = function() { + for (var i = 0; i < freshLoginInProgress.length; ++i) { + freshLoginInProgress[i].retry(); + } + freshLoginInProgress = []; + }; + var freshLoginFailCheck = function(opName, opArgs) { return function(resp) { var deferred = $q.defer(); // If the error is a fresh login required, show the dialog. 
if (resp.status == 401 && resp.data['error_type'] == 'fresh_login_required') { + var retryOperation = function() { + apiService[opName].apply(apiService, opArgs).then(function(resp) { + deferred.resolve(resp); + }, function(resp) { + deferred.reject(resp); + }); + }; + var verifyNow = function() { var info = { 'password': $('#freshPassword').val() @@ -1062,19 +1085,27 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading // Conduct the sign in of the user. apiService.verifyUser(info).then(function() { - // On success, retry the operation. if it succeeds, then resolve the + // On success, retry the operations. if it succeeds, then resolve the // deferred promise with the result. Otherwise, reject the same. - apiService[opName].apply(apiService, opArgs).then(function(resp) { - deferred.resolve(resp); - }, function(resp) { - deferred.reject(resp); - }); + retry(); }, function(resp) { // Reject with the sign in error. - deferred.reject({'data': {'message': 'Invalid verification credentials'}}); + reject('Invalid verification credentials'); }); }; + // Add the retry call to the in progress list. If there is more than a single + // in progress call, we skip showing the dialog (since it has already been + // shown). 
+ freshLoginInProgress.push({ + 'deferred': deferred, + 'retry': retryOperation + }) + + if (freshLoginInProgress.length > 1) { + return deferred.promise; + } + var box = bootbox.dialog({ "message": 'It has been more than a few minutes since you last logged in, ' + 'so please verify your password to perform this sensitive operation:' + @@ -1092,7 +1123,7 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading "label": "Cancel", "className": "btn-default", "callback": function() { - deferred.reject({'data': {'message': 'Verification canceled'}}); + reject('Verification canceled') } } } @@ -1244,6 +1275,40 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading return cookieService; }]); + $provide.factory('ContainerService', ['ApiService', '$timeout', + function(ApiService, $timeout) { + var containerService = {}; + containerService.restartContainer = function() { + ApiService.scShutdownContainer(null, null).then(function(resp) { + $timeout(function() { + containerService.checkStatus(); + }, 2000); + }, ApiService.errorDisplay('Cannot restart container. Please report this to support.')) + }; + + containerService.scheduleStatusCheck = function(callback) { + $timeout(function() { + containerService.checkStatus(callback); + }, 2000); + }; + + containerService.checkStatus = function(callback) { + var errorHandler = function(resp) { + if (resp.status == 404 || resp.status == 502) { + // Container has not yet come back up, so we schedule another check. + containerService.scheduleStatusCheck(callback); + return; + } + + return ApiService.errorDisplay('Cannot load status. 
Please report this to support')(resp); + }; + + ApiService.scRegistryStatus(null, null).then(callback, errorHandler, /* background */true); + }; + + return containerService; + }]); + $provide.factory('UserService', ['ApiService', 'CookieService', '$rootScope', 'Config', function(ApiService, CookieService, $rootScope, Config) { var userResponse = { diff --git a/static/js/controllers/setup.js b/static/js/controllers/setup.js index f5d1b941a..16a136629 100644 --- a/static/js/controllers/setup.js +++ b/static/js/controllers/setup.js @@ -1,4 +1,4 @@ -function SetupCtrl($scope, $timeout, ApiService, Features, UserService, AngularPollChannel, CoreDialog) { +function SetupCtrl($scope, $timeout, ApiService, Features, UserService, ContainerService, CoreDialog) { if (!Features.SUPER_USERS) { return; } @@ -102,6 +102,11 @@ function SetupCtrl($scope, $timeout, ApiService, Features, UserService, AngularP } }); + $scope.restartContainer = function(state) { + $scope.currentStep = state; + ContainerService.restartContainer(); + }; + $scope.showSuperuserPanel = function() { $('#setupModal').modal('hide'); window.location = '/superuser'; @@ -160,37 +165,6 @@ function SetupCtrl($scope, $timeout, ApiService, Features, UserService, AngularP CoreDialog.fatal(title, message); }; - $scope.restartContainer = function(restartState) { - $scope.currentStep = restartState; - ApiService.scShutdownContainer(null, null).then(function(resp) { - $timeout(function() { - $scope.checkStatus(); - }, 2000); - }, ApiService.errorDisplay('Cannot restart container. Please report this to support.')) - }; - - $scope.scheduleStatusCheck = function() { - $timeout(function() { - $scope.checkStatus(); - }, 3000); - }; - - $scope.checkStatus = function() { - var errorHandler = function(resp) { - if (resp.status == 404 || resp.status == 502) { - // Container has not yet come back up, so we schedule another check. - $scope.scheduleStatusCheck(); - return; - } - - return ApiService.errorDisplay('Cannot load status. 
Please report this to support')(resp); - }; - - ApiService.scRegistryStatus(null, null).then(function(resp) { - $scope.currentStep = resp['status']; - }, errorHandler, /* background */true); - }; - $scope.parseDbUri = function(value) { if (!value) { return null; } @@ -276,6 +250,12 @@ function SetupCtrl($scope, $timeout, ApiService, Features, UserService, AngularP }, ApiService.errorDisplay('Cannot validate database. Please report this to support')); }; + $scope.checkStatus = function() { + ContainerService.checkStatus(function(resp) { + $scope.currentStep = resp['status']; + }); + }; + // Load the initial status. $scope.checkStatus(); } \ No newline at end of file diff --git a/static/js/controllers/superuser.js b/static/js/controllers/superuser.js index 3efbd02d3..d3bbbaab2 100644 --- a/static/js/controllers/superuser.js +++ b/static/js/controllers/superuser.js @@ -1,4 +1,4 @@ -function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, AngularPollChannel, CoreDialog) { +function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, ContainerService, AngularPollChannel, CoreDialog) { if (!Features.SUPER_USERS) { return; } @@ -7,6 +7,7 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, UserService.updateUserIn($scope); $scope.configStatus = null; + $scope.requiresRestart = null; $scope.logsCounter = 0; $scope.newUser = {}; $scope.createdUser = null; @@ -17,6 +18,10 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, $scope.logsScrolled = false; $scope.csrf_token = window.__token; + $scope.configurationSaved = function() { + $scope.requiresRestart = true; + }; + $scope.showCreateUser = function() { $scope.createdUser = null; $('#createUserModal').modal('show'); @@ -183,9 +188,24 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, }, ApiService.errorDisplay('Cannot send recovery email')) }; + $scope.restartContainer = function() { + 
$('#restartingContainerModal').modal({ + keyboard: false, + backdrop: 'static' + }); + + ContainerService.restartContainer(); + $timeout(function() { + $scope.checkStatus(); + }, 2000); + }; + $scope.checkStatus = function() { - ApiService.scRegistryStatus(null, null).then(function(resp) { + ContainerService.checkStatus(function(resp) { + $('#restartingContainerModal').modal('hide'); $scope.configStatus = resp['status']; + $scope.requiresRestart = resp['requires_restart']; + if ($scope.configStatus == 'ready') { $scope.loadUsers(); } else { @@ -197,7 +217,7 @@ function SuperUserAdminCtrl($scope, $timeout, ApiService, Features, UserService, var title = "Installation Incomplete"; CoreDialog.fatal(title, message); } - }, ApiService.errorDisplay('Cannot load status. Please report this to support'), /* background */true); + }); }; // Load the initial status. diff --git a/static/partials/super-user.html b/static/partials/super-user.html index eab840971..a48052d9b 100644 --- a/static/partials/super-user.html +++ b/static/partials/super-user.html @@ -1,6 +1,14 @@
      +
      + + +
      Container restart required!
      + Configuration changes have been made but the container hasn't been restarted yet. +
      Enterprise Registry Management @@ -30,7 +38,8 @@
      -
      +
      @@ -161,7 +170,7 @@
      -
      Database Server: + placeholder="dbserverhost" + pattern="{{ HOSTNAME_REGEX }}" + validator="validateHostname(value)">>
      The server (and optionally, custom port) where the database lives
      From 98b4f62ef7ab40420d993274d4e7bec9d99208b0 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 10 Feb 2015 15:43:01 -0500 Subject: [PATCH 55/62] Switch to using a squashed image for the build workers --- buildman/manager/executor.py | 3 ++- buildman/templates/cloudconfig.yaml | 25 ++++++++++--------------- requirements-nover.txt | 1 + requirements.txt | 1 + 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py index 92641c6ce..035d5cdf8 100644 --- a/buildman/manager/executor.py +++ b/buildman/manager/executor.py @@ -11,6 +11,7 @@ from trollius import coroutine, From, Return, get_event_loop from functools import partial from buildman.asyncutil import AsyncWrapper +from container_cloud_config import CloudConfigContext logger = logging.getLogger(__name__) @@ -20,7 +21,7 @@ ONE_HOUR = 60*60 ENV = Environment(loader=FileSystemLoader('buildman/templates')) TEMPLATE = ENV.get_template('cloudconfig.yaml') - +CloudConfigContext().populate_jinja_environment(ENV) class ExecutorException(Exception): """ Exception raised when there is a problem starting or stopping a builder. 
diff --git a/buildman/templates/cloudconfig.yaml b/buildman/templates/cloudconfig.yaml index 13e6894bf..51bb2f090 100644 --- a/buildman/templates/cloudconfig.yaml +++ b/buildman/templates/cloudconfig.yaml @@ -19,18 +19,13 @@ coreos: group: {{ coreos_channel }} units: - - name: quay-builder.service - command: start - content: | - [Unit] - Description=Quay builder container - Author=Jake Moshenko - After=docker.service - - [Service] - TimeoutStartSec=600 - TimeoutStopSec=2000 - ExecStartPre=/usr/bin/docker login -u {{ quay_username }} -p {{ quay_password }} -e unused quay.io - ExecStart=/usr/bin/docker run --rm --net=host --name quay-builder --privileged --env-file /root/overrides.list -v /var/run/docker.sock:/var/run/docker.sock -v /usr/share/ca-certificates:/etc/ssl/certs quay.io/coreos/registry-build-worker:{{ worker_tag }} - ExecStop=/usr/bin/docker stop quay-builder - ExecStopPost=/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff" + {{ dockersystemd('quay-builder', + 'quay.io/coreos/registry-build-worker', + quay_username, + quay_password, + worker_tag, + extra_args='--net=host --privileged --env-file /root/overrides.list -v /var/run/docker.sock:/var/run/docker.sock -v /usr/share/ca-certificates:/etc/ssl/certs', + exec_stop_post=['/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff"'], + flattened=True, + restart_policy='no' + ) | indent(4) }} diff --git a/requirements-nover.txt b/requirements-nover.txt index 3ac8c84a6..83953b6be 100644 --- a/requirements-nover.txt +++ b/requirements-nover.txt @@ -40,6 +40,7 @@ git+https://github.com/DevTable/aniso8601-fake.git git+https://github.com/DevTable/anunidecode.git git+https://github.com/DevTable/avatar-generator.git git+https://github.com/DevTable/pygithub.git +git+https://github.com/DevTable/container-cloud-config.git git+https://github.com/jplana/python-etcd.git gipc pygpgme diff --git a/requirements.txt b/requirements.txt index 73ce4da45..7981d1c2b 100644 --- a/requirements.txt +++ 
b/requirements.txt @@ -63,5 +63,6 @@ git+https://github.com/DevTable/aniso8601-fake.git git+https://github.com/DevTable/anunidecode.git git+https://github.com/DevTable/avatar-generator.git git+https://github.com/DevTable/pygithub.git +git+https://github.com/DevTable/container-cloud-config.git git+https://github.com/NateFerrero/oauth2lib.git git+https://github.com/jplana/python-etcd.git From 893ae46dec6b7114504fd41252890ddb461bf205 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 10 Feb 2015 21:46:58 -0500 Subject: [PATCH 56/62] Add an ImageTree class and change to searching *all applicable* branches when looking for the best cache tag. --- buildman/jobutil/buildjob.py | 41 ++++++------- buildman/jobutil/buildstatus.py | 8 +++ test/test_imagetree.py | 96 +++++++++++++++++++++++++++++ util/imagetree.py | 103 ++++++++++++++++++++++++++++++++ 4 files changed, 228 insertions(+), 20 deletions(-) create mode 100644 test/test_imagetree.py create mode 100644 util/imagetree.py diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index bcf2c33c2..46dab931c 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -4,6 +4,7 @@ import logging from cachetools import lru_cache from endpoints.notificationhelper import spawn_notification from data import model +from util.imagetree import ImageTree logger = logging.getLogger(__name__) @@ -91,31 +92,31 @@ class BuildJob(object): repo_namespace = repo_build.repository.namespace_user.username repo_name = repo_build.repository.name - current_image = model.get_image(repo_build.repository, base_image_id) - next_image = None - if current_image is None: + base_image = model.get_image(repo_build.repository, base_image_id) + if base_image is None: return None - # For each cache comment, find a child image that matches the command. 
-    for cache_command in cache_commands:
-      full_command = '["/bin/sh", "-c", "%s"]' % cache_command
-      next_image = model.find_child_image(repo_build.repository, current_image, full_command)
-      if next_image is None:
-        break
+    # Build an in-memory tree of the full hierarchy of images in the repository.
+    all_images = model.get_repository_images(repo_namespace, repo_name)
+    all_tags = model.list_repository_tags(repo_namespace, repo_name)
+    tree = ImageTree(all_images, all_tags, base_filter=base_image.id)
-      current_image = next_image
-      logger.debug('Found cached image %s for comment %s', current_image.id, full_command)
+    # Find a path in the tree, starting at the base image, that matches the cache comments
+    # or some subset thereof.
+    def checker(step, image):
+      if step >= len(cache_commands):
+        return False
-    # Find a tag associated with the image, if any.
-    # TODO(jschorr): We should just return the image ID instead of a parent tag, OR we should
-    # make this more efficient.
-    for tag in model.list_repository_tags(repo_namespace, repo_name):
-      tag_image = tag.image
-      ancestor_index = '/%s/' % current_image.id
-      if ancestor_index in tag_image.ancestors or tag_image.id == current_image.id:
-        return tag.name
+ return tree.tag_containing_images(path[-1]) - return None def _determine_cached_tag_by_tag(self): """ Determines the cached tag by looking for one of the tags being built, and seeing if it diff --git a/buildman/jobutil/buildstatus.py b/buildman/jobutil/buildstatus.py index 393ecd3b4..2ae127ee0 100644 --- a/buildman/jobutil/buildstatus.py +++ b/buildman/jobutil/buildstatus.py @@ -7,6 +7,7 @@ class StatusHandler(object): def __init__(self, build_logs, repository_build_uuid): self._current_phase = None + self._current_command = None self._uuid = repository_build_uuid self._build_logs = build_logs @@ -26,9 +27,16 @@ class StatusHandler(object): self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data) def append_log(self, log_message, extra_data=None): + if log_message is None: + return + self._append_log_message(log_message, log_data=extra_data) def set_command(self, command, extra_data=None): + if self._current_command == command: + return + + self._current_command = command self._append_log_message(command, self._build_logs.COMMAND, extra_data) def set_error(self, error_message, extra_data=None, internal_error=False): diff --git a/test/test_imagetree.py b/test/test_imagetree.py new file mode 100644 index 000000000..824709be9 --- /dev/null +++ b/test/test_imagetree.py @@ -0,0 +1,96 @@ +import unittest + +from app import app +from util.imagetree import ImageTree +from initdb import setup_database_for_testing, finished_database_for_testing +from data import model + +NAMESPACE = 'devtable' +SIMPLE_REPO = 'simple' +COMPLEX_REPO = 'complex' + +class TestImageTree(unittest.TestCase): + def setUp(self): + setup_database_for_testing(self) + self.app = app.test_client() + self.ctx = app.test_request_context() + self.ctx.__enter__() + + def tearDown(self): + finished_database_for_testing(self) + self.ctx.__exit__(True, None, None) + + def _get_base_image(self, all_images): + for image in all_images: + if image.ancestors == '/': + return image + + 
return None + + def test_longest_path_simple_repo(self): + all_images = list(model.get_repository_images(NAMESPACE, SIMPLE_REPO)) + all_tags = list(model.list_repository_tags(NAMESPACE, SIMPLE_REPO)) + tree = ImageTree(all_images, all_tags) + + base_image = self._get_base_image(all_images) + tag_image = all_tags[0].image + + def checker(index, image): + return True + + ancestors = tag_image.ancestors.split('/')[2:-1] # Skip the first image. + result = tree.find_longest_path(base_image.id, checker) + self.assertEquals(3, len(result)) + for index in range(0, 2): + self.assertEquals(int(ancestors[index]), result[index].id) + + self.assertEquals('latest', tree.tag_containing_image(result[-1])) + + def test_longest_path_complex_repo(self): + all_images = list(model.get_repository_images(NAMESPACE, COMPLEX_REPO)) + all_tags = list(model.list_repository_tags(NAMESPACE, COMPLEX_REPO)) + tree = ImageTree(all_images, all_tags) + + base_image = self._get_base_image(all_images) + + def checker(index, image): + return True + + result = tree.find_longest_path(base_image.id, checker) + self.assertEquals(4, len(result)) + self.assertEquals('v2.0', tree.tag_containing_image(result[-1])) + + def test_filtering(self): + all_images = list(model.get_repository_images(NAMESPACE, COMPLEX_REPO)) + all_tags = list(model.list_repository_tags(NAMESPACE, COMPLEX_REPO)) + tree = ImageTree(all_images, all_tags, parent_filter=1245) + + base_image = self._get_base_image(all_images) + + def checker(index, image): + return True + + result = tree.find_longest_path(base_image.id, checker) + self.assertEquals(0, len(result)) + + def test_find_tag_parent_image(self): + all_images = list(model.get_repository_images(NAMESPACE, COMPLEX_REPO)) + all_tags = list(model.list_repository_tags(NAMESPACE, COMPLEX_REPO)) + tree = ImageTree(all_images, all_tags) + + base_image = self._get_base_image(all_images) + + def checker(index, image): + return True + + result = tree.find_longest_path(base_image.id, checker) 
+ self.assertEquals(4, len(result)) + + # Only use the first two images. They don't have tags, but the method should + # still return the tag that contains them. + self.assertEquals('v2.0', tree.tag_containing_image(result[0])) + + +if __name__ == '__main__': + unittest.main() + diff --git a/util/imagetree.py b/util/imagetree.py new file mode 100644 index 000000000..39cd5c3c9 --- /dev/null +++ b/util/imagetree.py @@ -0,0 +1,103 @@ +class ImageTreeNode(object): + """ A node in the image tree. """ + def __init__(self, image): + self.image = image + self.parent = None + self.children = [] + self.tags = [] + + def add_child(self, child): + self.children.append(child) + child.parent = self + + def add_tag(self, tag): + self.tags.append(tag) + + +class ImageTree(object): + """ In-memory tree for easy traversal and lookup of images in a repository. """ + + def __init__(self, all_images, all_tags, base_filter=None): + self._tag_map = {} + self._image_map = {} + + self._build(all_images, all_tags, base_filter) + + def _build(self, all_images, all_tags, base_filter=None): + # Build nodes for each of the images. + for image in all_images: + ancestors = image.ancestors.split('/')[1:-1] + + # Filter any unneeded images. + if base_filter is not None: + if image.id != base_filter and not str(base_filter) in ancestors: + continue + + self._image_map[image.id] = ImageTreeNode(image) + + # Connect the nodes to their parents. + for image_node in self._image_map.values(): + image = image_node.image + parent_image_id = image.ancestors.split('/')[-2] if image.ancestors else None + if not parent_image_id: + continue + + parent_node = self._image_map.get(int(parent_image_id)) + if parent_node is not None: + parent_node.add_child(image_node) + + # Build the tag map. 
+ for tag in all_tags: + image_node = self._image_map.get(tag.image.id) + if not image_node: + continue + + self._tag_map = image_node + image_node.add_tag(tag.name) + + + def find_longest_path(self, image_id, checker): + """ Returns a list of images representing the longest path that matches the given + checker function, starting from the given image_id *exclusive*. + """ + start_node = self._image_map.get(image_id) + if not start_node: + return [] + + return self._find_longest_path(start_node, checker, -1)[1:] + + + def _find_longest_path(self, image_node, checker, index): + found_path = [] + + for child_node in image_node.children: + if not checker(index + 1, child_node.image): + continue + + found = self._find_longest_path(child_node, checker, index + 1) + if found and len(found) > len(found_path): + found_path = found + + return [image_node.image] + found_path + + + def tag_containing_image(self, image): + """ Returns the name of the closest tag containing the given image. """ + if not image: + return None + + # Check the current image for a tag. + image_node = self._image_map.get(image.id) + if image_node is None: + return None + + if image_node.tags: + return image_node.tags[0] + + # Check any deriving images for a tag. 
+ for child_node in image_node.children: + found = self.tag_containing_image(child_node.image) + if found is not None: + return found + + return None \ No newline at end of file From f8a917ec26d57c239f8be871b9b3518f5ef4cb19 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 10 Feb 2015 22:02:39 -0500 Subject: [PATCH 57/62] Fix test --- test/test_imagetree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_imagetree.py b/test/test_imagetree.py index 824709be9..d72eb6505 100644 --- a/test/test_imagetree.py +++ b/test/test_imagetree.py @@ -63,7 +63,7 @@ class TestImageTree(unittest.TestCase): def test_filtering(self): all_images = list(model.get_repository_images(NAMESPACE, COMPLEX_REPO)) all_tags = list(model.list_repository_tags(NAMESPACE, COMPLEX_REPO)) - tree = ImageTree(all_images, all_tags, parent_filter=1245) + tree = ImageTree(all_images, all_tags, base_filter=1245) base_image = self._get_base_image(all_images) From e1a15464a195d69720f938735ae5dabbc6723977 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 11 Feb 2015 16:02:36 -0500 Subject: [PATCH 58/62] Fix typo, add some logging and fix command comparison --- buildman/jobutil/buildjob.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index 46dab931c..1710c3aab 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -108,14 +108,17 @@ class BuildJob(object): return False full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step] - return image.storage.comment == full_command + logger.debug('Checking step #%s: %s, %s == %s', step, image.id, + image.storage.command, full_command) + + return image.storage.command == full_command path = tree.find_longest_path(base_image.id, checker) if not path: return None # Find any tag associated with the last image in the path. 
- return tree.tag_containing_images(path[-1]) + return tree.tag_containing_image(path[-1]) def _determine_cached_tag_by_tag(self): From 42db22157635a269eca02dcdda16f919facdd7d3 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 11 Feb 2015 16:25:09 -0500 Subject: [PATCH 59/62] Disable proxy server buffer changes --- conf/proxy-server-base.conf | 1 - conf/server-base.conf | 1 - 2 files changed, 2 deletions(-) diff --git a/conf/proxy-server-base.conf b/conf/proxy-server-base.conf index 5bee725cf..fb2f3f962 100644 --- a/conf/proxy-server-base.conf +++ b/conf/proxy-server-base.conf @@ -13,7 +13,6 @@ proxy_set_header X-Forwarded-For $proxy_protocol_addr; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $http_host; proxy_redirect off; -proxy_buffer_size 6m; proxy_set_header Transfer-Encoding $http_transfer_encoding; diff --git a/conf/server-base.conf b/conf/server-base.conf index 6ac4dfb28..d5b211c52 100644 --- a/conf/server-base.conf +++ b/conf/server-base.conf @@ -16,7 +16,6 @@ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $http_host; proxy_redirect off; -proxy_buffer_size 6m; proxy_set_header Transfer-Encoding $http_transfer_encoding; From f796c281d576f49192219a6d2893134ffea7a4bb Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Wed, 11 Feb 2015 17:12:53 -0500 Subject: [PATCH 60/62] Remove support for v0.2 --- buildman/component/buildcomponent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildman/component/buildcomponent.py b/buildman/component/buildcomponent.py index dc66dcfa1..72caec215 100644 --- a/buildman/component/buildcomponent.py +++ b/buildman/component/buildcomponent.py @@ -19,7 +19,7 @@ HEARTBEAT_DELTA = datetime.timedelta(seconds=30) HEARTBEAT_TIMEOUT = 10 INITIAL_TIMEOUT = 25 -SUPPORTED_WORKER_VERSIONS = ['0.2', '0.3'] +SUPPORTED_WORKER_VERSIONS = ['0.3'] logger = logging.getLogger(__name__) From 
7a199f63eb0ede1efbd87033c09ca76394cf4379 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 12 Feb 2015 14:00:26 -0500 Subject: [PATCH 61/62] Various small fixes and add support for subjectAltName to the SSL cert check --- static/js/controllers/setup.js | 7 +++++-- static/partials/setup.html | 3 ++- util/config/validator.py | 20 +++++++++++++++++--- 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/static/js/controllers/setup.js b/static/js/controllers/setup.js index e3f6b0772..9dc76a17f 100644 --- a/static/js/controllers/setup.js +++ b/static/js/controllers/setup.js @@ -123,7 +123,7 @@ function SetupCtrl($scope, $timeout, ApiService, Features, UserService, Containe $scope.showSuperuserPanel = function() { $('#setupModal').modal('hide'); - var prefix = scope.hasSSL ? 'https' : 'http'; + var prefix = $scope.hasSSL ? 'https' : 'http'; var hostname = $scope.hostname; window.location = prefix + '://' + hostname + '/superuser'; }; @@ -198,9 +198,12 @@ function SetupCtrl($scope, $timeout, ApiService, Features, UserService, Containe }; $scope.serializeDbUri = function(fields) { - if (!fields['server']) { return '' }; + if (!fields['server']) { return ''; } try { + if (!fields['server']) { return ''; } + if (!fields['database']) { return ''; } + var uri = URI(); uri = uri && uri.host(fields['server']); uri = uri && uri.protocol(fields['kind']); diff --git a/static/partials/setup.html b/static/partials/setup.html index 644f4c070..dba8a2b33 100644 --- a/static/partials/setup.html +++ b/static/partials/setup.html @@ -226,7 +226,8 @@ Problem Detected - diff --git a/util/config/validator.py b/util/config/validator.py index 385539899..3c1671c2b 100644 --- a/util/config/validator.py +++ b/util/config/validator.py @@ -195,9 +195,23 @@ def _validate_ssl(config): if common_name is None: raise Exception('Missing CommonName (CN) from SSL certificate') - if not fnmatch(config['SERVER_HOSTNAME'], common_name): - raise Exception('CommonName (CN) "%s" in SSL cert does 
not match server hostname "%s"' % - (common_name, config['SERVER_HOSTNAME'])) + # Build the list of allowed host patterns. + hosts = set([common_name]) + + # Find the DNS extension, if any. + for i in range(0, cert.get_extension_count()): + ext = cert.get_extension(i) + if ext.get_short_name() == 'subjectAltName': + value = str(ext) + hosts.update([host.strip()[4:] for host in value.split(',')]) + + # Check each host. + for host in hosts: + if fnmatch(config['SERVER_HOSTNAME'], host): + return + + raise Exception('Supported names "%s" in SSL cert do not match server hostname "%s"' % + (', '.join(list(hosts)), config['SERVER_HOSTNAME'])) From e8458267fdaa72693155383eee1505bc26280f9c Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 12 Feb 2015 15:27:05 -0500 Subject: [PATCH 62/62] Add missing phase info --- static/css/quay.css | 6 +++--- static/js/app.js | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/static/css/quay.css b/static/css/quay.css index e46fb2aa6..9046bf722 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -857,8 +857,8 @@ i.toggle-icon:hover { background-color: #f0ad4e; } -.phase-icon.priming-cache { - background-color: #ddd; +.phase-icon.priming-cache, .phase-icon.checking-cache { + background-color: #cab442; } .phase-icon.pushing { @@ -2586,7 +2586,7 @@ p.editable:hover i { } .repo-build .build-pane .build-logs .log-container.command { - margin-left: 42px; + margin-left: 22px; } .repo-build .build-pane .build-logs .container-header.building { diff --git a/static/js/app.js b/static/js/app.js index 29db85ae3..ed9c7a842 100644 --- a/static/js/app.js +++ b/static/js/app.js @@ -5824,6 +5824,9 @@ quayApp.directive('buildMessage', function () { case 'building': return 'Building image from Dockerfile'; + case 'checking-cache': + return 'Looking up cached images'; + case 'priming-cache': return 'Priming cache for build'; @@ -5880,6 +5883,7 @@ quayApp.directive('buildProgress', function () { break; case 'initializing': + 
case 'checking-cache': case 'starting': case 'waiting': case 'cannot_load':