- Save Configuration Changes
- Configuration Saved
- {{ configform.$error['required'].length }} configuration fields remaining
- Invalid configuration field
- {{ serviceInfo.service.title }}
- {{ serviceInfo.errorMessage }}
diff --git a/config_app/js/main.ts b/config_app/js/main.ts
deleted file mode 100644
index cdd326001..000000000
--- a/config_app/js/main.ts
+++ /dev/null
@@ -1,37 +0,0 @@
-// imports shims, etc
-import 'core-js';
-
-import * as angular from 'angular';
-import { ConfigAppModule } from './config-app.module';
-import { bundle } from 'ng-metadata/core';
-
-// load all app dependencies
-require('../static/lib/angular-file-upload.min.js');
-require('../../static/js/tar');
-
-const ng1QuayModule: string = bundle(ConfigAppModule, []).name;
-angular.module('quay-config', [ng1QuayModule])
- .run(() => {
- });
-
-declare var require: any;
-function requireAll(r) {
- r.keys().forEach(r);
-}
-
-// load all services
-requireAll(require.context('./services', true, /\.js$/));
-
-
-// load all the components after services
-requireAll(require.context('./setup', true, /\.js$/));
-requireAll(require.context('./core-config-setup', true, /\.js$/));
-requireAll(require.context('./components', true, /\.js$/));
-
-// load config-app specific css
-requireAll(require.context('../static/css', true, /\.css$/));
-
-
-// Load all the main quay css
-requireAll(require.context('../../static/css', true, /\.css$/));
-requireAll(require.context('../../static/lib', true, /\.css$/));
diff --git a/config_app/js/services/angular-poll-channel.js b/config_app/js/services/angular-poll-channel.js
deleted file mode 100644
index 697a04e15..000000000
--- a/config_app/js/services/angular-poll-channel.js
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Specialized class for conducting an HTTP poll, while properly preventing multiple calls.
- */
-angular.module('quay-config').factory('AngularPollChannel',
- ['ApiService', '$timeout', 'DocumentVisibilityService', 'CORE_EVENT', '$rootScope',
- function(ApiService, $timeout, DocumentVisibilityService, CORE_EVENT, $rootScope) {
- var _PollChannel = function(scope, requester, opt_sleeptime) {
- this.scope_ = scope;
- this.requester_ = requester;
- this.sleeptime_ = opt_sleeptime || (60 * 1000 /* 60s */);
- this.timer_ = null;
-
- this.working = false;
- this.polling = false;
- this.skipping = false;
-
- var that = this;
-
- var visibilityHandler = $rootScope.$on(CORE_EVENT.DOC_VISIBILITY_CHANGE, function() {
- // If the poll channel was skipping because the visibility was hidden, call it immediately.
- if (that.skipping && !DocumentVisibilityService.isHidden()) {
- that.call_();
- }
- });
-
- scope.$on('$destroy', function() {
- that.stop();
- visibilityHandler();
- });
- };
-
- _PollChannel.prototype.setSleepTime = function(sleepTime) {
- this.sleeptime_ = sleepTime;
- this.stop();
- this.start(true);
- };
-
- _PollChannel.prototype.stop = function() {
- if (this.timer_) {
- $timeout.cancel(this.timer_);
- this.timer_ = null;
- this.polling = false;
- }
-
- this.skipping = false;
- this.working = false;
- };
-
- _PollChannel.prototype.start = function(opt_skipFirstCall) {
- // Make sure we invoke call outside the normal digest cycle, since
- // we'll call $scope.$apply ourselves.
- var that = this;
- setTimeout(function() {
- if (opt_skipFirstCall) {
- that.setupTimer_();
- return;
- }
-
- that.call_();
- }, 0);
- };
-
- _PollChannel.prototype.call_ = function() {
- if (this.working) { return; }
-
- // If the document is currently hidden, skip the call.
- if (DocumentVisibilityService.isHidden()) {
- this.skipping = true;
- this.setupTimer_();
- return;
- }
-
- var that = this;
- this.working = true;
-
- $timeout(function() {
- that.requester_(function(status) {
- if (status) {
- that.working = false;
- that.skipping = false;
- that.setupTimer_();
- } else {
- that.stop();
- }
- });
- }, 0);
- };
-
- _PollChannel.prototype.setupTimer_ = function() {
- if (this.timer_) { return; }
-
- var that = this;
- this.polling = true;
- this.timer_ = $timeout(function() {
- that.timer_ = null;
- that.call_();
- }, this.sleeptime_)
- };
-
- var service = {
- 'create': function(scope, requester, opt_sleeptime) {
- return new _PollChannel(scope, requester, opt_sleeptime);
- }
- };
-
- return service;
-}]);
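For context on how the deleted poll channel was consumed, here is a minimal, illustrative sketch of a controller driving it; the controller name and the 10-second interval are invented, while the AngularPollChannel and ApiService calls are the ones defined in this diff.

// Illustrative sketch only (names invented): polling an endpoint via AngularPollChannel.
angular.module('quay-config').controller('ExamplePollCtrl', ['$scope', 'AngularPollChannel', 'ApiService',
  function($scope, AngularPollChannel, ApiService) {
    // The requester receives a "continue" callback: invoke it with true to keep
    // polling, or false to stop the channel.
    var requester = function(continueCallback) {
      ApiService.scRegistryStatus(null, null, /* background */ true).then(function(resp) {
        $scope.status = resp['status'];
        continueCallback(true);
      }, function() {
        continueCallback(false);
      });
    };

    // create(scope, requester, opt_sleeptime) wires up $destroy and visibility handling.
    $scope.pollChannel = AngularPollChannel.create($scope, requester, 10 * 1000);
    $scope.pollChannel.start();
  }]);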
diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js
deleted file mode 100644
index 814e25a45..000000000
--- a/config_app/js/services/api-service.js
+++ /dev/null
@@ -1,335 +0,0 @@
-/**
- * Service which exposes the server-defined API as a nice set of helper methods and automatic
- * callbacks. Any method defined on the server is exposed here as an equivalent method. Also
- * defines some helper functions for working with API responses.
- */
-angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilService', function(Restangular, $q, UtilService) {
- var apiService = {};
-
- if (!window.__endpoints) {
- return apiService;
- }
-
- var getResource = function(getMethod, operation, opt_parameters, opt_background) {
- var resource = {};
- resource.withOptions = function(options) {
- this.options = options;
- return this;
- };
-
- resource.get = function(processor, opt_errorHandler) {
- var options = this.options;
- var result = {
- 'loading': true,
- 'value': null,
- 'hasError': false
- };
-
- getMethod(options, opt_parameters, opt_background, true).then(function(resp) {
- result.value = processor(resp);
- result.loading = false;
- }, function(resp) {
- result.hasError = true;
- result.loading = false;
- if (opt_errorHandler) {
- opt_errorHandler(resp);
- }
- });
-
- return result;
- };
-
- return resource;
- };
-
- var buildUrl = function(path, parameters) {
- // We already have /api/v1/ on the URLs, so remove them from the paths.
- path = path.substr('/api/v1/'.length, path.length);
-
- // Build the path, adjusted with the inline parameters.
- var used = {};
- var url = '';
- for (var i = 0; i < path.length; ++i) {
- var c = path[i];
- if (c == '{') {
- var end = path.indexOf('}', i);
- var varName = path.substr(i + 1, end - i - 1);
-
- if (!parameters[varName]) {
- throw new Error('Missing parameter: ' + varName);
- }
-
- used[varName] = true;
- url += parameters[varName];
- i = end;
- continue;
- }
-
- url += c;
- }
-
- // Append any query parameters.
- var isFirst = true;
- for (var paramName in parameters) {
- if (!parameters.hasOwnProperty(paramName)) { continue; }
- if (used[paramName]) { continue; }
-
- var value = parameters[paramName];
- if (value) {
- url += isFirst ? '?' : '&';
- url += paramName + '=' + encodeURIComponent(value)
- isFirst = false;
- }
- }
-
- return url;
- };
-
- var getGenericOperationName = function(userOperationName) {
- return userOperationName.replace('User', '');
- };
-
- var getMatchingUserOperationName = function(orgOperationName, method, userRelatedResource) {
- if (userRelatedResource) {
- if (userRelatedResource[method.toLowerCase()]) {
- return userRelatedResource[method.toLowerCase()]['operationId'];
- }
- }
-
- throw new Error('Could not find user operation matching org operation: ' + orgOperationName);
- };
-
- var freshLoginInProgress = [];
- var reject = function(msg) {
- for (var i = 0; i < freshLoginInProgress.length; ++i) {
- freshLoginInProgress[i].deferred.reject({'data': {'message': msg}});
- }
- freshLoginInProgress = [];
- };
-
- var retry = function() {
- for (var i = 0; i < freshLoginInProgress.length; ++i) {
- freshLoginInProgress[i].retry();
- }
- freshLoginInProgress = [];
- };
-
- var freshLoginFailCheck = function(opName, opArgs) {
- return function(resp) {
- var deferred = $q.defer();
-
- // If the error is a fresh login required, show the dialog.
- // TODO: remove error_type (old style error)
- var fresh_login_required = resp.data['title'] == 'fresh_login_required' || resp.data['error_type'] == 'fresh_login_required';
- if (resp.status == 401 && fresh_login_required) {
- var retryOperation = function() {
- apiService[opName].apply(apiService, opArgs).then(function(resp) {
- deferred.resolve(resp);
- }, function(resp) {
- deferred.reject(resp);
- });
- };
-
- var verifyNow = function() {
- if (!$('#freshPassword').val()) {
- return;
- }
-
- var info = {
- 'password': $('#freshPassword').val()
- };
-
- $('#freshPassword').val('');
-
- // Conduct the sign in of the user.
- apiService.verifyUser(info).then(function() {
- // On success, retry the operations. if it succeeds, then resolve the
- // deferred promise with the result. Otherwise, reject the same.
- retry();
- }, function(resp) {
- // Reject with the sign in error.
- reject('Invalid verification credentials');
- });
- };
-
- // Add the retry call to the in progress list. If there is more than a single
- // in progress call, we skip showing the dialog (since it has already been
- // shown).
- freshLoginInProgress.push({
- 'deferred': deferred,
- 'retry': retryOperation
- })
-
- if (freshLoginInProgress.length > 1) {
- return deferred.promise;
- }
-
- var box = bootbox.dialog({
- "message": 'It has been more than a few minutes since you last logged in, ' +
- 'so please verify your password to perform this sensitive operation:' +
-          '<form action="javascript:void(0)"><input id="freshPassword" class="form-control" type="password"></form>',
- "title": 'Please Verify',
- "buttons": {
- "verify": {
- "label": "Verify",
- "className": "btn-success btn-continue",
- "callback": verifyNow
- },
- "close": {
- "label": "Cancel",
- "className": "btn-default",
- "callback": function() {
- reject('Verification canceled')
- }
- }
- }
- });
-
- box.bind('shown.bs.modal', function(){
- box.find("input").focus();
- box.find("form").submit(function() {
- if (!$('#freshPassword').val()) { return; }
-
- box.modal('hide');
- verifyNow();
- });
- });
-
- // Return a new promise. We'll accept or reject it based on the result
- // of the login.
- return deferred.promise;
- }
-
- // Otherwise, we just 'raise' the error via the reject method on the promise.
- return $q.reject(resp);
- };
- };
-
- var buildMethodsForOperation = function(operation, method, path, resourceMap) {
- var operationName = operation['operationId'];
- var urlPath = path['x-path'];
-
- // Add the operation itself.
- apiService[operationName] = function(opt_options, opt_parameters, opt_background, opt_forceget, opt_responseType) {
- var one = Restangular.one(buildUrl(urlPath, opt_parameters));
-
- if (opt_background || opt_responseType) {
- let httpConfig = {};
-
- if (opt_background) {
- httpConfig['ignoreLoadingBar'] = true;
- }
- if (opt_responseType) {
- httpConfig['responseType'] = opt_responseType;
- }
-
- one.withHttpConfig(httpConfig);
- }
-
- var opObj = one[opt_forceget ? 'get' : 'custom' + method.toUpperCase()](opt_options);
-
- // If the operation requires_fresh_login, then add a specialized error handler that
- // will defer the operation's result if sudo is requested.
- if (operation['x-requires-fresh-login']) {
- opObj = opObj.catch(freshLoginFailCheck(operationName, arguments));
- }
- return opObj;
- };
-
- // If the method for the operation is a GET, add an operationAsResource method.
- if (method == 'get') {
- apiService[operationName + 'AsResource'] = function(opt_parameters, opt_background) {
- var getMethod = apiService[operationName];
- return getResource(getMethod, operation, opt_parameters, opt_background);
- };
- }
-
- // If the operation has a user-related operation, then make a generic operation for this operation
- // that can call both the user and the organization versions of the operation, depending on the
- // parameters given.
- if (path['x-user-related']) {
- var userOperationName = getMatchingUserOperationName(operationName, method, resourceMap[path['x-user-related']]);
- var genericOperationName = getGenericOperationName(userOperationName);
- apiService[genericOperationName] = function(orgname, opt_options, opt_parameters, opt_background) {
- if (orgname) {
- if (orgname.name) {
- orgname = orgname.name;
- }
-
- var params = jQuery.extend({'orgname' : orgname}, opt_parameters || {}, opt_background);
- return apiService[operationName](opt_options, params);
- } else {
- return apiService[userOperationName](opt_options, opt_parameters, opt_background);
- }
- };
- }
- };
-
-
- var allowedMethods = ['get', 'post', 'put', 'delete'];
- var resourceMap = {};
- var forEachOperation = function(callback) {
- for (var path in window.__endpoints) {
- if (!window.__endpoints.hasOwnProperty(path)) {
- continue;
- }
-
- for (var method in window.__endpoints[path]) {
- if (!window.__endpoints[path].hasOwnProperty(method)) {
- continue;
- }
-
- if (allowedMethods.indexOf(method.toLowerCase()) < 0) { continue; }
- callback(window.__endpoints[path][method], method, window.__endpoints[path]);
- }
- }
- };
-
- // Build the map of resource names to their objects.
- forEachOperation(function(operation, method, path) {
- resourceMap[path['x-name']] = path;
- });
-
- // Construct the methods for each API endpoint.
- forEachOperation(function(operation, method, path) {
- buildMethodsForOperation(operation, method, path, resourceMap);
- });
-
- apiService.getErrorMessage = function(resp, defaultMessage) {
- var message = defaultMessage;
- if (resp && resp['data']) {
- //TODO: remove error_message and error_description (old style error)
- message = resp['data']['detail'] || resp['data']['error_message'] || resp['data']['message'] || resp['data']['error_description'] || message;
- }
-
- return message;
- };
-
- apiService.errorDisplay = function(defaultMessage, opt_handler) {
- return function(resp) {
- var message = apiService.getErrorMessage(resp, defaultMessage);
- if (opt_handler) {
- var handlerMessage = opt_handler(resp);
- if (handlerMessage) {
- message = handlerMessage;
- }
- }
-
- message = UtilService.stringToHTML(message);
- bootbox.dialog({
- "message": message,
- "title": defaultMessage || 'Request Failure',
- "buttons": {
- "close": {
- "label": "Close",
- "className": "btn-primary"
- }
- }
- });
- };
- };
-
- return apiService;
-}]);
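As a usage note, each generated operation takes (options, parameters, background) and returns a Restangular promise, and GET operations additionally receive an *AsResource helper. The sketch below is hedged: scValidateConfig and scRegistryStatus are operation ids referenced elsewhere in this diff, and scRegistryStatus is assumed to be a GET operation.

// Illustrative sketch: calling a generated operation and its AsResource variant.
var data = {'config': {'DB_URI': 'mysql+pymysql://user:pass@host/db'}};
var params = {'service': 'database'};

ApiService.scValidateConfig(data, params).then(function(resp) {
  console.log('validation passed:', resp.status);
}, ApiService.errorDisplay('Could not validate the configuration'));

// The AsResource form tracks loading/error state for template binding.
var resource = ApiService.scRegistryStatusAsResource(null, /* background */ true);
var statusResource = resource.get(function(resp) { return resp['status']; });
// statusResource.loading / .value / .hasError update as the request resolves.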
diff --git a/config_app/js/services/container-service.js b/config_app/js/services/container-service.js
deleted file mode 100644
index 31b495176..000000000
--- a/config_app/js/services/container-service.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Helper service for working with the registry's container. Only works in enterprise.
- */
-angular.module('quay-config')
- .factory('ContainerService', ['ApiService', '$timeout', 'Restangular',
- function(ApiService, $timeout, Restangular) {
- var containerService = {};
- containerService.restartContainer = function(callback) {
- ApiService.errorDisplay('Removed Endpoint. This error should never be seen.')
- };
-
- containerService.scheduleStatusCheck = function(callback, opt_config) {
- $timeout(function() {
- containerService.checkStatus(callback, opt_config);
- }, 2000);
- };
-
- containerService.checkStatus = function(callback, opt_config) {
- var errorHandler = function(resp) {
- if (resp.status == 404 || resp.status == 502 || resp.status == -1) {
- // Container has not yet come back up, so we schedule another check.
- containerService.scheduleStatusCheck(callback, opt_config);
- return;
- }
-
- return ApiService.errorDisplay('Cannot load status. Please report this to support')(resp);
- };
-
- // If config is specified, override the API base URL from this point onward.
- // TODO: Find a better way than this. This is safe, since this will only be called
- // for a restart, but it is still ugly.
- if (opt_config && opt_config['SERVER_HOSTNAME']) {
- var scheme = opt_config['PREFERRED_URL_SCHEME'] || 'http';
- var baseUrl = scheme + '://' + opt_config['SERVER_HOSTNAME'] + '/api/v1/';
- Restangular.setBaseUrl(baseUrl);
- }
-
- ApiService.scRegistryStatus(null, null, /* background */true)
- .then(callback, errorHandler);
- };
-
- return containerService;
- }]);
diff --git a/config_app/js/services/cookie-service.js b/config_app/js/services/cookie-service.js
deleted file mode 100644
index af904124a..000000000
--- a/config_app/js/services/cookie-service.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Helper service for working with cookies.
- */
-angular.module('quay-config').factory('CookieService', ['$cookies', function($cookies) {
- var cookieService = {};
- cookieService.putPermanent = function(name, value) {
- document.cookie = escape(name) + "=" + escape(value) + "; expires=Fri, 31 Dec 9999 23:59:59 GMT; path=/";
- };
-
- cookieService.putSession = function(name, value) {
- $cookies.put(name, value);
- };
-
- cookieService.clear = function(name) {
- $cookies.remove(name);
- };
-
- cookieService.get = function(name) {
- return $cookies.get(name);
- };
-
- return cookieService;
-}]);
diff --git a/config_app/js/services/document-visibility-service.js b/config_app/js/services/document-visibility-service.js
deleted file mode 100644
index 59d935d8b..000000000
--- a/config_app/js/services/document-visibility-service.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Helper service which fires off events when the document's visibility changes, as well as allowing
- * other Angular code to query the state of the document's visibility directly.
- */
-angular.module('quay-config').constant('CORE_EVENT', {
- DOC_VISIBILITY_CHANGE: 'core.event.doc_visibility_change'
-});
-
-angular.module('quay-config').factory('DocumentVisibilityService', ['$rootScope', '$document', 'CORE_EVENT',
- function($rootScope, $document, CORE_EVENT) {
- var document = $document[0],
- features,
- detectedFeature;
-
- function broadcastChangeEvent() {
- $rootScope.$broadcast(CORE_EVENT.DOC_VISIBILITY_CHANGE,
- document[detectedFeature.propertyName]);
- }
-
- features = {
- standard: {
- eventName: 'visibilitychange',
- propertyName: 'hidden'
- },
- moz: {
- eventName: 'mozvisibilitychange',
- propertyName: 'mozHidden'
- },
- ms: {
- eventName: 'msvisibilitychange',
- propertyName: 'msHidden'
- },
- webkit: {
- eventName: 'webkitvisibilitychange',
- propertyName: 'webkitHidden'
- }
- };
-
- Object.keys(features).some(function(feature) {
- if (document[features[feature].propertyName] !== undefined) {
- detectedFeature = features[feature];
- return true;
- }
- });
-
- if (detectedFeature) {
- $document.on(detectedFeature.eventName, broadcastChangeEvent);
- }
-
- return {
- /**
- * Is the window currently hidden or not.
- */
- isHidden: function() {
- if (detectedFeature) {
- return document[detectedFeature.propertyName];
- }
- }
- };
-}]);
\ No newline at end of file
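For reference, consumers subscribe to this service's broadcast the same way the poll channel above does; the run block below is a minimal illustrative sketch.

// Illustrative sketch: reacting to tab visibility changes.
angular.module('quay-config').run(['$rootScope', 'DocumentVisibilityService', 'CORE_EVENT',
  function($rootScope, DocumentVisibilityService, CORE_EVENT) {
    $rootScope.$on(CORE_EVENT.DOC_VISIBILITY_CHANGE, function(event, isHidden) {
      // isHidden is the document's hidden property at the time of the change.
      if (!isHidden) {
        // The document became visible again; resume any deferred work here.
      }
    });
  }]);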
diff --git a/config_app/js/services/features-config.js b/config_app/js/services/features-config.js
deleted file mode 100644
index e655f32bf..000000000
--- a/config_app/js/services/features-config.js
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Feature flags.
- */
-angular.module('quay-config').factory('Features', [function() {
- if (!window.__features) {
- return {};
- }
-
- var features = window.__features;
- features.getFeature = function(name, opt_defaultValue) {
- var value = features[name];
- if (value == null) {
- return opt_defaultValue;
- }
- return value;
- };
-
- features.hasFeature = function(name) {
- return !!features.getFeature(name);
- };
-
- features.matchesFeatures = function(list) {
- for (var i = 0; i < list.length; ++i) {
- var value = features.getFeature(list[i]);
- if (!value) {
- return false;
- }
- }
- return true;
- };
-
- return features;
-}]);
-
-/**
- * Application configuration.
- */
-angular.module('quay-config').factory('Config', ['Features', function(Features) {
- if (!window.__config) {
- return {};
- }
-
- var config = window.__config;
- config.getDomain = function() {
- return config['SERVER_HOSTNAME'];
- };
-
- config.getHost = function(opt_auth) {
- var auth = opt_auth;
- if (auth) {
- auth = auth + '@';
- }
-
- return config['PREFERRED_URL_SCHEME'] + '://' + auth + config['SERVER_HOSTNAME'];
- };
-
- config.getHttp = function() {
- return config['PREFERRED_URL_SCHEME'];
- };
-
- config.getUrl = function(opt_path) {
- var path = opt_path || '';
- return config['PREFERRED_URL_SCHEME'] + '://' + config['SERVER_HOSTNAME'] + path;
- };
-
- config.getValue = function(name, opt_defaultValue) {
- var value = config[name];
- if (value == null) {
- return opt_defaultValue;
- }
- return value;
- };
-
- config.getEnterpriseLogo = function(opt_defaultValue) {
- if (!config.ENTERPRISE_LOGO_URL) {
- if (opt_defaultValue) {
- return opt_defaultValue;
- }
-
- if (Features.BILLING) {
- return '/static/img/quay-horizontal-color.svg';
- } else {
- return '/static/img/QuayEnterprise_horizontal_color.svg';
- }
- }
-
- return config.ENTERPRISE_LOGO_URL;
- };
-
- return config;
-}]);
\ No newline at end of file
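Both factories simply wrap the window-injected globals (window.__features and window.__config); a hedged sketch of typical reads follows, with the controller name invented.

// Illustrative sketch: reading feature flags and config values.
angular.module('quay-config').controller('ExampleBrandingCtrl', ['$scope', 'Features', 'Config',
  function($scope, Features, Config) {
    $scope.billingEnabled = Features.hasFeature('BILLING');
    $scope.registryUrl = Config.getUrl('/repository/');  // PREFERRED_URL_SCHEME://SERVER_HOSTNAME/repository/
    $scope.logo = Config.getEnterpriseLogo();            // falls back to the bundled Quay logo
  }]);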
diff --git a/config_app/js/services/services.types.ts b/config_app/js/services/services.types.ts
deleted file mode 100644
index 217824f6b..000000000
--- a/config_app/js/services/services.types.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-export interface AngularPollChannel {
- create: PollConstructor
-}
-
-type PollConstructor = (scope: MockAngularScope, requester: ShouldContinueCallback, opt_sleeptime?: number) => PollHandle;
-type MockAngularScope = {
- '$on': Function
-};
-type ShouldContinueCallback = (boolean) => void;
-
-export interface PollHandle {
- start(opt_skipFirstCall?: boolean): void,
- stop(): void,
- setSleepTime(sleepTime: number): void,
-}
diff --git a/config_app/js/services/user-service.js b/config_app/js/services/user-service.js
deleted file mode 100644
index 8c222b955..000000000
--- a/config_app/js/services/user-service.js
+++ /dev/null
@@ -1,177 +0,0 @@
-import * as Raven from 'raven-js';
-
-
-/**
- * Service which monitors the current user session and provides methods for returning information
- * about the user.
- */
-angular.module('quay-config')
- .factory('UserService', ['ApiService', 'CookieService', '$rootScope', 'Config', '$location', '$timeout',
-
-function(ApiService, CookieService, $rootScope, Config, $location, $timeout) {
- var userResponse = {
- verified: false,
- anonymous: true,
- username: null,
- email: null,
- organizations: [],
- logins: [],
- beforeload: true
- };
-
- var userService = {};
-
- userService.hasEverLoggedIn = function() {
- return CookieService.get('quay.loggedin') == 'true';
- };
-
- userService.updateUserIn = function(scope, opt_callback) {
- scope.$watch(function () { return userService.currentUser(); }, function (currentUser) {
- if (currentUser) {
- $timeout(function(){
- scope.user = currentUser;
- if (opt_callback) {
- opt_callback(currentUser);
- }
- }, 0, false);
- };
- }, true);
- };
-
- userService.load = function(opt_callback) {
- var handleUserResponse = function(loadedUser) {
- userResponse = loadedUser;
-
- if (!userResponse.anonymous) {
- if (Config.MIXPANEL_KEY) {
- try {
- mixpanel.identify(userResponse.username);
- mixpanel.people.set({
- '$email': userResponse.email,
- '$username': userResponse.username,
- 'verified': userResponse.verified
- });
- mixpanel.people.set_once({
- '$created': new Date()
- })
- } catch (e) {
- window.console.log(e);
- }
- }
-
- if (Config.MARKETO_MUNCHKIN_ID && userResponse['marketo_user_hash']) {
- var associateLeadBody = {'Email': userResponse.email};
- if (window.Munchkin !== undefined) {
- try {
- Munchkin.munchkinFunction(
- 'associateLead',
- associateLeadBody,
- userResponse['marketo_user_hash']
- );
- } catch (e) {
- }
- } else {
- window.__quay_munchkin_queue.push([
- 'associateLead',
- associateLeadBody,
- userResponse['marketo_user_hash']
- ]);
- }
- }
-
- if (window.Raven !== undefined) {
- try {
- Raven.setUser({
- email: userResponse.email,
- id: userResponse.username
- });
- } catch (e) {
- window.console.log(e);
- }
- }
-
- CookieService.putPermanent('quay.loggedin', 'true');
- } else {
- if (window.Raven !== undefined) {
- Raven.setUser();
- }
- }
-
- // If the loaded user has a prompt, redirect them to the update page.
- if (loadedUser.prompts && loadedUser.prompts.length) {
- $location.path('/updateuser');
- return;
- }
-
- if (opt_callback) {
- opt_callback(loadedUser);
- }
- };
-
- ApiService.getLoggedInUser().then(function(loadedUser) {
- handleUserResponse(loadedUser);
- }, function() {
- handleUserResponse({'anonymous': true});
- });
- };
-
- userService.isOrganization = function(name) {
- return !!userService.getOrganization(name);
- };
-
- userService.getOrganization = function(name) {
- if (!userResponse || !userResponse.organizations) { return null; }
- for (var i = 0; i < userResponse.organizations.length; ++i) {
- var org = userResponse.organizations[i];
- if (org.name == name) {
- return org;
- }
- }
-
- return null;
- };
-
- userService.isNamespaceAdmin = function(namespace) {
- if (namespace == userResponse.username) {
- return true;
- }
-
- var org = userService.getOrganization(namespace);
- if (!org) {
- return false;
- }
-
- return org.is_org_admin;
- };
-
- userService.isKnownNamespace = function(namespace) {
- if (namespace == userResponse.username) {
- return true;
- }
-
- var org = userService.getOrganization(namespace);
- return !!org;
- };
-
- userService.getNamespace = function(namespace) {
- var org = userService.getOrganization(namespace);
- if (org) {
- return org;
- }
-
- if (namespace == userResponse.username) {
- return userResponse;
- }
-
- return null;
- };
-
- userService.currentUser = function() {
- return userResponse;
- };
-
- // Update the user in the root scope.
- userService.updateUserIn($rootScope);
-
- return userService;
-}]);
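A minimal sketch of how a controller kept its user binding in sync through this service; the controller name is invented, the calls are the ones defined above.

// Illustrative sketch: binding the current session user onto a scope.
angular.module('quay-config').controller('ExampleUserCtrl', ['$scope', 'UserService',
  function($scope, UserService) {
    UserService.updateUserIn($scope, function(user) {
      $scope.signedIn = !user.anonymous;
    });
    UserService.load();  // fetches the logged-in user and triggers the watch above
  }]);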
diff --git a/config_app/js/services/util-service.js b/config_app/js/services/util-service.js
deleted file mode 100644
index 34f0a4191..000000000
--- a/config_app/js/services/util-service.js
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Service which exposes various utility methods.
- */
-angular.module('quay-config').factory('UtilService', ['$sanitize',
- function($sanitize) {
- var utilService = {};
-
- var adBlockEnabled = null;
-
- utilService.isAdBlockEnabled = function(callback) {
- if (adBlockEnabled !== null) {
- callback(adBlockEnabled);
- return;
- }
-
- if(typeof blockAdBlock === 'undefined') {
- callback(true);
- return;
- }
-
- var bab = new BlockAdBlock({
- checkOnLoad: false,
- resetOnEnd: true
- });
-
- bab.onDetected(function() { adBlockEnabled = true; callback(true); });
- bab.onNotDetected(function() { adBlockEnabled = false; callback(false); });
- bab.check();
- };
-
- utilService.isEmailAddress = function(val) {
- var emailRegex = /^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
- return emailRegex.test(val);
- };
-
- utilService.escapeHtmlString = function(text) {
- var textStr = (text || '').toString();
-    var adjusted = textStr.replace(/&/g, "&amp;")
-      .replace(/</g, "&lt;")
-      .replace(/>/g, "&gt;")
-      .replace(/"/g, "&quot;")
-      .replace(/'/g, "&#39;");
-
- return adjusted;
- };
-
- utilService.stringToHTML = function(text) {
- text = utilService.escapeHtmlString(text);
-    text = text.replace(/\n/g, '<br>');
- return text;
- };
-
- utilService.getRestUrl = function(args) {
- var url = '';
- for (var i = 0; i < arguments.length; ++i) {
- if (i > 0) {
- url += '/';
- }
- url += encodeURI(arguments[i])
- }
- return url;
- };
-
- utilService.textToSafeHtml = function(text) {
- return $sanitize(utilService.escapeHtmlString(text));
- };
-
- return utilService;
- }])
- .factory('CoreDialog', [() => {
- var service = {};
- service['fatal'] = function(title, message) {
- bootbox.dialog({
- "title": title,
-      "message": "<div>" + message,
- "buttons": {},
- "className": "co-dialog fatal-error",
- "closeButton": false
- });
- };
-
- return service;
- }]);
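For context, a short sketch of the escaping helpers in use; the values shown in the comments follow from the implementation above, and UtilService is assumed to have been injected.

// Illustrative sketch of UtilService's string helpers.
var raw = 'Build failed:\n<script>alert(1)</script> & retry';
var escaped = UtilService.escapeHtmlString(raw);
// -> 'Build failed:\n&lt;script&gt;alert(1)&lt;/script&gt; &amp; retry'
var asHtml = UtilService.stringToHTML(raw);
// -> same as above, but with the newline converted to '<br>'
var url = UtilService.getRestUrl('api', 'v1', 'superuser');
// -> 'api/v1/superuser'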
diff --git a/config_app/js/setup/setup.component.js b/config_app/js/setup/setup.component.js
deleted file mode 100644
index 65a076f31..000000000
--- a/config_app/js/setup/setup.component.js
+++ /dev/null
@@ -1,319 +0,0 @@
-import * as URI from 'urijs';
-const templateUrl = require('./setup.html');
-
-(function() {
- /**
- * The Setup page provides a nice GUI walkthrough experience for setting up Red Hat Quay.
- */
-
- angular.module('quay-config').directive('setup', () => {
- const directiveDefinitionObject = {
- priority: 1,
- templateUrl,
- replace: true,
- transclude: true,
- restrict: 'C',
- scope: {
- 'isActive': '=isActive',
- 'configurationSaved': '&configurationSaved',
- 'setupCompleted': '&setupCompleted',
- },
- controller: SetupCtrl,
- };
-
- return directiveDefinitionObject;
- })
-
- function SetupCtrl($scope, $timeout, ApiService, Features, UserService, ContainerService, CoreDialog) {
- // if (!Features.SUPER_USERS) {
- // return;
- // }
-
- $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9_\.\-]+(:[0-9]+)?$';
-
- $scope.validateHostname = function(hostname) {
- if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) {
- return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.'
- }
-
- return null;
- };
-
- // Note: The values of the enumeration are important for isStepFamily. For example,
- // *all* states under the "configuring db" family must start with "config-db".
- $scope.States = {
- // Loading the state of the product.
- 'LOADING': 'loading',
-
- // The configuration directory is missing.
- 'MISSING_CONFIG_DIR': 'missing-config-dir',
-
- // The config.yaml exists but it is invalid.
- 'INVALID_CONFIG': 'config-invalid',
-
- // DB is being configured.
- 'CONFIG_DB': 'config-db',
-
- // DB information is being validated.
- 'VALIDATING_DB': 'config-db-validating',
-
- // DB information is being saved to the config.
- 'SAVING_DB': 'config-db-saving',
-
- // A validation error occurred with the database.
- 'DB_ERROR': 'config-db-error',
-
-      // Database is being set up.
- 'DB_SETUP': 'setup-db',
-
- // An error occurred when setting up the database.
- 'DB_SETUP_ERROR': 'setup-db-error',
-
- // A superuser is being configured.
- 'CREATE_SUPERUSER': 'create-superuser',
-
- // The superuser is being created.
- 'CREATING_SUPERUSER': 'create-superuser-creating',
-
- // An error occurred when setting up the superuser.
- 'SUPERUSER_ERROR': 'create-superuser-error',
-
- // The superuser was created successfully.
- 'SUPERUSER_CREATED': 'create-superuser-created',
-
-      // General configuration is being set up.
- 'CONFIG': 'config',
-
- // The configuration is fully valid.
- 'VALID_CONFIG': 'valid-config',
-
- // The product is ready for use.
- 'READY': 'ready'
- }
-
- $scope.csrf_token = window.__token;
- $scope.currentStep = $scope.States.LOADING;
- $scope.errors = {};
- $scope.stepProgress = [];
- $scope.hasSSL = false;
- $scope.hostname = null;
- $scope.currentConfig = null;
-
- $scope.currentState = {
- 'hasDatabaseSSLCert': false
- };
-
- $scope.$watch('currentStep', function(currentStep) {
- $scope.stepProgress = $scope.getProgress(currentStep);
-
- switch (currentStep) {
- case $scope.States.CONFIG:
- $('#setupModal').modal('hide');
- break;
-
- case $scope.States.MISSING_CONFIG_DIR:
- $scope.showMissingConfigDialog();
- break;
-
- case $scope.States.INVALID_CONFIG:
- $scope.showInvalidConfigDialog();
- break;
-
- case $scope.States.DB_SETUP:
- $scope.performDatabaseSetup();
- // Fall-through.
-
- case $scope.States.CREATE_SUPERUSER:
- case $scope.States.CONFIG_DB:
- case $scope.States.VALID_CONFIG:
- case $scope.States.READY:
- $('#setupModal').modal({
- keyboard: false,
- backdrop: 'static'
- });
- break;
- }
- });
-
- $scope.restartContainer = function(state) {
- $scope.currentStep = state;
- ContainerService.restartContainer(function() {
- $scope.checkStatus()
- });
- };
-
- $scope.showSuperuserPanel = function() {
- $('#setupModal').modal('hide');
- var prefix = $scope.hasSSL ? 'https' : 'http';
- var hostname = $scope.hostname;
- if (!hostname) {
- hostname = document.location.hostname;
- if (document.location.port) {
- hostname = hostname + ':' + document.location.port;
- }
- }
-
- window.location = prefix + '://' + hostname + '/superuser';
- };
-
- $scope.configurationSaved = function(config) {
- $scope.hasSSL = config['PREFERRED_URL_SCHEME'] == 'https';
- $scope.hostname = config['SERVER_HOSTNAME'];
- $scope.currentConfig = config;
-
- $scope.currentStep = $scope.States.VALID_CONFIG;
- };
-
- $scope.getProgress = function(step) {
- var isStep = $scope.isStep;
- var isStepFamily = $scope.isStepFamily;
- var States = $scope.States;
-
- return [
- isStepFamily(step, States.CONFIG_DB),
- isStepFamily(step, States.DB_SETUP),
- isStepFamily(step, States.CREATE_SUPERUSER),
- isStep(step, States.CONFIG),
- isStep(step, States.VALID_CONFIG),
- isStep(step, States.READY)
- ];
- };
-
- $scope.isStepFamily = function(step, family) {
- if (!step) { return false; }
- return step.indexOf(family) == 0;
- };
-
- $scope.isStep = function(step) {
- for (var i = 1; i < arguments.length; ++i) {
- if (arguments[i] == step) {
- return true;
- }
- }
- return false;
- };
-
- $scope.beginSetup = function() {
- $scope.currentStep = $scope.States.CONFIG_DB;
- };
-
- $scope.showInvalidConfigDialog = function() {
-      var message = "The <code>config.yaml</code> file found in <code>conf/stack</code> could not be parsed."
- var title = "Invalid configuration file";
- CoreDialog.fatal(title, message);
- };
-
-
- $scope.showMissingConfigDialog = function() {
-      var message = "A volume should be mounted into the container at <code>/conf/stack</code>: " +
-        "<br><code>docker run -v /path/to/config:/conf/stack</code> " +
-        "<br>Once fixed, restart the container. For more information, " +
-        "Read the Setup Guide."
-
- var title = "Missing configuration volume";
- CoreDialog.fatal(title, message);
- };
-
- $scope.parseDbUri = function(value) {
- if (!value) { return null; }
-
-      // Format: mysql+pymysql://<username>:<password>@<hostname>/<database>
- var uri = URI(value);
- return {
- 'kind': uri.protocol(),
- 'username': uri.username(),
- 'password': uri.password(),
- 'server': uri.host(),
- 'database': uri.path() ? uri.path().substr(1) : ''
- };
- };
-
- $scope.serializeDbUri = function(fields) {
- if (!fields['server']) { return ''; }
- if (!fields['database']) { return ''; }
-
- var uri = URI();
- try {
- uri = uri && uri.host(fields['server']);
- uri = uri && uri.protocol(fields['kind']);
- uri = uri && uri.username(fields['username']);
- uri = uri && uri.password(fields['password']);
- uri = uri && uri.path('/' + (fields['database'] || ''));
- uri = uri && uri.toString();
- } catch (ex) {
- return '';
- }
-
- return uri;
- };
-
- $scope.createSuperUser = function() {
- $scope.currentStep = $scope.States.CREATING_SUPERUSER;
- ApiService.scCreateInitialSuperuser($scope.superUser, null).then(function(resp) {
- $scope.checkStatus();
- }, function(resp) {
- $scope.currentStep = $scope.States.SUPERUSER_ERROR;
- $scope.errors.SuperuserCreationError = ApiService.getErrorMessage(resp, 'Could not create superuser');
- });
- };
-
- $scope.performDatabaseSetup = function() {
- $scope.currentStep = $scope.States.DB_SETUP;
- ApiService.scSetupDatabase(null, null).then(function(resp) {
- if (resp['error']) {
- $scope.currentStep = $scope.States.DB_SETUP_ERROR;
- $scope.errors.DatabaseSetupError = resp['error'];
- } else {
- $scope.currentStep = $scope.States.CREATE_SUPERUSER;
- }
- }, ApiService.errorDisplay('Could not setup database. Please report this to support.'))
- };
-
- $scope.validateDatabase = function() {
- $scope.currentStep = $scope.States.VALIDATING_DB;
- $scope.databaseInvalid = null;
-
- var data = {
- 'config': {
- 'DB_URI': $scope.databaseUri
- },
- };
-
- if ($scope.currentState.hasDatabaseSSLCert) {
- data['config']['DB_CONNECTION_ARGS'] = {
- 'ssl': {
- 'ca': 'conf/stack/database.pem'
- }
- };
- }
-
- var params = {
- 'service': 'database'
- };
-
- ApiService.scValidateConfig(data, params).then(function(resp) {
- var status = resp.status;
-
- if (status) {
- $scope.currentStep = $scope.States.SAVING_DB;
- ApiService.scUpdateConfig(data, null).then(function(resp) {
- $scope.checkStatus();
- }, ApiService.errorDisplay('Cannot update config. Please report this to support'));
- } else {
- $scope.currentStep = $scope.States.DB_ERROR;
- $scope.errors.DatabaseValidationError = resp.reason;
- }
- }, ApiService.errorDisplay('Cannot validate database. Please report this to support'));
- };
-
- $scope.checkStatus = function() {
- ContainerService.checkStatus(function(resp) {
- $scope.currentStep = resp['status'];
- }, $scope.currentConfig);
- };
-
- // Load the initial status.
- $scope.checkStatus();
- };
-})();
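To illustrate the DB URI helpers defined on the scope above, here is a hedged round-trip example; the hostname and credentials are placeholders, and the snippet assumes it runs inside SetupCtrl where parseDbUri/serializeDbUri are defined.

// Illustrative sketch: round-tripping a database URI through the scope helpers.
var fields = $scope.parseDbUri('mysql+pymysql://quay:secret@db.example.com/quaydb');
// -> { kind: 'mysql+pymysql', username: 'quay', password: 'secret',
//      server: 'db.example.com', database: 'quaydb' }
var uri = $scope.serializeDbUri(fields);
// -> 'mysql+pymysql://quay:secret@db.example.com/quaydb'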
diff --git a/config_app/static/css/config-setup-app-component.css b/config_app/static/css/config-setup-app-component.css
deleted file mode 100644
index 2d41544d9..000000000
--- a/config_app/static/css/config-setup-app-component.css
+++ /dev/null
@@ -1,93 +0,0 @@
-.config-setup-wrapper {
- display: flex;
-}
-
-.config-setup_option {
- font-size: 22px;
- height: 250px;
- display: flex;
- flex: 1;
- flex-direction: column;
- align-items: center;
- padding: 15px;
- margin: 15px;
- justify-content: space-evenly;
-}
-
-.config-setup_option i {
- padding-bottom: 10px;
-}
-
-.config-setup_option div {
- text-align: center;
- min-height: 100px;
-}
-
-.config-setup_option:hover {
- background-color: #dddddd;
- text-decoration: none;
-}
-
-/* Overrides for fixing old quay styles*/
-
-.quay-config-app .alert-danger {
- padding: 25px;
- display: flex;
-}
-
-.quay-config-app .alert-danger:before {
- content: "\f071";
- font-family: Font Awesome\ 5 Free;
- font-weight: 900;
- font-size: 30px;
- padding-right: 15px;
- color: #c53c3f;
- text-align: center;
-}
-
-.quay-config-app .co-alert.co-alert-success {
- padding: 25px;
- display: flex;
- margin-bottom: 0;
- text-align: left;
-}
-
-.quay-config-app .co-alert.co-alert-success:before {
- font-family: Font Awesome\ 5 Free;
- font-weight: 900;
- font-size: 30px;
- padding-right: 15px;
- color: green;
- text-align: center;
- position: static;
-}
-
-.co-alert.co-alert-danger:after {
- /* Ignore the exclamation mark, it also messes with spacing elements */
- content: none;
-}
-
-/* Fixes the transition to font awesome 5 */
-.quay-config-app .co-alert.co-alert-warning::before {
- font-family: Font Awesome\ 5 Free;
- font-weight: 900;
-}
-
-.quay-config-app .co-alert.co-alert-info::before {
- font-family: Font Awesome\ 5 Free;
- font-weight: 900;
-}
-
-.quay-config-app .co-alert.co-alert-danger::after {
- font-family: Font Awesome\ 5 Free;
- font-weight: 900;
- /* Font Awesome 5's icons are slightly bigger, so we have to adjust this one because it is inside another icon */
- font-size: 12px;
- top: 18px;
- left: 20.75px;
-}
-
-.quay-config-app .co-modify-link::after {
- font-family: Font Awesome\ 5 Free;
- font-weight: 900;
-}
diff --git a/config_app/static/css/cor-option.css b/config_app/static/css/cor-option.css
deleted file mode 100644
index 97ae7887d..000000000
--- a/config_app/static/css/cor-option.css
+++ /dev/null
@@ -1,8 +0,0 @@
-.cor-options-menu .fa-cog {
- color: #999;
- cursor: pointer;
-}
-
-.open .fa-cog {
- color: #428BCA;
-}
diff --git a/config_app/static/css/cor-title.css b/config_app/static/css/cor-title.css
deleted file mode 100644
index ef199785a..000000000
--- a/config_app/static/css/cor-title.css
+++ /dev/null
@@ -1,4 +0,0 @@
-.cor-title {
- display: flex;
- justify-content: center;
-}
diff --git a/config_app/static/img/RH_Logo_Quay_Black_UX-horizontal.svg b/config_app/static/img/RH_Logo_Quay_Black_UX-horizontal.svg
deleted file mode 100644
index ae73f2568..000000000
--- a/config_app/static/img/RH_Logo_Quay_Black_UX-horizontal.svg
+++ /dev/null
@@ -1,116 +0,0 @@
diff --git a/config_app/static/img/network-tile.png b/config_app/static/img/network-tile.png
deleted file mode 100644
index c27deaff2..000000000
Binary files a/config_app/static/img/network-tile.png and /dev/null differ
diff --git a/config_app/static/img/quay-logo.png b/config_app/static/img/quay-logo.png
deleted file mode 100644
index 031087b1d..000000000
Binary files a/config_app/static/img/quay-logo.png and /dev/null differ
diff --git a/config_app/static/img/quay_favicon.png b/config_app/static/img/quay_favicon.png
deleted file mode 100644
index a1ae74d8b..000000000
Binary files a/config_app/static/img/quay_favicon.png and /dev/null differ
diff --git a/config_app/static/img/redis-small.png b/config_app/static/img/redis-small.png
deleted file mode 100644
index c330a720e..000000000
Binary files a/config_app/static/img/redis-small.png and /dev/null differ
diff --git a/config_app/static/img/rocket.png b/config_app/static/img/rocket.png
deleted file mode 100644
index b9ffddf39..000000000
Binary files a/config_app/static/img/rocket.png and /dev/null differ
diff --git a/config_app/static/lib/angular-file-upload.min.js b/config_app/static/lib/angular-file-upload.min.js
deleted file mode 100644
index b9d0196f7..000000000
--- a/config_app/static/lib/angular-file-upload.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! 1.4.0 */
-!function(){var a=angular.module("angularFileUpload",[]);a.service("$upload",["$http","$timeout",function(a,b){function c(c){c.method=c.method||"POST",c.headers=c.headers||{},c.transformRequest=c.transformRequest||function(b,c){return window.ArrayBuffer&&b instanceof window.ArrayBuffer?b:a.defaults.transformRequest[0](b,c)},window.XMLHttpRequest.__isShim&&(c.headers.__setXHR_=function(){return function(a){a&&(c.__XHR=a,c.xhrFn&&c.xhrFn(a),a.upload.addEventListener("progress",function(a){c.progress&&b(function(){c.progress&&c.progress(a)})},!1),a.upload.addEventListener("load",function(a){a.lengthComputable&&c.progress&&c.progress(a)},!1))}});var d=a(c);return d.progress=function(a){return c.progress=a,d},d.abort=function(){return c.__XHR&&b(function(){c.__XHR.abort()}),d},d.xhr=function(a){return c.xhrFn=a,d},d.then=function(a,b){return function(d,e,f){c.progress=f||c.progress;var g=b.apply(a,[d,e,f]);return g.abort=a.abort,g.progress=a.progress,g.xhr=a.xhr,g.then=a.then,g}}(d,d.then),d}this.upload=function(b){b.headers=b.headers||{},b.headers["Content-Type"]=void 0,b.transformRequest=b.transformRequest||a.defaults.transformRequest;var d=new FormData,e=b.transformRequest,f=b.data;return b.transformRequest=function(a,c){if(f)if(b.formDataAppender)for(var d in f){var g=f[d];b.formDataAppender(a,d,g)}else for(var d in f){var g=f[d];if("function"==typeof e)g=e(g,c);else for(var h=0;h0||navigator.msMaxTouchPoints>0)&&d.bind("touchend",function(a){a.preventDefault(),a.target.click()})}}]),a.directive("ngFileDropAvailable",["$parse","$timeout",function(a,b){return function(c,d,e){if("draggable"in document.createElement("span")){var f=a(e.ngFileDropAvailable);b(function(){f(c)})}}}]),a.directive("ngFileDrop",["$parse","$timeout",function(a,b){return function(c,d,e){function f(a,b){if(b.isDirectory){var c=b.createReader();i++,c.readEntries(function(b){for(var c=0;c0&&j[0].webkitGetAsEntry)for(var k=0;k
- {% for style_url in external_styles %}
- {% endfor %}
- {% for script_url in external_scripts %}
- {% endfor %}
- {% for script_path in main_scripts %}
- {% endfor %}
- Config app
diff --git a/config_app/webpack.config.js b/config_app/webpack.config.js
deleted file mode 100644
index be3cbd931..000000000
--- a/config_app/webpack.config.js
+++ /dev/null
@@ -1,75 +0,0 @@
-const webpack = require('webpack');
-const path = require('path');
-const TerserPlugin = require('terser-webpack-plugin');
-
-let config = {
- entry: {
- configapp: "./js/main.ts"
- },
- output: {
- path: path.resolve(__dirname, "static/build"),
- filename: '[name]-quay-frontend.bundle.js',
- chunkFilename: '[name]-quay-frontend.chunk.js'
- },
- resolve: {
- extensions: [".ts", ".js"],
- modules: [
- // Allows us to use the top-level node modules
- path.resolve(__dirname, '../node_modules'),
- path.resolve(__dirname, '../static/css/')
- ]
- },
- externals: {
- angular: "angular",
- jquery: "$",
- },
- module: {
- rules: [
- {
- test: /\.ts$/,
- use: ["ts-loader"],
- exclude: /node_modules/
- },
- {
- test: /\.css$/,
- use: [
- "style-loader",
- "css-loader?minimize=true",
- ],
- },
- {
- test: /\.html$/,
- use: [
- 'ngtemplate-loader?relativeTo=' + (path.resolve(__dirname)),
- 'html-loader',
- ]
- },
- ]
- },
- optimization: {},
- plugins: [
- // Replace references to global variables with associated modules
- new webpack.ProvidePlugin({
- FileSaver: 'file-saver',
- angular: "angular",
- $: "jquery",
- }),
- ],
- devtool: "cheap-module-source-map",
-};
-
-/**
- * Production settings
- */
-if (process.env.NODE_ENV === 'production') {
- config.optimization.minimizer = [
- new TerserPlugin({
- // Disable mangle to prevent AngularJS errors
- terserOptions: {mangle: false},
- sourceMap: true,
- }),
- ];
- config.output.filename = '[name]-quay-frontend-[hash].bundle.js';
-}
-
-module.exports = config;
diff --git a/contrib/osx/local-setup.sh b/contrib/osx/local-setup.sh
new file mode 100755
index 000000000..3ce6735c0
--- /dev/null
+++ b/contrib/osx/local-setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Put us at the root of the quay repo no matter what
+pushd $(dirname "${0}") > /dev/null
+basedir=$(pwd -L)
+cd "${basedir}"/../../
+
+# Install Docker and C libraries on which Python libraries are dependent
+brew update
+brew upgrade
+brew install boot2docker docker libevent libmagic postgresql gpgme
+
+# Some OSX installs don't have /usr/include, which is required for finding SASL headers for our LDAP library
+if [ ! -e /usr/include ]; then
+ sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include
+fi
+
+# Install Python dependencies
+sudo pip install -r requirements.txt
+
+# Put the local testing config in place
+git clone git@github.com:coreos-inc/quay-config.git ../quay-config
+ln -s ../../quay-config/local conf/stack
diff --git a/data/appr_model/__init__.py b/data/appr_model/__init__.py
deleted file mode 100644
index 7c9620864..000000000
--- a/data/appr_model/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from data.appr_model import (
- blob,
- channel,
- manifest,
- manifest_list,
- package,
- release,
- tag,
-)
diff --git a/data/appr_model/blob.py b/data/appr_model/blob.py
deleted file mode 100644
index d340a7491..000000000
--- a/data/appr_model/blob.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import logging
-
-from peewee import IntegrityError
-
-from data.model import db_transaction
-
-logger = logging.getLogger(__name__)
-
-def _ensure_sha256_header(digest):
- if digest.startswith('sha256:'):
- return digest
- return 'sha256:' + digest
-
-
-def get_blob(digest, models_ref):
- """ Find a blob by its digest. """
- Blob = models_ref.Blob
- return Blob.select().where(Blob.digest == _ensure_sha256_header(digest)).get()
-
-
-def get_or_create_blob(digest, size, media_type_name, locations, models_ref):
- """ Try to find a blob by its digest or create it. """
- Blob = models_ref.Blob
- BlobPlacement = models_ref.BlobPlacement
-
-  # Get or create the blob entry for the digest.
- try:
- blob = get_blob(digest, models_ref)
- logger.debug('Retrieved blob with digest %s', digest)
- except Blob.DoesNotExist:
- blob = Blob.create(digest=_ensure_sha256_header(digest),
- media_type_id=Blob.media_type.get_id(media_type_name),
- size=size)
- logger.debug('Created blob with digest %s', digest)
-
- # Add the locations to the blob.
- for location_name in locations:
- location_id = BlobPlacement.location.get_id(location_name)
- try:
- BlobPlacement.create(blob=blob, location=location_id)
- except IntegrityError:
- logger.debug('Location %s already existing for blob %s', location_name, blob.id)
-
- return blob
-
-
-def get_blob_locations(digest, models_ref):
- """ Find all locations names for a blob. """
- Blob = models_ref.Blob
- BlobPlacement = models_ref.BlobPlacement
- BlobPlacementLocation = models_ref.BlobPlacementLocation
-
- return [x.name for x in
- BlobPlacementLocation
- .select()
- .join(BlobPlacement)
- .join(Blob)
- .where(Blob.digest == _ensure_sha256_header(digest))]
-
-
-def ensure_blob_locations(models_ref, *names):
- BlobPlacementLocation = models_ref.BlobPlacementLocation
-
- with db_transaction():
- locations = BlobPlacementLocation.select().where(BlobPlacementLocation.name << names)
-
- insert_names = list(names)
-
- for location in locations:
- insert_names.remove(location.name)
-
- if not insert_names:
- return
-
- data = [{'name': name} for name in insert_names]
- BlobPlacementLocation.insert_many(data).execute()
diff --git a/data/appr_model/channel.py b/data/appr_model/channel.py
deleted file mode 100644
index 3631d97a5..000000000
--- a/data/appr_model/channel.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from data.appr_model import tag as tag_model
-
-
-def get_channel_releases(repo, channel, models_ref):
- """ Return all previously linked tags.
- This works based upon Tag lifetimes.
- """
- Channel = models_ref.Channel
- Tag = models_ref.Tag
-
- tag_kind_id = Channel.tag_kind.get_id('channel')
- channel_name = channel.name
- return (Tag
- .select(Tag, Channel)
- .join(Channel, on=(Tag.id == Channel.linked_tag))
- .where(Channel.repository == repo,
- Channel.name == channel_name,
- Channel.tag_kind == tag_kind_id, Channel.lifetime_end != None)
- .order_by(Tag.lifetime_end))
-
-
-def get_channel(repo, channel_name, models_ref):
- """ Find a Channel by name. """
- channel = tag_model.get_tag(repo, channel_name, models_ref, "channel")
- return channel
-
-
-def get_tag_channels(repo, tag_name, models_ref, active=True):
- """ Find the Channels associated with a Tag. """
- Tag = models_ref.Tag
-
- tag = tag_model.get_tag(repo, tag_name, models_ref, "release")
- query = tag.tag_parents
-
- if active:
- query = tag_model.tag_is_alive(query, Tag)
-
- return query
-
-
-def delete_channel(repo, channel_name, models_ref):
- """ Delete a channel by name. """
- return tag_model.delete_tag(repo, channel_name, models_ref, "channel")
-
-
-def create_or_update_channel(repo, channel_name, tag_name, models_ref):
- """ Creates or updates a channel to include a particular tag. """
- tag = tag_model.get_tag(repo, tag_name, models_ref, 'release')
- return tag_model.create_or_update_tag(repo, channel_name, models_ref, linked_tag=tag,
- tag_kind="channel")
-
-
-def get_repo_channels(repo, models_ref):
-  """ Returns all live channels for a repository. """
- Channel = models_ref.Channel
- Tag = models_ref.Tag
-
- tag_kind_id = Channel.tag_kind.get_id('channel')
- query = (Channel
- .select(Channel, Tag)
- .join(Tag, on=(Tag.id == Channel.linked_tag))
- .where(Channel.repository == repo,
- Channel.tag_kind == tag_kind_id))
- return tag_model.tag_is_alive(query, Channel)
diff --git a/data/appr_model/manifest.py b/data/appr_model/manifest.py
deleted file mode 100644
index f08be8d9b..000000000
--- a/data/appr_model/manifest.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import logging
-import hashlib
-import json
-
-from cnr.models.package_base import get_media_type
-
-from data.database import db_transaction, MediaType
-from data.appr_model import tag as tag_model
-
-
-logger = logging.getLogger(__name__)
-
-
-def _ensure_sha256_header(digest):
- if digest.startswith('sha256:'):
- return digest
- return 'sha256:' + digest
-
-
-def _digest(manifestjson):
- return _ensure_sha256_header(hashlib.sha256(json.dumps(manifestjson, sort_keys=True)).hexdigest())
-
-
-def get_manifest_query(digest, media_type, models_ref):
- Manifest = models_ref.Manifest
- return Manifest.select().where(Manifest.digest == _ensure_sha256_header(digest),
- Manifest.media_type == Manifest.media_type.get_id(media_type))
-
-
-def get_manifest_with_blob(digest, media_type, models_ref):
- Blob = models_ref.Blob
- query = get_manifest_query(digest, media_type, models_ref)
- return query.join(Blob).get()
-
-
-def get_or_create_manifest(manifest_json, media_type_name, models_ref):
- Manifest = models_ref.Manifest
- digest = _digest(manifest_json)
- try:
- manifest = get_manifest_query(digest, media_type_name, models_ref).get()
- except Manifest.DoesNotExist:
- with db_transaction():
- manifest = Manifest.create(digest=digest,
- manifest_json=manifest_json,
- media_type=Manifest.media_type.get_id(media_type_name))
- return manifest
-
-def get_manifest_types(repo, models_ref, release=None):
-  """ Returns the set of manifest media types for a repo, optionally filtered by release. """
- Tag = models_ref.Tag
- ManifestListManifest = models_ref.ManifestListManifest
-
- query = tag_model.tag_is_alive(Tag
- .select(MediaType.name)
- .join(ManifestListManifest,
- on=(ManifestListManifest.manifest_list == Tag.manifest_list))
- .join(MediaType,
- on=(ManifestListManifest.media_type == MediaType.id))
- .where(Tag.repository == repo,
- Tag.tag_kind == Tag.tag_kind.get_id('release')), Tag)
- if release:
- query = query.where(Tag.name == release)
-
- manifests = set()
- for m in query.distinct().tuples():
- manifests.add(get_media_type(m[0]))
- return manifests
diff --git a/data/appr_model/manifest_list.py b/data/appr_model/manifest_list.py
deleted file mode 100644
index 92b10be6e..000000000
--- a/data/appr_model/manifest_list.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import logging
-import hashlib
-import json
-
-from data.database import db_transaction
-
-
-logger = logging.getLogger(__name__)
-
-
-def _ensure_sha256_header(digest):
- if digest.startswith('sha256:'):
- return digest
- return 'sha256:' + digest
-
-
-def _digest(manifestjson):
- return _ensure_sha256_header(hashlib.sha256(json.dumps(manifestjson, sort_keys=True)).hexdigest())
-
-
-def get_manifest_list(digest, models_ref):
- ManifestList = models_ref.ManifestList
- return ManifestList.select().where(ManifestList.digest == _ensure_sha256_header(digest)).get()
-
-
-def get_or_create_manifest_list(manifest_list_json, media_type_name, schema_version, models_ref):
- ManifestList = models_ref.ManifestList
-
- digest = _digest(manifest_list_json)
- media_type_id = ManifestList.media_type.get_id(media_type_name)
-
- try:
- return get_manifest_list(digest, models_ref)
- except ManifestList.DoesNotExist:
- with db_transaction():
- manifestlist = ManifestList.create(digest=digest, manifest_list_json=manifest_list_json,
- schema_version=schema_version, media_type=media_type_id)
- return manifestlist
-
-
-def create_manifestlistmanifest(manifestlist, manifest_ids, manifest_list_json, models_ref):
-  """ From a manifest list, its manifests, and the manifest list blob, create the
-      manifestlistmanifest for each manifest if it does not already exist. """
- for pos in xrange(len(manifest_ids)):
- manifest_id = manifest_ids[pos]
- manifest_json = manifest_list_json[pos]
- get_or_create_manifestlistmanifest(manifest=manifest_id,
- manifestlist=manifestlist,
- media_type_name=manifest_json['mediaType'],
- models_ref=models_ref)
-
-
-def get_or_create_manifestlistmanifest(manifest, manifestlist, media_type_name, models_ref):
- ManifestListManifest = models_ref.ManifestListManifest
-
- media_type_id = ManifestListManifest.media_type.get_id(media_type_name)
- try:
- ml = (ManifestListManifest
- .select()
- .where(ManifestListManifest.manifest == manifest,
- ManifestListManifest.media_type == media_type_id,
- ManifestListManifest.manifest_list == manifestlist)).get()
-
- except ManifestListManifest.DoesNotExist:
- ml = ManifestListManifest.create(manifest_list=manifestlist, media_type=media_type_id,
- manifest=manifest)
- return ml
diff --git a/data/appr_model/models.py b/data/appr_model/models.py
deleted file mode 100644
index 0fde7d83c..000000000
--- a/data/appr_model/models.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from collections import namedtuple
-
-from data.database import (ApprTag, ApprTagKind, ApprBlobPlacementLocation, ApprManifestList,
- ApprManifestBlob, ApprBlob, ApprManifestListManifest, ApprManifest,
- ApprBlobPlacement, ApprChannel)
-
-ModelsRef = namedtuple('ModelsRef', ['Tag', 'TagKind', 'BlobPlacementLocation', 'ManifestList',
- 'ManifestBlob', 'Blob', 'ManifestListManifest', 'Manifest',
- 'BlobPlacement', 'Channel', 'manifestlistmanifest_set_name',
- 'tag_set_prefetch_name'])
-
-NEW_MODELS = ModelsRef(ApprTag, ApprTagKind, ApprBlobPlacementLocation, ApprManifestList,
- ApprManifestBlob, ApprBlob, ApprManifestListManifest, ApprManifest,
- ApprBlobPlacement, ApprChannel, 'apprmanifestlistmanifest_set',
- 'apprtag_set')
diff --git a/data/appr_model/package.py b/data/appr_model/package.py
deleted file mode 100644
index 97ea9f791..000000000
--- a/data/appr_model/package.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from cnr.models.package_base import get_media_type, manifest_media_type
-from peewee import prefetch
-
-
-from data import model
-from data.database import Repository, Namespace
-from data.appr_model import tag as tag_model
-
-
-def list_packages_query(models_ref, namespace=None, media_type=None, search_query=None,
- username=None):
-  """ List and filter repositories by search query. """
- Tag = models_ref.Tag
-
- if username and not search_query:
- repositories = model.repository.get_visible_repositories(username,
- kind_filter='application',
- include_public=True,
- namespace=namespace,
- limit=50)
- if not repositories:
- return []
-
- repo_query = (Repository
- .select(Repository, Namespace.username)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(Repository.id << [repo.rid for repo in repositories]))
-
- if namespace:
- repo_query = (repo_query
- .where(Namespace.username == namespace))
- else:
- if search_query is not None:
- fields = [model.repository.SEARCH_FIELDS.name.name]
- repositories = model.repository.get_app_search(search_query,
- username=username,
- search_fields=fields,
- limit=50)
- if not repositories:
- return []
-
- repo_query = (Repository
- .select(Repository, Namespace.username)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(Repository.id << [repo.id for repo in repositories]))
- else:
- repo_query = (Repository
- .select(Repository, Namespace.username)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(Repository.visibility == model.repository.get_public_repo_visibility(),
- Repository.kind == Repository.kind.get_id('application')))
-
- if namespace:
- repo_query = (repo_query
- .where(Namespace.username == namespace))
-
- tag_query = (Tag
- .select()
- .where(Tag.tag_kind == Tag.tag_kind.get_id('release'))
- .order_by(Tag.lifetime_start))
-
- if media_type:
- tag_query = tag_model.filter_tags_by_media_type(tag_query, media_type, models_ref)
-
- tag_query = tag_model.tag_is_alive(tag_query, Tag)
- query = prefetch(repo_query, tag_query)
- return query
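
list_packages_query builds its result in layers: choose a base repository query (repos visible to the user, search hits, or all public application repos), optionally narrow to a namespace, then prefetch only the live 'release' tags. A rough in-memory analogue of the branch-then-narrow part, with invented data shapes:

def list_packages(repos, username=None, namespace=None, search_query=None):
    # Choose the base set, mirroring the three branches in list_packages_query.
    if username and not search_query:
        base = [r for r in repos if r['public'] or username in r['members']]
    elif search_query:
        base = [r for r in repos if search_query.lower() in r['name'].lower()]
    else:
        base = [r for r in repos if r['public']]

    # Optional namespace narrowing applies to every branch.
    if namespace:
        base = [r for r in base if r['namespace'] == namespace]
    return base

repos = [
    {'name': 'nginx-app', 'namespace': 'web', 'public': True, 'members': []},
    {'name': 'internal-tool', 'namespace': 'ops', 'public': False, 'members': ['alice']},
]
assert [r['name'] for r in list_packages(repos, username='alice')] == ['nginx-app', 'internal-tool']
assert [r['name'] for r in list_packages(repos, search_query='nginx')] == ['nginx-app']
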
diff --git a/data/appr_model/release.py b/data/appr_model/release.py
deleted file mode 100644
index dcfa455d0..000000000
--- a/data/appr_model/release.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import bisect
-
-from cnr.exception import PackageAlreadyExists
-from cnr.models.package_base import manifest_media_type
-
-from data.database import db_transaction, get_epoch_timestamp
-from data.appr_model import (blob as blob_model, manifest as manifest_model,
- manifest_list as manifest_list_model,
- tag as tag_model)
-
-
-LIST_MEDIA_TYPE = 'application/vnd.cnr.manifest.list.v0.json'
-SCHEMA_VERSION = 'v0'
-
-
-def _ensure_sha256_header(digest):
- if digest.startswith('sha256:'):
- return digest
- return 'sha256:' + digest
-
-
-def get_app_release(repo, tag_name, media_type, models_ref):
- """ Returns (tag, manifest, blob) given a repo object, tag_name, and media_type). """
- ManifestListManifest = models_ref.ManifestListManifest
- Manifest = models_ref.Manifest
- Blob = models_ref.Blob
- ManifestBlob = models_ref.ManifestBlob
- manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
-
- tag = tag_model.get_tag(repo, tag_name, models_ref, tag_kind='release')
- media_type_id = ManifestListManifest.media_type.get_id(manifest_media_type(media_type))
- manifestlistmanifest = (getattr(tag.manifest_list, manifestlistmanifest_set_name)
- .join(Manifest)
- .where(ManifestListManifest.media_type == media_type_id).get())
- manifest = manifestlistmanifest.manifest
- blob = Blob.select().join(ManifestBlob).where(ManifestBlob.manifest == manifest).get()
- return (tag, manifest, blob)
-
-
-def delete_app_release(repo, tag_name, media_type, models_ref):
- """ Terminate a Tag/media-type couple
- It find the corresponding tag/manifest and remove from the manifestlistmanifest the manifest
- 1. it terminates the current tag (in all-cases)
- 2. if the new manifestlist is not empty, it creates a new tag for it
- """
- ManifestListManifest = models_ref.ManifestListManifest
- manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
-
- media_type_id = ManifestListManifest.media_type.get_id(manifest_media_type(media_type))
-
- with db_transaction():
- tag = tag_model.get_tag(repo, tag_name, models_ref)
- manifest_list = tag.manifest_list
- list_json = manifest_list.manifest_list_json
- mlm_query = (ManifestListManifest
- .select()
- .where(ManifestListManifest.manifest_list == tag.manifest_list))
- list_manifest_ids = sorted([mlm.manifest_id for mlm in mlm_query])
- manifestlistmanifest = (getattr(tag.manifest_list, manifestlistmanifest_set_name)
- .where(ManifestListManifest.media_type == media_type_id).get())
- index = list_manifest_ids.index(manifestlistmanifest.manifest_id)
- list_manifest_ids.pop(index)
- list_json.pop(index)
-
- if not list_json:
- tag.lifetime_end = get_epoch_timestamp()
- tag.save()
- else:
- manifestlist = manifest_list_model.get_or_create_manifest_list(list_json, LIST_MEDIA_TYPE,
- SCHEMA_VERSION, models_ref)
- manifest_list_model.create_manifestlistmanifest(manifestlist, list_manifest_ids,
- list_json, models_ref)
- tag = tag_model.create_or_update_tag(repo, tag_name, models_ref, manifest_list=manifestlist,
- tag_kind="release")
- return tag
-
-
-def create_app_release(repo, tag_name, manifest_data, digest, models_ref, force=False):
- """ Create a new application release, it includes creating a new Tag, ManifestList,
- ManifestListManifests, Manifest, ManifestBlob.
-
- To deduplicate the ManifestList, the manifestlist_json is kept ordered by the manifest.id.
- To find the insert point in the ManifestList it uses bisect on the manifest-ids list.
- """
- ManifestList = models_ref.ManifestList
- ManifestListManifest = models_ref.ManifestListManifest
- Blob = models_ref.Blob
- ManifestBlob = models_ref.ManifestBlob
-
- with db_transaction():
- # Create/get the package manifest
- manifest = manifest_model.get_or_create_manifest(manifest_data, manifest_data['mediaType'],
- models_ref)
- # get the tag
- tag = tag_model.get_or_initialize_tag(repo, tag_name, models_ref)
-
- if tag.manifest_list is None:
- tag.manifest_list = ManifestList(media_type=ManifestList.media_type.get_id(LIST_MEDIA_TYPE),
- schema_version=SCHEMA_VERSION,
- manifest_list_json=[], )
-
- elif tag_model.tag_media_type_exists(tag, manifest.media_type, models_ref):
- if force:
- delete_app_release(repo, tag_name, manifest.media_type.name, models_ref)
- return create_app_release(repo, tag_name, manifest_data, digest, models_ref, force=False)
- else:
- raise PackageAlreadyExists("package exists already")
-
- list_json = tag.manifest_list.manifest_list_json
- mlm_query = (ManifestListManifest
- .select()
- .where(ManifestListManifest.manifest_list == tag.manifest_list))
- list_manifest_ids = sorted([mlm.manifest_id for mlm in mlm_query])
- insert_point = bisect.bisect_left(list_manifest_ids, manifest.id)
- list_json.insert(insert_point, manifest.manifest_json)
- list_manifest_ids.insert(insert_point, manifest.id)
- manifestlist = manifest_list_model.get_or_create_manifest_list(list_json, LIST_MEDIA_TYPE,
- SCHEMA_VERSION, models_ref)
- manifest_list_model.create_manifestlistmanifest(manifestlist, list_manifest_ids, list_json,
- models_ref)
-
- tag = tag_model.create_or_update_tag(repo, tag_name, models_ref, manifest_list=manifestlist,
- tag_kind="release")
- blob_digest = digest
-
- try:
- (ManifestBlob
- .select()
- .join(Blob)
- .where(ManifestBlob.manifest == manifest,
- Blob.digest == _ensure_sha256_header(blob_digest)).get())
- except ManifestBlob.DoesNotExist:
- blob = blob_model.get_blob(blob_digest, models_ref)
- ManifestBlob.create(manifest=manifest, blob=blob)
- return tag
-
-def get_release_objs(repo, models_ref, media_type=None):
- """ Returns an array of Tag for a repo, with optional filtering by media_type. """
- Tag = models_ref.Tag
-
- release_query = (Tag
- .select()
- .where(Tag.repository == repo,
- Tag.tag_kind == Tag.tag_kind.get_id("release")))
- if media_type:
- release_query = tag_model.filter_tags_by_media_type(release_query, media_type, models_ref)
-
- return tag_model.tag_is_alive(release_query, Tag)
-
-def get_releases(repo, model_refs, media_type=None):
- """ Returns an array of Tag.name for a repo, can filter by media_type. """
- return [t.name for t in get_release_objs(repo, model_refs, media_type)]
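
As the create_app_release docstring notes, manifest_list_json is kept ordered by manifest id so that the same set of manifests always serializes (and therefore hashes) identically; bisect locates the insertion point, and the id list and JSON list are updated in lockstep. A standalone sketch of that invariant:

import bisect

def insert_manifest(list_manifest_ids, list_json, manifest_id, manifest_json):
    # Keep both lists sorted by manifest id so the serialized list is deterministic.
    insert_point = bisect.bisect_left(list_manifest_ids, manifest_id)
    list_manifest_ids.insert(insert_point, manifest_id)
    list_json.insert(insert_point, manifest_json)

ids, docs = [], []
insert_manifest(ids, docs, 7, {'mediaType': 'b'})
insert_manifest(ids, docs, 3, {'mediaType': 'a'})
assert ids == [3, 7]
assert [d['mediaType'] for d in docs] == ['a', 'b']
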
diff --git a/data/appr_model/tag.py b/data/appr_model/tag.py
deleted file mode 100644
index 4903a4572..000000000
--- a/data/appr_model/tag.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import logging
-
-from cnr.models.package_base import manifest_media_type
-from peewee import IntegrityError
-
-from data.model import (db_transaction, TagAlreadyCreatedException)
-from data.database import get_epoch_timestamp_ms, db_for_update
-
-
-logger = logging.getLogger(__name__)
-
-
-def tag_is_alive(query, cls, now_ts=None):
- return query.where((cls.lifetime_end >> None) |
- (cls.lifetime_end > now_ts))
-
-
-def tag_media_type_exists(tag, media_type, models_ref):
- ManifestListManifest = models_ref.ManifestListManifest
- manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
- return (getattr(tag.manifest_list, manifestlistmanifest_set_name)
- .where(ManifestListManifest.media_type == media_type).count() > 0)
-
-
-def create_or_update_tag(repo, tag_name, models_ref, manifest_list=None, linked_tag=None,
- tag_kind="release"):
- Tag = models_ref.Tag
-
- now_ts = get_epoch_timestamp_ms()
- tag_kind_id = Tag.tag_kind.get_id(tag_kind)
- with db_transaction():
- try:
- tag = db_for_update(tag_is_alive(Tag
- .select()
- .where(Tag.repository == repo,
- Tag.name == tag_name,
- Tag.tag_kind == tag_kind_id), Tag, now_ts)).get()
- if tag.manifest_list == manifest_list and tag.linked_tag == linked_tag:
- return tag
- tag.lifetime_end = now_ts
- tag.save()
- except Tag.DoesNotExist:
- pass
-
- try:
- return Tag.create(repository=repo, manifest_list=manifest_list, linked_tag=linked_tag,
- name=tag_name, lifetime_start=now_ts, lifetime_end=None,
- tag_kind=tag_kind_id)
- except IntegrityError:
- msg = 'Tag with name %s and lifetime start %s under repository %s/%s already exists'
- raise TagAlreadyCreatedException(msg % (tag_name, now_ts, repo.namespace_user, repo.name))
-
-
-def get_or_initialize_tag(repo, tag_name, models_ref, tag_kind="release"):
- Tag = models_ref.Tag
-
- try:
- return tag_is_alive(Tag.select().where(Tag.repository == repo, Tag.name == tag_name), Tag).get()
- except Tag.DoesNotExist:
- return Tag(repo=repo, name=tag_name, tag_kind=Tag.tag_kind.get_id(tag_kind))
-
-
-def get_tag(repo, tag_name, models_ref, tag_kind="release"):
- Tag = models_ref.Tag
- return tag_is_alive(Tag.select()
- .where(Tag.repository == repo,
- Tag.name == tag_name,
- Tag.tag_kind == Tag.tag_kind.get_id(tag_kind)), Tag).get()
-
-
-def delete_tag(repo, tag_name, models_ref, tag_kind="release"):
- Tag = models_ref.Tag
- tag_kind_id = Tag.tag_kind.get_id(tag_kind)
- tag = tag_is_alive(Tag.select()
- .where(Tag.repository == repo,
- Tag.name == tag_name, Tag.tag_kind == tag_kind_id), Tag).get()
- tag.lifetime_end = get_epoch_timestamp_ms()
- tag.save()
- return tag
-
-
-def tag_exists(repo, tag_name, models_ref, tag_kind="release"):
- Tag = models_ref.Tag
- try:
- get_tag(repo, tag_name, models_ref, tag_kind)
- return True
- except Tag.DoesNotExist:
- return False
-
-
-def filter_tags_by_media_type(tag_query, media_type, models_ref):
- """ Return only available tag for a media_type. """
- ManifestListManifest = models_ref.ManifestListManifest
- Tag = models_ref.Tag
- media_type = manifest_media_type(media_type)
- t = (tag_query
- .join(ManifestListManifest, on=(ManifestListManifest.manifest_list == Tag.manifest_list))
- .where(ManifestListManifest.media_type == ManifestListManifest.media_type.get_id(media_type)))
- return t
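
The tag model above never mutates a live row: a tag is "alive" while lifetime_end is unset (or still in the future), and create_or_update_tag retires the current row and inserts a new one. A minimal dict-based sketch of that liveness rule, using millisecond timestamps as get_epoch_timestamp_ms does:

import time

def now_ms():
    return int(time.time() * 1000)

def tag_is_alive(tag, now=None):
    now = now if now is not None else now_ms()
    return tag['lifetime_end'] is None or tag['lifetime_end'] > now

def create_or_update_tag(history, name, manifest_list):
    ts = now_ms()
    for tag in history:
        if tag['name'] == name and tag_is_alive(tag, ts):
            if tag['manifest_list'] == manifest_list:
                return tag            # nothing changed; keep the live row
            tag['lifetime_end'] = ts  # retire the old row instead of mutating it
    new_tag = {'name': name, 'manifest_list': manifest_list,
               'lifetime_start': ts, 'lifetime_end': None}
    history.append(new_tag)
    return new_tag

history = []
create_or_update_tag(history, 'stable', 'digest-a')
create_or_update_tag(history, 'stable', 'digest-b')
assert [tag_is_alive(t) for t in history] == [False, True]
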
diff --git a/data/archivedlogs.py b/data/archivedlogs.py
index 0172c74c8..e190b9782 100644
--- a/data/archivedlogs.py
+++ b/data/archivedlogs.py
@@ -1,7 +1,8 @@
import logging
-from util.registry.gzipinputstream import GzipInputStream
+from gzip import GzipFile
from flask import send_file, abort
+from cStringIO import StringIO
from data.userfiles import DelegateUserfiles, UserfilesHandlers
@@ -12,6 +13,18 @@ JSON_MIMETYPE = 'application/json'
logger = logging.getLogger(__name__)
+class LogArchiveHandlers(UserfilesHandlers):
+ def get(self, file_id):
+ path = self._files.get_file_id_path(file_id)
+ try:
+ with self._storage.stream_read_file(self._locations, path) as gzip_stream:
+ with GzipFile(fileobj=gzip_stream) as unzipped:
+ unzipped_buffer = StringIO(unzipped.read())
+ return send_file(unzipped_buffer, mimetype=JSON_MIMETYPE)
+ except IOError:
+ abort(404)
+
+
class LogArchive(object):
def __init__(self, app=None, distributed_storage=None):
self.app = app
@@ -24,10 +37,16 @@ class LogArchive(object):
location = app.config.get('LOG_ARCHIVE_LOCATION')
path = app.config.get('LOG_ARCHIVE_PATH', None)
- handler_name = 'web.logarchive'
+ handler_name = 'logarchive_handlers'
+
+ log_archive = DelegateUserfiles(app, distributed_storage, location, path, handler_name)
+
+ app.add_url_rule('/logarchive/',
+ view_func=LogArchiveHandlers.as_view(handler_name,
+ distributed_storage=distributed_storage,
+ location=location,
+ files=log_archive))
- log_archive = DelegateUserfiles(app, distributed_storage, location, path,
- handler_name=handler_name)
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['log_archive'] = log_archive
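
The reinstated LogArchiveHandlers.get streams the stored object, wraps it in GzipFile, and serves the decompressed JSON. The core step is decompressing an already-open binary stream rather than a file on disk; a small framework-free sketch follows (written against Python 3's io/gzip for portability, whereas the diff itself targets the Python 2 cStringIO API):

import gzip
import io
import json

def read_archived_log(stream):
    """Decompress a gzip byte stream and parse the archived log JSON."""
    with gzip.GzipFile(fileobj=stream) as unzipped:
        return json.loads(unzipped.read().decode('utf-8'))

# Simulate an object fetched from storage: gzip-compressed JSON bytes.
payload = json.dumps({'logs': [{'message': 'Step 1 complete'}]}).encode('utf-8')
stored = io.BytesIO(gzip.compress(payload))

assert read_archived_log(stored)['logs'][0]['message'] == 'Step 1 complete'
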
diff --git a/data/billing.py b/data/billing.py
index aa2420c01..26f47ed73 100644
--- a/data/billing.py
+++ b/data/billing.py
@@ -6,7 +6,7 @@ from calendar import timegm
from util.morecollections import AttrDict
PLANS = [
- # Deprecated Plans (2013-2014)
+ # Deprecated Plans
{
'title': 'Micro',
'price': 700,
@@ -16,8 +16,7 @@ PLANS = [
'bus_features': False,
'deprecated': True,
'free_trial_days': 14,
- 'superseded_by': 'personal-30',
- 'plans_page_hidden': False,
+ 'superseded_by': None,
},
{
'title': 'Basic',
@@ -28,8 +27,7 @@ PLANS = [
'bus_features': False,
'deprecated': True,
'free_trial_days': 14,
- 'superseded_by': 'bus-micro-30',
- 'plans_page_hidden': False,
+ 'superseded_by': None,
},
{
'title': 'Yacht',
@@ -41,7 +39,6 @@ PLANS = [
'deprecated': True,
'free_trial_days': 180,
'superseded_by': 'bus-small-30',
- 'plans_page_hidden': False,
},
{
'title': 'Personal',
@@ -53,7 +50,6 @@ PLANS = [
'deprecated': True,
'free_trial_days': 14,
'superseded_by': 'personal-30',
- 'plans_page_hidden': False,
},
{
'title': 'Skiff',
@@ -65,7 +61,6 @@ PLANS = [
'deprecated': True,
'free_trial_days': 14,
'superseded_by': 'bus-micro-30',
- 'plans_page_hidden': False,
},
{
'title': 'Yacht',
@@ -77,7 +72,6 @@ PLANS = [
'deprecated': True,
'free_trial_days': 14,
'superseded_by': 'bus-small-30',
- 'plans_page_hidden': False,
},
{
'title': 'Freighter',
@@ -89,7 +83,6 @@ PLANS = [
'deprecated': True,
'free_trial_days': 14,
'superseded_by': 'bus-medium-30',
- 'plans_page_hidden': False,
},
{
'title': 'Tanker',
@@ -101,108 +94,9 @@ PLANS = [
'deprecated': True,
'free_trial_days': 14,
'superseded_by': 'bus-large-30',
- 'plans_page_hidden': False,
},
- # Deprecated plans (2014-2017)
- {
- 'title': 'Personal',
- 'price': 1200,
- 'privateRepos': 5,
- 'stripeId': 'personal-30',
- 'audience': 'Individuals',
- 'bus_features': False,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'personal-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Skiff',
- 'price': 2500,
- 'privateRepos': 10,
- 'stripeId': 'bus-micro-30',
- 'audience': 'For startups',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-micro-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Yacht',
- 'price': 5000,
- 'privateRepos': 20,
- 'stripeId': 'bus-small-30',
- 'audience': 'For small businesses',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-small-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Freighter',
- 'price': 10000,
- 'privateRepos': 50,
- 'stripeId': 'bus-medium-30',
- 'audience': 'For normal businesses',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-medium-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Tanker',
- 'price': 20000,
- 'privateRepos': 125,
- 'stripeId': 'bus-large-30',
- 'audience': 'For large businesses',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-large-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Carrier',
- 'price': 35000,
- 'privateRepos': 250,
- 'stripeId': 'bus-xlarge-30',
- 'audience': 'For extra large businesses',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-xlarge-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Huge',
- 'price': 65000,
- 'privateRepos': 500,
- 'stripeId': 'bus-500-30',
- 'audience': 'For huge business',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-500-2018',
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Huuge',
- 'price': 120000,
- 'privateRepos': 1000,
- 'stripeId': 'bus-1000-30',
- 'audience': 'For the SaaS savvy enterprise',
- 'bus_features': True,
- 'deprecated': True,
- 'free_trial_days': 30,
- 'superseded_by': 'bus-1000-2018',
- 'plans_page_hidden': False,
- },
-
- # Active plans (as of Dec 2017)
+ # Active plans
{
'title': 'Open Source',
'price': 0,
@@ -213,115 +107,61 @@ PLANS = [
'deprecated': False,
'free_trial_days': 30,
'superseded_by': None,
- 'plans_page_hidden': False,
},
{
- 'title': 'Developer',
- 'price': 1500,
+ 'title': 'Personal',
+ 'price': 1200,
'privateRepos': 5,
- 'stripeId': 'personal-2018',
+ 'stripeId': 'personal-30',
'audience': 'Individuals',
'bus_features': False,
'deprecated': False,
'free_trial_days': 30,
'superseded_by': None,
- 'plans_page_hidden': False,
},
{
- 'title': 'Micro',
- 'price': 3000,
+ 'title': 'Skiff',
+ 'price': 2500,
'privateRepos': 10,
- 'stripeId': 'bus-micro-2018',
+ 'stripeId': 'bus-micro-30',
'audience': 'For startups',
'bus_features': True,
'deprecated': False,
'free_trial_days': 30,
'superseded_by': None,
- 'plans_page_hidden': False,
},
{
- 'title': 'Small',
- 'price': 6000,
+ 'title': 'Yacht',
+ 'price': 5000,
'privateRepos': 20,
- 'stripeId': 'bus-small-2018',
+ 'stripeId': 'bus-small-30',
'audience': 'For small businesses',
'bus_features': True,
'deprecated': False,
'free_trial_days': 30,
'superseded_by': None,
- 'plans_page_hidden': False,
},
{
- 'title': 'Medium',
- 'price': 12500,
+ 'title': 'Freighter',
+ 'price': 10000,
'privateRepos': 50,
- 'stripeId': 'bus-medium-2018',
+ 'stripeId': 'bus-medium-30',
'audience': 'For normal businesses',
'bus_features': True,
'deprecated': False,
'free_trial_days': 30,
'superseded_by': None,
- 'plans_page_hidden': False,
},
{
- 'title': 'Large',
- 'price': 25000,
+ 'title': 'Tanker',
+ 'price': 20000,
'privateRepos': 125,
- 'stripeId': 'bus-large-2018',
+ 'stripeId': 'bus-large-30',
'audience': 'For large businesses',
'bus_features': True,
'deprecated': False,
'free_trial_days': 30,
'superseded_by': None,
- 'plans_page_hidden': False,
- },
- {
- 'title': 'Extra Large',
- 'price': 45000,
- 'privateRepos': 250,
- 'stripeId': 'bus-xlarge-2018',
- 'audience': 'For extra large businesses',
- 'bus_features': True,
- 'deprecated': False,
- 'free_trial_days': 30,
- 'superseded_by': None,
- 'plans_page_hidden': False,
- },
- {
- 'title': 'XXL',
- 'price': 85000,
- 'privateRepos': 500,
- 'stripeId': 'bus-500-2018',
- 'audience': 'For huge business',
- 'bus_features': True,
- 'deprecated': False,
- 'free_trial_days': 30,
- 'superseded_by': None,
- 'plans_page_hidden': False,
- },
- {
- 'title': 'XXXL',
- 'price': 160000,
- 'privateRepos': 1000,
- 'stripeId': 'bus-1000-2018',
- 'audience': 'For the SaaS savvy enterprise',
- 'bus_features': True,
- 'deprecated': False,
- 'free_trial_days': 30,
- 'superseded_by': None,
- 'plans_page_hidden': False,
- },
- {
- 'title': 'XXXXL',
- 'price': 310000,
- 'privateRepos': 2000,
- 'stripeId': 'bus-2000-2018',
- 'audience': 'For the SaaS savvy big enterprise',
- 'bus_features': True,
- 'deprecated': False,
- 'free_trial_days': 30,
- 'superseded_by': None,
- 'plans_page_hidden': False,
},
]
@@ -335,17 +175,6 @@ def get_plan(plan_id):
return None
-class FakeSubscription(AttrDict):
- @classmethod
- def build(cls, data, customer):
- data = AttrDict.deep_copy(data)
- data['customer'] = customer
- return cls(data)
-
- def delete(self):
- self.customer.subscription = None
-
-
class FakeStripe(object):
class Customer(AttrDict):
FAKE_PLAN = AttrDict({
@@ -393,31 +222,37 @@ class FakeStripe(object):
def save(self):
if self.get('new_card', None) is not None:
- raise stripe.error.CardError('Test raising exception on set card.', self.get('new_card'), 402)
+ raise stripe.CardError('Test raising exception on set card.', self.get('new_card'), 402)
if self.get('new_plan', None) is not None:
if self.subscription is None:
- self.subscription = FakeSubscription.build(self.FAKE_SUBSCRIPTION, self)
+ self.subscription = AttrDict.deep_copy(self.FAKE_SUBSCRIPTION)
self.subscription.plan.id = self.get('new_plan')
+ if self.get('cancel_subscription', None) is not None:
+ self.subscription = None
+
+ def cancel_subscription(self):
+ self['cancel_subscription'] = True
@classmethod
def retrieve(cls, stripe_customer_id):
if stripe_customer_id in cls.ACTIVE_CUSTOMERS:
cls.ACTIVE_CUSTOMERS[stripe_customer_id].pop('new_card', None)
cls.ACTIVE_CUSTOMERS[stripe_customer_id].pop('new_plan', None)
+ cls.ACTIVE_CUSTOMERS[stripe_customer_id].pop('cancel_subscription', None)
return cls.ACTIVE_CUSTOMERS[stripe_customer_id]
else:
new_customer = cls({
'default_card': 'card123',
'cards': AttrDict.deep_copy(cls.FAKE_CARD_LIST),
+ 'subscription': AttrDict.deep_copy(cls.FAKE_SUBSCRIPTION),
'id': stripe_customer_id,
})
- new_customer.subscription = FakeSubscription.build(cls.FAKE_SUBSCRIPTION, new_customer)
cls.ACTIVE_CUSTOMERS[stripe_customer_id] = new_customer
return new_customer
class Invoice(AttrDict):
@staticmethod
- def list(customer, count):
+ def all(customer, count):
return AttrDict({
'data': [],
})
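
Both the old and new PLANS lists are consumed the same way: get_plan scans for a matching stripeId and returns None when nothing matches (only its fallback return is visible in the hunk above, so the scan below is an assumption about its body). A tiny sketch over a trimmed plan list; the Personal row is taken from the diff, while the Open Source row's stripeId and repo count are filled in as plausible values:

PLANS = [
    {'title': 'Open Source', 'price': 0, 'privateRepos': 0, 'stripeId': 'free', 'deprecated': False},
    {'title': 'Personal', 'price': 1200, 'privateRepos': 5, 'stripeId': 'personal-30', 'deprecated': False},
]

def get_plan(plan_id):
    """Return the plan with the given stripeId, or None if none matches."""
    for plan in PLANS:
        if plan['stripeId'] == plan_id:
            return plan
    return None

assert get_plan('personal-30')['privateRepos'] == 5
assert get_plan('no-such-plan') is None
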
diff --git a/data/buildlogs.py b/data/buildlogs.py
index b6b4d2652..17e5b397f 100644
--- a/data/buildlogs.py
+++ b/data/buildlogs.py
@@ -1,15 +1,11 @@
import redis
import json
-import time
-
-from contextlib import closing
from util.dynamic import import_class
from datetime import timedelta
ONE_DAY = timedelta(days=1)
-SEVEN_DAYS = timedelta(days=7)
class BuildStatusRetrievalError(Exception):
@@ -21,21 +17,7 @@ class RedisBuildLogs(object):
PHASE = 'phase'
def __init__(self, redis_config):
- self._redis_client = None
- self._redis_config = redis_config
-
- @property
- def _redis(self):
- if self._redis_client is not None:
- return self._redis_client
-
- args = dict(self._redis_config)
- args.update({'socket_connect_timeout': 1,
- 'socket_timeout': 2,
- 'single_connection_client': True})
-
- self._redis_client = redis.StrictRedis(**args)
- return self._redis_client
+ self._redis = redis.StrictRedis(socket_connect_timeout=5, **redis_config)
@staticmethod
def _logs_key(build_id):
@@ -46,11 +28,7 @@ class RedisBuildLogs(object):
Appends the serialized form of log_obj to the end of the log entry list
and returns the new length of the list.
"""
- pipeline = self._redis.pipeline(transaction=False)
- pipeline.expire(self._logs_key(build_id), SEVEN_DAYS)
- pipeline.rpush(self._logs_key(build_id), json.dumps(log_obj))
- result = pipeline.execute()
- return result[1]
+ return self._redis.rpush(self._logs_key(build_id), json.dumps(log_obj))
def append_log_message(self, build_id, log_message, log_type=None, log_data=None):
"""
@@ -67,7 +45,7 @@ class RedisBuildLogs(object):
if log_data:
log_obj['data'] = log_data
- return self.append_log_entry(build_id, log_obj) - 1
+ return self._redis.rpush(self._logs_key(build_id), json.dumps(log_obj)) - 1
def get_log_entries(self, build_id, start_index):
"""
@@ -78,14 +56,8 @@ class RedisBuildLogs(object):
llen = self._redis.llen(self._logs_key(build_id))
log_entries = self._redis.lrange(self._logs_key(build_id), start_index, -1)
return (llen, (json.loads(entry) for entry in log_entries))
- except redis.RedisError as re:
- raise BuildStatusRetrievalError('Cannot retrieve build logs: %s' % re)
-
- def expire_status(self, build_id):
- """
- Sets the status entry to expire in 1 day.
- """
- self._redis.expire(self._status_key(build_id), ONE_DAY)
+ except redis.ConnectionError:
+ raise BuildStatusRetrievalError('Cannot retrieve build logs')
def expire_log_entries(self, build_id):
"""
@@ -93,11 +65,6 @@ class RedisBuildLogs(object):
"""
self._redis.expire(self._logs_key(build_id), ONE_DAY)
- def delete_log_entries(self, build_id):
- """
- Delete the log entry
- """
- self._redis.delete(self._logs_key(build_id))
@staticmethod
def _status_key(build_id):
@@ -108,7 +75,7 @@ class RedisBuildLogs(object):
Sets the status key for this build to json serialized form of the supplied
obj.
"""
- self._redis.set(self._status_key(build_id), json.dumps(status_obj), ex=SEVEN_DAYS)
+ self._redis.set(self._status_key(build_id), json.dumps(status_obj))
def get_status(self, build_id):
"""
@@ -116,32 +83,16 @@ class RedisBuildLogs(object):
"""
try:
fetched = self._redis.get(self._status_key(build_id))
- except redis.RedisError as re:
- raise BuildStatusRetrievalError('Cannot retrieve build status: %s' % re)
+ except redis.ConnectionError:
+ raise BuildStatusRetrievalError('Cannot retrieve build status')
return json.loads(fetched) if fetched else None
- @staticmethod
- def _health_key():
- return '_health'
-
def check_health(self):
try:
- args = dict(self._redis_config)
- args.update({'socket_connect_timeout': 1,
- 'socket_timeout': 1,
- 'single_connection_client': True})
-
- with closing(redis.StrictRedis(**args)) as connection:
- if not connection.ping():
- return (False, 'Could not ping redis')
-
- # Ensure we can write and read a key.
- connection.set(self._health_key(), time.time())
- connection.get(self._health_key())
- return (True, None)
- except redis.RedisError as re:
- return (False, 'Could not connect to redis: %s' % re.message)
+ return self._redis.ping() == True
+ except redis.ConnectionError:
+ return False
class BuildLogs(object):
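
The removed RedisBuildLogs code did two notable things this revert drops: it built its Redis client lazily with short socket timeouts, and it used a pipeline so every append also refreshed the key's TTL. A sketch of both patterns against the redis-py client (key names and timeout values here are illustrative, and running commands requires a reachable Redis server):

import json

import redis  # pip install redis

SEVEN_DAYS = 7 * 24 * 60 * 60  # seconds

class BuildLogStore(object):
    def __init__(self, redis_config):
        self._redis_config = dict(redis_config)
        self._redis_client = None

    @property
    def _redis(self):
        # Lazy construction: no connection is made until the first command runs.
        if self._redis_client is None:
            args = dict(self._redis_config)
            args.update({'socket_connect_timeout': 1, 'socket_timeout': 2})
            self._redis_client = redis.StrictRedis(**args)
        return self._redis_client

    def append_log_entry(self, build_id, log_obj):
        # One round trip: refresh the TTL and push the new entry together.
        pipeline = self._redis.pipeline(transaction=False)
        pipeline.expire('builds/%s/logs' % build_id, SEVEN_DAYS)
        pipeline.rpush('builds/%s/logs' % build_id, json.dumps(log_obj))
        return pipeline.execute()[1]  # rpush returns the new list length
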
diff --git a/data/cache/__init__.py b/data/cache/__init__.py
deleted file mode 100644
index a7c44dadd..000000000
--- a/data/cache/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from data.cache.impl import NoopDataModelCache, InMemoryDataModelCache, MemcachedModelCache
-
-def get_model_cache(config):
- """ Returns a data model cache matching the given configuration. """
- cache_config = config.get('DATA_MODEL_CACHE_CONFIG', {})
- engine = cache_config.get('engine', 'noop')
-
- if engine == 'noop':
- return NoopDataModelCache()
-
- if engine == 'inmemory':
- return InMemoryDataModelCache()
-
- if engine == 'memcached':
- endpoint = cache_config.get('endpoint', None)
- if endpoint is None:
- raise Exception('Missing `endpoint` for memcached model cache configuration')
-
- timeout = cache_config.get('timeout')
- connect_timeout = cache_config.get('connect_timeout')
- return MemcachedModelCache(endpoint, timeout=timeout, connect_timeout=connect_timeout)
-
- raise Exception('Unknown model cache engine `%s`' % engine)
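
For context, a hypothetical caller of the factory removed above would hand it the app config and get back whichever cache the 'engine' key selects. The endpoint and timeouts below are made up, and the imports refer to modules deleted in this diff, so this only runs against the pre-change tree:

from data.cache import get_model_cache            # module removed above
from data.cache.cache_key import CacheKey         # module removed below

config = {
    'DATA_MODEL_CACHE_CONFIG': {
        'engine': 'memcached',
        'endpoint': ('127.0.0.1', 11211),
        'timeout': 1,
        'connect_timeout': 1,
    },
}

model_cache = get_model_cache(config)  # MemcachedModelCache for this configuration
key = CacheKey('repo_blob__devtable_simple_sha256-abc_2', '60s')
value = model_cache.retrieve(key, lambda: {'uploading': False})
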
diff --git a/data/cache/cache_key.py b/data/cache/cache_key.py
deleted file mode 100644
index 93aad65be..000000000
--- a/data/cache/cache_key.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from collections import namedtuple
-
-class CacheKey(namedtuple('CacheKey', ['key', 'expiration'])):
- """ Defines a key into the data model cache. """
- pass
-
-
-def for_repository_blob(namespace_name, repo_name, digest, version):
- """ Returns a cache key for a blob in a repository. """
- return CacheKey('repo_blob__%s_%s_%s_%s' % (namespace_name, repo_name, digest, version), '60s')
-
-
-def for_catalog_page(auth_context_key, start_id, limit):
- """ Returns a cache key for a single page of a catalog lookup for an authed context. """
- params = (auth_context_key or '(anon)', start_id or 0, limit or 0)
- return CacheKey('catalog_page__%s_%s_%s' % params, '60s')
-
-
-def for_namespace_geo_restrictions(namespace_name):
- """ Returns a cache key for the geo restrictions for a namespace. """
- return CacheKey('geo_restrictions__%s' % (namespace_name), '240s')
-
-
-def for_active_repo_tags(repository_id, start_pagination_id, limit):
- """ Returns a cache key for the active tags in a repository. """
- return CacheKey('repo_active_tags__%s_%s_%s' % (repository_id, start_pagination_id, limit),
- '120s')
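
The expiration field on CacheKey is a short duration string ('60s', '120s', '240s') that the cache backends later turn into a timedelta via util.timedeltastring.convert_to_timedelta. That helper is not shown in this diff, so the parser below is a guess at the minimal behavior these keys need (s/m/h/d suffixes only):

from datetime import timedelta

_UNITS = {'s': 'seconds', 'm': 'minutes', 'h': 'hours', 'd': 'days'}

def parse_expiration(value):
    """Parse strings such as '60s' or '240s' into a timedelta."""
    amount, unit = int(value[:-1]), value[-1]
    return timedelta(**{_UNITS[unit]: amount})

assert parse_expiration('60s') == timedelta(seconds=60)
assert parse_expiration('240s') == timedelta(minutes=4)
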
diff --git a/data/cache/impl.py b/data/cache/impl.py
deleted file mode 100644
index 982e950e9..000000000
--- a/data/cache/impl.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import logging
-import json
-
-from datetime import datetime
-
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-from pymemcache.client.base import Client
-
-from util.expiresdict import ExpiresDict
-from util.timedeltastring import convert_to_timedelta
-
-logger = logging.getLogger(__name__)
-
-
-def is_not_none(value):
- return value is not None
-
-
-@add_metaclass(ABCMeta)
-class DataModelCache(object):
- """ Defines an interface for cache storing and returning tuple data model objects. """
-
- @abstractmethod
- def retrieve(self, cache_key, loader, should_cache=is_not_none):
- """ Checks the cache for the specified cache key and returns the value found (if any). If none
- found, the loader is called to get a result and populate the cache.
- """
- pass
-
-
-class NoopDataModelCache(DataModelCache):
- """ Implementation of the data model cache which does nothing. """
-
- def retrieve(self, cache_key, loader, should_cache=is_not_none):
- return loader()
-
-
-class InMemoryDataModelCache(DataModelCache):
- """ Implementation of the data model cache backed by an in-memory dictionary. """
- def __init__(self):
- self.cache = ExpiresDict()
-
- def retrieve(self, cache_key, loader, should_cache=is_not_none):
- not_found = [None]
- logger.debug('Checking cache for key %s', cache_key.key)
- result = self.cache.get(cache_key.key, default_value=not_found)
- if result != not_found:
- logger.debug('Found result in cache for key %s: %s', cache_key.key, result)
- return json.loads(result)
-
- logger.debug('Found no result in cache for key %s; calling loader', cache_key.key)
- result = loader()
- logger.debug('Got loaded result for key %s: %s', cache_key.key, result)
- if should_cache(result):
- logger.debug('Caching loaded result for key %s with expiration %s: %s', cache_key.key,
- result, cache_key.expiration)
- expires = convert_to_timedelta(cache_key.expiration) + datetime.now()
- self.cache.set(cache_key.key, json.dumps(result), expires=expires)
- logger.debug('Cached loaded result for key %s with expiration %s: %s', cache_key.key,
- result, cache_key.expiration)
- else:
- logger.debug('Not caching loaded result for key %s: %s', cache_key.key, result)
-
- return result
-
-
-_DEFAULT_MEMCACHE_TIMEOUT = 1 # second
-_DEFAULT_MEMCACHE_CONNECT_TIMEOUT = 1 # second
-
-_STRING_TYPE = 1
-_JSON_TYPE = 2
-
-class MemcachedModelCache(DataModelCache):
- """ Implementation of the data model cache backed by a memcached. """
- def __init__(self, endpoint, timeout=_DEFAULT_MEMCACHE_TIMEOUT,
- connect_timeout=_DEFAULT_MEMCACHE_CONNECT_TIMEOUT):
- self.endpoint = endpoint
- self.timeout = timeout
- self.connect_timeout = connect_timeout
- self.client = None
-
- def _get_client(self):
- client = self.client
- if client is not None:
- return client
-
- try:
- # Copied from the doc comment for Client.
- def serialize_json(key, value):
- if type(value) == str:
- return value, _STRING_TYPE
-
- return json.dumps(value), _JSON_TYPE
-
- def deserialize_json(key, value, flags):
- if flags == _STRING_TYPE:
- return value
-
- if flags == _JSON_TYPE:
- return json.loads(value)
-
-        raise Exception("Unknown flags for value: {0}".format(flags))
-
- self.client = Client(self.endpoint, no_delay=True, timeout=self.timeout,
- connect_timeout=self.connect_timeout,
- key_prefix='data_model_cache__',
- serializer=serialize_json,
- deserializer=deserialize_json,
- ignore_exc=True)
- return self.client
- except:
- logger.exception('Got exception when creating memcached client to %s', self.endpoint)
- return None
-
- def retrieve(self, cache_key, loader, should_cache=is_not_none):
- not_found = [None]
- client = self._get_client()
- if client is not None:
- logger.debug('Checking cache for key %s', cache_key.key)
- try:
- result = client.get(cache_key.key, default=not_found)
- if result != not_found:
- logger.debug('Found result in cache for key %s: %s', cache_key.key, result)
- return result
- except:
- logger.exception('Got exception when trying to retrieve key %s', cache_key.key)
-
- logger.debug('Found no result in cache for key %s; calling loader', cache_key.key)
- result = loader()
- logger.debug('Got loaded result for key %s: %s', cache_key.key, result)
- if client is not None and should_cache(result):
- try:
- logger.debug('Caching loaded result for key %s with expiration %s: %s', cache_key.key,
- result, cache_key.expiration)
- expires = convert_to_timedelta(cache_key.expiration) if cache_key.expiration else None
- client.set(cache_key.key, result, expire=int(expires.total_seconds()) if expires else None)
- logger.debug('Cached loaded result for key %s with expiration %s: %s', cache_key.key,
- result, cache_key.expiration)
- except:
- logger.exception('Got exception when trying to set key %s to %s', cache_key.key, result)
- else:
- logger.debug('Not caching loaded result for key %s: %s', cache_key.key, result)
-
- return result
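
All three implementations above share one contract: retrieve(cache_key, loader, should_cache) returns the cached value when present, otherwise calls the loader, stores the result if should_cache approves (by default, when it is not None), and returns it. A dictionary-backed sketch of that cache-aside contract, ignoring expiration for brevity:

def make_cache():
    store = {}

    def retrieve(cache_key, loader, should_cache=lambda value: value is not None):
        if cache_key in store:
            return store[cache_key]
        result = loader()
        if should_cache(result):
            store[cache_key] = result
        return result

    return retrieve

retrieve = make_cache()
calls = []

def loader():
    calls.append(1)
    return {'a': 1234}

assert retrieve('repo_blob__ns_repo_digest_2', loader) == {'a': 1234}
assert retrieve('repo_blob__ns_repo_digest_2', loader) == {'a': 1234}
assert len(calls) == 1  # the second retrieval was served from the cache
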
diff --git a/data/cache/test/test_cache.py b/data/cache/test/test_cache.py
deleted file mode 100644
index bf0c4cccd..000000000
--- a/data/cache/test/test_cache.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import pytest
-
-from mock import patch
-
-from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache
-from data.cache.cache_key import CacheKey
-
-class MockClient(object):
- def __init__(self, server, **kwargs):
- self.data = {}
-
- def get(self, key, default=None):
- return self.data.get(key, default)
-
- def set(self, key, value, expire=None):
- self.data[key] = value
-
-
-@pytest.mark.parametrize('cache_type', [
- (NoopDataModelCache),
- (InMemoryDataModelCache),
-])
-def test_caching(cache_type):
- key = CacheKey('foo', '60m')
- cache = cache_type()
-
- # Perform two retrievals, and make sure both return.
- assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
- assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
-
-
-def test_memcache():
- key = CacheKey('foo', '60m')
- with patch('data.cache.impl.Client', MockClient):
- cache = MemcachedModelCache(('127.0.0.1', '-1'))
- assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
- assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
-
-
-def test_memcache_should_cache():
- key = CacheKey('foo', None)
-
- def sc(value):
- return value['a'] != 1234
-
- with patch('data.cache.impl.Client', MockClient):
- cache = MemcachedModelCache(('127.0.0.1', '-1'))
- assert cache.retrieve(key, lambda: {'a': 1234}, should_cache=sc) == {'a': 1234}
-
- # Ensure not cached since it was `1234`.
- assert cache._get_client().get(key.key) is None
-
- # Ensure cached.
- assert cache.retrieve(key, lambda: {'a': 2345}, should_cache=sc) == {'a': 2345}
- assert cache._get_client().get(key.key) is not None
- assert cache.retrieve(key, lambda: {'a': 2345}, should_cache=sc) == {'a': 2345}
diff --git a/data/database.py b/data/database.py
index 62c59e6e0..225b0c11c 100644
--- a/data/database.py
+++ b/data/database.py
@@ -1,70 +1,31 @@
-# pylint: disable=old-style-class,no-init
-
-import inspect
-import logging
import string
-import sys
-import time
+import logging
import uuid
-import os
-
-from contextlib import contextmanager
-from collections import defaultdict, namedtuple
-from datetime import datetime
-from random import SystemRandom
-
+import time
import toposort
-from enum import IntEnum, Enum, unique
+from random import SystemRandom
+from datetime import datetime
from peewee import *
-from peewee import __exception_wrapper__, Function
-from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase, PooledSqliteDatabase
-
+from data.read_slave import ReadSlaveModel
from sqlalchemy.engine.url import make_url
+from collections import defaultdict
-import resumablehashlib
-from cachetools.func import lru_cache
-
-from active_migration import ERTMigrationFlags, ActiveDataMigration
-from data.fields import (ResumableSHA256Field, ResumableSHA1Field, JSONField, Base64BinaryField,
- FullIndexedTextField, FullIndexedCharField, EnumField as ClientEnumField,
- EncryptedTextField, EncryptedCharField, CredentialField)
-from data.text import match_mysql, match_like
-from data.encryption import FieldEncrypter
-from data.readreplica import ReadReplicaSupportedModel, ReadOnlyConfig
+from data.read_slave import ReadSlaveModel
from util.names import urn_generator
-from util.validation import validate_postgres_precondition
logger = logging.getLogger(__name__)
-DEFAULT_DB_CONNECT_TIMEOUT = 10 # seconds
-
-# IMAGE_NOT_SCANNED_ENGINE_VERSION is the version found in security_indexed_engine when the
-# image has not yet been scanned.
-IMAGE_NOT_SCANNED_ENGINE_VERSION = -1
-
-schemedriver = namedtuple('schemedriver', ['driver', 'pooled_driver'])
-
-_SCHEME_DRIVERS = {
- 'mysql': schemedriver(MySQLDatabase, PooledMySQLDatabase),
- 'mysql+pymysql': schemedriver(MySQLDatabase, PooledMySQLDatabase),
- 'sqlite': schemedriver(SqliteDatabase, PooledSqliteDatabase),
- 'postgresql': schemedriver(PostgresqlDatabase, PooledPostgresqlDatabase),
- 'postgresql+psycopg2': schemedriver(PostgresqlDatabase, PooledPostgresqlDatabase),
+SCHEME_DRIVERS = {
+ 'mysql': MySQLDatabase,
+ 'mysql+pymysql': MySQLDatabase,
+ 'sqlite': SqliteDatabase,
+ 'postgresql': PostgresqlDatabase,
+ 'postgresql+psycopg2': PostgresqlDatabase,
}
-
-SCHEME_MATCH_FUNCTION = {
- 'mysql': match_mysql,
- 'mysql+pymysql': match_mysql,
- 'sqlite': match_like,
- 'postgresql': match_like,
- 'postgresql+psycopg2': match_like,
-}
-
-
SCHEME_RANDOM_FUNCTION = {
'mysql': fn.Rand,
'mysql+pymysql': fn.Rand,
@@ -73,104 +34,16 @@ SCHEME_RANDOM_FUNCTION = {
'postgresql+psycopg2': fn.Random,
}
-
-PRECONDITION_VALIDATION = {
- 'postgresql': validate_postgres_precondition,
- 'postgresql+psycopg2': validate_postgres_precondition,
-}
-
-
-_EXTRA_ARGS = {
- 'mysql': dict(charset='utf8mb4'),
- 'mysql+pymysql': dict(charset='utf8mb4'),
-}
-
-
-def pipes_concat(arg1, arg2, *extra_args):
- """ Concat function for sqlite, since it doesn't support fn.Concat.
- Concatenates clauses with || characters.
- """
- reduced = arg1.concat(arg2)
- for arg in extra_args:
- reduced = reduced.concat(arg)
- return reduced
-
-
-def function_concat(arg1, arg2, *extra_args):
- """ Default implementation of concat which uses fn.Concat(). Used by all
- database engines except sqlite.
- """
- return fn.Concat(arg1, arg2, *extra_args)
-
-
-SCHEME_SPECIALIZED_CONCAT = {
- 'sqlite': pipes_concat,
-}
-
-
def real_for_update(query):
return query.for_update()
-
def null_for_update(query):
return query
-
-def delete_instance_filtered(instance, model_class, delete_nullable, skip_transitive_deletes):
- """ Deletes the DB instance recursively, skipping any models in the skip_transitive_deletes set.
-
-      Callers *must* ensure that any models listed in the skip_transitive_deletes set are capable
-      of being directly deleted when the instance is deleted (with automatic sorting handling
-      dependency order).
-
- For example, the RepositoryTag and Image tables for Repository will always refer to the
- *same* repository when RepositoryTag references Image, so we can safely skip
- transitive deletion for the RepositoryTag table.
- """
- # We need to sort the ops so that models get cleaned in order of their dependencies
- ops = reversed(list(instance.dependencies(delete_nullable)))
- filtered_ops = []
-
- dependencies = defaultdict(set)
-
- for query, fk in ops:
-  # We only want to skip transitive deletes, which are done using subqueries in the form of
-  # DELETE FROM <table> in <subquery>. If an op is not using a subquery, we allow it to be
-  # applied directly.
- if fk.model not in skip_transitive_deletes or query.op.lower() != 'in':
- filtered_ops.append((query, fk))
-
- if query.op.lower() == 'in':
- dependencies[fk.model.__name__].add(query.rhs.model.__name__)
- elif query.op == '=':
- dependencies[fk.model.__name__].add(model_class.__name__)
- else:
- raise RuntimeError('Unknown operator in recursive repository delete query')
-
- sorted_models = list(reversed(toposort.toposort_flatten(dependencies)))
- def sorted_model_key(query_fk_tuple):
- cmp_query, cmp_fk = query_fk_tuple
- if cmp_query.op.lower() == 'in':
- return -1
- return sorted_models.index(cmp_fk.model.__name__)
- filtered_ops.sort(key=sorted_model_key)
-
- with db_transaction():
- for query, fk in filtered_ops:
- _model = fk.model
- if fk.null and not delete_nullable:
- _model.update(**{fk.name: None}).where(query).execute()
- else:
- _model.delete().where(query).execute()
-
- return instance.delete().where(instance._pk_expr()).execute()
-
-
SCHEME_SPECIALIZED_FOR_UPDATE = {
'sqlite': null_for_update,
}
-
class CallableProxy(Proxy):
def __call__(self, *args, **kwargs):
if self.obj is None:
@@ -178,24 +51,6 @@ class CallableProxy(Proxy):
return self.obj(*args, **kwargs)
-class RetryOperationalError(object):
-
- def execute_sql(self, sql, params=None, commit=True):
- try:
- cursor = super(RetryOperationalError, self).execute_sql(sql, params, commit)
- except OperationalError:
- if not self.is_closed():
- self.close()
-
- with __exception_wrapper__:
- cursor = self.cursor()
- cursor.execute(sql, params or ())
- if commit and not self.in_transaction():
- self.commit()
-
- return cursor
-
-
class CloseForLongOperation(object):
""" Helper object which disconnects the database then reconnects after the nested operation
completes.
@@ -205,12 +60,9 @@ class CloseForLongOperation(object):
self.config_object = config_object
def __enter__(self):
- if self.config_object.get('TESTING') is True:
- return
-
close_db_filter(None)
- def __exit__(self, typ, value, traceback):
+ def __exit__(self, type, value, traceback):
# Note: Nothing to do. The next SQL call will reconnect automatically.
pass
@@ -222,12 +74,9 @@ class UseThenDisconnect(object):
self.config_object = config_object
def __enter__(self):
- pass
-
- def __exit__(self, typ, value, traceback):
- if self.config_object.get('TESTING') is True:
- return
+ configure(self.config_object)
+ def __exit__(self, type, value, traceback):
close_db_filter(None)
@@ -246,11 +95,11 @@ class TupleSelector(object):
@classmethod
def tuple_reference_key(cls, field):
""" Returns a string key for referencing a field in a TupleSelector. """
- if isinstance(field, Function):
+ if field._node_type == 'func':
return field.name + ','.join([cls.tuple_reference_key(arg) for arg in field.arguments])
- if isinstance(field, Field):
- return field.name + ':' + field.model.__name__
+ if field._node_type == 'field':
+ return field.name + ':' + field.model_class.__name__
raise Exception('Unknown field type %s in TupleSelector' % field._node_type)
@@ -267,57 +116,21 @@ class TupleSelector(object):
db = Proxy()
-read_only_config = Proxy()
+read_slave = Proxy()
db_random_func = CallableProxy()
-db_match_func = CallableProxy()
db_for_update = CallableProxy()
-db_transaction = CallableProxy()
-db_concat_func = CallableProxy()
-db_encrypter = Proxy()
-ensure_under_transaction = CallableProxy()
def validate_database_url(url, db_kwargs, connect_timeout=5):
- """ Validates that we can connect to the given database URL, with the given kwargs. Raises
- an exception if the validation fails. """
db_kwargs = db_kwargs.copy()
+ db_kwargs['connect_timeout'] = connect_timeout
- try:
- driver = _db_from_url(url, db_kwargs, connect_timeout=connect_timeout, allow_retry=False,
- allow_pooling=False)
- driver.connect()
- finally:
- try:
- driver.close()
- except:
- pass
+ driver = _db_from_url(url, db_kwargs)
+ driver.connect()
+ driver.close()
-def validate_database_precondition(url, db_kwargs, connect_timeout=5):
- """ Validates that we can connect to the given database URL and the database meets our
- precondition. Raises an exception if the validation fails. """
- db_kwargs = db_kwargs.copy()
- try:
- driver = _db_from_url(url, db_kwargs, connect_timeout=connect_timeout, allow_retry=False,
- allow_pooling=False)
- driver.connect()
- pre_condition_check = PRECONDITION_VALIDATION.get(make_url(url).drivername)
- if pre_condition_check:
- pre_condition_check(driver)
-
- finally:
- try:
- driver.close()
- except:
- pass
-
-
-def _wrap_for_retry(driver):
- return type('Retrying' + driver.__name__, (RetryOperationalError, driver), {})
-
-
-def _db_from_url(url, db_kwargs, connect_timeout=DEFAULT_DB_CONNECT_TIMEOUT,
- allow_pooling=True, allow_retry=True):
+def _db_from_url(url, db_kwargs):
parsed_url = make_url(url)
if parsed_url.host:
@@ -329,43 +142,14 @@ def _db_from_url(url, db_kwargs, connect_timeout=DEFAULT_DB_CONNECT_TIMEOUT,
if parsed_url.password:
db_kwargs['password'] = parsed_url.password
- # Remove threadlocals. It used to be required.
- db_kwargs.pop('threadlocals', None)
-
# Note: sqlite does not support connect_timeout.
- if parsed_url.drivername != 'sqlite':
- db_kwargs['connect_timeout'] = db_kwargs.get('connect_timeout', connect_timeout)
+ if parsed_url.drivername == 'sqlite' and 'connect_timeout' in db_kwargs:
+ del db_kwargs['connect_timeout']
- drivers = _SCHEME_DRIVERS[parsed_url.drivername]
- driver = drivers.driver
- if allow_pooling and os.getenv('DB_CONNECTION_POOLING', 'false').lower() == 'true':
- driver = drivers.pooled_driver
- db_kwargs['stale_timeout'] = db_kwargs.get('stale_timeout', None)
- db_kwargs['max_connections'] = db_kwargs.get('max_connections', None)
- logger.info('Connection pooling enabled for %s; stale timeout: %s; max connection count: %s',
- parsed_url.drivername, db_kwargs['stale_timeout'], db_kwargs['max_connections'])
- else:
- logger.info('Connection pooling disabled for %s', parsed_url.drivername)
- db_kwargs.pop('stale_timeout', None)
- db_kwargs.pop('max_connections', None)
-
- for key, value in _EXTRA_ARGS.get(parsed_url.drivername, {}).iteritems():
- if key not in db_kwargs:
- db_kwargs[key] = value
-
- if allow_retry:
- driver = _wrap_for_retry(driver)
-
- created = driver(parsed_url.database, **db_kwargs)
-
- # Revert the behavior "fixed" in:
- # https://github.com/coleifer/peewee/commit/36bd887ac07647c60dfebe610b34efabec675706
- if parsed_url.drivername.find("mysql") >= 0:
- created.compound_select_parentheses = 0
- return created
+ return SCHEME_DRIVERS[parsed_url.drivername](parsed_url.database, **db_kwargs)
-def configure(config_object, testing=False):
+def configure(config_object):
logger.debug('Configuring database')
db_kwargs = dict(config_object['DB_CONNECTION_ARGS'])
write_db_uri = config_object['DB_URI']
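
Old and new _db_from_url share the same shape: parse the URI with sqlalchemy's make_url, copy host/port/user/password into the driver kwargs, special-case sqlite (which takes no connect_timeout), and instantiate the driver selected by drivername. A trimmed sketch of that dispatch, with driver classes replaced by labels so it runs without peewee; the URI is an example value:

from sqlalchemy.engine.url import make_url

SCHEME_DRIVERS = {'mysql': 'MySQLDatabase', 'sqlite': 'SqliteDatabase',
                  'postgresql': 'PostgresqlDatabase'}

def db_from_url(url, db_kwargs=None, connect_timeout=10):
    db_kwargs = dict(db_kwargs or {})
    parsed = make_url(url)
    if parsed.host:
        db_kwargs['host'] = parsed.host
    if parsed.port:
        db_kwargs['port'] = parsed.port
    if parsed.username:
        db_kwargs['user'] = parsed.username
    if parsed.password:
        db_kwargs['password'] = parsed.password
    if parsed.drivername != 'sqlite':  # sqlite has no network connect timeout
        db_kwargs.setdefault('connect_timeout', connect_timeout)
    return SCHEME_DRIVERS[parsed.drivername], parsed.database, db_kwargs

driver, database, kwargs = db_from_url('postgresql://quay:secret@db.example.com:5432/registry')
assert driver == 'PostgresqlDatabase'
assert database == 'registry'
assert kwargs['host'] == 'db.example.com' and kwargs['connect_timeout'] == 10
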
@@ -373,35 +157,13 @@ def configure(config_object, testing=False):
parsed_write_uri = make_url(write_db_uri)
db_random_func.initialize(SCHEME_RANDOM_FUNCTION[parsed_write_uri.drivername])
- db_match_func.initialize(SCHEME_MATCH_FUNCTION[parsed_write_uri.drivername])
db_for_update.initialize(SCHEME_SPECIALIZED_FOR_UPDATE.get(parsed_write_uri.drivername,
real_for_update))
- db_concat_func.initialize(SCHEME_SPECIALIZED_CONCAT.get(parsed_write_uri.drivername,
- function_concat))
- db_encrypter.initialize(FieldEncrypter(config_object.get('DATABASE_SECRET_KEY')))
- read_replicas = config_object.get('DB_READ_REPLICAS', None)
- is_read_only = config_object.get('REGISTRY_STATE', 'normal') == 'readonly'
+ read_slave_uri = config_object.get('DB_READ_SLAVE_URI', None)
+ if read_slave_uri is not None:
+ read_slave.initialize(_db_from_url(read_slave_uri, db_kwargs))
- read_replica_dbs = []
- if read_replicas:
- read_replica_dbs = [_db_from_url(config['DB_URI'], db_kwargs) for config in read_replicas]
-
- read_only_config.initialize(ReadOnlyConfig(is_read_only, read_replica_dbs))
-
- def _db_transaction():
- return config_object['DB_TRANSACTION_FACTORY'](db)
-
- @contextmanager
- def _ensure_under_transaction():
- if not testing and not config_object['TESTING']:
- if db.transaction_depth() == 0:
- raise Exception('Expected to be under a transaction')
-
- yield
-
- db_transaction.initialize(_db_transaction)
- ensure_under_transaction.initialize(_ensure_under_transaction)
def random_string_generator(length=16):
def random_string():
@@ -416,94 +178,32 @@ def uuid_generator():
get_epoch_timestamp = lambda: int(time.time())
-get_epoch_timestamp_ms = lambda: int(time.time() * 1000)
def close_db_filter(_):
- if db.obj is not None and not db.is_closed():
+ if not db.is_closed():
logger.debug('Disconnecting from database.')
db.close()
- if read_only_config.obj is not None:
- for read_replica in read_only_config.obj.read_replicas:
- if not read_replica.is_closed():
- logger.debug('Disconnecting from read replica.')
- read_replica.close()
+ if read_slave.obj is not None and not read_slave.is_closed():
+ logger.debug('Disconnecting from read slave.')
+ read_slave.close()
class QuayUserField(ForeignKeyField):
def __init__(self, allows_robots=False, robot_null_delete=False, *args, **kwargs):
self.allows_robots = allows_robots
self.robot_null_delete = robot_null_delete
- if 'model' not in kwargs:
- kwargs['model'] = User
+ if not 'rel_model' in kwargs:
+ kwargs['rel_model'] = User
+
super(QuayUserField, self).__init__(*args, **kwargs)
-@lru_cache(maxsize=16)
-def _get_enum_field_values(enum_field):
- values = []
- for row in enum_field.rel_model.select():
- key = getattr(row, enum_field.enum_key_field)
- value = getattr(row, 'id')
- values.append((key, value))
- return Enum(enum_field.rel_model.__name__, values)
-
-
-class EnumField(ForeignKeyField):
- """ Create a cached python Enum from an EnumTable """
- def __init__(self, model, enum_key_field='name', *args, **kwargs):
- """
- model is the EnumTable model-class (see ForeignKeyField)
- enum_key_field is the field from the EnumTable to use as the enum name
- """
- self.enum_key_field = enum_key_field
- super(EnumField, self).__init__(model, *args, **kwargs)
-
- @property
- def enum(self):
- """ Returns a python enun.Enum generated from the associated EnumTable """
- return _get_enum_field_values(self)
-
- def get_id(self, name):
- """ Returns the ForeignKeyId from the name field
- Example:
- >>> Repository.repo_kind.get_id("application")
- 2
- """
- try:
- return self.enum[name].value
- except KeyError:
- raise self.rel_model.DoesNotExist
-
- def get_name(self, value):
- """ Returns the name value from the ForeignKeyId
- Example:
- >>> Repository.repo_kind.get_name(2)
- "application"
- """
- try:
- return self.enum(value).name
- except ValueError:
- raise self.rel_model.DoesNotExist
-
-
-def deprecated_field(field, flag):
- """ Marks a field as deprecated and removes it from the peewee model if the
- flag is not set. A flag is defined in the active_migration module and will
- be associated with one or more migration phases.
- """
- if ActiveDataMigration.has_flag(flag):
- return field
-
- return None
-
-
-class BaseModel(ReadReplicaSupportedModel):
+class BaseModel(ReadSlaveModel):
class Meta:
database = db
- encrypter = db_encrypter
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
def __getattribute__(self, name):
""" Adds _id accessors so that foreign key field IDs can be looked up without making
@@ -512,13 +212,13 @@ class BaseModel(ReadReplicaSupportedModel):
if name.endswith('_id'):
field_name = name[0:len(name) - 3]
if field_name in self._meta.fields:
- return self.__data__.get(field_name)
+ return self._data.get(field_name)
return super(BaseModel, self).__getattribute__(name)
class User(BaseModel):
- uuid = CharField(default=uuid_generator, max_length=36, null=True, index=True)
+ uuid = CharField(default=uuid_generator, max_length=36, null=True)
username = CharField(unique=True, index=True)
password_hash = CharField(null=True)
email = CharField(unique=True, index=True,
@@ -532,16 +232,6 @@ class User(BaseModel):
last_invalid_login = DateTimeField(default=datetime.utcnow)
removed_tag_expiration_s = IntegerField(default=1209600) # Two weeks
enabled = BooleanField(default=True)
- invoice_email_address = CharField(null=True, index=True)
-
- given_name = CharField(null=True)
- family_name = CharField(null=True)
- company = CharField(null=True)
- location = CharField(null=True)
-
- maximum_queued_builds_count = IntegerField(null=True)
- creation_date = DateTimeField(default=datetime.utcnow, null=True)
- last_accessed = DateTimeField(null=True, index=True)
def delete_instance(self, recursive=False, delete_nullable=False):
# If we are deleting a robot account, only execute the subset of queries necessary.
@@ -549,94 +239,22 @@ class User(BaseModel):
# For all the model dependencies, only delete those that allow robots.
for query, fk in reversed(list(self.dependencies(search_nullable=True))):
if isinstance(fk, QuayUserField) and fk.allows_robots:
- _model = fk.model
+ model = fk.model_class
if fk.robot_null_delete:
- _model.update(**{fk.name: None}).where(query).execute()
+ model.update(**{fk.name: None}).where(query).execute()
else:
- _model.delete().where(query).execute()
+ model.delete().where(query).execute()
# Delete the instance itself.
super(User, self).delete_instance(recursive=False, delete_nullable=False)
else:
- if not recursive:
- raise RuntimeError('Non-recursive delete on user.')
-
- # These models don't need to use transitive deletes, because the referenced objects
- # are cleaned up directly in the model.
- skip_transitive_deletes = {Image, Repository, Team, RepositoryBuild, ServiceKeyApproval,
- RepositoryBuildTrigger, ServiceKey, RepositoryPermission,
- TeamMemberInvite, Star, RepositoryAuthorizedEmail, TeamMember,
- RepositoryTag, PermissionPrototype, DerivedStorageForImage,
- TagManifest, AccessToken, OAuthAccessToken, BlobUpload,
- RepositoryNotification, OAuthAuthorizationCode,
- RepositoryActionCount, TagManifestLabel,
- TeamSync, RepositorySearchScore,
- DeletedNamespace, RepoMirrorRule,
- NamespaceGeoRestriction} | appr_classes | v22_classes | transition_classes
- delete_instance_filtered(self, User, delete_nullable, skip_transitive_deletes)
+ super(User, self).delete_instance(recursive=recursive, delete_nullable=delete_nullable)
Namespace = User.alias()
-class RobotAccountMetadata(BaseModel):
- robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
- description = CharField()
- unstructured_json = JSONField()
-
-
-class RobotAccountToken(BaseModel):
- robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
- token = EncryptedCharField(default_token_length=64)
- fully_migrated = BooleanField(default=False)
-
-
-class DeletedNamespace(BaseModel):
- namespace = QuayUserField(index=True, allows_robots=False, unique=True)
- marked = DateTimeField(default=datetime.now)
- original_username = CharField(index=True)
- original_email = CharField(index=True)
- queue_id = CharField(null=True, index=True)
-
-
-class NamespaceGeoRestriction(BaseModel):
- namespace = QuayUserField(index=True, allows_robots=False)
- added = DateTimeField(default=datetime.utcnow)
- description = CharField()
- unstructured_json = JSONField()
- restricted_region_iso_code = CharField(index=True)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('namespace', 'restricted_region_iso_code'), True),
- )
-
-
-class UserPromptTypes(object):
- CONFIRM_USERNAME = 'confirm_username'
- ENTER_NAME = 'enter_name'
- ENTER_COMPANY = 'enter_company'
-
-
-class UserPromptKind(BaseModel):
- name = CharField(index=True)
-
-
-class UserPrompt(BaseModel):
- user = QuayUserField(allows_robots=False, index=True)
- kind = ForeignKeyField(UserPromptKind)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('user', 'kind'), True),
- )
-
-
class TeamRole(BaseModel):
name = CharField(index=True)
@@ -644,12 +262,12 @@ class TeamRole(BaseModel):
class Team(BaseModel):
name = CharField(index=True)
organization = QuayUserField(index=True)
- role = EnumField(TeamRole)
+ role = ForeignKeyField(TeamRole)
description = TextField(default='')
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# A team name must be unique within an organization
(('name', 'organization'), True),
@@ -658,11 +276,11 @@ class Team(BaseModel):
class TeamMember(BaseModel):
user = QuayUserField(allows_robots=True, index=True)
- team = ForeignKeyField(Team)
+ team = ForeignKeyField(Team, index=True)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# A user may belong to a team only once
(('user', 'team'), True),
@@ -673,8 +291,8 @@ class TeamMemberInvite(BaseModel):
# Note: Either user OR email will be filled in, but not both.
user = QuayUserField(index=True, null=True)
email = CharField(null=True)
- team = ForeignKeyField(Team)
- inviter = ForeignKeyField(User, backref='inviter')
+ team = ForeignKeyField(Team, index=True)
+ inviter = ForeignKeyField(User, related_name='inviter')
invite_token = CharField(default=urn_generator(['teaminvite']))
@@ -682,24 +300,15 @@ class LoginService(BaseModel):
name = CharField(unique=True, index=True)
-class TeamSync(BaseModel):
- team = ForeignKeyField(Team, unique=True)
-
- transaction_id = CharField()
- last_updated = DateTimeField(null=True, index=True)
- service = ForeignKeyField(LoginService)
- config = JSONField()
-
-
class FederatedLogin(BaseModel):
user = QuayUserField(allows_robots=True, index=True)
- service = ForeignKeyField(LoginService)
+ service = ForeignKeyField(LoginService, index=True)
service_ident = CharField()
metadata_json = TextField(default='{}')
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# create a unique index on service and the local service id
(('service', 'service_ident'), True),
@@ -713,36 +322,16 @@ class Visibility(BaseModel):
name = CharField(index=True, unique=True)
-class RepositoryKind(BaseModel):
- name = CharField(index=True, unique=True)
-
-
-@unique
-class RepositoryState(IntEnum):
- """
- Possible states of a repository.
- NORMAL: Regular repo where all actions are possible
- READ_ONLY: Only read actions, such as pull, are allowed regardless of specific user permissions
- MIRROR: Equivalent to READ_ONLY except that mirror robot has write permission
- """
- NORMAL = 0
- READ_ONLY = 1
- MIRROR = 2
-
-
class Repository(BaseModel):
namespace_user = QuayUserField(null=True)
- name = FullIndexedCharField(match_function=db_match_func)
- visibility = EnumField(Visibility)
- description = FullIndexedTextField(match_function=db_match_func, null=True)
+ name = CharField()
+ visibility = ForeignKeyField(Visibility)
+ description = TextField(null=True)
badge_token = CharField(default=uuid_generator)
- kind = EnumField(RepositoryKind)
- trust_enabled = BooleanField(default=False)
- state = ClientEnumField(RepositoryState, default=RepositoryState.NORMAL)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# create a unique index on namespace and name
(('namespace_user', 'name'), True),
@@ -754,28 +343,53 @@ class Repository(BaseModel):
# These models don't need to use transitive deletes, because the referenced objects
# are cleaned up directly
- skip_transitive_deletes = ({RepositoryTag, RepositoryBuild, RepositoryBuildTrigger, BlobUpload,
- Image, TagManifest, TagManifestLabel, Label, DerivedStorageForImage,
- RepositorySearchScore, RepoMirrorConfig, RepoMirrorRule}
- | appr_classes | v22_classes | transition_classes)
+ skip_transitive_deletes = {RepositoryTag, RepositoryBuild, RepositoryBuildTrigger}
- delete_instance_filtered(self, Repository, delete_nullable, skip_transitive_deletes)
+ # We need to sort the ops so that models get cleaned in order of their dependencies
+ ops = reversed(list(self.dependencies(delete_nullable)))
+ filtered_ops = []
+ dependencies = defaultdict(set)
-class RepositorySearchScore(BaseModel):
- repository = ForeignKeyField(Repository, unique=True)
- score = BigIntegerField(index=True, default=0)
- last_updated = DateTimeField(null=True)
+ for query, fk in ops:
+ # We only want to skip transitive deletes, which are done using subqueries in the form of
+ # DELETE FROM <table> in <subquery>. If an op is not using a subquery, we allow it to be
+ # applied directly.
+ if fk.model_class not in skip_transitive_deletes or query.op != 'in':
+ filtered_ops.append((query, fk))
+ if query.op == 'in':
+ dependencies[fk.model_class.__name__].add(query.rhs.model_class.__name__)
+ elif query.op == '=':
+ dependencies[fk.model_class.__name__].add(Repository.__name__)
+ else:
+ raise RuntimeError('Unknown operator in recursive repository delete query')
+
+ sorted_models = list(reversed(toposort.toposort_flatten(dependencies)))
+ def sorted_model_key(query_fk_tuple):
+ cmp_query, cmp_fk = query_fk_tuple
+ if cmp_query.op == 'in':
+ return -1
+ return sorted_models.index(cmp_fk.model_class.__name__)
+ filtered_ops.sort(key=sorted_model_key)
+
+ for query, fk in filtered_ops:
+ model = fk.model_class
+ if fk.null and not delete_nullable:
+ model.update(**{fk.name: None}).where(query).execute()
+ else:
+ model.delete().where(query).execute()
+
+ return self.delete().where(self._pk_expr()).execute()
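A minimal sketch of the ordering step restored above, assuming only the third-party toposort package this code already uses; the dependency map here is illustrative, while the real one is built from peewee's recursive-delete (query, fk) pairs.

from collections import defaultdict

import toposort

# Each model name maps to the set of models its delete query references.
dependencies = defaultdict(set)
dependencies['RepositoryBuildTrigger'].add('RepositoryBuild')
dependencies['RepositoryBuild'].add('Repository')
dependencies['RepositoryTag'].add('Repository')

# Flattening puts Repository first; reversing deletes the most dependent models
# before the rows they still point at.
sorted_models = list(reversed(toposort.toposort_flatten(dependencies)))
print(sorted_models)
# ['RepositoryBuildTrigger', 'RepositoryTag', 'RepositoryBuild', 'Repository']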
class Star(BaseModel):
- user = ForeignKeyField(User)
- repository = ForeignKeyField(Repository)
+ user = ForeignKeyField(User, index=True)
+ repository = ForeignKeyField(Repository, index=True)
created = DateTimeField(default=datetime.now)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# create a unique index on user and repository
(('user', 'repository'), True),
@@ -787,14 +401,14 @@ class Role(BaseModel):
class RepositoryPermission(BaseModel):
- team = ForeignKeyField(Team, null=True)
- user = QuayUserField(allows_robots=True, null=True)
- repository = ForeignKeyField(Repository)
+ team = ForeignKeyField(Team, index=True, null=True)
+ user = QuayUserField(allows_robots=True, index=True, null=True)
+ repository = ForeignKeyField(Repository, index=True)
role = ForeignKeyField(Role)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
(('team', 'repository'), True),
(('user', 'repository'), True),
@@ -802,19 +416,19 @@ class RepositoryPermission(BaseModel):
class PermissionPrototype(BaseModel):
- org = QuayUserField(index=True, backref='orgpermissionproto')
- uuid = CharField(default=uuid_generator, index=True)
+ org = QuayUserField(index=True, related_name='orgpermissionproto')
+ uuid = CharField(default=uuid_generator)
activating_user = QuayUserField(allows_robots=True, index=True, null=True,
- backref='userpermissionproto')
- delegate_user = QuayUserField(allows_robots=True, backref='receivingpermission',
+ related_name='userpermissionproto')
+ delegate_user = QuayUserField(allows_robots=True, related_name='receivingpermission',
null=True)
- delegate_team = ForeignKeyField(Team, backref='receivingpermission',
+ delegate_team = ForeignKeyField(Team, related_name='receivingpermission',
null=True)
role = ForeignKeyField(Role)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
(('org', 'activating_user'), False),
)
@@ -826,65 +440,34 @@ class AccessTokenKind(BaseModel):
class AccessToken(BaseModel):
friendly_name = CharField(null=True)
-
- # TODO(remove-unenc): This field is deprecated and should be removed soon.
- code = deprecated_field(
- CharField(default=random_string_generator(length=64), unique=True, index=True, null=True),
- ERTMigrationFlags.WRITE_OLD_FIELDS)
-
- token_name = CharField(default=random_string_generator(length=32), unique=True, index=True)
- token_code = EncryptedCharField(default_token_length=32)
-
+ code = CharField(default=random_string_generator(length=64), unique=True,
+ index=True)
repository = ForeignKeyField(Repository)
created = DateTimeField(default=datetime.now)
role = ForeignKeyField(Role)
temporary = BooleanField(default=True)
kind = ForeignKeyField(AccessTokenKind, null=True)
- def get_code(self):
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- return self.code
- else:
- return self.token_name + self.token_code.decrypt()
-
class BuildTriggerService(BaseModel):
name = CharField(index=True, unique=True)
-class DisableReason(BaseModel):
- name = CharField(index=True, unique=True)
-
-
class RepositoryBuildTrigger(BaseModel):
- uuid = CharField(default=uuid_generator, index=True)
- service = ForeignKeyField(BuildTriggerService)
- repository = ForeignKeyField(Repository)
+ uuid = CharField(default=uuid_generator)
+ service = ForeignKeyField(BuildTriggerService, index=True)
+ repository = ForeignKeyField(Repository, index=True)
connected_user = QuayUserField()
-
- # TODO(remove-unenc): These fields are deprecated and should be removed soon.
- auth_token = deprecated_field(CharField(null=True), ERTMigrationFlags.WRITE_OLD_FIELDS)
- private_key = deprecated_field(TextField(null=True), ERTMigrationFlags.WRITE_OLD_FIELDS)
-
- secure_auth_token = EncryptedCharField(null=True)
- secure_private_key = EncryptedTextField(null=True)
- fully_migrated = BooleanField(default=False)
-
+ auth_token = CharField(null=True)
+ private_key = TextField(null=True)
config = TextField(default='{}')
write_token = ForeignKeyField(AccessToken, null=True)
- pull_robot = QuayUserField(allows_robots=True, null=True, backref='triggerpullrobot',
+ pull_robot = QuayUserField(allows_robots=True, null=True, related_name='triggerpullrobot',
robot_null_delete=True)
- enabled = BooleanField(default=True)
- disabled_reason = EnumField(DisableReason, null=True)
- disabled_datetime = DateTimeField(default=datetime.utcnow, null=True, index=True)
- successive_failure_count = IntegerField(default=0)
- successive_internal_error_count = IntegerField(default=0)
-
class EmailConfirmation(BaseModel):
code = CharField(default=random_string_generator(), unique=True, index=True)
- verification_code = CredentialField(null=True)
user = QuayUserField()
pw_reset = BooleanField(default=False)
new_email = CharField(null=True)
@@ -894,11 +477,14 @@ class EmailConfirmation(BaseModel):
class ImageStorage(BaseModel):
uuid = CharField(default=uuid_generator, index=True, unique=True)
+ checksum = CharField(null=True)
+ created = DateTimeField(null=True)
+ comment = TextField(null=True)
+ command = TextField(null=True)
image_size = BigIntegerField(null=True)
uncompressed_size = BigIntegerField(null=True)
+ aggregate_size = BigIntegerField(null=True)
uploading = BooleanField(default=True, null=True)
- cas_path = BooleanField(default=True)
- content_checksum = CharField(null=True, index=True)
class ImageStorageTransformation(BaseModel):
@@ -910,19 +496,32 @@ class ImageStorageSignatureKind(BaseModel):
class ImageStorageSignature(BaseModel):
- storage = ForeignKeyField(ImageStorage)
+ storage = ForeignKeyField(ImageStorage, index=True)
kind = ForeignKeyField(ImageStorageSignatureKind)
signature = TextField(null=True)
uploading = BooleanField(default=True, null=True)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
(('kind', 'storage'), True),
)
+class DerivedImageStorage(BaseModel):
+ source = ForeignKeyField(ImageStorage, null=True, related_name='source')
+ derivative = ForeignKeyField(ImageStorage, related_name='derivative')
+ transformation = ForeignKeyField(ImageStorageTransformation)
+
+ class Meta:
+ database = db
+ read_slaves = (read_slave,)
+ indexes = (
+ (('source', 'transformation'), True),
+ )
+
+
class ImageStorageLocation(BaseModel):
name = CharField(unique=True, index=True)
@@ -933,22 +532,13 @@ class ImageStoragePlacement(BaseModel):
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# An image can only be placed in the same place once
(('storage', 'location'), True),
)
-class UserRegion(BaseModel):
- user = QuayUserField(index=True, allows_robots=False)
- location = ForeignKeyField(ImageStorageLocation)
-
- indexes = (
- (('user', 'location'), True),
- )
-
-
class Image(BaseModel):
# This class is intentionally denormalized. Even though images are supposed
# to be globally unique we can't treat them as such for permissions and
@@ -960,49 +550,14 @@ class Image(BaseModel):
# '/' separated list of ancestor ids, e.g. /1/2/6/7/10/
ancestors = CharField(index=True, default='/', max_length=64535, null=True)
- storage = ForeignKeyField(ImageStorage, null=True)
-
- created = DateTimeField(null=True)
- comment = TextField(null=True)
- command = TextField(null=True)
- aggregate_size = BigIntegerField(null=True)
- v1_json_metadata = TextField(null=True)
- v1_checksum = CharField(null=True)
-
- security_indexed = BooleanField(default=False, index=True)
- security_indexed_engine = IntegerField(default=IMAGE_NOT_SCANNED_ENGINE_VERSION, index=True)
-
- # We use a proxy here instead of 'self' in order to disable the foreign key constraint
- parent = DeferredForeignKey('Image', null=True, backref='children')
+ storage = ForeignKeyField(ImageStorage, index=True, null=True)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# we don't really want duplicates
(('repository', 'docker_image_id'), True),
-
- (('security_indexed_engine', 'security_indexed'), False),
- )
-
- def ancestor_id_list(self):
- """ Returns an integer list of ancestor ids, ordered chronologically from
- root to direct parent.
- """
- return map(int, self.ancestors.split('/')[1:-1])
-
-
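For reference, a tiny illustration of the '/'-separated ancestors encoding documented above; the value is hypothetical.

ancestors = '/1/2/6/7/10/'
ancestor_ids = list(map(int, ancestors.split('/')[1:-1]))   # the leading/trailing slashes yield empty segments that the slice drops
assert ancestor_ids == [1, 2, 6, 7, 10]                     # root first, direct parent last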
-class DerivedStorageForImage(BaseModel):
- source_image = ForeignKeyField(Image)
- derivative = ForeignKeyField(ImageStorage)
- transformation = ForeignKeyField(ImageStorageTransformation)
- uniqueness_hash = CharField(null=True)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('source_image', 'transformation', 'uniqueness_hash'), True),
)
@@ -1017,17 +572,21 @@ class RepositoryTag(BaseModel):
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
(('repository', 'name'), False),
- (('repository', 'lifetime_start_ts'), False),
- (('repository', 'lifetime_end_ts'), False),
# This unique index prevents deadlocks when concurrently moving and deleting tags
(('repository', 'name', 'lifetime_end_ts'), True),
)
+class TagManifest(BaseModel):
+ tag = ForeignKeyField(RepositoryTag, index=True, unique=True)
+ digest = CharField(index=True, unique=True)
+ json_data = TextField()
+
+
class BUILD_PHASE(object):
""" Build phases enum """
ERROR = 'error'
@@ -1039,147 +598,67 @@ class BUILD_PHASE(object):
PUSHING = 'pushing'
WAITING = 'waiting'
COMPLETE = 'complete'
- CANCELLED = 'cancelled'
-
- @classmethod
- def is_terminal_phase(cls, phase):
- return (phase == cls.COMPLETE or
- phase == cls.ERROR or
- phase == cls.INTERNAL_ERROR or
- phase == cls.CANCELLED)
-
-
-class TRIGGER_DISABLE_REASON(object):
- """ Build trigger disable reason enum """
- BUILD_FALURES = 'successive_build_failures'
- INTERNAL_ERRORS = 'successive_build_internal_errors'
- USER_TOGGLED = 'user_toggled'
class QueueItem(BaseModel):
queue_name = CharField(index=True, max_length=1024)
body = TextField()
- available_after = DateTimeField(default=datetime.utcnow)
- available = BooleanField(default=True)
- processing_expires = DateTimeField(null=True)
- retries_remaining = IntegerField(default=5)
- state_id = CharField(default=uuid_generator, index=True, unique=True)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- only_save_dirty = True
- indexes = (
- (('processing_expires', 'available'), False),
- (('processing_expires', 'queue_name', 'available'), False),
- (('processing_expires', 'available_after', 'retries_remaining', 'available'), False),
- (('processing_expires', 'available_after', 'queue_name', 'retries_remaining', 'available'), False),
- )
-
- def save(self, *args, **kwargs):
- # Always change the queue item's state ID when we update it.
- self.state_id = str(uuid.uuid4())
- super(QueueItem, self).save(*args, **kwargs)
+ available_after = DateTimeField(default=datetime.utcnow, index=True)
+ available = BooleanField(default=True, index=True)
+ processing_expires = DateTimeField(null=True, index=True)
+ retries_remaining = IntegerField(default=5, index=True)
class RepositoryBuild(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
- repository = ForeignKeyField(Repository)
+ repository = ForeignKeyField(Repository, index=True)
access_token = ForeignKeyField(AccessToken)
resource_key = CharField(index=True, null=True)
job_config = TextField()
phase = CharField(default=BUILD_PHASE.WAITING)
- started = DateTimeField(default=datetime.now, index=True)
+ started = DateTimeField(default=datetime.now)
display_name = CharField()
- trigger = ForeignKeyField(RepositoryBuildTrigger, null=True)
- pull_robot = QuayUserField(null=True, backref='buildpullrobot', allows_robots=True,
+ trigger = ForeignKeyField(RepositoryBuildTrigger, null=True, index=True)
+ pull_robot = QuayUserField(null=True, related_name='buildpullrobot', allows_robots=True,
robot_null_delete=True)
- logs_archived = BooleanField(default=False, index=True)
+ logs_archived = BooleanField(default=False)
queue_id = CharField(null=True, index=True)
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('repository', 'started', 'phase'), False),
- (('started', 'logs_archived', 'phase'), False),
- )
-
class LogEntryKind(BaseModel):
name = CharField(index=True, unique=True)
class LogEntry(BaseModel):
- id = BigAutoField()
- kind = ForeignKeyField(LogEntryKind)
- account = IntegerField(index=True, column_name='account_id')
- performer = IntegerField(index=True, null=True, column_name='performer_id')
- repository = IntegerField(index=True, null=True, column_name='repository_id')
+ kind = ForeignKeyField(LogEntryKind, index=True)
+ account = QuayUserField(index=True, related_name='account')
+ performer = QuayUserField(allows_robots=True, index=True, null=True,
+ related_name='performer', robot_null_delete=True)
+ repository = ForeignKeyField(Repository, index=True, null=True)
datetime = DateTimeField(default=datetime.now, index=True)
ip = CharField(null=True)
metadata_json = TextField(default='{}')
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
- (('account', 'datetime'), False),
- (('performer', 'datetime'), False),
+ # create an index on repository and date
(('repository', 'datetime'), False),
- (('repository', 'datetime', 'kind'), False),
- )
-
-class LogEntry2(BaseModel):
- """ TEMP FOR QUAY.IO ONLY. DO NOT RELEASE INTO QUAY ENTERPRISE. """
- kind = ForeignKeyField(LogEntryKind)
- account = IntegerField(index=True, db_column='account_id')
- performer = IntegerField(index=True, null=True, db_column='performer_id')
- repository = IntegerField(index=True, null=True, db_column='repository_id')
- datetime = DateTimeField(default=datetime.now, index=True)
- ip = CharField(null=True)
- metadata_json = TextField(default='{}')
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('account', 'datetime'), False),
- (('performer', 'datetime'), False),
- (('repository', 'datetime'), False),
- (('repository', 'datetime', 'kind'), False),
- )
-
-
-class LogEntry3(BaseModel):
- id = BigAutoField()
- kind = IntegerField(db_column='kind_id')
- account = IntegerField(db_column='account_id')
- performer = IntegerField(null=True, db_column='performer_id')
- repository = IntegerField(null=True, db_column='repository_id')
- datetime = DateTimeField(default=datetime.now, index=True)
- ip = CharField(null=True)
- metadata_json = TextField(default='{}')
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('account', 'datetime'), False),
- (('performer', 'datetime'), False),
+ # create an index on repository, date and kind
(('repository', 'datetime', 'kind'), False),
)
class RepositoryActionCount(BaseModel):
- repository = ForeignKeyField(Repository)
+ repository = ForeignKeyField(Repository, index=True)
count = IntegerField()
date = DateField(index=True)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# create a unique index on repository and date
(('repository', 'date'), True),
@@ -1188,34 +667,19 @@ class RepositoryActionCount(BaseModel):
class OAuthApplication(BaseModel):
client_id = CharField(index=True, default=random_string_generator(length=20))
- secure_client_secret = EncryptedCharField(default_token_length=40, null=True)
- fully_migrated = BooleanField(default=False)
-
- # TODO(remove-unenc): This field is deprecated and should be removed soon.
- client_secret = deprecated_field(
- CharField(default=random_string_generator(length=40), null=True),
- ERTMigrationFlags.WRITE_OLD_FIELDS)
-
+ client_secret = CharField(default=random_string_generator(length=40))
redirect_uri = CharField()
application_uri = CharField()
organization = QuayUserField()
name = CharField()
description = TextField(default='')
- avatar_email = CharField(null=True, column_name='gravatar_email')
+ avatar_email = CharField(null=True, db_column='gravatar_email')
class OAuthAuthorizationCode(BaseModel):
application = ForeignKeyField(OAuthApplication)
-
- # TODO(remove-unenc): This field is deprecated and should be removed soon.
- code = deprecated_field(
- CharField(index=True, unique=True, null=True),
- ERTMigrationFlags.WRITE_OLD_FIELDS)
-
- code_name = CharField(index=True, unique=True)
- code_credential = CredentialField()
-
+ code = CharField(index=True)
scope = CharField()
data = TextField() # Context for the code, such as the user
@@ -1225,16 +689,10 @@ class OAuthAccessToken(BaseModel):
application = ForeignKeyField(OAuthApplication)
authorized_user = QuayUserField()
scope = CharField()
- token_name = CharField(index=True, unique=True)
- token_code = CredentialField()
-
- # TODO(remove-unenc): This field is deprecated and should be removed soon.
- access_token = deprecated_field(
- CharField(index=True, null=True),
- ERTMigrationFlags.WRITE_OLD_FIELDS)
-
+ access_token = CharField(index=True)
token_type = CharField(default='Bearer')
expires_at = DateTimeField()
+ refresh_token = CharField(index=True, null=True)
data = TextField() # This is context for which this token was generated, such as the user
@@ -1244,12 +702,11 @@ class NotificationKind(BaseModel):
class Notification(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
- kind = ForeignKeyField(NotificationKind)
- target = QuayUserField(index=True, allows_robots=True)
+ kind = ForeignKeyField(NotificationKind, index=True)
+ target = QuayUserField(index=True)
metadata_json = TextField(default='{}')
created = DateTimeField(default=datetime.now, index=True)
dismissed = BooleanField(default=False)
- lookup_path = CharField(null=True, index=True)
class ExternalNotificationEvent(BaseModel):
@@ -1262,532 +719,35 @@ class ExternalNotificationMethod(BaseModel):
class RepositoryNotification(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
- repository = ForeignKeyField(Repository)
+ repository = ForeignKeyField(Repository, index=True)
event = ForeignKeyField(ExternalNotificationEvent)
method = ForeignKeyField(ExternalNotificationMethod)
- title = CharField(null=True)
config_json = TextField()
- event_config_json = TextField(default='{}')
- number_of_failures = IntegerField(default=0)
class RepositoryAuthorizedEmail(BaseModel):
- repository = ForeignKeyField(Repository)
+ repository = ForeignKeyField(Repository, index=True)
email = CharField()
code = CharField(default=random_string_generator(), unique=True, index=True)
confirmed = BooleanField(default=False)
class Meta:
database = db
- read_only_config = read_only_config
+ read_slaves = (read_slave,)
indexes = (
# create a unique index on email and repository
(('email', 'repository'), True),
)
-class BlobUpload(BaseModel):
- repository = ForeignKeyField(Repository)
- uuid = CharField(index=True, unique=True)
- byte_count = BigIntegerField(default=0)
- sha_state = ResumableSHA256Field(null=True, default=resumablehashlib.sha256)
- location = ForeignKeyField(ImageStorageLocation)
- storage_metadata = JSONField(null=True, default={})
- chunk_count = IntegerField(default=0)
- uncompressed_byte_count = BigIntegerField(null=True)
- created = DateTimeField(default=datetime.now, index=True)
- piece_sha_state = ResumableSHA1Field(null=True)
- piece_hashes = Base64BinaryField(null=True)
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- # create a unique index on email and repository
- (('repository', 'uuid'), True),
- )
-
-
-class QuayService(BaseModel):
- name = CharField(index=True, unique=True)
-
-
-class QuayRegion(BaseModel):
- name = CharField(index=True, unique=True)
-
-
-class QuayRelease(BaseModel):
- service = ForeignKeyField(QuayService)
- version = CharField()
- region = ForeignKeyField(QuayRegion)
- reverted = BooleanField(default=False)
- created = DateTimeField(default=datetime.now, index=True)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- # unique release per region
- (('service', 'version', 'region'), True),
-
- # get recent releases
- (('service', 'region', 'created'), False),
- )
-
-
-class TorrentInfo(BaseModel):
- storage = ForeignKeyField(ImageStorage)
- piece_length = IntegerField()
- pieces = Base64BinaryField()
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- # we may want to compute the piece hashes multiple times with different piece lengths
- (('storage', 'piece_length'), True),
- )
-
-
-class ServiceKeyApprovalType(Enum):
- SUPERUSER = 'Super User API'
- KEY_ROTATION = 'Key Rotation'
- AUTOMATIC = 'Automatic'
-
-
-class ServiceKeyApproval(BaseModel):
- approver = QuayUserField(null=True)
- approval_type = CharField(index=True)
- approved_date = DateTimeField(default=datetime.utcnow)
- notes = TextField(default='')
-
-
-class ServiceKey(BaseModel):
- name = CharField()
- kid = CharField(unique=True, index=True)
- service = CharField(index=True)
- jwk = JSONField()
- metadata = JSONField()
- created_date = DateTimeField(default=datetime.utcnow)
- expiration_date = DateTimeField(null=True)
- rotation_duration = IntegerField(null=True)
- approval = ForeignKeyField(ServiceKeyApproval, null=True)
-
-
-class MediaType(BaseModel):
- """ MediaType is an enumeration of the possible formats of various objects in the data model.
- """
- name = CharField(index=True, unique=True)
-
-
-class Messages(BaseModel):
- content = TextField()
- uuid = CharField(default=uuid_generator, max_length=36, index=True)
- severity = CharField(default='info', index=True)
- media_type = ForeignKeyField(MediaType)
-
-
-class LabelSourceType(BaseModel):
- """ LabelSourceType is an enumeration of the possible sources for a label.
- """
- name = CharField(index=True, unique=True)
- mutable = BooleanField(default=False)
-
-
-class Label(BaseModel):
- """ Label represents user-facing metadata associated with another entry in the database (e.g. a
- Manifest).
- """
- uuid = CharField(default=uuid_generator, index=True, unique=True)
- key = CharField(index=True)
- value = TextField()
- media_type = EnumField(MediaType)
- source_type = EnumField(LabelSourceType)
-
-
-class ApprBlob(BaseModel):
- """ ApprBlob represents a content-addressable object stored outside of the database.
- """
- digest = CharField(index=True, unique=True)
- media_type = EnumField(MediaType)
- size = BigIntegerField()
- uncompressed_size = BigIntegerField(null=True)
-
-
-class ApprBlobPlacementLocation(BaseModel):
- """ ApprBlobPlacementLocation is an enumeration of the possible storage locations for ApprBlobs.
- """
- name = CharField(index=True, unique=True)
-
-
-class ApprBlobPlacement(BaseModel):
- """ ApprBlobPlacement represents the location of a Blob.
- """
- blob = ForeignKeyField(ApprBlob)
- location = EnumField(ApprBlobPlacementLocation)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('blob', 'location'), True),
- )
-
-
-class ApprManifest(BaseModel):
- """ ApprManifest represents the metadata and collection of blobs that comprise an Appr image.
- """
- digest = CharField(index=True, unique=True)
- media_type = EnumField(MediaType)
- manifest_json = JSONField()
-
-
-class ApprManifestBlob(BaseModel):
- """ ApprManifestBlob is a many-to-many relation table linking ApprManifests and ApprBlobs.
- """
- manifest = ForeignKeyField(ApprManifest, index=True)
- blob = ForeignKeyField(ApprBlob, index=True)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('manifest', 'blob'), True),
- )
-
-
-class ApprManifestList(BaseModel):
- """ ApprManifestList represents all of the various Appr manifests that compose an ApprTag.
- """
- digest = CharField(index=True, unique=True)
- manifest_list_json = JSONField()
- schema_version = CharField()
- media_type = EnumField(MediaType)
-
-
-
-class ApprTagKind(BaseModel):
- """ ApprTagKind is a enumtable to reference tag kinds.
- """
- name = CharField(index=True, unique=True)
-
-
-class ApprTag(BaseModel):
- """ ApprTag represents a user-facing alias for referencing an ApprManifestList.
- """
- name = CharField()
- repository = ForeignKeyField(Repository)
- manifest_list = ForeignKeyField(ApprManifestList, null=True)
- lifetime_start = BigIntegerField(default=get_epoch_timestamp_ms)
- lifetime_end = BigIntegerField(null=True, index=True)
- hidden = BooleanField(default=False)
- reverted = BooleanField(default=False)
- protected = BooleanField(default=False)
- tag_kind = EnumField(ApprTagKind)
- linked_tag = ForeignKeyField('self', null=True, backref='tag_parents')
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('repository', 'name'), False),
- (('repository', 'name', 'hidden'), False),
- # This unique index prevents deadlocks when concurrently moving and deleting tags
- (('repository', 'name', 'lifetime_end'), True),
- )
-
-ApprChannel = ApprTag.alias()
-
-
-class ApprManifestListManifest(BaseModel):
- """ ApprManifestListManifest is a many-to-many relation table linking ApprManifestLists and
- ApprManifests.
- """
- manifest_list = ForeignKeyField(ApprManifestList, index=True)
- manifest = ForeignKeyField(ApprManifest, index=True)
- operating_system = CharField(null=True)
- architecture = CharField(null=True)
- platform_json = JSONField(null=True)
- media_type = EnumField(MediaType)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('manifest_list', 'media_type'), False),
- )
-
-
-class AppSpecificAuthToken(BaseModel):
- """ AppSpecificAuthToken represents a token generated by a user for use with an external
- application where putting the user's credentials, even encrypted, is deemed too risky.
- """
- user = QuayUserField()
- uuid = CharField(default=uuid_generator, max_length=36, index=True)
- title = CharField()
- token_name = CharField(index=True, unique=True, default=random_string_generator(60))
- token_secret = EncryptedCharField(default_token_length=60)
-
- # TODO(remove-unenc): This field is deprecated and should be removed soon.
- token_code = deprecated_field(
- CharField(default=random_string_generator(length=120), unique=True, index=True, null=True),
- ERTMigrationFlags.WRITE_OLD_FIELDS)
-
- created = DateTimeField(default=datetime.now)
- expiration = DateTimeField(null=True)
- last_accessed = DateTimeField(null=True)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('user', 'expiration'), False),
- )
-
-
-class Manifest(BaseModel):
- """ Manifest represents a single manifest under a repository. Within a repository,
- there can only be one manifest with the same digest.
- """
- repository = ForeignKeyField(Repository)
- digest = CharField(index=True)
- media_type = EnumField(MediaType)
- manifest_bytes = TextField()
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('repository', 'digest'), True),
- (('repository', 'media_type'), False),
- )
-
-
-class TagKind(BaseModel):
- """ TagKind describes the various kinds of tags that can be found in the registry.
- """
- name = CharField(index=True, unique=True)
-
-
-class Tag(BaseModel):
- """ Tag represents a user-facing alias for referencing a Manifest or as an alias to another tag.
- """
- name = CharField()
- repository = ForeignKeyField(Repository)
- manifest = ForeignKeyField(Manifest, null=True)
- lifetime_start_ms = BigIntegerField(default=get_epoch_timestamp_ms)
- lifetime_end_ms = BigIntegerField(null=True, index=True)
- hidden = BooleanField(default=False)
- reversion = BooleanField(default=False)
- tag_kind = EnumField(TagKind)
- linked_tag = ForeignKeyField('self', null=True, backref='tag_parents')
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('repository', 'name'), False),
- (('repository', 'name', 'hidden'), False),
- (('repository', 'name', 'tag_kind'), False),
-
- (('repository', 'lifetime_start_ms'), False),
- (('repository', 'lifetime_end_ms'), False),
-
- # This unique index prevents deadlocks when concurrently moving and deleting tags
- (('repository', 'name', 'lifetime_end_ms'), True),
- )
-
-
-class ManifestChild(BaseModel):
- """ ManifestChild represents a relationship between a manifest and its child manifest(s).
- Multiple manifests can share the same children. Note that since Manifests are stored
- per-repository, the repository here is a bit redundant, but we do so to make cleanup easier.
- """
- repository = ForeignKeyField(Repository)
- manifest = ForeignKeyField(Manifest)
- child_manifest = ForeignKeyField(Manifest)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('repository', 'manifest'), False),
- (('repository', 'child_manifest'), False),
- (('repository', 'manifest', 'child_manifest'), False),
- (('manifest', 'child_manifest'), True),
- )
-
-
-class ManifestLabel(BaseModel):
- """ ManifestLabel represents a label applied to a Manifest, within a repository.
- Note that since Manifests are stored per-repository, the repository here is
- a bit redundant, but we do so to make cleanup easier.
- """
- repository = ForeignKeyField(Repository, index=True)
- manifest = ForeignKeyField(Manifest)
- label = ForeignKeyField(Label)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('manifest', 'label'), True),
- )
-
-
-class ManifestBlob(BaseModel):
- """ ManifestBlob represents a blob that is used by a manifest. """
- repository = ForeignKeyField(Repository, index=True)
- manifest = ForeignKeyField(Manifest)
- blob = ForeignKeyField(ImageStorage)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('manifest', 'blob'), True),
- )
-
-
-class ManifestLegacyImage(BaseModel):
- """ For V1-compatible manifests only, this table maps from the manifest to its associated
- Docker image.
- """
- repository = ForeignKeyField(Repository, index=True)
- manifest = ForeignKeyField(Manifest, unique=True)
- image = ForeignKeyField(Image)
-
-
-class TagManifest(BaseModel):
- """ TO BE DEPRECATED: The manifest for a tag. """
- tag = ForeignKeyField(RepositoryTag, unique=True)
- digest = CharField(index=True)
- json_data = TextField()
-
-
-class TagManifestToManifest(BaseModel):
- """ NOTE: Only used for the duration of the migrations. """
- tag_manifest = ForeignKeyField(TagManifest, index=True, unique=True)
- manifest = ForeignKeyField(Manifest, index=True)
- broken = BooleanField(index=True, default=False)
-
-
-class TagManifestLabel(BaseModel):
- """ TO BE DEPRECATED: Mapping from a tag manifest to a label.
- """
- repository = ForeignKeyField(Repository, index=True)
- annotated = ForeignKeyField(TagManifest, index=True)
- label = ForeignKeyField(Label)
-
- class Meta:
- database = db
- read_only_config = read_only_config
- indexes = (
- (('annotated', 'label'), True),
- )
-
-
-class TagManifestLabelMap(BaseModel):
- """ NOTE: Only used for the duration of the migrations. """
- tag_manifest = ForeignKeyField(TagManifest, index=True)
- manifest = ForeignKeyField(Manifest, null=True, index=True)
-
- label = ForeignKeyField(Label, index=True)
-
- tag_manifest_label = ForeignKeyField(TagManifestLabel, index=True)
- manifest_label = ForeignKeyField(ManifestLabel, null=True, index=True)
-
- broken_manifest = BooleanField(index=True, default=False)
-
-
-class TagToRepositoryTag(BaseModel):
- """ NOTE: Only used for the duration of the migrations. """
- repository = ForeignKeyField(Repository, index=True)
- tag = ForeignKeyField(Tag, index=True, unique=True)
- repository_tag = ForeignKeyField(RepositoryTag, index=True, unique=True)
-
-
-@unique
-class RepoMirrorRuleType(IntEnum):
- """
- Types of mirroring rules.
- TAG_GLOB_CSV: Comma separated glob values (eg. "7.6,7.6-1.*")
- """
- TAG_GLOB_CSV = 1
-
-
-class RepoMirrorRule(BaseModel):
- """
- Determines how a given Repository should be mirrored.
- """
- uuid = CharField(default=uuid_generator, max_length=36, index=True)
- repository = ForeignKeyField(Repository, index=True)
- creation_date = DateTimeField(default=datetime.utcnow)
-
- rule_type = ClientEnumField(RepoMirrorRuleType, default=RepoMirrorRuleType.TAG_GLOB_CSV)
- rule_value = JSONField()
-
- # Optional associations to allow the generation of a ruleset tree
- left_child = ForeignKeyField('self', null=True, backref='left_child')
- right_child = ForeignKeyField('self', null=True, backref='right_child')
-
-
-@unique
-class RepoMirrorType(IntEnum):
- """
- Types of repository mirrors.
- """
- PULL = 1 # Pull images from the external repo
-
-
-@unique
-class RepoMirrorStatus(IntEnum):
- """
- Possible statuses of repository mirroring.
- """
- FAIL = -1
- NEVER_RUN = 0
- SUCCESS = 1
- SYNCING = 2
- SYNC_NOW = 3
-
-
-class RepoMirrorConfig(BaseModel):
- """
- Represents a repository to be mirrored and any additional configuration
- required to perform the mirroring.
- """
- repository = ForeignKeyField(Repository, index=True, unique=True, backref='mirror')
- creation_date = DateTimeField(default=datetime.utcnow)
- is_enabled = BooleanField(default=True)
-
- # Mirror Configuration
- mirror_type = ClientEnumField(RepoMirrorType, default=RepoMirrorType.PULL)
- internal_robot = QuayUserField(allows_robots=True, null=True, backref='mirrorpullrobot',
- robot_null_delete=True)
- external_reference = CharField()
- external_registry_username = EncryptedCharField(max_length=2048, null=True)
- external_registry_password = EncryptedCharField(max_length=2048, null=True)
- external_registry_config = JSONField(default={})
-
- # Worker Queuing
- sync_interval = IntegerField() # seconds between syncs
- sync_start_date = DateTimeField(null=True) # next start time
- sync_expiration_date = DateTimeField(null=True) # max duration
- sync_retries_remaining = IntegerField(default=3)
- sync_status = ClientEnumField(RepoMirrorStatus, default=RepoMirrorStatus.NEVER_RUN)
- sync_transaction_id = CharField(default=uuid_generator, max_length=36)
-
- # Tag-Matching Rules
- root_rule = ForeignKeyField(RepoMirrorRule)
-
-
-appr_classes = set([ApprTag, ApprTagKind, ApprBlobPlacementLocation, ApprManifestList,
- ApprManifestBlob, ApprBlob, ApprManifestListManifest, ApprManifest,
- ApprBlobPlacement])
-v22_classes = set([Manifest, ManifestLabel, ManifestBlob, ManifestLegacyImage, TagKind,
- ManifestChild, Tag])
-transition_classes = set([TagManifestToManifest, TagManifestLabelMap, TagToRepositoryTag])
-
-is_model = lambda x: inspect.isclass(x) and issubclass(x, BaseModel) and x is not BaseModel
-all_models = [model[1] for model in inspect.getmembers(sys.modules[__name__], is_model)]
+all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission, Visibility,
+ RepositoryTag, EmailConfirmation, FederatedLogin, LoginService, QueueItem,
+ RepositoryBuild, Team, TeamMember, TeamRole, LogEntryKind, LogEntry,
+ PermissionPrototype, ImageStorage, BuildTriggerService, RepositoryBuildTrigger,
+ OAuthApplication, OAuthAuthorizationCode, OAuthAccessToken, NotificationKind,
+ Notification, ImageStorageLocation, ImageStoragePlacement,
+ ExternalNotificationEvent, ExternalNotificationMethod, RepositoryNotification,
+ RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
+ TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind,
+ AccessTokenKind, Star, RepositoryActionCount, TagManifest]
diff --git a/data/encryption.py b/data/encryption.py
deleted file mode 100644
index 429f09827..000000000
--- a/data/encryption.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import logging
-import base64
-
-from collections import namedtuple
-from cryptography.hazmat.primitives.ciphers.aead import AESCCM
-
-from util.security.secret import convert_secret_key
-
-class DecryptionFailureException(Exception):
- """ Exception raised if a field could not be decrypted. """
-
-
-EncryptionVersion = namedtuple('EncryptionVersion', ['prefix', 'encrypt', 'decrypt'])
-
-logger = logging.getLogger(__name__)
-
-
-_SEPARATOR = '$$'
-AES_CCM_NONCE_LENGTH = 13
-
-
-def _encrypt_ccm(secret_key, value, field_max_length=None):
- aesccm = AESCCM(secret_key)
- nonce = os.urandom(AES_CCM_NONCE_LENGTH)
- ct = aesccm.encrypt(nonce, value.encode('utf-8'), None)
- encrypted = base64.b64encode(nonce + ct)
- if field_max_length:
- msg = 'Tried to encode a value too large for this field'
- assert (len(encrypted) + _RESERVED_FIELD_SPACE) <= field_max_length, msg
-
- return encrypted
-
-
-def _decrypt_ccm(secret_key, value):
- aesccm = AESCCM(secret_key)
- try:
- decoded = base64.b64decode(value)
- nonce = decoded[:AES_CCM_NONCE_LENGTH]
- ct = decoded[AES_CCM_NONCE_LENGTH:]
- decrypted = aesccm.decrypt(nonce, ct, None)
- return decrypted.decode('utf-8')
- except Exception:
- logger.exception('Got exception when trying to decrypt value `%s`', value)
- raise DecryptionFailureException()
-
-
-# Defines the versions of encryptions we support. This will allow us to upgrade to newer encryption
-# protocols (fairly seamlessly) if need be in the future.
-_VERSIONS = {
- 'v0': EncryptionVersion('v0', _encrypt_ccm, _decrypt_ccm),
-}
-
-_RESERVED_FIELD_SPACE = len(_SEPARATOR) + max([len(k) for k in _VERSIONS.keys()])
-
-
-class FieldEncrypter(object):
- """ Helper object for defining how fields are encrypted and decrypted between the database
- and the application.
- """
- def __init__(self, secret_key, version='v0'):
- # NOTE: secret_key will be None when the system is being first initialized, so we allow that
- # case here, but make sure to assert that it is *not* None below if any encryption is actually
- # needed.
- self._secret_key = convert_secret_key(secret_key) if secret_key is not None else None
- self._encryption_version = _VERSIONS[version]
-
- def encrypt_value(self, value, field_max_length=None):
- """ Encrypts the value using the current version of encryption. """
- assert self._secret_key is not None
- encrypted_value = self._encryption_version.encrypt(self._secret_key, value, field_max_length)
- return '%s%s%s' % (self._encryption_version.prefix, _SEPARATOR, encrypted_value)
-
- def decrypt_value(self, value):
- """ Decrypts the value, returning it. If the value cannot be decrypted
- raises a DecryptionFailureException.
- """
- assert self._secret_key is not None
- if _SEPARATOR not in value:
- raise DecryptionFailureException('Invalid encrypted value')
-
- version_prefix, data = value.split(_SEPARATOR, 1)
- if version_prefix not in _VERSIONS:
- raise DecryptionFailureException('Unknown version prefix %s' % version_prefix)
-
- return _VERSIONS[version_prefix].decrypt(self._secret_key, data)
-
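For context on the deleted module: a standalone sketch of the versioned envelope it writes, assuming only the cryptography package it already imports. The 256-bit key below is illustrative; the real code derives its key via convert_secret_key().

import base64
import os

from cryptography.hazmat.primitives.ciphers.aead import AESCCM

AES_CCM_NONCE_LENGTH = 13

key = AESCCM.generate_key(bit_length=256)   # illustrative key only
aesccm = AESCCM(key)

# Encrypt: prefix the base64(nonce || ciphertext) payload with the version marker.
nonce = os.urandom(AES_CCM_NONCE_LENGTH)
ciphertext = aesccm.encrypt(nonce, b'robot-token-123', None)
stored = 'v0$$' + base64.b64encode(nonce + ciphertext).decode('ascii')

# Decrypt: split on the separator, then undo the base64/nonce framing.
version, payload = stored.split('$$', 1)
decoded = base64.b64decode(payload)
plaintext = aesccm.decrypt(decoded[:AES_CCM_NONCE_LENGTH], decoded[AES_CCM_NONCE_LENGTH:], None)
assert version == 'v0' and plaintext == b'robot-token-123'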
diff --git a/data/fields.py b/data/fields.py
deleted file mode 100644
index c79a7e6bd..000000000
--- a/data/fields.py
+++ /dev/null
@@ -1,297 +0,0 @@
-import base64
-import string
-import json
-
-from random import SystemRandom
-
-import bcrypt
-import resumablehashlib
-
-from peewee import TextField, CharField, SmallIntegerField
-from data.text import prefix_search
-
-
-def random_string(length=16):
- random = SystemRandom()
- return ''.join([random.choice(string.ascii_uppercase + string.digits)
- for _ in range(length)])
-
-
-class _ResumableSHAField(TextField):
- def _create_sha(self):
- raise NotImplementedError
-
- def db_value(self, value):
- if value is None:
- return None
-
- sha_state = value.state()
-
- # One of the fields is a byte string, let's base64 encode it to make sure
- # we can store and fetch it regardless of default collation.
- sha_state[3] = base64.b64encode(sha_state[3])
-
- return json.dumps(sha_state)
-
- def python_value(self, value):
- if value is None:
- return None
-
- sha_state = json.loads(value)
-
- # We need to base64 decode the data bytestring.
- sha_state[3] = base64.b64decode(sha_state[3])
- to_resume = self._create_sha()
- to_resume.set_state(sha_state)
- return to_resume
-
-
-class ResumableSHA256Field(_ResumableSHAField):
- def _create_sha(self):
- return resumablehashlib.sha256()
-
-
-class ResumableSHA1Field(_ResumableSHAField):
- def _create_sha(self):
- return resumablehashlib.sha1()
-
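A round-trip sketch for the resumable-hash fields above, under the module's Python 2 assumptions and the state()/set_state() API of resumablehashlib that the code already relies on: an in-progress hash survives being stored as JSON text.

field = ResumableSHA256Field()

in_progress = resumablehashlib.sha256()
in_progress.update('first chunk of a blob upload')

stored = field.db_value(in_progress)      # JSON text, with the byte-string member base64 encoded
resumed = field.python_value(stored)      # a new sha256 carrying the same internal state
resumed.update('second chunk')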
-
-class JSONField(TextField):
- def db_value(self, value):
- return json.dumps(value)
-
- def python_value(self, value):
- if value is None or value == "":
- return {}
- return json.loads(value)
-
-
-class Base64BinaryField(TextField):
- def db_value(self, value):
- if value is None:
- return None
- return base64.b64encode(value)
-
- def python_value(self, value):
- if value is None:
- return None
- return base64.b64decode(value)
-
-
-class DecryptedValue(object):
- """ Wrapper around an already decrypted value to be placed into an encrypted field. """
- def __init__(self, decrypted_value):
- assert decrypted_value is not None
- self.value = decrypted_value
-
- def decrypt(self):
- return self.value
-
- def matches(self, unencrypted_value):
- """ Returns whether the value of this field matches the unencrypted_value. """
- return self.decrypt() == unencrypted_value
-
-
-class LazyEncryptedValue(object):
- """ Wrapper around an encrypted value in an encrypted field. Will decrypt lazily. """
- def __init__(self, encrypted_value, field):
- self.encrypted_value = encrypted_value
- self._field = field
-
- def decrypt(self):
- """ Decrypts the value. """
- return self._field.model._meta.encrypter.decrypt_value(self.encrypted_value)
-
- def matches(self, unencrypted_value):
- """ Returns whether the value of this field matches the unencrypted_value. """
- return self.decrypt() == unencrypted_value
-
- def __eq__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def __mod__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def __pow__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def __contains__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def contains(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def startswith(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def endswith(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
-
-def _add_encryption(field_class, requires_length_check=True):
- """ Adds support for encryption and decryption to the given field class. """
- class indexed_class(field_class):
- def __init__(self, default_token_length=None, *args, **kwargs):
- def _generate_default():
- return DecryptedValue(random_string(default_token_length))
-
- if default_token_length is not None:
- kwargs['default'] = _generate_default
-
- field_class.__init__(self, *args, **kwargs)
- assert not self.index
-
- def db_value(self, value):
- if value is None:
- return None
-
- if isinstance(value, LazyEncryptedValue):
- return value.encrypted_value
-
- if isinstance(value, DecryptedValue):
- value = value.value
-
- meta = self.model._meta
- return meta.encrypter.encrypt_value(value, self.max_length if requires_length_check else None)
-
- def python_value(self, value):
- if value is None:
- return None
-
- return LazyEncryptedValue(value, self)
-
- def __eq__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def __mod__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def __pow__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def __contains__(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def contains(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def startswith(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- def endswith(self, _):
- raise Exception('Disallowed operation; use `matches`')
-
- return indexed_class
-
-
-EncryptedCharField = _add_encryption(CharField)
-EncryptedTextField = _add_encryption(TextField, requires_length_check=False)
-
-
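A brief usage sketch for the encrypted-field helpers above, with a hypothetical token value: writes wrap the plaintext in DecryptedValue and reads come back as LazyEncryptedValue, so comparisons always go through matches() instead of SQL equality.

new_token = DecryptedValue('robot-token-123')   # what application code assigns to an EncryptedCharField
assert new_token.matches('robot-token-123')     # equality checks use matches(), never ==
assert not new_token.matches('wrong-token')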
-class EnumField(SmallIntegerField):
- def __init__(self, enum_type, *args, **kwargs):
- kwargs.pop('index', None)
-
- super(EnumField, self).__init__(index=True, *args, **kwargs)
- self.enum_type = enum_type
-
- def db_value(self, value):
- """Convert the python value for storage in the database."""
- return int(value.value)
-
- def python_value(self, value):
- """Convert the database value to a pythonic value."""
- return self.enum_type(value) if value is not None else None
-
- def clone_base(self, **kwargs):
- return super(EnumField, self).clone_base(
- enum_type=self.enum_type,
- **kwargs)
-
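A small round-trip sketch of EnumField using a hypothetical enum: members are persisted as their integer value and rehydrated into the enum type on read.

from enum import IntEnum, unique

@unique
class ExampleState(IntEnum):
    NORMAL = 0
    READ_ONLY = 1

state_field = EnumField(ExampleState)
assert state_field.db_value(ExampleState.READ_ONLY) == 1   # stored as a small integer
assert state_field.python_value(0) is ExampleState.NORMAL  # read back as the enum member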
-
-def _add_fulltext(field_class):
- """ Adds support for full text indexing and lookup to the given field class. """
- class indexed_class(field_class):
- # Marker used by SQLAlchemy translation layer to add the proper index for full text searching.
- __fulltext__ = True
-
- def __init__(self, match_function, *args, **kwargs):
- field_class.__init__(self, *args, **kwargs)
- self.match_function = match_function
-
- def match(self, query):
- return self.match_function(self, query)
-
- def match_prefix(self, query):
- return prefix_search(self, query)
-
- def __mod__(self, _):
- raise Exception('Unsafe operation: Use `match` or `match_prefix`')
-
- def __pow__(self, _):
- raise Exception('Unsafe operation: Use `match` or `match_prefix`')
-
- def __contains__(self, _):
- raise Exception('Unsafe operation: Use `match` or `match_prefix`')
-
- def contains(self, _):
- raise Exception('Unsafe operation: Use `match` or `match_prefix`')
-
- def startswith(self, _):
- raise Exception('Unsafe operation: Use `match` or `match_prefix`')
-
- def endswith(self, _):
- raise Exception('Unsafe operation: Use `match` or `match_prefix`')
-
- return indexed_class
-
-
-FullIndexedCharField = _add_fulltext(CharField)
-FullIndexedTextField = _add_fulltext(TextField)
-
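A sketch of the indirection added by _add_fulltext, using a stand-in match_function (the real fields in database.py pass db_match_func): searches always go through match()/match_prefix() rather than raw LIKE operators.

recorded_queries = []

def fake_match(field, query):
    # A real match_function returns a peewee expression in the backend's full-text syntax.
    recorded_queries.append(query)

description = FullIndexedTextField(match_function=fake_match)
description.match('mirror config')
assert recorded_queries == ['mirror config']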
-
-class Credential(object):
- """ Credential represents a hashed credential. """
- def __init__(self, hashed):
- self.hashed = hashed
-
- def matches(self, value):
- """ Returns true if this credential matches the unhashed value given. """
- return bcrypt.hashpw(value.encode('utf-8'), self.hashed) == self.hashed
-
- @classmethod
- def from_string(cls, string_value):
- """ Returns a Credential object from an unhashed string value. """
- return Credential(bcrypt.hashpw(string_value.encode('utf-8'), bcrypt.gensalt()))
-
- @classmethod
- def generate(cls, length=20):
- """ Generates a new credential and returns it, along with its unhashed form. """
- token = random_string(length)
- return Credential.from_string(token), token
-
-
-class CredentialField(CharField):
- """ A character field that stores crytographically hashed credentials that should never be
- available to the user in plaintext after initial creation. This field automatically
- provides verification.
- """
- def __init__(self, *args, **kwargs):
- CharField.__init__(self, *args, **kwargs)
- assert 'default' not in kwargs
- assert not self.index
-
- def db_value(self, value):
- if value is None:
- return None
-
- if isinstance(value, basestring):
- raise Exception('A string cannot be given to a CredentialField; please wrap in a Credential')
-
- return value.hashed
-
- def python_value(self, value):
- if value is None:
- return None
-
- return Credential(value)
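A short usage sketch of the credential helpers above: generate() returns both the bcrypt-hashed Credential that a CredentialField stores and the one-time plaintext shown to the user.

credential, plaintext = Credential.generate(length=20)
assert credential.matches(plaintext)        # verification against the stored hash
assert not credential.matches('bad-guess')  # the plaintext itself is never stored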
diff --git a/data/logs_model/__init__.py b/data/logs_model/__init__.py
deleted file mode 100644
index be8cc9402..000000000
--- a/data/logs_model/__init__.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import logging
-
-from data.logs_model.table_logs_model import TableLogsModel
-from data.logs_model.document_logs_model import DocumentLogsModel
-from data.logs_model.combined_model import CombinedLogsModel
-
-logger = logging.getLogger(__name__)
-
-
-def _transition_model(*args, **kwargs):
- return CombinedLogsModel(
- DocumentLogsModel(*args, **kwargs),
- TableLogsModel(*args, **kwargs),
- )
-
-
-_LOG_MODELS = {
- 'database': TableLogsModel,
- 'transition_reads_both_writes_es': _transition_model,
- 'elasticsearch': DocumentLogsModel,
-}
-
-_PULL_LOG_KINDS = {'pull_repo', 'repo_verb'}
-
-class LogsModelProxy(object):
- def __init__(self):
- self._model = None
-
- def initialize(self, model):
- self._model = model
- logger.info('===============================')
- logger.info('Using logs model `%s`', self._model)
- logger.info('===============================')
-
- def __getattr__(self, attr):
- if not self._model:
- raise AttributeError("LogsModelProxy is not initialized")
- return getattr(self._model, attr)
-
-
-logs_model = LogsModelProxy()
-
-
-def configure(app_config):
- logger.debug('Configuring log model')
- model_name = app_config.get('LOGS_MODEL', 'database')
- model_config = app_config.get('LOGS_MODEL_CONFIG', {})
-
- def should_skip_logging(kind_name, namespace_name, is_free_namespace):
- if namespace_name and namespace_name in app_config.get('DISABLED_FOR_AUDIT_LOGS', {}):
- return True
-
- if kind_name in _PULL_LOG_KINDS:
- if namespace_name and namespace_name in app_config.get('DISABLED_FOR_PULL_LOGS', {}):
- return True
-
- if app_config.get('FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES'):
- if is_free_namespace:
- return True
-
- return False
-
- model_config['should_skip_logging'] = should_skip_logging
- logs_model.initialize(_LOG_MODELS[model_name](**model_config))
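A minimal sketch of the proxy indirection in this deleted module, using a hypothetical stub model: callers import logs_model at import time, and initialize() (normally invoked by configure()) later swaps in whichever implementation LOGS_MODEL selected.

class _StubLogsModel(object):
    """ Hypothetical stand-in for TableLogsModel / DocumentLogsModel. """
    def count_repository_actions(self, repository, day):
        return 0

proxy = LogsModelProxy()
proxy.initialize(_StubLogsModel())
assert proxy.count_repository_actions(None, None) == 0   # attribute access is forwarded to the model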
diff --git a/data/logs_model/combined_model.py b/data/logs_model/combined_model.py
deleted file mode 100644
index 735101601..000000000
--- a/data/logs_model/combined_model.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import logging
-import itertools
-
-from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage
-from data.logs_model.interface import ActionLogsDataInterface
-from data.logs_model.shared import SharedModel
-
-logger = logging.getLogger(__name__)
-
-
-def _merge_aggregated_log_counts(*args):
- """ Merge two lists of AggregatedLogCount based on the value of their kind_id and datetime.
- """
- matching_keys = {}
- aggregated_log_counts_list = itertools.chain.from_iterable(args)
-
- def canonical_key_from_kind_date_tuple(kind_id, dt):
- """ Return a comma separated key from an AggregatedLogCount's kind_id and datetime. """
- return str(kind_id) + ',' + str(dt)
-
- for kind_id, count, dt in aggregated_log_counts_list:
- kind_date_key = canonical_key_from_kind_date_tuple(kind_id, dt)
- if kind_date_key in matching_keys:
- existing_count = matching_keys[kind_date_key][2]
- matching_keys[kind_date_key] = (kind_id, dt, existing_count + count)
- else:
- matching_keys[kind_date_key] = (kind_id, dt, count)
-
- return [AggregatedLogCount(kind_id, count, dt) for (kind_id, dt, count) in matching_keys.values()]
-
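A worked example of the merge above, assuming AggregatedLogCount is the (kind_id, count, datetime) tuple implied by the unpacking in the loop: counts sharing a kind and datetime are summed, distinct keys pass through unchanged.

from datetime import datetime

day = datetime(2019, 1, 1)
merged = _merge_aggregated_log_counts(
    [AggregatedLogCount(1, 3, day)],
    [AggregatedLogCount(1, 2, day), AggregatedLogCount(2, 5, day)])
assert sorted(merged) == [AggregatedLogCount(1, 5, day), AggregatedLogCount(2, 5, day)]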
-
-class CombinedLogsModel(SharedModel, ActionLogsDataInterface):
- """
- CombinedLogsModel implements the data model that logs to the first logs model and reads from
- both.
- """
-
- def __init__(self, read_write_logs_model, read_only_logs_model):
- self.read_write_logs_model = read_write_logs_model
- self.read_only_logs_model = read_only_logs_model
-
- def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
- repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
- return self.read_write_logs_model.log_action(kind_name, namespace_name, performer, ip, metadata,
- repository, repository_name, timestamp,
- is_free_namespace)
-
- def count_repository_actions(self, repository, day):
- rw_count = self.read_write_logs_model.count_repository_actions(repository, day)
- ro_count = self.read_only_logs_model.count_repository_actions(repository, day)
- return rw_count + ro_count
-
- def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
- repository_name=None, namespace_name=None, filter_kinds=None):
- rw_model = self.read_write_logs_model
- ro_model = self.read_only_logs_model
- rw_count = rw_model.get_aggregated_log_counts(start_datetime, end_datetime,
- performer_name=performer_name,
- repository_name=repository_name,
- namespace_name=namespace_name,
- filter_kinds=filter_kinds)
- ro_count = ro_model.get_aggregated_log_counts(start_datetime, end_datetime,
- performer_name=performer_name,
- repository_name=repository_name,
- namespace_name=namespace_name,
- filter_kinds=filter_kinds)
- return _merge_aggregated_log_counts(rw_count, ro_count)
-
- def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
- namespace_id=None, max_query_time=None):
- rw_model = self.read_write_logs_model
- ro_model = self.read_only_logs_model
- rw_logs = rw_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
- namespace_id, max_query_time)
- ro_logs = ro_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
- namespace_id, max_query_time)
- for batch in itertools.chain(rw_logs, ro_logs):
- yield batch
-
- def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
- rw_model = self.read_write_logs_model
- ro_model = self.read_only_logs_model
-
- page_token = page_token or {}
-
- new_page_token = {}
- if page_token is None or not page_token.get('under_readonly_model', False):
- rw_page_token = page_token.get('readwrite_page_token')
- rw_logs = rw_model.lookup_logs(start_datetime, end_datetime, performer_name,
- repository_name, namespace_name, filter_kinds,
- rw_page_token, max_page_count)
- logs, next_page_token = rw_logs
- new_page_token['under_readonly_model'] = next_page_token is None
- new_page_token['readwrite_page_token'] = next_page_token
- return LogEntriesPage(logs, new_page_token)
- else:
- readonly_page_token = page_token.get('readonly_page_token')
- ro_logs = ro_model.lookup_logs(start_datetime, end_datetime, performer_name,
- repository_name, namespace_name, filter_kinds,
- readonly_page_token, max_page_count)
- logs, next_page_token = ro_logs
- if next_page_token is None:
- return LogEntriesPage(logs, None)
-
- new_page_token['under_readonly_model'] = True
- new_page_token['readonly_page_token'] = next_page_token
- return LogEntriesPage(logs, new_page_token)
-
- def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
- filter_kinds=None, size=20):
- latest_logs = []
- rw_model = self.read_write_logs_model
- ro_model = self.read_only_logs_model
-
- rw_logs = rw_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
- filter_kinds, size)
- latest_logs.extend(rw_logs)
- if len(latest_logs) < size:
- ro_logs = ro_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
- filter_kinds, size - len(latest_logs))
- latest_logs.extend(ro_logs)
-
- return latest_logs
-
- def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
- ro_model = self.read_only_logs_model
- rw_model = self.read_write_logs_model
- ro_ctx = ro_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
- rw_ctx = rw_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
- for ctx in itertools.chain(ro_ctx, rw_ctx):
- yield ctx
diff --git a/data/logs_model/datatypes.py b/data/logs_model/datatypes.py
deleted file mode 100644
index 03db6756f..000000000
--- a/data/logs_model/datatypes.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import json
-
-from calendar import timegm
-from collections import namedtuple
-from email.utils import formatdate
-
-from cachetools.func import lru_cache
-
-from data import model
-from util.morecollections import AttrDict
-
-
-def _format_date(date):
- """ Output an RFC822 date format. """
- if date is None:
- return None
-
- return formatdate(timegm(date.utctimetuple()))
-
-
-@lru_cache(maxsize=1)
-def _kinds():
- return model.log.get_log_entry_kinds()
-
-
-class LogEntriesPage(namedtuple('LogEntriesPage', ['logs', 'next_page_token'])):
- """ Represents a page returned by the lookup_logs call. The `logs` contains the logs
- found for the page and `next_page_token`, if not None, contains the token to be
- encoded and returned for the followup call.
- """
-
-
-class Log(namedtuple('Log', [
- 'metadata_json', 'ip', 'datetime', 'performer_email', 'performer_username', 'performer_robot',
- 'account_organization', 'account_username', 'account_email', 'account_robot', 'kind_id'])):
- """ Represents a single log entry returned by the logs model. """
-
- @classmethod
- def for_logentry(cls, log):
- account_organization = None
- account_username = None
- account_email = None
- account_robot = None
-
- try:
- account_organization = log.account.organization
- account_username = log.account.username
- account_email = log.account.email
- account_robot = log.account.robot
- except AttributeError:
- pass
-
- performer_robot = None
- performer_username = None
- performer_email = None
-
- try:
- performer_robot = log.performer.robot
- performer_username = log.performer.username
- performer_email = log.performer.email
- except AttributeError:
- pass
-
- return Log(log.metadata_json, log.ip, log.datetime, performer_email, performer_username,
- performer_robot, account_organization, account_username, account_email,
- account_robot, log.kind_id)
-
- @classmethod
- def for_elasticsearch_log(cls, log, id_user_map):
- account_organization = None
- account_username = None
- account_email = None
- account_robot = None
-
- try:
- if log.account_id:
- account = id_user_map[log.account_id]
- account_organization = account.organization
- account_username = account.username
- account_email = account.email
- account_robot = account.robot
- except AttributeError:
- pass
-
- performer_robot = None
- performer_username = None
- performer_email = None
-
- try:
- if log.performer_id:
- performer = id_user_map[log.performer_id]
- performer_robot = performer.robot
- performer_username = performer.username
- performer_email = performer.email
- except AttributeError:
- pass
-
- return Log(log.metadata_json, str(log.ip), log.datetime, performer_email, performer_username,
- performer_robot, account_organization, account_username, account_email,
- account_robot, log.kind_id)
-
- def to_dict(self, avatar, include_namespace=False):
- view = {
- 'kind': _kinds()[self.kind_id],
- 'metadata': json.loads(self.metadata_json),
- 'ip': self.ip,
- 'datetime': _format_date(self.datetime),
- }
-
- if self.performer_username:
- performer = AttrDict({'username': self.performer_username, 'email': self.performer_email})
- performer.robot = None
- if self.performer_robot:
- performer.robot = self.performer_robot
-
- view['performer'] = {
- 'kind': 'user',
- 'name': self.performer_username,
- 'is_robot': self.performer_robot,
- 'avatar': avatar.get_data_for_user(performer),
- }
-
- if include_namespace:
- if self.account_username:
- account = AttrDict({'username': self.account_username, 'email': self.account_email})
- if self.account_organization:
-
- view['namespace'] = {
- 'kind': 'org',
- 'name': self.account_username,
- 'avatar': avatar.get_data_for_org(account),
- }
- else:
- account.robot = None
- if self.account_robot:
- account.robot = self.account_robot
- view['namespace'] = {
- 'kind': 'user',
- 'name': self.account_username,
- 'avatar': avatar.get_data_for_user(account),
- }
-
- return view
-
-
-class AggregatedLogCount(namedtuple('AggregatedLogCount', ['kind_id', 'count', 'datetime'])):
- """ Represents the aggregated count of the number of logs, of a particular kind, on a day. """
- def to_dict(self):
- view = {
- 'kind': _kinds()[self.kind_id],
- 'count': self.count,
- 'datetime': _format_date(self.datetime),
- }
-
- return view
diff --git a/data/logs_model/document_logs_model.py b/data/logs_model/document_logs_model.py
deleted file mode 100644
index e93cd2062..000000000
--- a/data/logs_model/document_logs_model.py
+++ /dev/null
@@ -1,532 +0,0 @@
-# pylint: disable=protected-access
-
-import json
-import logging
-import uuid
-
-from time import time
-from datetime import timedelta, datetime, date
-from dateutil.parser import parse as parse_datetime
-
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-from elasticsearch.exceptions import ConnectionTimeout, NotFoundError
-
-from data import model
-from data.database import CloseForLongOperation
-from data.model import config
-from data.model.log import (_json_serialize, ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING,
- DataModelException)
-from data.logs_model.elastic_logs import LogEntry, configure_es
-from data.logs_model.datatypes import Log, AggregatedLogCount, LogEntriesPage
-from data.logs_model.interface import (ActionLogsDataInterface, LogRotationContextInterface,
- LogsIterationTimeout)
-from data.logs_model.shared import SharedModel, epoch_ms
-
-from data.logs_model.logs_producer import LogProducerProxy, LogSendException
-from data.logs_model.logs_producer.kafka_logs_producer import KafkaLogsProducer
-from data.logs_model.logs_producer.elasticsearch_logs_producer import ElasticsearchLogsProducer
-from data.logs_model.logs_producer.kinesis_stream_logs_producer import KinesisStreamLogsProducer
-
-
-logger = logging.getLogger(__name__)
-
-PAGE_SIZE = 20
-DEFAULT_RESULT_WINDOW = 5000
-MAX_RESULT_WINDOW = 10000
-
-# DATE_RANGE_LIMIT limits the query datetime range to at most one month.
-DATE_RANGE_LIMIT = 32
-
-# Timeout for count_repository_actions
-COUNT_REPOSITORY_ACTION_TIMEOUT = 30
-
-
-
-def _date_range_descending(start_datetime, end_datetime, includes_end_datetime=False):
- """ Generate the dates between `end_datetime` and `start_datetime`.
-
- If `includes_end_datetime` is set, the generator starts at `end_datetime`,
- otherwise, starts the generator at `end_datetime` minus 1 second.
- """
- assert end_datetime >= start_datetime
- start_date = start_datetime.date()
-
- if includes_end_datetime:
- current_date = end_datetime.date()
- else:
- current_date = (end_datetime - timedelta(seconds=1)).date()
-
- while current_date >= start_date:
- yield current_date
- current_date = current_date - timedelta(days=1)
-
-
-def _date_range_in_single_index(dt1, dt2):
- """ Determine whether a single index can be searched given a range
- of dates or datetimes. If date instances are given, difference should be 1 day.
-
- NOTE: dt2 is exclusive to the search result set.
- i.e. The date range is larger or equal to dt1 and strictly smaller than dt2
- """
- assert isinstance(dt1, date) and isinstance(dt2, date)
-
- dt = dt2 - dt1
-
- # Check if date or datetime
- if not isinstance(dt1, datetime) and not isinstance(dt2, datetime):
- return dt == timedelta(days=1)
-
- if dt < timedelta(days=1) and dt >= timedelta(days=0):
- return dt2.day == dt1.day
-
- # Check if the datetime can be interpreted as a date: hour, minutes, seconds and microseconds all set to 0
- if dt == timedelta(days=1):
- return dt1.hour == 0 and dt1.minute == 0 and dt1.second == 0 and dt1.microsecond == 0
-
- return False
-
-
-def _for_elasticsearch_logs(logs, repository_id=None, namespace_id=None):
- namespace_ids = set()
- for log in logs:
- namespace_ids.add(log.account_id)
- namespace_ids.add(log.performer_id)
- assert namespace_id is None or log.account_id == namespace_id
- assert repository_id is None or log.repository_id == repository_id
-
- id_user_map = model.user.get_user_map_by_ids(namespace_ids)
- return [Log.for_elasticsearch_log(log, id_user_map) for log in logs]
-
-
-def _random_id():
- """ Generates a unique uuid4 string for the random_id field in LogEntry.
- It is used as a tie-breaker for sorting logs based on datetime:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-after.html
- """
- return str(uuid.uuid4())
-
-
-@add_metaclass(ABCMeta)
-class ElasticsearchLogsModelInterface(object):
- """
- Interface for Elasticsearch specific operations with the logs model.
- These operations are usually index based.
- """
-
- @abstractmethod
- def can_delete_index(self, index, cutoff_date):
- """ Return whether the given index is older than the given cutoff date. """
-
- @abstractmethod
- def list_indices(self):
- """ List the logs model's indices. """
-
-
-class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsModelInterface):
- """
- DocumentLogsModel implements the data model for the logs API backed by an
- elasticsearch service.
- """
- def __init__(self, should_skip_logging=None, elasticsearch_config=None, producer=None, **kwargs):
- self._should_skip_logging = should_skip_logging
- self._logs_producer = LogProducerProxy()
- self._es_client = configure_es(**elasticsearch_config)
-
- if producer == 'kafka':
- kafka_config = kwargs['kafka_config']
- self._logs_producer.initialize(KafkaLogsProducer(**kafka_config))
- elif producer == 'elasticsearch':
- self._logs_producer.initialize(ElasticsearchLogsProducer())
- elif producer == 'kinesis_stream':
- kinesis_stream_config = kwargs['kinesis_stream_config']
- self._logs_producer.initialize(KinesisStreamLogsProducer(**kinesis_stream_config))
- else:
- raise Exception('Invalid log producer: %s' % producer)
-
- @staticmethod
- def _get_ids_by_names(repository_name, namespace_name, performer_name):
- """ Retrieve repository/namespace/performer ids based on their names.
- Raises DataModelException when the namespace_name does not match any
- user in the database.
- Returns a tuple of database ids, each of which may be None if it does not exist.
- """
- repository_id = None
- account_id = None
- performer_id = None
-
- if repository_name and namespace_name:
- repository = model.repository.get_repository(namespace_name, repository_name)
- if repository:
- repository_id = repository.id
- account_id = repository.namespace_user.id
-
- if namespace_name and account_id is None:
- account = model.user.get_user_or_org(namespace_name)
- if account is None:
- raise DataModelException('Invalid namespace requested')
-
- account_id = account.id
-
- if performer_name:
- performer = model.user.get_user(performer_name)
- if performer:
- performer_id = performer.id
-
- return repository_id, account_id, performer_id
-
- def _base_query(self, performer_id=None, repository_id=None, account_id=None, filter_kinds=None,
- index=None):
- if filter_kinds is not None:
- assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
-
- if index is not None:
- search = LogEntry.search(index=index)
- else:
- search = LogEntry.search()
-
- if performer_id is not None:
- assert isinstance(performer_id, int)
- search = search.filter('term', performer_id=performer_id)
-
- if repository_id is not None:
- assert isinstance(repository_id, int)
- search = search.filter('term', repository_id=repository_id)
-
- if account_id is not None and repository_id is None:
- assert isinstance(account_id, int)
- search = search.filter('term', account_id=account_id)
-
- if filter_kinds is not None:
- kind_map = model.log.get_log_entry_kinds()
- ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
- search = search.exclude('terms', kind_id=ignore_ids)
-
- return search
-
- def _base_query_date_range(self, start_datetime, end_datetime, performer_id, repository_id,
- account_id, filter_kinds, index=None):
- skip_datetime_check = False
- if _date_range_in_single_index(start_datetime, end_datetime):
- index = self._es_client.index_name(start_datetime)
- skip_datetime_check = self._es_client.index_exists(index)
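- # When the whole range falls within a single daily index that exists, the explicit datetime
- # range filter below can be skipped: membership in that index already bounds the dates.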
-
- if index and (skip_datetime_check or self._es_client.index_exists(index)):
- search = self._base_query(performer_id, repository_id, account_id, filter_kinds,
- index=index)
- else:
- search = self._base_query(performer_id, repository_id, account_id, filter_kinds)
-
- if not skip_datetime_check:
- search = search.query('range', datetime={'gte': start_datetime, 'lt': end_datetime})
-
- return search
-
- def _load_logs_for_day(self, logs_date, performer_id, repository_id, account_id, filter_kinds,
- after_datetime=None, after_random_id=None, size=PAGE_SIZE):
- index = self._es_client.index_name(logs_date)
- if not self._es_client.index_exists(index):
- return []
-
- search = self._base_query(performer_id, repository_id, account_id, filter_kinds,
- index=index)
- search = search.sort({'datetime': 'desc'}, {'random_id.keyword': 'desc'})
- search = search.extra(size=size)
-
- if after_datetime is not None and after_random_id is not None:
- after_datetime_epoch_ms = epoch_ms(after_datetime)
- search = search.extra(search_after=[after_datetime_epoch_ms, after_random_id])
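- # search_after resumes strictly after the given sort values (epoch-ms datetime, random_id),
- # which is why callers pass the values of the last log already returned.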
-
- return search.execute()
-
- def _load_latest_logs(self, performer_id, repository_id, account_id, filter_kinds, size):
- """ Return the latest logs from Elasticsearch.
-
- Look at indices up to the set logrotateworker threshold, or up to 30 days if not defined.
- """
- # Set the last index to check to be the logrotateworker threshold, or 30 days
- end_datetime = datetime.now()
- start_datetime = end_datetime - timedelta(days=DATE_RANGE_LIMIT)
-
- latest_logs = []
- for day in _date_range_descending(start_datetime, end_datetime, includes_end_datetime=True):
- try:
- logs = self._load_logs_for_day(day, performer_id, repository_id, account_id, filter_kinds,
- size=size)
- latest_logs.extend(logs)
- except NotFoundError:
- continue
-
- if len(latest_logs) >= size:
- break
-
- return _for_elasticsearch_logs(latest_logs[:size], repository_id, account_id)
-
- def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
- assert start_datetime is not None and end_datetime is not None
-
- # Check for a valid combined model token when migrating online from a combined model
- if page_token is not None and page_token.get('readwrite_page_token') is not None:
- page_token = page_token.get('readwrite_page_token')
-
- if page_token is not None and max_page_count is not None:
- page_number = page_token.get('page_number')
- if page_number is not None and page_number + 1 > max_page_count:
- return LogEntriesPage([], None)
-
- repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
- repository_name, namespace_name, performer_name)
-
- after_datetime = None
- after_random_id = None
- if page_token is not None:
- after_datetime = parse_datetime(page_token['datetime'])
- after_random_id = page_token['random_id']
-
- if after_datetime is not None:
- end_datetime = min(end_datetime, after_datetime)
-
- all_logs = []
-
- with CloseForLongOperation(config.app_config):
- for current_date in _date_range_descending(start_datetime, end_datetime):
- try:
- logs = self._load_logs_for_day(current_date, performer_id, repository_id, account_id,
- filter_kinds, after_datetime, after_random_id,
- size=PAGE_SIZE+1)
-
- all_logs.extend(logs)
- except NotFoundError:
- continue
-
- if len(all_logs) > PAGE_SIZE:
- break
-
- next_page_token = None
- all_logs = all_logs[0:PAGE_SIZE+1]
-
- if len(all_logs) == PAGE_SIZE + 1:
- # The last element in the response is only used to check whether there are more elements.
- # The second-to-last element is used as the pagination token because search_after does
- # not include the exact match, so the next page will start with the last returned element.
- # This keeps the behavior exactly the same as table_logs_model, so that the caller can
- # expect that, whenever a pagination token is non-empty, there is at least one log left
- # to retrieve.
- next_page_token = {
- 'datetime': all_logs[-2].datetime.isoformat(),
- 'random_id': all_logs[-2].random_id,
- 'page_number': page_token['page_number'] + 1 if page_token else 1,
- }
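- # Illustrative token shape (values assumed):
- # {'datetime': '2019-06-06T14:34:10', 'random_id': '<uuid4>', 'page_number': 2}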
-
- return LogEntriesPage(_for_elasticsearch_logs(all_logs[:PAGE_SIZE], repository_id, account_id),
- next_page_token)
-
- def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
- filter_kinds=None, size=20):
- repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
- repository_name, namespace_name, performer_name)
-
- with CloseForLongOperation(config.app_config):
- latest_logs = self._load_latest_logs(performer_id, repository_id, account_id, filter_kinds,
- size)
-
- return latest_logs
-
-
- def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
- repository_name=None, namespace_name=None, filter_kinds=None):
- if end_datetime - start_datetime >= timedelta(days=DATE_RANGE_LIMIT):
- raise Exception('Cannot lookup aggregated logs over a period longer than a month')
-
- repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
- repository_name, namespace_name, performer_name)
-
- with CloseForLongOperation(config.app_config):
- search = self._base_query_date_range(start_datetime, end_datetime, performer_id,
- repository_id, account_id, filter_kinds)
- search.aggs.bucket('by_id', 'terms', field='kind_id').bucket('by_date', 'date_histogram',
- field='datetime', interval='day')
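- # Bucket by kind_id, then by day, so each (kind, day) pair carries its own document count;
- # these buckets are translated into AggregatedLogCount tuples below.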
- # es returns all buckets when size=0
- search = search.extra(size=0)
- resp = search.execute()
-
- if not resp.aggregations:
- return []
-
- counts = []
- by_id = resp.aggregations['by_id']
-
- for id_bucket in by_id.buckets:
- for date_bucket in id_bucket.by_date.buckets:
- if date_bucket.doc_count > 0:
- counts.append(AggregatedLogCount(id_bucket.key, date_bucket.doc_count, date_bucket.key))
-
- return counts
-
- def count_repository_actions(self, repository, day):
- index = self._es_client.index_name(day)
- search = self._base_query_date_range(day, day + timedelta(days=1),
- None,
- repository.id,
- None,
- None,
- index=index)
- search = search.params(request_timeout=COUNT_REPOSITORY_ACTION_TIMEOUT)
-
- try:
- return search.count()
- except NotFoundError:
- return 0
-
- def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
- repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
- if self._should_skip_logging and self._should_skip_logging(kind_name, namespace_name,
- is_free_namespace):
- return
-
- if repository_name is not None:
- assert repository is None
- assert namespace_name is not None
- repository = model.repository.get_repository(namespace_name, repository_name)
-
- if timestamp is None:
- timestamp = datetime.today()
-
- account_id = None
- performer_id = None
- repository_id = None
-
- if namespace_name is not None:
- account_id = model.user.get_namespace_user(namespace_name).id
-
- if performer is not None:
- performer_id = performer.id
-
- if repository is not None:
- repository_id = repository.id
-
- metadata_json = json.dumps(metadata or {}, default=_json_serialize)
- kind_id = model.log._get_log_entry_kind(kind_name)
- log = LogEntry(random_id=_random_id(), kind_id=kind_id, account_id=account_id,
- performer_id=performer_id, ip=ip, metadata_json=metadata_json,
- repository_id=repository_id, datetime=timestamp)
-
- try:
- self._logs_producer.send(log)
- except LogSendException as lse:
- strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
- logger.exception('log_action failed', extra=dict(log.to_dict(), exception=lse))
- if not (strict_logging_disabled and kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING):
- raise
-
- def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
- namespace_id=None, max_query_time=None):
- max_query_time = max_query_time.total_seconds() if max_query_time is not None else 300
- search = self._base_query_date_range(start_datetime, end_datetime, None, repository_id,
- namespace_id, None)
-
- def raise_on_timeout(batch_generator):
- start = time()
- for batch in batch_generator:
- elapsed = time() - start
- if elapsed > max_query_time:
- logger.error('Retrieval of logs `%s/%s` timed out with time of `%s`', namespace_id,
- repository_id, elapsed)
- raise LogsIterationTimeout()
-
- yield batch
- start = time()
-
- def read_batch(scroll):
- batch = []
- for log in scroll:
- batch.append(log)
- if len(batch) == DEFAULT_RESULT_WINDOW:
- yield _for_elasticsearch_logs(batch, repository_id=repository_id,
- namespace_id=namespace_id)
- batch = []
-
- if batch:
- yield _for_elasticsearch_logs(batch, repository_id=repository_id, namespace_id=namespace_id)
-
- search = search.params(size=DEFAULT_RESULT_WINDOW, request_timeout=max_query_time)
-
- try:
- with CloseForLongOperation(config.app_config):
- for batch in raise_on_timeout(read_batch(search.scan())):
- yield batch
- except ConnectionTimeout:
- raise LogsIterationTimeout()
-
- def can_delete_index(self, index, cutoff_date):
- return self._es_client.can_delete_index(index, cutoff_date)
-
- def list_indices(self):
- return self._es_client.list_indices()
-
- def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
- """ Yield a context manager for a group of outdated logs. """
- all_indices = self.list_indices()
- for index in all_indices:
- if not self.can_delete_index(index, cutoff_date):
- continue
-
- context = ElasticsearchLogRotationContext(index, min_logs_per_rotation, self._es_client)
- yield context
-
-
-class ElasticsearchLogRotationContext(LogRotationContextInterface):
- """
- ElasticsearchLogRotationContext yields batches of logs from an index.
-
- When completed without exceptions, this context will delete its associated
- Elasticsearch index.
- """
- def __init__(self, index, min_logs_per_rotation, es_client):
- self._es_client = es_client
- self.min_logs_per_rotation = min_logs_per_rotation
- self.index = index
-
- self.start_pos = 0
- self.end_pos = 0
-
- self.scroll = None
-
- def __enter__(self):
- search = self._base_query()
- self.scroll = search.scan()
- return self
-
- def __exit__(self, ex_type, ex_value, ex_traceback):
- if ex_type is None and ex_value is None and ex_traceback is None:
- logger.debug('Deleting index %s', self.index)
- self._es_client.delete_index(self.index)
-
- def yield_logs_batch(self):
- def batched_logs(gen, size):
- batch = []
- for log in gen:
- batch.append(log)
- if len(batch) == size:
- yield batch
- batch = []
-
- if batch:
- yield batch
-
- for batch in batched_logs(self.scroll, self.min_logs_per_rotation):
- self.end_pos = self.start_pos + len(batch) - 1
- yield batch, self._generate_filename()
- self.start_pos = self.end_pos + 1
-
- def _base_query(self):
- search = LogEntry.search(index=self.index)
- return search
-
- def _generate_filename(self):
- """ Generate the filenames used to archive the action logs. """
- filename = '%s_%d-%d' % (self.index, self.start_pos, self.end_pos)
- filename = '.'.join((filename, 'txt.gz'))
- return filename
diff --git a/data/logs_model/elastic_logs.py b/data/logs_model/elastic_logs.py
deleted file mode 100644
index cd3ff675d..000000000
--- a/data/logs_model/elastic_logs.py
+++ /dev/null
@@ -1,255 +0,0 @@
-import os
-import logging
-import re
-from datetime import datetime, timedelta
-
-from requests_aws4auth import AWS4Auth
-
-from elasticsearch import RequestsHttpConnection
-from elasticsearch.exceptions import NotFoundError, AuthorizationException
-from elasticsearch_dsl import Index, Document, Integer, Date, Text, Ip, Keyword
-from elasticsearch_dsl.connections import connections
-
-
-logger = logging.getLogger(__name__)
-
-# Name of the connection used for Elasticsearch's template API
-ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS = 'logentry_template'
-
-# Prefix of autogenerated indices
-INDEX_NAME_PREFIX = 'logentry_'
-
-# Time-based index date format
-INDEX_DATE_FORMAT = '%Y-%m-%d'
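-# e.g. an index holding logs written on June 6, 2019 is named 'logentry_2019-06-06'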
-
-# Timeout for default connection
-ELASTICSEARCH_DEFAULT_CONNECTION_TIMEOUT = 15
-
-# Timeout for template api Connection
-ELASTICSEARCH_TEMPLATE_CONNECTION_TIMEOUT = 60
-
-# Force an index template update
-ELASTICSEARCH_FORCE_INDEX_TEMPLATE_UPDATE = os.environ.get('FORCE_INDEX_TEMPLATE_UPDATE', '')
-
-# Valid index prefix pattern
-VALID_INDEX_PATTERN = r'^((?!\.$|\.\.$|[-_+])([^A-Z:\/*?\"<>|,# ]){1,255})$'
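-# i.e. no uppercase letters, no leading '-', '_' or '+', not '.' or '..', none of the characters
-# :/*?"<>|,# or spaces, and at most 255 characters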
-
-
-class LogEntry(Document):
- # random_id is the tie-breaker for sorting in pagination.
- # random_id is also used for deduplication of records when using an "at-least-once" delivery stream.
- # Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-after.html
- #
- # We don't use the _id of a document since `doc_values` are not built for this field:
- # An on-disk data structure that stores the same data in a columnar format
- # for optimized sorting and aggregations.
- # Reference: https://github.com/elastic/elasticsearch/issues/35369
- random_id = Text(fields={'keyword': Keyword()})
- kind_id = Integer()
- account_id = Integer()
- performer_id = Integer()
- repository_id = Integer()
- ip = Ip()
- metadata_json = Text()
- datetime = Date()
-
- _initialized = False
-
- @classmethod
- def init(cls, index_prefix, index_settings=None, skip_template_init=False):
- """
- Create the index template, and populate LogEntry's mapping and index settings.
- """
- wildcard_index = Index(name=index_prefix + '*')
- wildcard_index.settings(**(index_settings or {}))
- wildcard_index.document(cls)
- cls._index = wildcard_index
- cls._index_prefix = index_prefix
-
- if not skip_template_init:
- cls.create_or_update_template()
-
- # Since the elasticsearch-dsl API requires the document's index to be defined as an inner class at the class level,
- # this function needs to be called before `save` can be used.
- cls._initialized = True
-
- @classmethod
- def create_or_update_template(cls):
- assert cls._index and cls._index_prefix
- index_template = cls._index.as_template(cls._index_prefix)
- index_template.save(using=ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS)
-
- def save(self, **kwargs):
- # We group the logs by year, month and day into different indices, so that
- # dropping those indices based on the retention range is easy.
- #
- # NOTE: This is only used if logging directly to Elasticsearch
- # When using Kinesis or Kafka, the consumer of these streams
- # will be responsible for the management of the indices' lifecycle.
- assert LogEntry._initialized
- kwargs['index'] = self.datetime.strftime(self._index_prefix + INDEX_DATE_FORMAT)
- return super(LogEntry, self).save(**kwargs)
-
-
-class ElasticsearchLogs(object):
- """
- Model for logs operations stored in an Elasticsearch cluster.
- """
-
- def __init__(self, host=None, port=None, access_key=None, secret_key=None, aws_region=None,
- index_settings=None, use_ssl=True, index_prefix=INDEX_NAME_PREFIX):
- # For options in index_settings, refer to:
- # https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html
- # Some index settings are set at index creation time and therefore you should NOT
- # change them once the index has been created.
- self._host = host
- self._port = port
- self._access_key = access_key
- self._secret_key = secret_key
- self._aws_region = aws_region
- self._index_prefix = index_prefix
- self._index_settings = index_settings
- self._use_ssl = use_ssl
-
- self._client = None
- self._initialized = False
-
- def _initialize(self):
- """
- Initializes a connection to an ES cluster and
- creates an index template if it does not exist.
- """
- if not self._initialized:
- http_auth = None
- if self._access_key and self._secret_key and self._aws_region:
- http_auth = AWS4Auth(self._access_key, self._secret_key, self._aws_region, 'es')
- elif self._access_key and self._secret_key:
- http_auth = (self._access_key, self._secret_key)
- else:
- logger.warning("Connecting to Elasticsearch without HTTP auth")
-
- self._client = connections.create_connection(
- hosts=[{
- 'host': self._host,
- 'port': self._port
- }],
- http_auth=http_auth,
- use_ssl=self._use_ssl,
- verify_certs=True,
- connection_class=RequestsHttpConnection,
- timeout=ELASTICSEARCH_DEFAULT_CONNECTION_TIMEOUT,
- )
-
- # Create a second connection with a timeout of 60s vs the default 15s.
- # For some reason the PUT template API can take anywhere between
- # 10s and 30s on the test cluster.
- # This only needs to be done once to initialize the index template
- connections.create_connection(
- alias=ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS,
- hosts=[{
- 'host': self._host,
- 'port': self._port
- }],
- http_auth=http_auth,
- use_ssl=self._use_ssl,
- verify_certs=True,
- connection_class=RequestsHttpConnection,
- timeout=ELASTICSEARCH_TEMPLATE_CONNECTION_TIMEOUT,
- )
-
- try:
- force_template_update = ELASTICSEARCH_FORCE_INDEX_TEMPLATE_UPDATE.lower() == 'true'
- self._client.indices.get_template(self._index_prefix)
- LogEntry.init(self._index_prefix, self._index_settings,
- skip_template_init=not force_template_update)
- except NotFoundError:
- LogEntry.init(self._index_prefix, self._index_settings, skip_template_init=False)
- finally:
- try:
- connections.remove_connection(ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS)
- except KeyError as ke:
- logger.exception('Elasticsearch connection not found to remove %s: %s',
- ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS, ke)
-
- self._initialized = True
-
- def index_name(self, day):
- """ Return an index name for the given day. """
- return self._index_prefix + day.strftime(INDEX_DATE_FORMAT)
-
- def index_exists(self, index):
- try:
- return index in self._client.indices.get(index)
- except NotFoundError:
- return False
-
- @staticmethod
- def _valid_index_prefix(prefix):
- """ Check that the given index prefix is valid with the set of
- indices used by this class.
- """
- return re.match(VALID_INDEX_PATTERN, prefix) is not None
-
- def _valid_index_name(self, index):
- """ Check that the given index name is valid and follows the format:
- YYYY-MM-DD
- """
- if not ElasticsearchLogs._valid_index_prefix(index):
- return False
-
- if not index.startswith(self._index_prefix) or len(index) > 255:
- return False
-
- index_dt_str = index.split(self._index_prefix, 1)[-1]
- try:
- datetime.strptime(index_dt_str, INDEX_DATE_FORMAT)
- return True
- except ValueError:
- logger.exception('Invalid date format (YYYY-MM-DD) for index: %s', index)
- return False
-
- def can_delete_index(self, index, cutoff_date):
- """ Check if the given index can be deleted based on the given index's date and cutoff date. """
- assert self._valid_index_name(index)
- index_dt = datetime.strptime(index[len(self._index_prefix):], INDEX_DATE_FORMAT)
- return index_dt < cutoff_date and cutoff_date - index_dt >= timedelta(days=1)
-
- def list_indices(self):
- self._initialize()
- try:
- return self._client.indices.get(self._index_prefix + '*').keys()
- except NotFoundError as nfe:
- logger.exception('`%s` indices not found: %s', self._index_prefix, nfe.info)
- return []
- except AuthorizationException as ae:
- logger.exception('Unauthorized for indices `%s`: %s', self._index_prefix, ae.info)
- return None
-
- def delete_index(self, index):
- self._initialize()
- assert self._valid_index_name(index)
-
- try:
- self._client.indices.delete(index)
- return index
- except NotFoundError as nfe:
- logger.exception('`%s` indices not found: %s', index, nfe.info)
- return None
- except AuthorizationException as ae:
- logger.exception('Unauthorized to delete index `%s`: %s', index, ae.info)
- return None
-
-
-def configure_es(host, port, access_key=None, secret_key=None, aws_region=None,
- index_prefix=None, use_ssl=True, index_settings=None):
- """
- For options in index_settings, refer to:
- https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html
- Some index settings are set at index creation time and therefore you should NOT
- change them once the index has been created.
- """
- es_client = ElasticsearchLogs(host=host, port=port, access_key=access_key, secret_key=secret_key,
- aws_region=aws_region, index_prefix=index_prefix or INDEX_NAME_PREFIX,
- use_ssl=use_ssl, index_settings=index_settings)
- es_client._initialize()
- return es_client
diff --git a/data/logs_model/inmemory_model.py b/data/logs_model/inmemory_model.py
deleted file mode 100644
index f9a219f51..000000000
--- a/data/logs_model/inmemory_model.py
+++ /dev/null
@@ -1,244 +0,0 @@
-import logging
-import json
-
-from collections import namedtuple
-from datetime import datetime
-from tzlocal import get_localzone
-from dateutil.relativedelta import relativedelta
-
-from data import model
-from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage, Log
-from data.logs_model.interface import (ActionLogsDataInterface, LogRotationContextInterface,
- LogsIterationTimeout)
-
-logger = logging.getLogger(__name__)
-
-LogAndRepository = namedtuple('LogAndRepository', ['log', 'stored_log', 'repository'])
-
-StoredLog = namedtuple('StoredLog', ['kind_id',
- 'account_id',
- 'performer_id',
- 'ip',
- 'metadata_json',
- 'repository_id',
- 'datetime'])
-
-class InMemoryModel(ActionLogsDataInterface):
- """
- InMemoryModel implements the data model for logs in-memory. FOR TESTING ONLY.
- """
- def __init__(self):
- self.logs = []
-
- def _filter_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None):
- if filter_kinds is not None:
- assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
-
- for log_and_repo in self.logs:
- if log_and_repo.log.datetime < start_datetime or log_and_repo.log.datetime > end_datetime:
- continue
-
- if performer_name and log_and_repo.log.performer_username != performer_name:
- continue
-
- if (repository_name and
- (not log_and_repo.repository or log_and_repo.repository.name != repository_name)):
- continue
-
- if namespace_name and log_and_repo.log.account_username != namespace_name:
- continue
-
- if filter_kinds:
- kind_map = model.log.get_log_entry_kinds()
- ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
- if log_and_repo.log.kind_id in ignore_ids:
- continue
-
- yield log_and_repo
-
- def _filter_latest_logs(self, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None):
- if filter_kinds is not None:
- assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
-
- for log_and_repo in sorted(self.logs, key=lambda t: t.log.datetime, reverse=True):
- if performer_name and log_and_repo.log.performer_username != performer_name:
- continue
-
- if (repository_name and
- (not log_and_repo.repository or log_and_repo.repository.name != repository_name)):
- continue
-
- if namespace_name and log_and_repo.log.account_username != namespace_name:
- continue
-
- if filter_kinds:
- kind_map = model.log.get_log_entry_kinds()
- ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
- if log_and_repo.log.kind_id in ignore_ids:
- continue
-
- yield log_and_repo
-
- def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
- logs = []
- for log_and_repo in self._filter_logs(start_datetime, end_datetime, performer_name,
- repository_name, namespace_name, filter_kinds):
- logs.append(log_and_repo.log)
- return LogEntriesPage(logs, None)
-
- def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
- filter_kinds=None, size=20):
- latest_logs = []
- for log_and_repo in self._filter_latest_logs(performer_name, repository_name, namespace_name,
- filter_kinds):
- if size is not None and len(latest_logs) == size:
- break
-
- latest_logs.append(log_and_repo.log)
-
- return latest_logs
-
- def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
- repository_name=None, namespace_name=None, filter_kinds=None):
- entries = {}
- for log_and_repo in self._filter_logs(start_datetime, end_datetime, performer_name,
- repository_name, namespace_name, filter_kinds):
- entry = log_and_repo.log
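- # The aggregation below is keyed by (kind, day of month); synthetic_date re-anchors that
- # day onto start_datetime's month, rolling into the following month when the day precedes
- # start_datetime's day.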
- synthetic_date = datetime(start_datetime.year, start_datetime.month, int(entry.datetime.day),
- tzinfo=get_localzone())
- if synthetic_date.day < start_datetime.day:
- synthetic_date = synthetic_date + relativedelta(months=1)
-
- key = '%s-%s' % (entry.kind_id, entry.datetime.day)
-
- if key in entries:
- entries[key] = AggregatedLogCount(entry.kind_id, entries[key].count + 1,
- synthetic_date)
- else:
- entries[key] = AggregatedLogCount(entry.kind_id, 1, synthetic_date)
-
- return entries.values()
-
- def count_repository_actions(self, repository, day):
- count = 0
- for log_and_repo in self.logs:
- if log_and_repo.repository != repository:
- continue
-
- if log_and_repo.log.datetime.day != day.day:
- continue
-
- count += 1
-
- return count
-
- def queue_logs_export(self, start_datetime, end_datetime, export_action_logs_queue,
- namespace_name=None, repository_name=None, callback_url=None,
- callback_email=None, filter_kinds=None):
- raise NotImplementedError
-
- def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
- repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
- timestamp = timestamp or datetime.today()
-
- if not repository and repository_name and namespace_name:
- repository = model.repository.get_repository(namespace_name, repository_name)
-
- account = None
- account_id = None
- performer_id = None
- repository_id = None
-
- if namespace_name is not None:
- account = model.user.get_namespace_user(namespace_name)
- account_id = account.id
-
- if performer is not None:
- performer_id = performer.id
-
- if repository is not None:
- repository_id = repository.id
-
- metadata_json = json.dumps(metadata or {})
- kind_id = model.log.get_log_entry_kinds()[kind_name]
-
- stored_log = StoredLog(
- kind_id,
- account_id,
- performer_id,
- ip,
- metadata_json,
- repository_id,
- timestamp
- )
-
- log = Log(metadata_json=metadata,
- ip=ip,
- datetime=timestamp,
- performer_email=performer.email if performer else None,
- performer_username=performer.username if performer else None,
- performer_robot=performer.robot if performer else None,
- account_organization=account.organization if account else None,
- account_username=account.username if account else None,
- account_email=account.email if account else None,
- account_robot=account.robot if account else None,
- kind_id=kind_id)
-
- self.logs.append(LogAndRepository(log, stored_log, repository))
-
- def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
- namespace_id=None, max_query_time=None):
- # Just for testing.
- if max_query_time is not None:
- raise LogsIterationTimeout()
-
- logs = []
- for log_and_repo in self._filter_logs(start_datetime, end_datetime):
- if (repository_id and
- (not log_and_repo.repository or log_and_repo.repository.id != repository_id)):
- continue
-
- if namespace_id:
- if log_and_repo.log.account_username is None:
- continue
-
- namespace = model.user.get_namespace_user(log_and_repo.log.account_username)
- if namespace.id != namespace_id:
- continue
-
- logs.append(log_and_repo.log)
-
- yield logs
-
- def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
- expired_logs = [log_and_repo for log_and_repo in self.logs
- if log_and_repo.log.datetime <= cutoff_date]
- while True:
- if not expired_logs:
- break
- context = InMemoryLogRotationContext(expired_logs[:min_logs_per_rotation], self.logs)
- expired_logs = expired_logs[min_logs_per_rotation:]
- yield context
-
-
-class InMemoryLogRotationContext(LogRotationContextInterface):
- def __init__(self, expired_logs, all_logs):
- self.expired_logs = expired_logs
- self.all_logs = all_logs
-
- def __enter__(self):
- return self
-
- def __exit__(self, ex_type, ex_value, ex_traceback):
- if ex_type is None and ex_value is None and ex_traceback is None:
- for log in self.expired_logs:
- self.all_logs.remove(log)
-
- def yield_logs_batch(self):
- """ Yield a batch of logs and a filename for that batch. """
- filename = 'inmemory_model_filename_placeholder'
- filename = '.'.join((filename, 'txt.gz'))
- yield [log_and_repo.stored_log for log_and_repo in self.expired_logs], filename
diff --git a/data/logs_model/interface.py b/data/logs_model/interface.py
deleted file mode 100644
index 705d46cc0..000000000
--- a/data/logs_model/interface.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-class LogsIterationTimeout(Exception):
- """ Exception raised if logs iteration times out. """
-
-
-@add_metaclass(ABCMeta)
-class ActionLogsDataInterface(object):
- """ Interface for code to work with the logs data model. The logs data model consists
- of all access for reading and writing action logs.
- """
- @abstractmethod
- def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
- """ Looks up all logs between the start_datetime and end_datetime, filtered
- by performer (a user), repository or namespace. Note that one (and only one) of the three
- can be specified. Returns a LogEntriesPage. `filter_kinds`, if specified, is a set/list
- of the kinds of logs to filter out.
- """
-
- @abstractmethod
- def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
- filter_kinds=None, size=20):
- """ Looks up latest logs of a specific kind, filtered by performer (a user),
- repository or namespace. Note that one (and only one) of the three can be specified.
- Returns a list of `Log`.
- """
-
- @abstractmethod
- def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
- repository_name=None, namespace_name=None, filter_kinds=None):
- """ Returns the aggregated count of logs, by kind, between the start_datetime and end_datetime,
- filtered by performer (a user), repository or namespace. Note that one (and only one) of
- the three can be specified. Returns a list of AggregatedLogCount.
- """
-
- @abstractmethod
- def count_repository_actions(self, repository, day):
- """ Returns the total number of repository actions over the given day, in the given repository
- or None on error.
- """
-
- @abstractmethod
- def queue_logs_export(self, start_datetime, end_datetime, export_action_logs_queue,
- namespace_name=None, repository_name=None, callback_url=None,
- callback_email=None, filter_kinds=None):
- """ Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
- for export to the specified URL and/or email address. Returns the ID of the export job
- queued or None if error.
- """
-
- @abstractmethod
- def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
- repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
- """ Logs a single action as having taken place. """
-
- @abstractmethod
- def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
- namespace_id=None, max_query_time=None):
- """ Returns an iterator that yields bundles of all logs found between the start_datetime and
- end_datetime, optionally filtered by the repository or namespace. This function should be
- used for any bulk lookup operations, and implementations should put
- minimal strain on the backing storage for large operations. If there was an error during
- setup, returns None.
-
- If max_query_time is specified, each iteration that yields a log bundle will have its
- queries run with a maximum timeout of that specified, and, if any exceed that threshold,
- LogsIterationTimeout will be raised instead of returning the logs bundle.
- """
-
- @abstractmethod
- def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
- """
- A generator that yields contexts implementing the LogRotationContextInterface.
- Each context represents a set of logs to be archived and deleted once
- the context completes without exceptions.
-
- For database logs, the LogRotationContext abstracts over a set of rows. When the context
- finishes, its associated rows get deleted.
-
- For Elasticsearch logs, the LogRotationContext abstracts over indices. When the context
- finishes, its associated index gets deleted.
- """
-
-
-@add_metaclass(ABCMeta)
-class LogRotationContextInterface(object):
- """ Interface for iterating over a set of logs to be archived. """
- @abstractmethod
- def yield_logs_batch(self):
- """
- Generator yielding batch of logs and a filename for that batch.
- A batch is a subset of the logs part of the context.
- """
diff --git a/data/logs_model/logs_producer/__init__.py b/data/logs_model/logs_producer/__init__.py
deleted file mode 100644
index 17bd605ad..000000000
--- a/data/logs_model/logs_producer/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import logging
-
-
-logger = logging.getLogger(__name__)
-
-
-class LogSendException(Exception):
- """ A generic error when sending the logs to its destination.
- e.g. Kinesis, Kafka, Elasticsearch, ...
- """
- pass
-
-
-class LogProducerProxy(object):
- def __init__(self):
- self._model = None
-
- def initialize(self, model):
- self._model = model
- logger.info('===============================')
- logger.info('Using producer `%s`', self._model)
- logger.info('===============================')
-
- def __getattr__(self, attr):
- if not self._model:
- raise AttributeError("LogProducerProxy is not initialized")
- return getattr(self._model, attr)
diff --git a/data/logs_model/logs_producer/elasticsearch_logs_producer.py b/data/logs_model/logs_producer/elasticsearch_logs_producer.py
deleted file mode 100644
index 175fb4ac6..000000000
--- a/data/logs_model/logs_producer/elasticsearch_logs_producer.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import logging
-
-from elasticsearch.exceptions import ElasticsearchException
-
-from data.logs_model.logs_producer.interface import LogProducerInterface
-from data.logs_model.logs_producer import LogSendException
-
-
-logger = logging.getLogger(__name__)
-
-
-class ElasticsearchLogsProducer(LogProducerInterface):
- """ Log producer writing log entries to Elasticsearch.
-
- This implementation writes directly to Elasticsearch without a streaming/queueing service.
- """
- def send(self, logentry):
- try:
- logentry.save()
- except ElasticsearchException as ex:
- logger.exception('ElasticsearchLogsProducer error sending log to Elasticsearch: %s', ex)
- raise LogSendException('ElasticsearchLogsProducer error sending log to Elasticsearch: %s' % ex)
- except Exception as e:
- logger.exception('ElasticsearchLogsProducer exception sending log to Elasticsearch: %s', e)
- raise LogSendException('ElasticsearchLogsProducer exception sending log to Elasticsearch: %s' % e)
diff --git a/data/logs_model/logs_producer/interface.py b/data/logs_model/logs_producer/interface.py
deleted file mode 100644
index d0d9b71d4..000000000
--- a/data/logs_model/logs_producer/interface.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-@add_metaclass(ABCMeta)
-class LogProducerInterface(object):
- @abstractmethod
- def send(self, logentry):
- """ Send a log entry to the configured log infrastructure. """
diff --git a/data/logs_model/logs_producer/kafka_logs_producer.py b/data/logs_model/logs_producer/kafka_logs_producer.py
deleted file mode 100644
index 9c13a441d..000000000
--- a/data/logs_model/logs_producer/kafka_logs_producer.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import logging
-
-from kafka.errors import KafkaError, KafkaTimeoutError
-from kafka import KafkaProducer
-
-from data.logs_model.shared import epoch_ms
-from data.logs_model.logs_producer.interface import LogProducerInterface
-from data.logs_model.logs_producer.util import logs_json_serializer
-from data.logs_model.logs_producer import LogSendException
-
-
-logger = logging.getLogger(__name__)
-
-DEFAULT_MAX_BLOCK_SECONDS = 5
-
-
-class KafkaLogsProducer(LogProducerInterface):
- """ Log producer writing log entries to a Kafka stream. """
- def __init__(self, bootstrap_servers=None, topic=None, client_id=None, max_block_seconds=None):
- self.bootstrap_servers = bootstrap_servers
- self.topic = topic
- self.client_id = client_id
- self.max_block_ms = (max_block_seconds or DEFAULT_MAX_BLOCK_SECONDS) * 1000
-
- self._producer = KafkaProducer(bootstrap_servers=self.bootstrap_servers,
- client_id=self.client_id,
- max_block_ms=self.max_block_ms,
- value_serializer=logs_json_serializer)
-
- def send(self, logentry):
- try:
- # send() has a (max_block_ms) timeout and get() has a (max_block_ms) timeout
- # for an upper bound of 2x(max_block_ms) before guaranteed delivery
- future = self._producer.send(self.topic, logentry.to_dict(), timestamp_ms=epoch_ms(logentry.datetime))
- record_metadata = future.get(timeout=self.max_block_ms)
- assert future.succeeded()
- except KafkaTimeoutError as kte:
- logger.exception('KafkaLogsProducer timeout sending log to Kafka: %s', kte)
- raise LogSendException('KafkaLogsProducer timeout sending log to Kafka: %s' % kte)
- except KafkaError as ke:
- logger.exception('KafkaLogsProducer error sending log to Kafka: %s', ke)
- raise LogSendException('KafkaLogsProducer error sending log to Kafka: %s' % ke)
- except Exception as e:
- logger.exception('KafkaLogsProducer exception sending log to Kafka: %s', e)
- raise LogSendException('KafkaLogsProducer exception sending log to Kafka: %s' % e)
diff --git a/data/logs_model/logs_producer/kinesis_stream_logs_producer.py b/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
deleted file mode 100644
index d4c03f711..000000000
--- a/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import logging
-import hashlib
-import random
-
-import boto3
-from botocore.exceptions import ClientError
-from botocore.client import Config
-
-from data.logs_model.logs_producer.interface import LogProducerInterface
-from data.logs_model.logs_producer.util import logs_json_serializer
-from data.logs_model.logs_producer import LogSendException
-
-
-logger = logging.getLogger(__name__)
-
-KINESIS_PARTITION_KEY_PREFIX = 'logentry_partition_key_'
-DEFAULT_CONNECT_TIMEOUT = 5
-DEFAULT_READ_TIMEOUT = 5
-MAX_RETRY_ATTEMPTS = 5
-DEFAULT_MAX_POOL_CONNECTIONS = 10
-
-
-def _partition_key(number_of_shards=None):
- """ Generate a partition key for AWS Kinesis stream.
- If the number of shards is specified, generate keys where the size of the key space is
- the number of shards.
- """
- key = None
- if number_of_shards is not None:
- shard_number = random.randrange(0, number_of_shards)
- key = hashlib.sha1(KINESIS_PARTITION_KEY_PREFIX + str(shard_number)).hexdigest()
- else:
- key = hashlib.sha1(KINESIS_PARTITION_KEY_PREFIX + str(random.getrandbits(256))).hexdigest()
-
- return key
-
-
-class KinesisStreamLogsProducer(LogProducerInterface):
- """ Log producer writing log entries to an Amazon Kinesis Data Stream. """
- def __init__(self, stream_name, aws_region, aws_access_key=None, aws_secret_key=None,
- connect_timeout=None, read_timeout=None, max_retries=None,
- max_pool_connections=None):
- self._stream_name = stream_name
- self._aws_region = aws_region
- self._aws_access_key = aws_access_key
- self._aws_secret_key = aws_secret_key
- self._connect_timeout = connect_timeout or DEFAULT_CONNECT_TIMEOUT
- self._read_timeout = read_timeout or DEFAULT_READ_TIMEOUT
- self._max_retries = max_retries or MAX_RETRY_ATTEMPTS
- self._max_pool_connections = max_pool_connections or DEFAULT_MAX_POOL_CONNECTIONS
-
- client_config = Config(connect_timeout=self._connect_timeout,
- read_timeout=self._read_timeout,
- retries={'max_attempts': self._max_retries},
- max_pool_connections=self._max_pool_connections)
- self._producer = boto3.client('kinesis', use_ssl=True,
- region_name=self._aws_region,
- aws_access_key_id=self._aws_access_key,
- aws_secret_access_key=self._aws_secret_key,
- config=client_config)
-
- def send(self, logentry):
- try:
- data = logs_json_serializer(logentry)
- self._producer.put_record(
- StreamName=self._stream_name,
- Data=data,
- PartitionKey=_partition_key()
- )
- except ClientError as ce:
- logger.exception('KinesisStreamLogsProducer client error sending log to Kinesis: %s', ce)
- raise LogSendException('KinesisStreamLogsProducer client error sending log to Kinesis: %s' % ce)
- except Exception as e:
- logger.exception('KinesisStreamLogsProducer exception sending log to Kinesis: %s', e)
- raise LogSendException('KinesisStreamLogsProducer exception sending log to Kinesis: %s' % e)
diff --git a/data/logs_model/logs_producer/test/test_json_logs_serializer.py b/data/logs_model/logs_producer/test/test_json_logs_serializer.py
deleted file mode 100644
index a45b0c5bb..000000000
--- a/data/logs_model/logs_producer/test/test_json_logs_serializer.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import logging
-import json
-from datetime import datetime
-import pytest
-
-from data.logs_model.logs_producer.util import logs_json_serializer
-from data.logs_model.elastic_logs import LogEntry
-
-
-logger = logging.getLogger(__name__)
-
-
-TEST_DATETIME = datetime.utcnow()
-
-TEST_JSON_STRING = '{"a": "b", "c": "d"}'
-TEST_JSON_STRING_WITH_UNICODE = u'{"éëê": "îôû"}'
-
-VALID_LOGENTRY = LogEntry(random_id='123-45', ip='0.0.0.0', metadata_json=TEST_JSON_STRING, datetime=TEST_DATETIME)
-VALID_LOGENTRY_WITH_UNICODE = LogEntry(random_id='123-45', ip='0.0.0.0', metadata_json=TEST_JSON_STRING_WITH_UNICODE, datetime=TEST_DATETIME)
-
-VALID_LOGENTRY_EXPECTED_OUTPUT = '{"datetime": "%s", "ip": "0.0.0.0", "metadata_json": "{\\"a\\": \\"b\\", \\"c\\": \\"d\\"}", "random_id": "123-45"}' % TEST_DATETIME.isoformat()
-VALID_LOGENTRY_WITH_UNICODE_EXPECTED_OUTPUT = '{"datetime": "%s", "ip": "0.0.0.0", "metadata_json": "{\\"\\u00e9\\u00eb\\u00ea\\": \\"\\u00ee\\u00f4\\u00fb\\"}", "random_id": "123-45"}' % TEST_DATETIME.isoformat()
-
-
-@pytest.mark.parametrize(
- 'is_valid, given_input, expected_output',
- [
- # Valid inputs
- pytest.param(True, VALID_LOGENTRY, VALID_LOGENTRY_EXPECTED_OUTPUT),
- # With unicode
- pytest.param(True, VALID_LOGENTRY_WITH_UNICODE, VALID_LOGENTRY_WITH_UNICODE_EXPECTED_OUTPUT),
- ])
-def test_logs_json_serializer(is_valid, given_input, expected_output):
- if not is_valid:
- with pytest.raises(ValueError) as ve:
- data = logs_json_serializer(given_input)
- else:
- data = logs_json_serializer(given_input, sort_keys=True)
- assert data == expected_output
-
- # Make sure the datetime was serialized in the correct ISO8601
- datetime_str = json.loads(data)['datetime']
- assert datetime_str == TEST_DATETIME.isoformat()
diff --git a/data/logs_model/logs_producer/util.py b/data/logs_model/logs_producer/util.py
deleted file mode 100644
index d6c3e2d8d..000000000
--- a/data/logs_model/logs_producer/util.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import json
-from datetime import datetime
-
-class LogEntryJSONEncoder(json.JSONEncoder):
- """ JSON encoder to encode datetimes to ISO8601 format. """
- def default(self, obj):
- if isinstance(obj, datetime):
- return obj.isoformat()
-
- return super(LogEntryJSONEncoder, self).default(obj)
-
-def logs_json_serializer(logentry, sort_keys=False):
- """ Serializes a LogEntry to json bytes. """
- return json.dumps(logentry.to_dict(), cls=LogEntryJSONEncoder,
- ensure_ascii=True, sort_keys=sort_keys).encode('ascii')
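A quick, self-contained illustration of what this serializer produces; the plain dict below stands in for a LogEntry's to_dict() output and is only an assumption made for the example.

import json
from datetime import datetime

class LogEntryJSONEncoder(json.JSONEncoder):
    # Same idea as above: datetimes become ISO8601 strings, everything else is handled normally.
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super(LogEntryJSONEncoder, self).default(obj)

record = {'random_id': '123-45', 'datetime': datetime(2019, 1, 1, 3, 30)}
payload = json.dumps(record, cls=LogEntryJSONEncoder,
                     ensure_ascii=True, sort_keys=True).encode('ascii')
assert payload == b'{"datetime": "2019-01-01T03:30:00", "random_id": "123-45"}'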
diff --git a/data/logs_model/shared.py b/data/logs_model/shared.py
deleted file mode 100644
index 550cac95e..000000000
--- a/data/logs_model/shared.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import uuid
-import json
-
-from calendar import timegm
-
-from data import model
-
-
-class SharedModel:
- def queue_logs_export(self, start_datetime, end_datetime, export_action_logs_queue,
- namespace_name=None, repository_name=None, callback_url=None,
- callback_email=None, filter_kinds=None):
- """ Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
- for export to the specified URL and/or email address. Returns the ID of the export job
- queued or None if error.
- """
- export_id = str(uuid.uuid4())
- namespace = model.user.get_namespace_user(namespace_name)
- if namespace is None:
- return None
-
- repository = None
- if repository_name is not None:
- repository = model.repository.get_repository(namespace_name, repository_name)
- if repository is None:
- return None
-
- export_action_logs_queue.put([namespace_name],
- json.dumps({
- 'export_id': export_id,
- 'repository_id': repository.id if repository else None,
- 'namespace_id': namespace.id,
- 'namespace_name': namespace.username,
- 'repository_name': repository.name if repository else None,
- 'start_time': start_datetime.strftime('%m/%d/%Y'),
- 'end_time': end_datetime.strftime('%m/%d/%Y'),
- 'callback_url': callback_url,
- 'callback_email': callback_email,
- }), retries_remaining=3)
-
- return export_id
-
-
-def epoch_ms(dt):
- return (timegm(dt.timetuple()) * 1000) + (dt.microsecond / 1000)
-
-
-def get_kinds_filter(kinds):
- """ Given a list of kinds, return the set of kinds not that are not part of that list.
- i.e Returns the list of kinds to be filtered out. """
- kind_map = model.log.get_log_entry_kinds()
- kind_map = {key: kind_map[key] for key in kind_map if not isinstance(key, int)}
- return [kind_name for kind_name in kind_map if kind_name not in kinds]
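As a quick check of the millisecond conversion above (the example timestamp is arbitrary, chosen only to exercise the microsecond handling):

from calendar import timegm
from datetime import datetime

def epoch_ms(dt):
    # UTC seconds-since-epoch times 1000, plus fractional milliseconds from the microsecond field.
    return (timegm(dt.timetuple()) * 1000) + (dt.microsecond / 1000)

assert epoch_ms(datetime(2019, 1, 1, 0, 0, 0, 500000)) == 1546300800500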
diff --git a/data/logs_model/table_logs_model.py b/data/logs_model/table_logs_model.py
deleted file mode 100644
index 697bf2dc6..000000000
--- a/data/logs_model/table_logs_model.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# pylint: disable=protected-access
-
-import logging
-
-from datetime import datetime, timedelta
-
-from tzlocal import get_localzone
-from dateutil.relativedelta import relativedelta
-
-from data import model
-from data.model import config
-from data.database import LogEntry, LogEntry2, LogEntry3, UseThenDisconnect
-from data.logs_model.interface import ActionLogsDataInterface, LogsIterationTimeout, \
- LogRotationContextInterface
-from data.logs_model.datatypes import Log, AggregatedLogCount, LogEntriesPage
-from data.logs_model.shared import SharedModel
-from data.model.log import get_stale_logs, get_stale_logs_start_id, delete_stale_logs
-
-logger = logging.getLogger(__name__)
-
-MINIMUM_RANGE_SIZE = 1 # second
-MAXIMUM_RANGE_SIZE = 60 * 60 * 24 * 30 # seconds ~= 1 month
-EXPECTED_ITERATION_LOG_COUNT = 1000
-
-
-LOG_MODELS = [LogEntry3, LogEntry2, LogEntry]
-
-
-class TableLogsModel(SharedModel, ActionLogsDataInterface):
- """
- TableLogsModel implements the data model for the logs API backed by a single table
- in the database.
- """
- def __init__(self, should_skip_logging=None, **kwargs):
- self._should_skip_logging = should_skip_logging
-
- def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
- namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
- if filter_kinds is not None:
- assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
-
- assert start_datetime is not None
- assert end_datetime is not None
-
- repository = None
- if repository_name and namespace_name:
- repository = model.repository.get_repository(namespace_name, repository_name)
- assert repository
-
- performer = None
- if performer_name:
- performer = model.user.get_user(performer_name)
- assert performer
-
- def get_logs(m, page_token):
- logs_query = model.log.get_logs_query(start_datetime, end_datetime, performer=performer,
- repository=repository, namespace=namespace_name,
- ignore=filter_kinds, model=m)
-
- logs, next_page_token = model.modelutil.paginate(logs_query, m,
- descending=True,
- page_token=page_token,
- limit=20,
- max_page=max_page_count,
- sort_field_name='datetime')
-
- return logs, next_page_token
-
- TOKEN_TABLE_ID = 'tti'
- table_index = 0
- logs = []
- next_page_token = page_token or None
-
- # Skip empty pages (empty table)
- while len(logs) == 0 and table_index < len(LOG_MODELS) - 1:
- table_specified = next_page_token is not None and next_page_token.get(TOKEN_TABLE_ID) is not None
- if table_specified:
- table_index = next_page_token.get(TOKEN_TABLE_ID)
-
- logs_result, next_page_token = get_logs(LOG_MODELS[table_index], next_page_token)
- logs.extend(logs_result)
-
- if next_page_token is None and table_index < len(LOG_MODELS) - 1:
- next_page_token = {TOKEN_TABLE_ID: table_index + 1}
-
- return LogEntriesPage([Log.for_logentry(log) for log in logs], next_page_token)
-
- def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
- filter_kinds=None, size=20):
- if filter_kinds is not None:
- assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
-
- repository = None
- if repository_name and namespace_name:
- repository = model.repository.get_repository(namespace_name, repository_name)
- assert repository
-
- performer = None
- if performer_name:
- performer = model.user.get_user(performer_name)
- assert performer
-
- def get_latest_logs(m):
- logs_query = model.log.get_latest_logs_query(performer=performer, repository=repository,
- namespace=namespace_name, ignore=filter_kinds,
- model=m, size=size)
-
- logs = list(logs_query)
- return [Log.for_logentry(log) for log in logs]
-
- return get_latest_logs(LOG_MODELS[0])
-
- def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
- repository_name=None, namespace_name=None, filter_kinds=None):
- if filter_kinds is not None:
- assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
-
- if end_datetime - start_datetime >= timedelta(weeks=4):
- raise Exception('Cannot lookup aggregated logs over a period longer than a month')
-
- repository = None
- if repository_name and namespace_name:
- repository = model.repository.get_repository(namespace_name, repository_name)
-
- performer = None
- if performer_name:
- performer = model.user.get_user(performer_name)
-
- entries = {}
- for log_model in LOG_MODELS:
- aggregated = model.log.get_aggregated_logs(start_datetime, end_datetime,
- performer=performer,
- repository=repository,
- namespace=namespace_name,
- ignore=filter_kinds,
- model=log_model)
-
- for entry in aggregated:
- synthetic_date = datetime(start_datetime.year, start_datetime.month, int(entry.day),
- tzinfo=get_localzone())
- if synthetic_date.day < start_datetime.day:
- synthetic_date = synthetic_date + relativedelta(months=1)
-
- key = '%s-%s' % (entry.kind_id, entry.day)
-
- if key in entries:
- entries[key] = AggregatedLogCount(entry.kind_id, entry.count + entries[key].count,
- synthetic_date)
- else:
- entries[key] = AggregatedLogCount(entry.kind_id, entry.count, synthetic_date)
-
- return entries.values()
-
- def count_repository_actions(self, repository, day):
- return model.repositoryactioncount.count_repository_actions(repository, day)
-
- def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
- repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
- if self._should_skip_logging and self._should_skip_logging(kind_name, namespace_name,
- is_free_namespace):
- return
-
- if repository_name is not None:
- assert repository is None
- assert namespace_name is not None
- repository = model.repository.get_repository(namespace_name, repository_name)
-
- model.log.log_action(kind_name, namespace_name, performer=performer, repository=repository,
- ip=ip, metadata=metadata or {}, timestamp=timestamp)
-
- def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
- namespace_id=None, max_query_time=None):
- # Using an adjusting scale, start downloading log rows in batches, starting at
- # MINIMUM_RANGE_SIZE and doubling until we've reached EXPECTED_ITERATION_LOG_COUNT or
- # the lookup range has reached MAXIMUM_RANGE_SIZE. If at any point this operation takes
- # longer than max_query_time, terminate the batch operation as timed out.
- batch_start_time = datetime.utcnow()
-
- current_start_datetime = start_datetime
- current_batch_size = timedelta(seconds=MINIMUM_RANGE_SIZE)
-
- while current_start_datetime < end_datetime:
- # Verify we haven't been working for too long.
- work_elapsed = datetime.utcnow() - batch_start_time
- if max_query_time is not None and work_elapsed > max_query_time:
- logger.error('Retrieval of logs `%s/%s` timed out with time of `%s`',
- namespace_id, repository_id, work_elapsed)
- raise LogsIterationTimeout()
-
- current_end_datetime = current_start_datetime + current_batch_size
- current_end_datetime = min(current_end_datetime, end_datetime)
-
- # Load the next set of logs.
- def load_logs():
- logger.debug('Retrieving logs over range %s -> %s with namespace %s and repository %s',
- current_start_datetime, current_end_datetime, namespace_id, repository_id)
-
- logs_query = model.log.get_logs_query(namespace=namespace_id,
- repository=repository_id,
- start_time=current_start_datetime,
- end_time=current_end_datetime)
- logs = list(logs_query)
- for log in logs:
- if namespace_id is not None:
- assert log.account_id == namespace_id
-
- if repository_id is not None:
- assert log.repository_id == repository_id
-
- logs = [Log.for_logentry(log) for log in logs]
- return logs
-
- logs, elapsed = _run_and_time(load_logs)
- if max_query_time is not None and elapsed > max_query_time:
- logger.error('Retrieval of logs for export `%s/%s` with range `%s-%s` timed out at `%s`',
- namespace_id, repository_id, current_start_datetime, current_end_datetime,
- elapsed)
- raise LogsIterationTimeout()
-
- yield logs
-
- # Move forward.
- current_start_datetime = current_end_datetime
-
- # Increase the batch size if necessary.
- if len(logs) < EXPECTED_ITERATION_LOG_COUNT:
- seconds = min(MAXIMUM_RANGE_SIZE, current_batch_size.total_seconds() * 2)
- current_batch_size = timedelta(seconds=seconds)
-
- def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
- """ Yield a context manager for a group of outdated logs. """
- for log_model in LOG_MODELS:
- while True:
- with UseThenDisconnect(config.app_config):
- start_id = get_stale_logs_start_id(log_model)
-
- if start_id is None:
- logger.warning('Failed to find start id')
- break
-
- logger.debug('Found starting ID %s', start_id)
- lookup_end_id = start_id + min_logs_per_rotation
- logs = [log for log in get_stale_logs(start_id, lookup_end_id,
- log_model, cutoff_date)]
-
- if not logs:
- logger.debug('No further logs found')
- break
-
- end_id = max([log.id for log in logs])
- context = DatabaseLogRotationContext(logs, log_model, start_id, end_id)
- yield context
-
-
-def _run_and_time(fn):
- start_time = datetime.utcnow()
- result = fn()
- return result, datetime.utcnow() - start_time
-
-
-table_logs_model = TableLogsModel()
-
-
-class DatabaseLogRotationContext(LogRotationContextInterface):
- """
- DatabaseLogRotationContext represents a batch of logs to be archived together.
- i.e. a set of logs to be archived in the same file (based on the number of logs per rotation).
-
- When completed without exceptions, this context will delete the stale logs
- from rows `start_id` to `end_id`.
- """
- def __init__(self, logs, log_model, start_id, end_id):
- self.logs = logs
- self.log_model = log_model
- self.start_id = start_id
- self.end_id = end_id
-
- def __enter__(self):
- return self
-
- def __exit__(self, ex_type, ex_value, ex_traceback):
- if ex_type is None and ex_value is None and ex_traceback is None:
- with UseThenDisconnect(config.app_config):
- logger.debug('Deleting logs from IDs %s to %s', self.start_id, self.end_id)
- delete_stale_logs(self.start_id, self.end_id, self.log_model)
-
- def yield_logs_batch(self):
- """ Yield a batch of logs and a filename for that batch. """
- filename = '%d-%d-%s.txt.gz' % (self.start_id, self.end_id,
- self.log_model.__name__.lower())
- yield self.logs, filename
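The export path's adaptive batching is the most intricate piece of this model. Below is a standalone sketch of just the range-doubling strategy, with a caller-supplied counting function standing in for the database query; names and defaults are assumptions for illustration only.

from datetime import timedelta

MINIMUM_RANGE_SIZE = 1                   # second
MAXIMUM_RANGE_SIZE = 60 * 60 * 24 * 30   # seconds, roughly one month
EXPECTED_ITERATION_LOG_COUNT = 1000

def iterate_ranges(start, end, count_in_range):
    # count_in_range(range_start, range_end) -> number of logs found; supplied by the caller.
    current_start = start
    batch = timedelta(seconds=MINIMUM_RANGE_SIZE)
    while current_start < end:
        current_end = min(current_start + batch, end)
        found = count_in_range(current_start, current_end)
        yield current_start, current_end, found
        current_start = current_end
        # Double the window whenever a batch comes back light, capped at the maximum range.
        if found < EXPECTED_ITERATION_LOG_COUNT:
            batch = timedelta(seconds=min(MAXIMUM_RANGE_SIZE, batch.total_seconds() * 2))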
diff --git a/data/logs_model/test/__init__.py b/data/logs_model/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/data/logs_model/test/fake_elasticsearch.py b/data/logs_model/test/fake_elasticsearch.py
deleted file mode 100644
index 439dfd806..000000000
--- a/data/logs_model/test/fake_elasticsearch.py
+++ /dev/null
@@ -1,390 +0,0 @@
-import json
-import uuid
-import fnmatch
-
-from collections import defaultdict
-from contextlib import contextmanager
-from datetime import datetime
-
-import dateutil.parser
-
-from httmock import urlmatch, HTTMock
-
-FAKE_ES_HOST = 'fakees'
-
-EMPTY_RESULT = {
- 'hits': {'hits': [], 'total': 0},
- '_shards': {'successful': 1, 'total': 1},
-}
-
-def parse_query(query):
- if not query:
- return {}
-
- return {s.split('=')[0]: s.split('=')[1] for s in query.split("&")}
-
-
-@contextmanager
-def fake_elasticsearch(allow_wildcard=True):
- templates = {}
- docs = defaultdict(list)
- scrolls = {}
- id_counter = [1]
-
- def transform(value, field_name):
- # TODO: implement this using a real index template if we ever need more than a few
- # fields here.
- if field_name == 'datetime':
- if isinstance(value, int):
- return datetime.utcfromtimestamp(value / 1000)
-
- parsed = dateutil.parser.parse(value)
- return parsed
-
- return value
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/_template/(.+)', method='GET')
- def get_template(url, request):
- template_name = url[len('/_template/'):]
- if template_name in templates:
- return {'status_code': 200}
-
- return {'status_code': 404}
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/_template/(.+)', method='PUT')
- def put_template(url, request):
- template_name = url[len('/_template/'):]
- templates[template_name] = True
- return {'status_code': 201}
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)/_doc', method='POST')
- def post_doc(url, request):
- index_name, _ = url.path[1:].split('/')
- item = json.loads(request.body)
- item['_id'] = item['random_id']
- id_counter[0] += 1
- docs[index_name].append(item)
- return {
- 'status_code': 204,
- 'headers': {
- 'Content-Type': 'application/json',
- },
- 'content': json.dumps({
- "result": "created",
- }),
- }
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)$', method='DELETE')
- def index_delete(url, request):
- index_name_or_pattern = url.path[1:]
- to_delete = []
- for index_name in docs.keys():
- if not fnmatch.fnmatch(index_name, index_name_or_pattern):
- continue
-
- to_delete.append(index_name)
-
- for index in to_delete:
- docs.pop(index)
-
- return {
- 'status_code': 200,
- 'headers': {
- 'Content-Type': 'application/json',
- },
- 'content': {'acknowledged': True}
- }
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)$', method='GET')
- def index_lookup(url, request):
- index_name_or_pattern = url.path[1:]
- found = {}
- for index_name in docs.keys():
- if not fnmatch.fnmatch(index_name, index_name_or_pattern):
- continue
-
- found[index_name] = {}
-
- if not found:
- return {
- 'status_code': 404,
- }
-
- return {
- 'status_code': 200,
- 'headers': {
- 'Content-Type': 'application/json',
- },
- 'content': json.dumps(found),
- }
-
- def _match_query(index_name_or_pattern, query):
- found = []
- found_index = False
-
- for index_name in docs.keys():
- if not allow_wildcard and index_name_or_pattern.find('*') >= 0:
- break
-
- if not fnmatch.fnmatch(index_name, index_name_or_pattern):
- continue
-
- found_index = True
-
- def _is_match(doc, current_query):
- if current_query is None:
- return True
-
- for filter_type, filter_params in current_query.iteritems():
- for field_name, filter_props in filter_params.iteritems():
- if filter_type == 'range':
- lt = transform(filter_props['lt'], field_name)
- gte = transform(filter_props['gte'], field_name)
- doc_value = transform(doc[field_name], field_name)
- if not (doc_value < lt and doc_value >= gte):
- return False
- elif filter_type == 'term':
- doc_value = transform(doc[field_name], field_name)
- return doc_value == filter_props
- elif filter_type == 'terms':
- doc_value = transform(doc[field_name], field_name)
- return doc_value in filter_props
- elif filter_type == 'bool':
- assert not 'should' in filter_params, 'should is unsupported'
-
- must = filter_params.get('must')
- must_not = filter_params.get('must_not')
- filter_bool = filter_params.get('filter')
-
- if must:
- for check in must:
- if not _is_match(doc, check):
- return False
-
- if must_not:
- for check in must_not:
- if _is_match(doc, check):
- return False
-
- if filter_bool:
- for check in filter_bool:
- if not _is_match(doc, check):
- return False
- else:
- raise Exception('Unimplemented query %s: %s' % (filter_type, query))
-
- return True
-
- for doc in docs[index_name]:
- if not _is_match(doc, query):
- continue
-
- found.append({'_source': doc, '_index': index_name})
-
- return found, found_index or (index_name_or_pattern.find('*') >= 0)
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)/_count$', method='GET')
- def count_docs(url, request):
- request = json.loads(request.body)
- index_name_or_pattern, _ = url.path[1:].split('/')
-
- found, found_index = _match_query(index_name_or_pattern, request['query'])
- if not found_index:
- return {
- 'status_code': 404,
- }
-
- return {
- 'status_code': 200,
- 'headers': {
- 'Content-Type': 'application/json',
- },
- 'content': json.dumps({'count': len(found)}),
- }
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/_search/scroll$', method='GET')
- def lookup_scroll(url, request):
- request_obj = json.loads(request.body)
- scroll_id = request_obj['scroll_id']
- if scroll_id in scrolls:
- return {
- 'status_code': 200,
- 'headers': {
- 'Content-Type': 'application/json',
- },
- 'content': json.dumps(scrolls[scroll_id]),
- }
-
- return {
- 'status_code': 404,
- }
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/_search/scroll$', method='DELETE')
- def delete_scroll(url, request):
- request = json.loads(request.body)
- for scroll_id in request['scroll_id']:
- scrolls.pop(scroll_id, None)
-
- return {
- 'status_code': 404,
- }
-
- @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)/_search$', method='GET')
- def lookup_docs(url, request):
- query_params = parse_query(url.query)
-
- request = json.loads(request.body)
- index_name_or_pattern, _ = url.path[1:].split('/')
-
- # Find matching docs.
- query = request.get('query')
- found, found_index = _match_query(index_name_or_pattern, query)
- if not found_index:
- return {
- 'status_code': 404,
- }
-
- # Sort.
- sort = request.get('sort')
- if sort:
- if sort == ['_doc'] or sort == '_doc':
- found.sort(key=lambda x: x['_source']['_id'])
- else:
- def get_sort_key(item):
- source = item['_source']
- key = ''
- for sort_config in sort:
- for sort_key, direction in sort_config.iteritems():
- assert direction == 'desc'
- sort_key = sort_key.replace('.keyword', '')
- key += str(transform(source[sort_key], sort_key))
- key += '|'
- return key
-
- found.sort(key=get_sort_key, reverse=True)
-
- # Search after.
- search_after = request.get('search_after')
- if search_after:
- sort_fields = []
- for sort_config in sort:
- if isinstance(sort_config, unicode):
- sort_fields.append(sort_config)
- continue
-
- for sort_key, _ in sort_config.iteritems():
- sort_key = sort_key.replace('.keyword', '')
- sort_fields.append(sort_key)
-
- for index, search_after_value in enumerate(search_after):
- field_name = sort_fields[index]
- value = transform(search_after_value, field_name)
- if field_name == '_doc':
- found = [f for f in found if transform(f['_source']['_id'], field_name) > value]
- else:
- found = [f for f in found if transform(f['_source'][field_name], field_name) < value]
- if len(found) < 2:
- break
-
- if field_name == '_doc':
- if found[0]['_source']['_id'] != found[1]['_source']['_id']:
- break
- else:
- if found[0]['_source'][field_name] != found[1]['_source'][field_name]:
- break
-
- # Size.
- size = request.get('size')
- if size:
- found = found[0:size]
-
- # Aggregation.
- # {u'query':
- # {u'range':
- # {u'datetime': {u'lt': u'2019-06-27T15:45:09.768085',
- # u'gte': u'2019-06-27T15:35:09.768085'}}},
- # u'aggs': {
- # u'by_id': {
- # u'terms': {u'field': u'kind_id'},
- # u'aggs': {
- # u'by_date': {u'date_histogram': {u'field': u'datetime', u'interval': u'day'}}}}},
- # u'size': 0}
- def _by_field(agg_field_params, results):
- aggregated_by_field = defaultdict(list)
-
- for agg_means, agg_means_params in agg_field_params.iteritems():
- if agg_means == 'terms':
- field_name = agg_means_params['field']
- for result in results:
- value = result['_source'][field_name]
- aggregated_by_field[value].append(result)
- elif agg_means == 'date_histogram':
- field_name = agg_means_params['field']
- interval = agg_means_params['interval']
- for result in results:
- value = transform(result['_source'][field_name], field_name)
- aggregated_by_field[getattr(value, interval)].append(result)
- elif agg_means == 'aggs':
- # Skip. Handled below.
- continue
- else:
- raise Exception('Unsupported aggregation method: %s' % agg_means)
-
- # Invoke the aggregation recursively.
- buckets = []
- for field_value, field_results in aggregated_by_field.iteritems():
- aggregated = _aggregate(agg_field_params, field_results)
- if isinstance(aggregated, list):
- aggregated = {'doc_count': len(aggregated)}
-
- aggregated['key'] = field_value
- buckets.append(aggregated)
-
- return {'buckets': buckets}
-
- def _aggregate(query_config, results):
- agg_params = query_config.get(u'aggs')
- if not agg_params:
- return results
-
- by_field_name = {}
- for agg_field_name, agg_field_params in agg_params.iteritems():
- by_field_name[agg_field_name] = _by_field(agg_field_params, results)
-
- return by_field_name
-
- final_result = {
- 'hits': {
- 'hits': found,
- 'total': len(found),
- },
- '_shards': {
- 'successful': 1,
- 'total': 1,
- },
- 'aggregations': _aggregate(request, found),
- }
-
- if query_params.get('scroll'):
- scroll_id = str(uuid.uuid4())
- scrolls[scroll_id] = EMPTY_RESULT
- final_result['_scroll_id'] = scroll_id
-
- return {
- 'status_code': 200,
- 'headers': {
- 'Content-Type': 'application/json',
- },
- 'content': json.dumps(final_result),
- }
-
- @urlmatch(netloc=FAKE_ES_HOST)
- def catchall_handler(url, request):
- print "Unsupported URL: %s %s" % (request.method, url, )
- return {'status_code': 501}
-
- handlers = [get_template, put_template, index_delete, index_lookup, post_doc, count_docs,
- lookup_docs, lookup_scroll, delete_scroll, catchall_handler]
-
- with HTTMock(*handlers):
- yield
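A minimal usage sketch of this helper as it existed before removal, assuming httmock and requests are installed; httmock patches the requests library, so any HTTP call to the fake host inside the context is answered by the handlers above.

import requests
from data.logs_model.test.fake_elasticsearch import FAKE_ES_HOST, fake_elasticsearch

with fake_elasticsearch():
    # No documents have been indexed yet, so the index lookup handler answers with a 404.
    resp = requests.get('http://%s/logentry_2019-01-01' % FAKE_ES_HOST)
    assert resp.status_code == 404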
diff --git a/data/logs_model/test/mock_elasticsearch.py b/data/logs_model/test/mock_elasticsearch.py
deleted file mode 100644
index bd26a10c7..000000000
--- a/data/logs_model/test/mock_elasticsearch.py
+++ /dev/null
@@ -1,400 +0,0 @@
-# -*- coding: utf-8 -*-
-import json
-
-from datetime import datetime
-from dateutil.parser import parse
-
-from data.logs_model.datatypes import LogEntriesPage, Log, AggregatedLogCount
-
-
-def _status(d, code=200):
- return {"status_code": code, "content": json.dumps(d)}
-
-
-def _shards(d, total=5, failed=0, successful=5):
- d.update({"_shards": {"total": total, "failed": failed, "successful": successful}})
- return d
-
-
-def _hits(hits):
- return {"hits": {"total": len(hits), "max_score": None, "hits": hits}}
-
-
-INDEX_LIST_RESPONSE_HIT1_HIT2 = _status({
- "logentry_2018-03-08": {},
- "logentry_2018-04-02": {}
-})
-
-
-INDEX_LIST_RESPONSE_HIT2 = _status({
- "logentry_2018-04-02": {}
-})
-
-
-INDEX_LIST_RESPONSE = _status({
- "logentry_2019-01-01": {},
- "logentry_2017-03-08": {},
- "logentry_2018-03-08": {},
- "logentry_2018-04-02": {}
-})
-
-
-DEFAULT_TEMPLATE_RESPONSE = _status({"acknowledged": True})
-INDEX_RESPONSE_2019_01_01 = _status(
- _shards({
- "_index": "logentry_2019-01-01",
- "_type": "_doc",
- "_id": "1",
- "_version": 1,
- "_seq_no": 0,
- "_primary_term": 1,
- "result": "created"
- }))
-
-INDEX_RESPONSE_2017_03_08 = _status(
- _shards({
- "_index": "logentry_2017-03-08",
- "_type": "_doc",
- "_id": "1",
- "_version": 1,
- "_seq_no": 0,
- "_primary_term": 1,
- "result": "created"
- }))
-
-FAILURE_400 = _status({}, 400)
-
-INDEX_REQUEST_2019_01_01 = [
- "logentry_2019-01-01", {
- "account_id":
- 1,
- "repository_id":
- 1,
- "ip":
- "192.168.1.1",
- "random_id":
- 233,
- "datetime":
- "2019-01-01T03:30:00",
- "metadata_json": json.loads("{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}"),
- "performer_id":
- 1,
- "kind_id":
- 1
- }
-]
-
-INDEX_REQUEST_2017_03_08 = [
- "logentry_2017-03-08", {
- "repository_id":
- 1,
- "account_id":
- 1,
- "ip":
- "192.168.1.1",
- "random_id":
- 233,
- "datetime":
- "2017-03-08T03:30:00",
- "metadata_json": json.loads("{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}"),
- "performer_id":
- 1,
- "kind_id":
- 2
- }
-]
-
-_hit1 = {
- "_index": "logentry_2018-03-08",
- "_type": "doc",
- "_id": "1",
- "_score": None,
- "_source": {
- "random_id":
- 233,
- "kind_id":
- 1,
- "account_id":
- 1,
- "performer_id":
- 1,
- "repository_id":
- 1,
- "ip":
- "192.168.1.1",
- "metadata_json":
- "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}",
- "datetime":
- "2018-03-08T03:30",
- },
- "sort": [1520479800000, 233]
-}
-
-_hit2 = {
- "_index": "logentry_2018-04-02",
- "_type": "doc",
- "_id": "2",
- "_score": None,
- "_source": {
- "random_id":
- 233,
- "kind_id":
- 2,
- "account_id":
- 1,
- "performer_id":
- 1,
- "repository_id":
- 1,
- "ip":
- "192.168.1.2",
- "metadata_json":
- "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1522639800}",
- "datetime":
- "2018-04-02T03:30",
- },
- "sort": [1522639800000, 233]
-}
-
-_log1 = Log(
- "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}",
- "192.168.1.1", parse("2018-03-08T03:30"), "user1.email", "user1.username", "user1.robot",
- "user1.organization", "user1.username", "user1.email", "user1.robot", 1)
-_log2 = Log(
- "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1522639800}",
- "192.168.1.2", parse("2018-04-02T03:30"), "user1.email", "user1.username", "user1.robot",
- "user1.organization", "user1.username", "user1.email", "user1.robot", 2)
-
-SEARCH_RESPONSE_START = _status(_shards(_hits([_hit1, _hit2])))
-SEARCH_RESPONSE_END = _status(_shards(_hits([_hit2])))
-SEARCH_REQUEST_START = {
- "sort": [{
- "datetime": "desc"
- }, {
- "random_id.keyword": "desc"
- }],
- "query": {
- "bool": {
- "filter": [{
- "term": {
- "performer_id": 1
- }
- }, {
- "term": {
- "repository_id": 1
- }
- }]
- }
- },
- "size": 2
-}
-SEARCH_REQUEST_END = {
- "sort": [{
- "datetime": "desc"
- }, {
- "random_id.keyword": "desc"
- }],
- "query": {
- "bool": {
- "filter": [{
- "term": {
- "performer_id": 1
- }
- }, {
- "term": {
- "repository_id": 1
- }
- }]
- }
- },
- "search_after": [1520479800000, 233],
- "size": 2
-}
-SEARCH_REQUEST_FILTER = {
- "sort": [{
- "datetime": "desc"
- }, {
- "random_id.keyword": "desc"
- }],
- "query": {
- "bool": {
- "filter": [{
- "term": {
- "performer_id": 1
- }
- }, {
- "term": {
- "repository_id": 1
- }
- }, {
- "bool": {
- "must_not": [{
- "terms": {
- "kind_id": [1]
- }
- }]
- }
- }]
- }
- },
- "size": 2
-}
-SEARCH_PAGE_TOKEN = {
- "datetime": datetime(2018, 3, 8, 3, 30).isoformat(),
- "random_id": 233,
- "page_number": 1
-}
-SEARCH_PAGE_START = LogEntriesPage(logs=[_log1], next_page_token=SEARCH_PAGE_TOKEN)
-SEARCH_PAGE_END = LogEntriesPage(logs=[_log2], next_page_token=None)
-SEARCH_PAGE_EMPTY = LogEntriesPage([], None)
-
-AGGS_RESPONSE = _status(
- _shards({
- "hits": {
- "total": 4,
- "max_score": None,
- "hits": []
- },
- "aggregations": {
- "by_id": {
- "doc_count_error_upper_bound":
- 0,
- "sum_other_doc_count":
- 0,
- "buckets": [{
- "key": 2,
- "doc_count": 3,
- "by_date": {
- "buckets": [{
- "key_as_string": "2009-11-12T00:00:00.000Z",
- "key": 1257984000000,
- "doc_count": 1
- }, {
- "key_as_string": "2009-11-13T00:00:00.000Z",
- "key": 1258070400000,
- "doc_count": 0
- }, {
- "key_as_string": "2009-11-14T00:00:00.000Z",
- "key": 1258156800000,
- "doc_count": 2
- }]
- }
- }, {
- "key": 1,
- "doc_count": 1,
- "by_date": {
- "buckets": [{
- "key_as_string": "2009-11-15T00:00:00.000Z",
- "key": 1258243200000,
- "doc_count": 1
- }]
- }
- }]
- }
- }
- }))
-
-AGGS_REQUEST = {
- "query": {
- "bool": {
- "filter": [{
- "term": {
- "performer_id": 1
- }
- }, {
- "term": {
- "repository_id": 1
- }
- }, {
- "bool": {
- "must_not": [{
- "terms": {
- "kind_id": [2]
- }
- }]
- }
- }],
- "must": [{
- "range": {
- "datetime": {
- "lt": "2018-04-08T03:30:00",
- "gte": "2018-03-08T03:30:00"
- }
- }
- }]
- }
- },
- "aggs": {
- "by_id": {
- "terms": {
- "field": "kind_id"
- },
- "aggs": {
- "by_date": {
- "date_histogram": {
- "field": "datetime",
- "interval": "day"
- }
- }
- }
- }
- },
- "size": 0
-}
-
-AGGS_COUNT = [
- AggregatedLogCount(1, 1, parse("2009-11-15T00:00:00.000")),
- AggregatedLogCount(2, 1, parse("2009-11-12T00:00:00.000")),
- AggregatedLogCount(2, 2, parse("2009-11-14T00:00:00.000"))
-]
-
-COUNT_REQUEST = {
- "query": {
- "bool": {
- "filter": [{
- "term": {
- "repository_id": 1
- }
- }]
- }
- }
-}
-COUNT_RESPONSE = _status(_shards({
- "count": 1,
-}))
-
-# assume there are 2 pages
-_scroll_id = "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAACEmFkk1aGlTRzdSUWllejZmYTlEYTN3SVEAAAAAAAAhJRZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lRAAAAAAAAHtAWLWZpaFZXVzVSTy1OTXA5V3MwcHZrZwAAAAAAAB7RFi1maWhWV1c1Uk8tTk1wOVdzMHB2a2cAAAAAAAAhJxZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lR"
-
-
-def _scroll(d):
- d["_scroll_id"] = _scroll_id
- return d
-
-
-SCROLL_CREATE = _status(_shards(_scroll(_hits([_hit1]))))
-SCROLL_GET = _status(_shards(_scroll(_hits([_hit2]))))
-SCROLL_GET_2 = _status(_shards(_scroll(_hits([]))))
-SCROLL_DELETE = _status({"succeeded": True, "num_freed": 5})
-SCROLL_LOGS = [[_log1], [_log2]]
-
-SCROLL_REQUESTS = [
- [
- "5m", 1, {
- "sort": "_doc",
- "query": {
- "range": {
- "datetime": {
- "lt": "2018-04-02T00:00:00",
- "gte": "2018-03-08T00:00:00"
- }
- }
- }
- }
- ],
- [{"scroll": "5m", "scroll_id": _scroll_id}],
- [{"scroll":"5m", "scroll_id": _scroll_id}],
- [{"scroll_id": [_scroll_id]}],
-]
-
-SCROLL_RESPONSES = [SCROLL_CREATE, SCROLL_GET, SCROLL_GET_2, SCROLL_DELETE]
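SEARCH_REQUEST_START and SEARCH_REQUEST_END above capture Elasticsearch search_after paging: the sort values of a page's last hit are echoed back to fetch the next page. A hypothetical client-side loop over that pattern follows; es_search is an assumed callable returning a parsed search response with hits.hits, not part of the fixtures above.

def paginate(es_search, base_query, page_size=2):
    # es_search(body) is assumed to return a dict shaped like an Elasticsearch search response.
    search_after = None
    while True:
        body = dict(base_query, size=page_size)
        if search_after is not None:
            body['search_after'] = search_after
        hits = es_search(body)['hits']['hits']
        if not hits:
            return
        for hit in hits:
            yield hit['_source']
        if len(hits) < page_size:
            return
        # Reuse the sort values of the last hit (e.g. [1520479800000, 233]) as the next cursor.
        search_after = hits[-1]['sort']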
diff --git a/data/logs_model/test/test_combined_model.py b/data/logs_model/test/test_combined_model.py
deleted file mode 100644
index 7b288e72f..000000000
--- a/data/logs_model/test/test_combined_model.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from datetime import date, datetime, timedelta
-
-from freezegun import freeze_time
-
-from data.logs_model.inmemory_model import InMemoryModel
-from data.logs_model.combined_model import CombinedLogsModel
-
-from test.fixtures import *
-
-
-@pytest.fixture()
-def first_model():
- return InMemoryModel()
-
-
-@pytest.fixture()
-def second_model():
- return InMemoryModel()
-
-
-@pytest.fixture()
-def combined_model(first_model, second_model, initialized_db):
- return CombinedLogsModel(first_model, second_model)
-
-
-def test_log_action(first_model, second_model, combined_model, initialized_db):
- day = date(2019, 1, 1)
-
- # Write to the combined model.
- with freeze_time(day):
- combined_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- simple_repo = model.repository.get_repository('devtable', 'simple')
-
- # Make sure it is found in the first model but not the second.
- assert combined_model.count_repository_actions(simple_repo, day) == 1
- assert first_model.count_repository_actions(simple_repo, day) == 1
- assert second_model.count_repository_actions(simple_repo, day) == 0
-
-
-def test_count_repository_actions(first_model, second_model, combined_model, initialized_db):
- # Write to each model.
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- # Ensure the counts match as expected.
- day = datetime.today() - timedelta(minutes=60)
- simple_repo = model.repository.get_repository('devtable', 'simple')
-
- assert first_model.count_repository_actions(simple_repo, day) == 3
- assert second_model.count_repository_actions(simple_repo, day) == 2
- assert combined_model.count_repository_actions(simple_repo, day) == 5
-
-
-def test_yield_logs_for_export(first_model, second_model, combined_model, initialized_db):
- now = datetime.now()
-
- # Write to each model.
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- later = datetime.now()
-
- # Ensure the full set of logs is yielded.
- first_logs = list(first_model.yield_logs_for_export(now, later))[0]
- second_logs = list(second_model.yield_logs_for_export(now, later))[0]
-
- combined = list(combined_model.yield_logs_for_export(now, later))
- full_combined = []
- for subset in combined:
- full_combined.extend(subset)
-
- assert len(full_combined) == len(first_logs) + len(second_logs)
- assert full_combined == (first_logs + second_logs)
-
-
-def test_lookup_logs(first_model, second_model, combined_model, initialized_db):
- now = datetime.now()
-
- # Write to each model.
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- later = datetime.now()
-
- def _collect_logs(model):
- page_token = None
- all_logs = []
- while True:
- paginated_logs = model.lookup_logs(now, later, page_token=page_token)
- page_token = paginated_logs.next_page_token
- all_logs.extend(paginated_logs.logs)
- if page_token is None:
- break
- return all_logs
-
- first_logs = _collect_logs(first_model)
- second_logs = _collect_logs(second_model)
- combined = _collect_logs(combined_model)
-
- assert len(combined) == len(first_logs) + len(second_logs)
- assert combined == (first_logs + second_logs)
diff --git a/data/logs_model/test/test_elasticsearch.py b/data/logs_model/test/test_elasticsearch.py
deleted file mode 100644
index a305010f4..000000000
--- a/data/logs_model/test/test_elasticsearch.py
+++ /dev/null
@@ -1,529 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# pylint: disable=redefined-outer-name, wildcard-import
-
-import json
-from datetime import datetime, timedelta
-
-import pytest
-from mock import patch, Mock
-from dateutil.parser import parse
-
-from httmock import urlmatch, HTTMock
-
-from data.model.log import _json_serialize
-from data.logs_model.elastic_logs import ElasticsearchLogs, INDEX_NAME_PREFIX, INDEX_DATE_FORMAT
-from data.logs_model import configure, LogsModelProxy
-from mock_elasticsearch import *
-
-FAKE_ES_HOST = 'fakees'
-FAKE_ES_HOST_PATTERN = r'fakees.*'
-FAKE_ES_PORT = 443
-FAKE_AWS_ACCESS_KEY = None
-FAKE_AWS_SECRET_KEY = None
-FAKE_AWS_REGION = None
-
-@pytest.fixture()
-def logs_model_config():
- conf = {
- 'LOGS_MODEL': 'elasticsearch',
- 'LOGS_MODEL_CONFIG': {
- 'producer': 'elasticsearch',
- 'elasticsearch_config': {
- 'host': FAKE_ES_HOST,
- 'port': FAKE_ES_PORT,
- 'access_key': FAKE_AWS_ACCESS_KEY,
- 'secret_key': FAKE_AWS_SECRET_KEY,
- 'aws_region': FAKE_AWS_REGION
- }
- }
- }
- return conf
-
-
-FAKE_LOG_ENTRY_KINDS = {'push_repo': 1, 'pull_repo': 2}
-FAKE_NAMESPACES = {
- 'user1':
- Mock(id=1, organization="user1.organization", username="user1.username", email="user1.email",
- robot="user1.robot"),
- 'user2':
- Mock(id=2, organization="user2.organization", username="user2.username", email="user2.email",
- robot="user2.robot")
-}
-FAKE_REPOSITORIES = {
- 'user1/repo1': Mock(id=1, namespace_user=FAKE_NAMESPACES['user1']),
- 'user2/repo2': Mock(id=2, namespace_user=FAKE_NAMESPACES['user2']),
-}
-
-
-@pytest.fixture()
-def logs_model():
- # prevent logs model from changing
- logs_model = LogsModelProxy()
- with patch('data.logs_model.logs_model', logs_model):
- yield logs_model
-
-
-@pytest.fixture(scope='function')
-def app_config(logs_model_config):
- fake_config = {}
- fake_config.update(logs_model_config)
- with patch("data.logs_model.document_logs_model.config.app_config", fake_config):
- yield fake_config
-
-
-@pytest.fixture()
-def mock_page_size():
- with patch('data.logs_model.document_logs_model.PAGE_SIZE', 1):
- yield
-
-
-@pytest.fixture()
-def mock_max_result_window():
- with patch('data.logs_model.document_logs_model.DEFAULT_RESULT_WINDOW', 1):
- yield
-
-
-@pytest.fixture
-def mock_random_id():
- mock_random = Mock(return_value=233)
- with patch('data.logs_model.document_logs_model._random_id', mock_random):
- yield
-
-
-@pytest.fixture()
-def mock_db_model():
- def get_user_map_by_ids(namespace_ids):
- mapping = {}
- for i in namespace_ids:
- for name in FAKE_NAMESPACES:
- if FAKE_NAMESPACES[name].id == i:
- mapping[i] = FAKE_NAMESPACES[name]
- return mapping
-
- model = Mock(
- user=Mock(
- get_namespace_user=FAKE_NAMESPACES.get,
- get_user_or_org=FAKE_NAMESPACES.get,
- get_user=FAKE_NAMESPACES.get,
- get_user_map_by_ids=get_user_map_by_ids,
- ),
- repository=Mock(get_repository=lambda user_name, repo_name: FAKE_REPOSITORIES.get(
- user_name + '/' + repo_name),
- ),
- log=Mock(
- _get_log_entry_kind=lambda name: FAKE_LOG_ENTRY_KINDS[name],
- _json_serialize=_json_serialize,
- get_log_entry_kinds=Mock(return_value=FAKE_LOG_ENTRY_KINDS),
- ),
- )
-
- with patch('data.logs_model.document_logs_model.model', model), patch(
- 'data.logs_model.datatypes.model', model):
- yield
-
-
-def parse_query(query):
- return {s.split('=')[0]: s.split('=')[1] for s in query.split("&") if s != ""}
-
-
-@pytest.fixture()
-def mock_elasticsearch():
- mock = Mock()
- mock.template.side_effect = NotImplementedError
- mock.index.side_effect = NotImplementedError
- mock.count.side_effect = NotImplementedError
- mock.scroll_get.side_effect = NotImplementedError
- mock.scroll_delete.side_effect = NotImplementedError
- mock.search_scroll_create.side_effect = NotImplementedError
- mock.search_aggs.side_effect = NotImplementedError
- mock.search_after.side_effect = NotImplementedError
- mock.list_indices.side_effect = NotImplementedError
-
- @urlmatch(netloc=r'.*', path=r'.*')
- def default(url, req):
- raise Exception('\nurl={}\nmethod={}\nreq.url={}\nheaders={}\nbody={}'.format(
- url, req.method, req.url, req.headers, req.body))
-
- @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/_template/.*')
- def template(url, req):
- return mock.template(url.query.split('/')[-1], req.body)
-
- @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_(\*|[0-9\-]+)')
- def list_indices(url, req):
- return mock.list_indices()
-
- @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_[0-9\-]*/_doc')
- def index(url, req):
- index = url.path.split('/')[1]
- body = json.loads(req.body)
- body['metadata_json'] = json.loads(body['metadata_json'])
- return mock.index(index, body)
-
- @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_([0-9\-]*|\*)/_count')
- def count(_, req):
- return mock.count(json.loads(req.body))
-
- @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/_search/scroll')
- def scroll(url, req):
- if req.method == 'DELETE':
- return mock.scroll_delete(json.loads(req.body))
- elif req.method == 'GET':
- request_obj = json.loads(req.body)
- return mock.scroll_get(request_obj)
- raise NotImplementedError()
-
- @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_(\*|[0-9\-]*)/_search')
- def search(url, req):
- if "scroll" in url.query:
- query = parse_query(url.query)
- window_size = query['scroll']
- maximum_result_size = int(query['size'])
- return mock.search_scroll_create(window_size, maximum_result_size, json.loads(req.body))
- elif "aggs" in req.body:
- return mock.search_aggs(json.loads(req.body))
- else:
- return mock.search_after(json.loads(req.body))
-
- with HTTMock(scroll, count, search, index, template, list_indices, default):
- yield mock
-
-
-@pytest.mark.parametrize(
- """
- unlogged_pulls_ok, kind_name, namespace_name, repository, repository_name,
- timestamp,
- index_response, expected_request, throws
- """,
- [
- # Invalid inputs
- pytest.param(
- False, 'non-existing', None, None, None,
- None,
- None, None, True,
- id="Invalid Kind"
- ),
- pytest.param(
- False, 'pull_repo', 'user1', Mock(id=1), 'repo1',
- None,
- None, None, True,
- id="Invalid Parameters"
- ),
-
- # Remote exceptions
- pytest.param(
- False, 'pull_repo', 'user1', Mock(id=1), None,
- None,
- FAILURE_400, None, True,
- id="Throw on pull log failure"
- ),
- pytest.param(
- True, 'pull_repo', 'user1', Mock(id=1), None,
- parse("2017-03-08T03:30"),
- FAILURE_400, INDEX_REQUEST_2017_03_08, False,
- id="Ok on pull log failure"
- ),
-
- # Success executions
- pytest.param(
- False, 'pull_repo', 'user1', Mock(id=1), None,
- parse("2017-03-08T03:30"),
- INDEX_RESPONSE_2017_03_08, INDEX_REQUEST_2017_03_08, False,
- id="Log with namespace name and repository"
- ),
- pytest.param(
- False, 'push_repo', 'user1', None, 'repo1',
- parse("2019-01-01T03:30"),
- INDEX_RESPONSE_2019_01_01, INDEX_REQUEST_2019_01_01, False,
- id="Log with namespace name and repository name"
- ),
- ])
-def test_log_action(unlogged_pulls_ok, kind_name, namespace_name, repository, repository_name,
- timestamp,
- index_response, expected_request, throws,
- app_config, logs_model, mock_elasticsearch, mock_db_model, mock_random_id):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
- mock_elasticsearch.index = Mock(return_value=index_response)
- app_config['ALLOW_PULLS_WITHOUT_STRICT_LOGGING'] = unlogged_pulls_ok
- configure(app_config)
-
- performer = Mock(id=1)
- ip = "192.168.1.1"
- metadata = {'key': 'value', 'time': parse("2018-03-08T03:30"), '😂': '😂👌👌👌👌'}
- if throws:
- with pytest.raises(Exception):
- logs_model.log_action(kind_name, namespace_name, performer, ip, metadata, repository,
- repository_name, timestamp)
- else:
- logs_model.log_action(kind_name, namespace_name, performer, ip, metadata, repository,
- repository_name, timestamp)
- mock_elasticsearch.index.assert_called_with(*expected_request)
-
-
-@pytest.mark.parametrize(
- """
- start_datetime, end_datetime,
- performer_name, repository_name, namespace_name,
- filter_kinds,
- page_token,
- max_page_count,
- search_response,
- list_indices_response,
- expected_request,
- expected_page,
- throws
- """,
- [
- # 1st page
- pytest.param(
- parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
- 'user1', 'repo1', 'user1',
- None,
- None,
- None,
- SEARCH_RESPONSE_START,
- INDEX_LIST_RESPONSE_HIT1_HIT2,
- SEARCH_REQUEST_START,
- SEARCH_PAGE_START,
- False,
- id="1st page"
- ),
-
- # Last page
- pytest.param(
- parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
- 'user1', 'repo1', 'user1',
- None,
- SEARCH_PAGE_TOKEN,
- None,
- SEARCH_RESPONSE_END,
- INDEX_LIST_RESPONSE_HIT1_HIT2,
- SEARCH_REQUEST_END,
- SEARCH_PAGE_END,
- False,
- id="Search using pagination token"
- ),
-
- # Filter
- pytest.param(
- parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
- 'user1', 'repo1', 'user1',
- ['push_repo'],
- None,
- None,
- SEARCH_RESPONSE_END,
- INDEX_LIST_RESPONSE_HIT2,
- SEARCH_REQUEST_FILTER,
- SEARCH_PAGE_END,
- False,
- id="Filtered search"
- ),
-
- # Max page count
- pytest.param(
- parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
- 'user1', 'repo1', 'user1',
- None,
- SEARCH_PAGE_TOKEN,
- 1,
- AssertionError, # Assert that it should not reach the ES server
- None,
- None,
- SEARCH_PAGE_EMPTY,
- False,
- id="Page token reaches maximum page count",
- ),
- ])
-def test_lookup_logs(start_datetime, end_datetime,
- performer_name, repository_name, namespace_name,
- filter_kinds,
- page_token,
- max_page_count,
- search_response,
- list_indices_response,
- expected_request,
- expected_page,
- throws,
- logs_model, mock_elasticsearch, mock_db_model, mock_page_size, app_config):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
- mock_elasticsearch.search_after = Mock(return_value=search_response)
- mock_elasticsearch.list_indices = Mock(return_value=list_indices_response)
-
- configure(app_config)
- if throws:
- with pytest.raises(Exception):
- logs_model.lookup_logs(start_datetime, end_datetime, performer_name, repository_name,
- namespace_name, filter_kinds, page_token, max_page_count)
- else:
- page = logs_model.lookup_logs(start_datetime, end_datetime, performer_name, repository_name,
- namespace_name, filter_kinds, page_token, max_page_count)
- assert page == expected_page
- if expected_request:
- mock_elasticsearch.search_after.assert_called_with(expected_request)
-
-
-@pytest.mark.parametrize(
- """
- start_datetime, end_datetime,
- performer_name, repository_name, namespace_name,
- filter_kinds, search_response, expected_request, expected_counts, throws
- """,
- [
- # Valid
- pytest.param(
- parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
- 'user1', 'repo1', 'user1',
- ['pull_repo'], AGGS_RESPONSE, AGGS_REQUEST, AGGS_COUNT, False,
- id="Valid Counts"
- ),
-
- # Invalid case: date range too big
- pytest.param(
- parse('2018-03-08T03:30'), parse('2018-04-09T03:30'),
- 'user1', 'repo1', 'user1',
- [], None, None, None, True,
- id="Throw on date range too big"
- )
- ])
-def test_get_aggregated_log_counts(start_datetime, end_datetime,
- performer_name, repository_name, namespace_name,
- filter_kinds, search_response, expected_request, expected_counts, throws,
- logs_model, mock_elasticsearch, mock_db_model, app_config):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
- mock_elasticsearch.search_aggs = Mock(return_value=search_response)
-
- configure(app_config)
- if throws:
- with pytest.raises(Exception):
- logs_model.get_aggregated_log_counts(start_datetime, end_datetime, performer_name,
- repository_name, namespace_name, filter_kinds)
- else:
- counts = logs_model.get_aggregated_log_counts(start_datetime, end_datetime, performer_name,
- repository_name, namespace_name, filter_kinds)
- assert set(counts) == set(expected_counts)
- if expected_request:
- mock_elasticsearch.search_aggs.assert_called_with(expected_request)
-
-
-@pytest.mark.parametrize(
- """
- repository,
- day,
- count_response, expected_request, expected_count, throws
- """,
- [
- pytest.param(
- FAKE_REPOSITORIES['user1/repo1'],
- parse("2018-03-08").date(),
- COUNT_RESPONSE, COUNT_REQUEST, 1, False,
- id="Valid Count with 1 as result"),
- ])
-def test_count_repository_actions(repository,
- day,
- count_response, expected_request, expected_count, throws,
- logs_model, mock_elasticsearch, mock_db_model, app_config):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
- mock_elasticsearch.count = Mock(return_value=count_response)
- mock_elasticsearch.list_indices = Mock(return_value=INDEX_LIST_RESPONSE)
-
- configure(app_config)
- if throws:
- with pytest.raises(Exception):
- logs_model.count_repository_actions(repository, day)
- else:
- count = logs_model.count_repository_actions(repository, day)
- assert count == expected_count
- if expected_request:
- mock_elasticsearch.count.assert_called_with(expected_request)
-
-
-@pytest.mark.parametrize(
- """
- start_datetime, end_datetime,
- repository_id, namespace_id,
- max_query_time, scroll_responses, expected_requests, expected_logs, throws
- """,
- [
- pytest.param(
- parse("2018-03-08"), parse("2018-04-02"),
- 1, 1,
- timedelta(seconds=10), SCROLL_RESPONSES, SCROLL_REQUESTS, SCROLL_LOGS, False,
- id="Scroll 3 pages with page size = 1"
- ),
- ])
-def test_yield_logs_for_export(start_datetime, end_datetime,
- repository_id, namespace_id,
- max_query_time, scroll_responses, expected_requests, expected_logs, throws,
- logs_model, mock_elasticsearch, mock_db_model, mock_max_result_window, app_config):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
- mock_elasticsearch.search_scroll_create = Mock(return_value=scroll_responses[0])
- mock_elasticsearch.scroll_get = Mock(side_effect=scroll_responses[1:-1])
- mock_elasticsearch.scroll_delete = Mock(return_value=scroll_responses[-1])
-
- configure(app_config)
- if throws:
- with pytest.raises(Exception):
- logs_model.yield_logs_for_export(start_datetime, end_datetime, max_query_time=max_query_time)
- else:
- log_generator = logs_model.yield_logs_for_export(start_datetime, end_datetime,
- max_query_time=max_query_time)
- counter = 0
- for logs in log_generator:
- if counter == 0:
- mock_elasticsearch.search_scroll_create.assert_called_with(*expected_requests[counter])
- else:
- mock_elasticsearch.scroll_get.assert_called_with(*expected_requests[counter])
- assert expected_logs[counter] == logs
- counter += 1
- # The last two requests must be:
- # 1. a scroll GET whose response contains 0 hits, which signals the termination condition
- # 2. the scroll DELETE request
- mock_elasticsearch.scroll_get.assert_called_with(*expected_requests[-2])
- mock_elasticsearch.scroll_delete.assert_called_with(*expected_requests[-1])
-
-
-@pytest.mark.parametrize('prefix, is_valid', [
- pytest.param('..', False, id='Invalid `..`'),
- pytest.param('.', False, id='Invalid `.`'),
- pytest.param('-prefix', False, id='Invalid prefix start -'),
- pytest.param('_prefix', False, id='Invalid prefix start _'),
- pytest.param('+prefix', False, id='Invalid prefix start +'),
- pytest.param('prefix_with_UPPERCASES', False, id='Invalid uppercase'),
- pytest.param('valid_index', True, id='Valid prefix'),
- pytest.param('valid_index_with_numbers1234', True, id='Valid prefix with numbers'),
- pytest.param('a'*256, False, id='Prefix too long')
-])
-def test_valid_index_prefix(prefix, is_valid):
- assert ElasticsearchLogs._valid_index_prefix(prefix) == is_valid
-
-
-@pytest.mark.parametrize('index, cutoff_date, expected_result', [
- pytest.param(
- INDEX_NAME_PREFIX+'2019-06-06',
- datetime(2019, 6, 8),
- True,
- id="Index older than cutoff"
- ),
- pytest.param(
- INDEX_NAME_PREFIX+'2019-06-06',
- datetime(2019, 6, 4),
- False,
- id="Index younger than cutoff"
- ),
- pytest.param(
- INDEX_NAME_PREFIX+'2019-06-06',
- datetime(2019, 6, 6, 23),
- False,
- id="Index older than cutoff but timedelta less than 1 day"
- ),
- pytest.param(
- INDEX_NAME_PREFIX+'2019-06-06',
- datetime(2019, 6, 7),
- True,
- id="Index older than cutoff by exactly one day"
- ),
-])
-def test_can_delete_index(index, cutoff_date, expected_result):
- es = ElasticsearchLogs(index_prefix=INDEX_NAME_PREFIX)
- assert datetime.strptime(index.split(es._index_prefix, 1)[-1], INDEX_DATE_FORMAT)
- assert es.can_delete_index(index, cutoff_date) == expected_result
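The cutoff logic those last cases exercise amounts to a whole-day comparison. An illustrative re-statement is below; the function name and the 'logentry_' prefix are assumptions inferred from the fixtures above, not imports from the deleted module.

from datetime import datetime, timedelta

INDEX_NAME_PREFIX = 'logentry_'
INDEX_DATE_FORMAT = '%Y-%m-%d'

def index_is_deletable(index, cutoff_date):
    index_dt = datetime.strptime(index[len(INDEX_NAME_PREFIX):], INDEX_DATE_FORMAT)
    # Only indices at least one full day older than the cutoff are eligible for deletion.
    return cutoff_date - index_dt >= timedelta(days=1)

assert index_is_deletable('logentry_2019-06-06', datetime(2019, 6, 8))
assert index_is_deletable('logentry_2019-06-06', datetime(2019, 6, 7))
assert not index_is_deletable('logentry_2019-06-06', datetime(2019, 6, 6, 23))
assert not index_is_deletable('logentry_2019-06-06', datetime(2019, 6, 4))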
diff --git a/data/logs_model/test/test_logs_interface.py b/data/logs_model/test/test_logs_interface.py
deleted file mode 100644
index 8f4f143c0..000000000
--- a/data/logs_model/test/test_logs_interface.py
+++ /dev/null
@@ -1,473 +0,0 @@
-from datetime import datetime, timedelta, date
-from data.logs_model.datatypes import AggregatedLogCount
-from data.logs_model.table_logs_model import TableLogsModel
-from data.logs_model.combined_model import CombinedLogsModel
-from data.logs_model.inmemory_model import InMemoryModel
-from data.logs_model.combined_model import _merge_aggregated_log_counts
-from data.logs_model.document_logs_model import _date_range_in_single_index, DocumentLogsModel
-from data.logs_model.interface import LogsIterationTimeout
-from data.logs_model.test.fake_elasticsearch import FAKE_ES_HOST, fake_elasticsearch
-
-from data.database import LogEntry, LogEntry2, LogEntry3, LogEntryKind
-from data import model
-
-from test.fixtures import *
-
-
-@pytest.fixture()
-def mock_page_size():
- page_size = 2
- with patch('data.logs_model.document_logs_model.PAGE_SIZE', page_size):
- yield page_size
-
-
-@pytest.fixture()
-def clear_db_logs(initialized_db):
- LogEntry.delete().execute()
- LogEntry2.delete().execute()
- LogEntry3.delete().execute()
-
-
-def combined_model():
- return CombinedLogsModel(TableLogsModel(), InMemoryModel())
-
-
-def es_model():
- return DocumentLogsModel(producer='elasticsearch', elasticsearch_config={
- 'host': FAKE_ES_HOST,
- 'port': 12345,
- })
-
-@pytest.fixture()
-def fake_es():
- with fake_elasticsearch():
- yield
-
-
-@pytest.fixture(params=[TableLogsModel, InMemoryModel, es_model, combined_model])
-def logs_model(request, clear_db_logs, fake_es):
- return request.param()
-
-
-def _lookup_logs(logs_model, start_time, end_time, **kwargs):
- logs_found = []
- page_token = None
- while True:
- found = logs_model.lookup_logs(start_time, end_time, page_token=page_token, **kwargs)
- logs_found.extend(found.logs)
- page_token = found.next_page_token
- if not found.logs or not page_token:
- break
-
- assert len(logs_found) == len(set(logs_found))
- return logs_found
-
-
-@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
- reason='Flaky on MySQL')
-@pytest.mark.parametrize('namespace_name, repo_name, performer_name, check_args, expect_results', [
- pytest.param('devtable', 'simple', 'devtable', {}, True, id='no filters'),
- pytest.param('devtable', 'simple', 'devtable', {
- 'performer_name': 'devtable',
- }, True, id='matching performer'),
-
- pytest.param('devtable', 'simple', 'devtable', {
- 'namespace_name': 'devtable',
- }, True, id='matching namespace'),
-
- pytest.param('devtable', 'simple', 'devtable', {
- 'namespace_name': 'devtable',
- 'repository_name': 'simple',
- }, True, id='matching repository'),
-
- pytest.param('devtable', 'simple', 'devtable', {
- 'performer_name': 'public',
- }, False, id='different performer'),
-
- pytest.param('devtable', 'simple', 'devtable', {
- 'namespace_name': 'public',
- }, False, id='different namespace'),
-
- pytest.param('devtable', 'simple', 'devtable', {
- 'namespace_name': 'devtable',
- 'repository_name': 'complex',
- }, False, id='different repository'),
-])
-def test_logs(namespace_name, repo_name, performer_name, check_args, expect_results, logs_model):
- # Add some logs.
- kinds = list(LogEntryKind.select())
- user = model.user.get_user(performer_name)
-
- start_timestamp = datetime.utcnow()
- timestamp = start_timestamp
-
- for kind in kinds:
- for index in range(0, 3):
- logs_model.log_action(kind.name, namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='1.2.3.4', timestamp=timestamp)
- timestamp = timestamp + timedelta(seconds=1)
-
- found = _lookup_logs(logs_model, start_timestamp, start_timestamp + timedelta(minutes=10),
- **check_args)
- if expect_results:
- assert len(found) == len(kinds) * 3
- else:
- assert not found
-
- aggregated_counts = logs_model.get_aggregated_log_counts(start_timestamp,
- start_timestamp + timedelta(minutes=10),
- **check_args)
- if expect_results:
- assert len(aggregated_counts) == len(kinds)
- for ac in aggregated_counts:
- assert ac.count == 3
- else:
- assert not aggregated_counts
-
-
-@pytest.mark.parametrize('filter_kinds, expect_results', [
- pytest.param(None, True),
- pytest.param(['push_repo'], True, id='push_repo filter'),
- pytest.param(['pull_repo'], True, id='pull_repo filter'),
- pytest.param(['push_repo', 'pull_repo'], False, id='push and pull filters')
-])
-def test_lookup_latest_logs(filter_kinds, expect_results, logs_model):
- kind_map = model.log.get_log_entry_kinds()
- if filter_kinds:
- ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
- else:
- ignore_ids = []
-
- now = datetime.now()
- namespace_name = 'devtable'
- repo_name = 'simple'
- performer_name = 'devtable'
-
- user = model.user.get_user(performer_name)
- size = 3
-
- # Log some push actions
- logs_model.log_action('push_repo', namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=1, seconds=11))
- logs_model.log_action('push_repo', namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=7, seconds=33))
-
- # Log some pull actions
- logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=0, seconds=3))
- logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=3, seconds=55))
- logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=5, seconds=3))
- logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
- performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=11, seconds=11))
-
- # Get the latest logs
- latest_logs = logs_model.lookup_latest_logs(performer_name, repo_name, namespace_name,
- filter_kinds=filter_kinds, size=size)
-
- # Test max lookup size
- assert len(latest_logs) <= size
-
- # Make sure that the latest logs returned are in decreasing order
- assert all(x >= y for x, y in zip(latest_logs, latest_logs[1:]))
-
- if expect_results:
- assert latest_logs
-
- # Lookup all logs filtered by kinds and sort them in reverse chronological order
- all_logs = _lookup_logs(logs_model, now - timedelta(days=30), now + timedelta(days=30),
- filter_kinds=filter_kinds, namespace_name=namespace_name,
- repository_name=repo_name)
- all_logs = sorted(all_logs, key=lambda l: l.datetime, reverse=True)
-
- # Check that querying all logs does not return the filtered kinds
- assert all([log.kind_id not in ignore_ids for log in all_logs])
-
- # Check that the latest logs contain only the most recent ones
- assert latest_logs == all_logs[:len(latest_logs)]
-
-
-def test_count_repository_actions(logs_model):
- # Log some actions.
- logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- # Log some actions to a different repo.
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
- ip='1.2.3.4')
-
- # Count the actions.
- day = date.today()
- simple_repo = model.repository.get_repository('devtable', 'simple')
-
- count = logs_model.count_repository_actions(simple_repo, day)
- assert count == 3
-
- complex_repo = model.repository.get_repository('devtable', 'complex')
- count = logs_model.count_repository_actions(complex_repo, day)
- assert count == 2
-
- # Try counting actions for a few days in the future to ensure it doesn't raise an error.
- count = logs_model.count_repository_actions(simple_repo, day + timedelta(days=5))
- assert count == 0
-
-
-def test_yield_log_rotation_context(logs_model):
- cutoff_date = datetime.now()
- min_logs_per_rotation = 3
-
- # Log some actions to be archived
- # One day
- logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple1',
- ip='1.2.3.4', timestamp=cutoff_date-timedelta(days=1, seconds=1))
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
- ip='5.6.7.8', timestamp=cutoff_date-timedelta(days=1, seconds=2))
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple3',
- ip='9.10.11.12', timestamp=cutoff_date-timedelta(days=1, seconds=3))
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple4',
- ip='0.0.0.0', timestamp=cutoff_date-timedelta(days=1, seconds=4))
- # Another day
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple5',
- ip='1.1.1.1', timestamp=cutoff_date-timedelta(days=2, seconds=1))
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple5',
- ip='1.1.1.1', timestamp=cutoff_date-timedelta(days=2, seconds=2))
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple5',
- ip='1.1.1.1', timestamp=cutoff_date-timedelta(days=2, seconds=3))
-
- found = _lookup_logs(logs_model, cutoff_date - timedelta(days=3), cutoff_date + timedelta(days=1))
- assert found is not None and len(found) == 7
-
- # Iterate the logs using the log rotation contexts
- all_logs = []
- for log_rotation_context in logs_model.yield_log_rotation_context(cutoff_date,
- min_logs_per_rotation):
- with log_rotation_context as context:
- for logs, _ in context.yield_logs_batch():
- all_logs.extend(logs)
-
- assert len(all_logs) == 7
- found = _lookup_logs(logs_model, cutoff_date - timedelta(days=3), cutoff_date + timedelta(days=1))
- assert not found
-
- # After sorting by datetime, make sure the datetimes are strictly increasing, which also
- # verifies that no duplicate logs were returned
- all_logs.sort(key=lambda d: d.datetime)
- assert all(x.datetime < y.datetime for x, y in zip(all_logs, all_logs[1:]))
-
-
-def test_count_repository_actions_with_wildcard_disabled(initialized_db):
- with fake_elasticsearch(allow_wildcard=False):
- logs_model = es_model()
-
- # Log some actions.
- logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- # Log some actions to a different repo.
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
- ip='1.2.3.4')
-
- # Count the actions.
- day = date.today()
- simple_repo = model.repository.get_repository('devtable', 'simple')
-
- count = logs_model.count_repository_actions(simple_repo, day)
- assert count == 3
-
- complex_repo = model.repository.get_repository('devtable', 'complex')
- count = logs_model.count_repository_actions(complex_repo, day)
- assert count == 2
-
- # Try counting actions for a few days in the future to ensure it doesn't raise an error.
- count = logs_model.count_repository_actions(simple_repo, day + timedelta(days=5))
- assert count == 0
-
-
-@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
- reason='Flaky on MySQL')
-def test_yield_logs_for_export(logs_model):
- # Add some logs.
- kinds = list(LogEntryKind.select())
- user = model.user.get_user('devtable')
-
- start_timestamp = datetime.utcnow()
- timestamp = start_timestamp
-
- for kind in kinds:
- for index in range(0, 10):
- logs_model.log_action(kind.name, namespace_name='devtable', repository_name='simple',
- performer=user, ip='1.2.3.4', timestamp=timestamp)
- timestamp = timestamp + timedelta(seconds=1)
-
- # Yield the logs.
- simple_repo = model.repository.get_repository('devtable', 'simple')
- logs_found = []
- for logs in logs_model.yield_logs_for_export(start_timestamp, timestamp + timedelta(minutes=10),
- repository_id=simple_repo.id):
- logs_found.extend(logs)
-
- # Ensure we found all added logs.
- assert len(logs_found) == len(kinds) * 10
-
-
-def test_yield_logs_for_export_timeout(logs_model):
- # Add some logs.
- kinds = list(LogEntryKind.select())
- user = model.user.get_user('devtable')
-
- start_timestamp = datetime.utcnow()
- timestamp = start_timestamp
-
- for kind in kinds:
- for _ in range(0, 2):
- logs_model.log_action(kind.name, namespace_name='devtable', repository_name='simple',
- performer=user, ip='1.2.3.4', timestamp=timestamp)
- timestamp = timestamp + timedelta(seconds=1)
-
- # Yield the logs. Since we set the timeout to nothing, it should immediately fail.
- simple_repo = model.repository.get_repository('devtable', 'simple')
- with pytest.raises(LogsIterationTimeout):
- list(logs_model.yield_logs_for_export(start_timestamp, timestamp + timedelta(minutes=1),
- repository_id=simple_repo.id,
- max_query_time=timedelta(seconds=0)))
-
-
-def test_disabled_namespace(clear_db_logs):
- logs_model = TableLogsModel(lambda kind, namespace, is_free: namespace == 'devtable')
-
- # Log some actions.
- logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
- ip='1.2.3.4')
-
- # Log some actions to a different namespace.
- logs_model.log_action('push_repo', namespace_name='buynlarge', repository_name='orgrepo',
- ip='1.2.3.4')
-
- logs_model.log_action('pull_repo', namespace_name='buynlarge', repository_name='orgrepo',
- ip='1.2.3.4')
- logs_model.log_action('pull_repo', namespace_name='buynlarge', repository_name='orgrepo',
- ip='1.2.3.4')
-
- # Count the actions.
- day = datetime.today() - timedelta(minutes=60)
- simple_repo = model.repository.get_repository('devtable', 'simple')
- count = logs_model.count_repository_actions(simple_repo, day)
- assert count == 0
-
- org_repo = model.repository.get_repository('buynlarge', 'orgrepo')
- count = logs_model.count_repository_actions(org_repo, day)
- assert count == 3
-
-
-@pytest.mark.parametrize('aggregated_log_counts1, aggregated_log_counts2, expected_result', [
- pytest.param(
- [
- AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
- AggregatedLogCount(1, 3, datetime(2019, 6, 7, 0, 0)), # 2
- ],
- [
- AggregatedLogCount(1, 5, datetime(2019, 6, 6, 0, 0)), # 1
- AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2
- AggregatedLogCount(3, 3, datetime(2019, 6, 1, 0, 0)), # 3
- ],
- [
- AggregatedLogCount(1, 8, datetime(2019, 6, 6, 0, 0)), # 1
- AggregatedLogCount(1, 10, datetime(2019, 6, 7, 0, 0)), # 2
- AggregatedLogCount(3, 3, datetime(2019, 6, 1, 0, 0)) # 3
- ]
- ),
- pytest.param(
- [
- AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
- ],
- [
- AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2
- ],
- [
- AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
- AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2
- ]
- ),
- pytest.param(
- [],
- [AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0))],
- [AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0))]
- ),
-])
-def test_merge_aggregated_log_counts(aggregated_log_counts1, aggregated_log_counts2, expected_result):
- assert (sorted(_merge_aggregated_log_counts(aggregated_log_counts1, aggregated_log_counts2)) ==
- sorted(expected_result))
-
-
-@pytest.mark.parametrize('dt1, dt2, expected_result', [
- # Valid dates
- pytest.param(date(2019, 6, 17), date(2019, 6, 18), True),
-
- # Invalid dates
- pytest.param(date(2019, 6, 17), date(2019, 6, 17), False),
- pytest.param(date(2019, 6, 17), date(2019, 6, 19), False),
- pytest.param(date(2019, 6, 18), date(2019, 6, 17), False),
-
- # Valid datetimes
- pytest.param(datetime(2019, 6, 17, 0, 1), datetime(2019, 6, 17, 0, 2), True),
-
- # Invalid datetimes
- pytest.param(datetime(2019, 6, 17, 0, 2), datetime(2019, 6, 17, 0, 1), False),
- pytest.param(datetime(2019, 6, 17, 11), datetime(2019, 6, 17, 11) + timedelta(hours=14), False),
-])
-def test_date_range_in_single_index(dt1, dt2, expected_result):
- assert _date_range_in_single_index(dt1, dt2) == expected_result
-
-
-def test_pagination(logs_model, mock_page_size):
- """
- Make sure that pagination does not stop when searching across multiple per-day indices
- and the current log count matches the page size while there are still indices left to search.
- """
- day1 = datetime.now()
- day2 = day1 + timedelta(days=1)
- day3 = day2 + timedelta(days=1)
-
- # Log some actions in day indices
- # One day
- logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple1',
- ip='1.2.3.4', timestamp=day1)
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple1',
- ip='5.6.7.8', timestamp=day1)
-
- found = _lookup_logs(logs_model, day1-timedelta(seconds=1), day3+timedelta(seconds=1))
- assert len(found) == mock_page_size
-
- # Another day
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
- ip='1.1.1.1', timestamp=day2)
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
- ip='0.0.0.0', timestamp=day2)
-
- # Yet another day
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
- ip='1.1.1.1', timestamp=day3)
- logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
- ip='0.0.0.0', timestamp=day3)
-
- found = _lookup_logs(logs_model, day1-timedelta(seconds=1), day3+timedelta(seconds=1))
- assert len(found) == 6
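
The deleted `_lookup_logs` helper above captures the pagination contract shared by every logs model exercised in this file: `lookup_logs` returns a page of logs plus an opaque `next_page_token`, and the caller loops until the page or the token is empty. For reference, a minimal self-contained sketch of that loop; `LogPage` and `FakeLogsModel` are illustrative stand-ins, not names from the Quay codebase.

from collections import namedtuple

LogPage = namedtuple('LogPage', ['logs', 'next_page_token'])


class FakeLogsModel(object):
    """Stand-in model that serves a fixed list of entries two at a time."""

    def __init__(self, entries, page_size=2):
        self._entries = entries
        self._page_size = page_size

    def lookup_logs(self, start_time, end_time, page_token=None):
        offset = page_token or 0
        page = self._entries[offset:offset + self._page_size]
        next_offset = offset + self._page_size
        next_token = next_offset if next_offset < len(self._entries) else None
        return LogPage(page, next_token)


def lookup_all_logs(model, start_time, end_time, **kwargs):
    # Same loop as the deleted _lookup_logs helper: keep requesting pages until an
    # empty page or a missing next_page_token is returned.
    logs_found = []
    page_token = None
    while True:
        found = model.lookup_logs(start_time, end_time, page_token=page_token, **kwargs)
        logs_found.extend(found.logs)
        page_token = found.next_page_token
        if not found.logs or not page_token:
            break
    return logs_found


if __name__ == '__main__':
    model = FakeLogsModel(['log-%d' % i for i in range(5)])
    assert lookup_all_logs(model, None, None) == ['log-%d' % i for i in range(5)]
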
diff --git a/data/logs_model/test/test_logs_producer.py b/data/logs_model/test/test_logs_producer.py
deleted file mode 100644
index 382684244..000000000
--- a/data/logs_model/test/test_logs_producer.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import logging
-import pytest
-from dateutil.parser import parse
-from mock import patch, Mock
-
-import botocore
-
-from data.logs_model import configure
-
-from test_elasticsearch import app_config, logs_model_config, logs_model, mock_elasticsearch, mock_db_model
-from mock_elasticsearch import *
-
-
-logger = logging.getLogger(__name__)
-
-FAKE_KAFKA_BROKERS = ['fake_server1', 'fake_server2']
-FAKE_KAFKA_TOPIC = 'sometopic'
-FAKE_MAX_BLOCK_SECONDS = 1
-
-@pytest.fixture()
-def kafka_logs_producer_config(app_config):
- producer_config = {}
- producer_config.update(app_config)
-
- kafka_config = {
- 'bootstrap_servers': FAKE_KAFKA_BROKERS,
- 'topic': FAKE_KAFKA_TOPIC,
- 'max_block_seconds': FAKE_MAX_BLOCK_SECONDS
- }
-
- producer_config['LOGS_MODEL_CONFIG']['producer'] = 'kafka'
- producer_config['LOGS_MODEL_CONFIG']['kafka_config'] = kafka_config
- return producer_config
-
-
-@pytest.fixture()
-def kinesis_logs_producer_config(app_config):
- producer_config = {}
- producer_config.update(app_config)
-
- kinesis_stream_config = {
- 'stream_name': 'test-stream',
- 'aws_region': 'fake_region',
- 'aws_access_key': 'some_key',
- 'aws_secret_key': 'some_secret'
- }
-
- producer_config['LOGS_MODEL_CONFIG']['producer'] = 'kinesis_stream'
- producer_config['LOGS_MODEL_CONFIG']['kinesis_stream_config'] = kinesis_stream_config
- return producer_config
-
-
-def test_kafka_logs_producers(logs_model, mock_elasticsearch, mock_db_model, kafka_logs_producer_config):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
-
- producer_config = kafka_logs_producer_config
- with patch('kafka.client_async.KafkaClient.check_version'), patch('kafka.KafkaProducer.send') as mock_send:
- configure(producer_config)
- logs_model.log_action('pull_repo', 'user1', Mock(id=1), '192.168.1.1', {'key': 'value'},
- None, 'repo1', parse("2019-01-01T03:30"))
-
- mock_send.assert_called_once()
-
-
-def test_kinesis_logs_producers(logs_model, mock_elasticsearch, mock_db_model, kinesis_logs_producer_config):
- mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
-
- producer_config = kinesis_logs_producer_config
- with patch('botocore.endpoint.EndpointCreator.create_endpoint'), \
- patch('botocore.client.BaseClient._make_api_call') as mock_send:
- configure(producer_config)
- logs_model.log_action('pull_repo', 'user1', Mock(id=1), '192.168.1.1', {'key': 'value'},
- None, 'repo1', parse("2019-01-01T03:30"))
-
- # Check that a PutRecord api call is made.
- # NOTE: The second arg of _make_api_call uses a randomized PartitionKey
- mock_send.assert_called_once_with(u'PutRecord', mock_send.call_args_list[0][0][1])
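
The two producer fixtures above differ only in the `LOGS_MODEL_CONFIG` block they hand to `configure()`. A hedged sketch of those two shapes with a small dispatch helper follows; the key names are copied from the fixtures, while the helper itself and any further structure expected by `data.logs_model.configure` are assumptions.

KAFKA_LOGS_MODEL_CONFIG = {
    'producer': 'kafka',
    'kafka_config': {
        'bootstrap_servers': ['fake_server1', 'fake_server2'],
        'topic': 'sometopic',
        'max_block_seconds': 1,
    },
}

KINESIS_LOGS_MODEL_CONFIG = {
    'producer': 'kinesis_stream',
    'kinesis_stream_config': {
        'stream_name': 'test-stream',
        'aws_region': 'fake_region',
        'aws_access_key': 'some_key',
        'aws_secret_key': 'some_secret',
    },
}


def producer_settings(logs_model_config):
    # Return the producer-specific sub-config, keyed the same way the fixtures lay it out.
    producer = logs_model_config['producer']
    key = {'kafka': 'kafka_config', 'kinesis_stream': 'kinesis_stream_config'}.get(producer)
    if key is None:
        raise ValueError('Unknown logs producer: %r' % (producer,))
    return logs_model_config[key]


assert producer_settings(KAFKA_LOGS_MODEL_CONFIG)['topic'] == 'sometopic'
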
diff --git a/data/migrations/__init__.py b/data/migrations/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/data/migrations/env.py b/data/migrations/env.py
index fdc870672..108c4c496 100644
--- a/data/migrations/env.py
+++ b/data/migrations/env.py
@@ -1,43 +1,25 @@
-import logging
+from __future__ import with_statement
+
import os
-from logging.config import fileConfig
-from urllib import unquote
-
from alembic import context
-from alembic.script.revision import ResolutionError
-from alembic.util import CommandError
from sqlalchemy import engine_from_config, pool
+from logging.config import fileConfig
+from urllib import unquote, quote
from peewee import SqliteDatabase
from data.database import all_models, db
-from data.migrations.tester import NoopTester, PopulateTestDataTester
+from app import app
from data.model.sqlalchemybridge import gen_sqlalchemy_metadata
-from release import GIT_HEAD, REGION, SERVICE
from util.morecollections import AttrDict
-from data.migrations.progress import PrometheusReporter, NullReporter
-
config = context.config
-DB_URI = config.get_main_option('db_uri', 'sqlite:///test/data/test.db')
-PROM_LABEL_PREFIX = 'DBA_OP_LABEL_'
+config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI']))
-
-# This option exists because alembic needs the db proxy to be configured in order
-# to perform migrations. The app import does the init of the proxy, but we don't
-# want that in the case of the config app, as we are explicitly connecting to a
-# db that the user has passed in, and we can't have import dependency on app
-if config.get_main_option('alembic_setup_app', 'True') == 'True':
- from app import app
- DB_URI = app.config['DB_URI']
-
-config.set_main_option('sqlalchemy.url', unquote(DB_URI))
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name:
- fileConfig(config.config_file_name)
-
-logger = logging.getLogger(__name__)
+ fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
@@ -51,104 +33,56 @@ tables = AttrDict(target_metadata.tables)
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
-def get_tester():
- """ Returns the tester to use. We only return the tester that populates data
- if the TEST_MIGRATE env var is set to `true` AND we make sure we're not
- connecting to a production database.
- """
- if os.environ.get('TEST_MIGRATE', '') == 'true':
- url = unquote(DB_URI)
- if url.find('amazonaws.com') < 0:
- return PopulateTestDataTester()
-
- return NoopTester()
-
-def get_progress_reporter():
- prom_addr = os.environ.get('DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR', None)
-
- if prom_addr is not None:
- prom_job = os.environ.get('DBA_OP_JOB_ID')
-
- def _process_label_key(label_key):
- return label_key[len(PROM_LABEL_PREFIX):].lower()
- labels = {_process_label_key(k): v for k, v in os.environ.items()
- if k.startswith(PROM_LABEL_PREFIX)}
-
- return PrometheusReporter(prom_addr, prom_job, labels)
- else:
- return NullReporter()
-
-def report_success(ctx=None, step=None, heads=None, run_args=None):
- progress_reporter = run_args['progress_reporter']
- progress_reporter.report_version_complete(success=True)
-
def run_migrations_offline():
- """Run migrations in 'offline' mode.
+ """Run migrations in 'offline' mode.
- This configures the context with just a URL
- and not an Engine, though an Engine is acceptable
- here as well. By skipping the Engine creation
- we don't even need a DBAPI to be available.
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
- Calls to context.execute() here emit the given string to the
- script output.
+ Calls to context.execute() here emit the given string to the
+ script output.
- """
- url = unquote(DB_URI)
- context.configure(url=url, target_metadata=target_metadata, transactional_ddl=True)
+ """
+ url = unquote(app.config['DB_URI'])
+ context.configure(url=url, target_metadata=target_metadata, transactional_ddl=True)
- with context.begin_transaction():
- context.run_migrations(tables=tables, tester=get_tester(), progress_reporter=NullReporter())
+ with context.begin_transaction():
+ context.run_migrations(tables=tables)
def run_migrations_online():
- """Run migrations in 'online' mode.
+ """Run migrations in 'online' mode.
- In this scenario we need to create an Engine
- and associate a connection with the context.
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
- """
+ """
- if (isinstance(db.obj, SqliteDatabase) and
- not 'GENMIGRATE' in os.environ and
- not 'DB_URI' in os.environ):
- print 'Skipping Sqlite migration!'
- return
+ if isinstance(db.obj, SqliteDatabase) and not 'GENMIGRATE' in os.environ and not 'DB_URI' in os.environ:
+ print ('Skipping Sqlite migration!')
+ return
- progress_reporter = get_progress_reporter()
- engine = engine_from_config(config.get_section(config.config_ini_section),
- prefix='sqlalchemy.',
- poolclass=pool.NullPool)
+ engine = engine_from_config(
+ config.get_section(config.config_ini_section),
+ prefix='sqlalchemy.',
+ poolclass=pool.NullPool)
- connection = engine.connect()
- context.configure(connection=connection,
- target_metadata=target_metadata,
- transactional_ddl=False,
- on_version_apply=report_success)
+ connection = engine.connect()
+ context.configure(
+ connection=connection,
+ target_metadata=target_metadata,
+ transactional_ddl=False,
+ )
- try:
- with context.begin_transaction():
- try:
- context.run_migrations(tables=tables, tester=get_tester(),
- progress_reporter=progress_reporter)
- except (CommandError, ResolutionError) as ex:
- if 'No such revision' not in str(ex):
- raise
-
- if not REGION or not GIT_HEAD:
- raise
-
- from data.model.release import get_recent_releases
-
- # ignore revision error if we're running the previous release
- releases = list(get_recent_releases(SERVICE, REGION).offset(1).limit(1))
- if releases and releases[0].version == GIT_HEAD:
- logger.warn('Skipping database migration because revision not found')
- else:
- raise
- finally:
- connection.close()
+ try:
+ with context.begin_transaction():
+ context.run_migrations(tables=tables)
+ finally:
+ connection.close()
if context.is_offline_mode():
- run_migrations_offline()
+ run_migrations_offline()
else:
- run_migrations_online()
+ run_migrations_online()
+
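
Among the pieces this change strips from `env.py` is `get_progress_reporter()`, which assembled Prometheus grouping labels from every environment variable prefixed with `DBA_OP_LABEL_`. A self-contained sketch of just that label-collection step; the prefix and lower-casing rule come from the deleted code, while the sample values are made up.

import os

PROM_LABEL_PREFIX = 'DBA_OP_LABEL_'


def collect_prom_labels(environ=None):
    # Strip the prefix and lower-case the remainder, as the deleted
    # _process_label_key helper did.
    environ = os.environ if environ is None else environ

    def _process_label_key(label_key):
        return label_key[len(PROM_LABEL_PREFIX):].lower()

    return {_process_label_key(k): v for k, v in environ.items()
            if k.startswith(PROM_LABEL_PREFIX)}


if __name__ == '__main__':
    sample = {'DBA_OP_LABEL_CLUSTER': 'staging', 'DBA_OP_JOB_ID': 'not-a-label'}
    assert collect_prom_labels(sample) == {'cluster': 'staging'}
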
diff --git a/data/migrations/migration.sh b/data/migrations/migration.sh
index bf8d234b6..9d20c5a6a 100755
--- a/data/migrations/migration.sh
+++ b/data/migrations/migration.sh
@@ -1,57 +1,59 @@
set -e
-PARSED_DOCKER_HOST=`echo $DOCKER_HOST | sed 's/tcp:\/\///' | sed 's/:.*//'`
-DOCKER_IP="${PARSED_DOCKER_HOST:-127.0.0.1}"
+DOCKER_IP=`echo $DOCKER_HOST | sed 's/tcp:\/\///' | sed 's/:.*//'`
MYSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}"
PERCONA_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}"
PGSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"postgresql://postgres@$DOCKER_IP/genschema\"}"
up_mysql() {
# Run a SQL database on port 3306 inside of Docker.
- docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql:5.7
+ docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql
- echo 'Sleeping for 25...'
- sleep 25
+ # Sleep for 20s to let MySQL get started.
+ echo 'Sleeping for 20...'
+ sleep 20
# Add the database to mysql.
- docker run --rm --link mysql:mysql mysql:5.7 sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
+ docker run --rm --link mysql:mysql mysql sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
}
down_mysql() {
- docker kill mysql || true
- docker rm -v mysql || true
+ docker kill mysql
+ docker rm -v mysql
}
up_mariadb() {
# Run a SQL database on port 3306 inside of Docker.
docker run --name mariadb -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mariadb
- echo 'Sleeping for 25...'
- sleep 25
+ # Sleep for 10s to let MariaDB get started.
+ echo 'Sleeping for 10...'
+ sleep 10
# Add the database to mysql.
docker run --rm --link mariadb:mariadb mariadb sh -c 'echo "create database genschema" | mysql -h"$MARIADB_PORT_3306_TCP_ADDR" -P"$MARIADB_PORT_3306_TCP_PORT" -uroot -ppassword'
}
down_mariadb() {
- docker kill mariadb || true
- docker rm -v mariadb || true
+ docker kill mariadb
+ docker rm -v mariadb
}
up_percona() {
# Run a SQL database on port 3306 inside of Docker.
docker run --name percona -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d percona
- echo 'Sleeping for 25...'
- sleep 25
+ # Sleep for 10s
+ echo 'Sleeping for 10...'
+ sleep 10
# Add the database to mysql.
docker run --rm --link percona:percona percona sh -c 'echo "create database genschema" | mysql -h $PERCONA_PORT_3306_TCP_ADDR -uroot -ppassword'
}
down_percona() {
- docker kill percona || true
- docker rm -v percona || true
+ docker kill percona
+ docker rm -v percona
}
up_postgres() {
@@ -64,40 +66,30 @@ up_postgres() {
# Add the database to postgres.
docker run --rm --link postgres:postgres postgres sh -c 'echo "create database genschema" | psql -h "$POSTGRES_PORT_5432_TCP_ADDR" -p "$POSTGRES_PORT_5432_TCP_PORT" -U postgres'
- docker run --rm --link postgres:postgres postgres sh -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | psql -h "$POSTGRES_PORT_5432_TCP_ADDR" -p "$POSTGRES_PORT_5432_TCP_PORT" -U postgres -d genschema'
-
}
down_postgres() {
- docker kill postgres || true
- docker rm -v postgres || true
+ docker kill postgres
+ docker rm -v postgres
}
gen_migrate() {
- # Generate a database with the schema as defined by the existing alembic model.
+ # Generate a SQLite database with the schema as defined by the existing alembic model.
QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
-
# Generate the migration to the current model.
QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic revision --autogenerate -m "$2"
}
test_migrate() {
- # Generate a database with the schema as defined by the existing alembic model.
- echo '> Running upgrade'
- TEST_MIGRATE=true QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
+ # Generate a SQLite database with the schema as defined by the existing alembic model.
+ QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
# Downgrade to verify it works in both directions.
- echo '> Running downgrade'
COUNT=`ls data/migrations/versions/*.py | wc -l | tr -d ' '`
- TEST_MIGRATE=true QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic downgrade "-$COUNT"
+ QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic downgrade "-$COUNT"
}
-down_mysql
-down_postgres
-down_mariadb
-down_percona
-
# Test (and generate, if requested) via MySQL.
echo '> Starting MySQL'
up_mysql
@@ -116,16 +108,6 @@ test_migrate $MYSQL_CONFIG_OVERRIDE
set -e
down_mysql
-# Test via Postgres.
-echo '> Starting Postgres'
-up_postgres
-
-echo '> Testing Migration (postgres)'
-set +e
-test_migrate $PGSQL_CONFIG_OVERRIDE
-set -e
-down_postgres
-
# Test via MariaDB.
echo '> Starting MariaDB'
up_mariadb
@@ -145,3 +127,13 @@ set +e
test_migrate $PERCONA_CONFIG_OVERRIDE
set -e
down_percona
+
+# Test via Postgres.
+echo '> Starting Postgres'
+up_postgres
+
+echo '> Testing Migration (postgres)'
+set +e
+test_migrate $PGSQL_CONFIG_OVERRIDE
+set -e
+down_postgres
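
`test_migrate` above shells out to `alembic upgrade head` and then downgrades by one step per file under `data/migrations/versions/`. A rough Python equivalent using alembic's command-line entry point is sketched below; the environment-variable handling and paths are carried over from the script as assumptions, not an existing helper in this repository.

import glob
import os

from alembic.config import main as alembic_main


def test_migrate(config_override_json):
    # Mirror `QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head`.
    os.environ['QUAY_OVERRIDE_CONFIG'] = config_override_json
    alembic_main(argv=['upgrade', 'head'])

    # Downgrade to verify the migrations work in both directions, one step per version file.
    count = len(glob.glob('data/migrations/versions/*.py'))
    alembic_main(argv=['downgrade', '-%d' % count])
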
diff --git a/data/migrations/progress.py b/data/migrations/progress.py
deleted file mode 100644
index 91278beea..000000000
--- a/data/migrations/progress.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-from functools import partial, wraps
-
-from prometheus_client import CollectorRegistry, Gauge, Counter, push_to_gateway
-
-from util.abchelpers import nooper
-
-
-@add_metaclass(ABCMeta)
-class ProgressReporter(object):
- """ Implements an interface for reporting progress with the migrations.
- """
- @abstractmethod
- def report_version_complete(self, success):
- """ Called when an entire migration is complete. """
-
- @abstractmethod
- def report_step_progress(self):
- """ Called when a single step in the migration has been completed. """
-
-
-@nooper
-class NullReporter(ProgressReporter):
- """ No-op version of the progress reporter, designed for use when no progress
- reporting endpoint is provided. """
-
-
-class PrometheusReporter(ProgressReporter):
- def __init__(self, prom_pushgateway_addr, prom_job, labels, total_steps_num=None):
- self._total_steps_num = total_steps_num
- self._completed_steps = 0.0
-
- registry = CollectorRegistry()
-
- self._migration_completion_percent = Gauge(
- 'migration_completion_percent',
- 'Estimate of the completion percentage of the job',
- registry=registry,
- )
- self._migration_complete_total = Counter(
- 'migration_complete_total',
- 'Binary value of whether or not the job is complete',
- registry=registry,
- )
- self._migration_failed_total = Counter(
- 'migration_failed_total',
- 'Binary value of whether or not the job has failed',
- registry=registry,
- )
- self._migration_items_completed_total = Counter(
- 'migration_items_completed_total',
- 'Number of items this migration has completed',
- registry=registry,
- )
-
- self._push = partial(push_to_gateway,
- prom_pushgateway_addr,
- job=prom_job,
- registry=registry,
- grouping_key=labels,
- )
-
- def report_version_complete(self, success=True):
- if success:
- self._migration_complete_total.inc()
- else:
- self._migration_failed_total.inc()
- self._migration_completion_percent.set(1.0)
-
- self._push()
-
- def report_step_progress(self):
- self._migration_items_completed_total.inc()
-
- if self._total_steps_num is not None:
- self._completed_steps += 1
- self._migration_completion_percent.set(self._completed_steps / self._total_steps_num)
-
- self._push()
-
-
-class ProgressWrapper(object):
- def __init__(self, delegate_module, progress_monitor):
- self._delegate_module = delegate_module
- self._progress_monitor = progress_monitor
-
- def __getattr__(self, attr_name):
- # Will raise proper attribute error
- maybe_callable = self._delegate_module.__dict__[attr_name]
- if callable(maybe_callable):
- # Build a callable which when executed places the request
- # onto a queue
- @wraps(maybe_callable)
- def wrapped_method(*args, **kwargs):
- result = maybe_callable(*args, **kwargs)
- self._progress_monitor.report_step_progress()
- return result
-
- return wrapped_method
- return maybe_callable
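
The `ProgressWrapper` removed above proxies attribute access to the real `alembic.op` module and bumps the reporter once per wrapped call. A small, self-contained usage sketch against stand-in objects follows; the fake module and counting reporter are hypothetical, and the wrapper body mirrors the class deleted above.

import types
from functools import wraps


class ProgressWrapper(object):
    # Same shape as the class removed in this diff.
    def __init__(self, delegate_module, progress_monitor):
        self._delegate_module = delegate_module
        self._progress_monitor = progress_monitor

    def __getattr__(self, attr_name):
        maybe_callable = self._delegate_module.__dict__[attr_name]
        if callable(maybe_callable):
            @wraps(maybe_callable)
            def wrapped_method(*args, **kwargs):
                result = maybe_callable(*args, **kwargs)
                self._progress_monitor.report_step_progress()
                return result
            return wrapped_method
        return maybe_callable


class CountingReporter(object):
    # Hypothetical stand-in for PrometheusReporter/NullReporter.
    def __init__(self):
        self.steps = 0

    def report_step_progress(self):
        self.steps += 1


# Stand-in for `alembic.op`: a module-like object exposing plain functions.
fake_op = types.ModuleType('fake_op')
fake_op.add_column = lambda table, column: (table, column)

reporter = CountingReporter()
op = ProgressWrapper(fake_op, reporter)
op.add_column('user', 'creation_date')
assert reporter.steps == 1
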
diff --git a/data/migrations/script.py.mako b/data/migrations/script.py.mako
index f17f94d2b..1b92f9f48 100644
--- a/data/migrations/script.py.mako
+++ b/data/migrations/script.py.mako
@@ -10,18 +10,13 @@ Create Date: ${create_date}
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
-from alembic import op as original_op
-from progress import ProgressWrapper
+from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
+def upgrade(tables):
${upgrades if upgrades else "pass"}
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
+def downgrade(tables):
${downgrades if downgrades else "pass"}
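
With the template change above, newly generated revision files return to plain `upgrade(tables)` / `downgrade(tables)` signatures and a bare `alembic.op` import. An illustrative example of what a generated file looks like under the restored template; the revision identifiers and the column are made up.

"""Add example column

Revision ID: abcdef123456
Revises: 123456abcdef
Create Date: 2015-01-01 00:00:00.000000

"""

# revision identifiers, used by Alembic.
revision = 'abcdef123456'
down_revision = '123456abcdef'

from alembic import op
import sqlalchemy as sa


def upgrade(tables):
    op.add_column('user', sa.Column('example', sa.String(length=255), nullable=True))


def downgrade(tables):
    op.drop_column('user', 'example')
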
diff --git a/data/migrations/test/test_db_config.py b/data/migrations/test/test_db_config.py
deleted file mode 100644
index 747c5eb73..000000000
--- a/data/migrations/test/test_db_config.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import pytest
-from mock import patch
-
-from data.runmigration import run_alembic_migration
-from alembic.script import ScriptDirectory
-from test.fixtures import *
-
-@pytest.mark.parametrize('db_uri, is_valid', [
- ('postgresql://devtable:password@quay-postgres/registry_database', True),
- ('postgresql://devtable:password%25@quay-postgres/registry_database', False),
- ('postgresql://devtable:password%%25@quay-postgres/registry_database', True),
- ('postgresql://devtable@db:password@quay-postgres/registry_database', True),
-])
-def test_alembic_db_uri(db_uri, is_valid):
- """ Test if the given URI is escaped for string interpolation (Python's configparser). """
- with patch('alembic.script.ScriptDirectory.run_env') as m:
- if is_valid:
- run_alembic_migration(db_uri)
- else:
- with pytest.raises(ValueError):
- run_alembic_migration(db_uri)
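
The deleted test above checks that a `DB_URI` containing a bare `%` (which Python's configparser treats as interpolation syntax) is rejected, while `%%`-escaped URIs pass. A quick illustration of the escaping rule being probed; `escape_for_configparser` is a hypothetical helper, not something defined in the codebase.

def escape_for_configparser(db_uri):
    # configparser-style interpolation treats '%' specially; a literal percent must be
    # doubled before the URI is written into alembic's configuration.
    return db_uri.replace('%', '%%')


assert (escape_for_configparser('postgresql://devtable:password%25@quay-postgres/registry_database')
        == 'postgresql://devtable:password%%25@quay-postgres/registry_database')
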
diff --git a/data/migrations/tester.py b/data/migrations/tester.py
deleted file mode 100644
index 2643b80e2..000000000
--- a/data/migrations/tester.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import json
-import logging
-import uuid
-
-from abc import ABCMeta, abstractmethod, abstractproperty
-from datetime import datetime
-from six import add_metaclass
-
-from alembic import op
-from sqlalchemy import text
-
-from util.abchelpers import nooper
-
-logger = logging.getLogger(__name__)
-
-def escape_table_name(table_name):
- if op.get_bind().engine.name == 'postgresql':
- # Needed for the `user` table.
- return '"%s"' % table_name
-
- return table_name
-
-
-class DataTypes(object):
- @staticmethod
- def DateTime():
- return datetime.now()
-
- @staticmethod
- def Date():
- return datetime.now()
-
- @staticmethod
- def String():
- return 'somestringvalue'
-
- @staticmethod
- def Token():
- return '%s%s' % ('a' * 60, 'b' * 60)
-
- @staticmethod
- def UTF8Char():
- return 'some other value'
-
- @staticmethod
- def UUID():
- return str(uuid.uuid4())
-
- @staticmethod
- def JSON():
- return json.dumps(dict(foo='bar', baz='meh'))
-
- @staticmethod
- def Boolean():
- if op.get_bind().engine.name == 'postgresql':
- return True
-
- return 1
-
- @staticmethod
- def BigInteger():
- return 21474836470
-
- @staticmethod
- def Integer():
- return 42
-
- @staticmethod
- def Constant(value):
- def get_value():
- return value
- return get_value
-
- @staticmethod
- def Foreign(table_name):
- def get_index():
- result = op.get_bind().execute("SELECT id FROM %s LIMIT 1" % escape_table_name(table_name))
- try:
- return list(result)[0][0]
- except IndexError:
- raise Exception('Could not find row for table %s' % table_name)
- finally:
- result.close()
-
- return get_index
-
-
-@add_metaclass(ABCMeta)
-class MigrationTester(object):
- """ Implements an interface for adding testing capabilities to the
- data model migration system in Alembic.
- """
- TestDataType = DataTypes
-
- @abstractproperty
- def is_testing(self):
- """ Returns whether we are currently under a migration test. """
-
- @abstractmethod
- def populate_table(self, table_name, fields):
- """ Called to populate a table with the given fields filled in with testing data. """
-
- @abstractmethod
- def populate_column(self, table_name, col_name, field_type):
- """ Called to populate a column in a table to be filled in with testing data. """
-
-
-@nooper
-class NoopTester(MigrationTester):
- """ No-op version of the tester, designed for production workloads. """
-
-
-class PopulateTestDataTester(MigrationTester):
- @property
- def is_testing(self):
- return True
-
- def populate_table(self, table_name, fields):
- columns = {field_name: field_type() for field_name, field_type in fields}
- field_name_vars = [':' + field_name for field_name, _ in fields]
-
- if op.get_bind().engine.name == 'postgresql':
- field_names = ["%s" % field_name for field_name, _ in fields]
- else:
- field_names = ["`%s`" % field_name for field_name, _ in fields]
-
- table_name = escape_table_name(table_name)
- query = text('INSERT INTO %s (%s) VALUES (%s)' % (table_name, ', '.join(field_names),
- ', '.join(field_name_vars)))
- logger.info("Executing test query %s with values %s", query, columns.values())
- op.get_bind().execute(query, **columns)
-
- def populate_column(self, table_name, col_name, field_type):
- col_value = field_type()
- row_id = DataTypes.Foreign(table_name)()
-
- table_name = escape_table_name(table_name)
- update_text = text("UPDATE %s SET %s=:col_value where ID=:row_id" % (table_name, col_name))
- logger.info("Executing test query %s with value %s on row %s", update_text, col_value, row_id)
- op.get_bind().execute(update_text, col_value=col_value, row_id=row_id)
diff --git a/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py b/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py
deleted file mode 100644
index 2a995e58c..000000000
--- a/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Add creation date to User table
-
-Revision ID: 0cf50323c78b
-Revises: 87fbbc224f10
-Create Date: 2018-03-09 13:19:41.903196
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '0cf50323c78b'
-down_revision = '87fbbc224f10'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('user', sa.Column('creation_date', sa.DateTime(), nullable=True))
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_column('user', 'creation_date', tester.TestDataType.DateTime)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_column('user', 'creation_date')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py b/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py
deleted file mode 100644
index e2b4073da..000000000
--- a/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""Add Tag, TagKind and ManifestChild tables
-
-Revision ID: 10f45ee2310b
-Revises: 13411de1c0ff
-Create Date: 2018-10-29 15:22:53.552216
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '10f45ee2310b'
-down_revision = '13411de1c0ff'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from util.migrate import UTF8CharField
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('tagkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagkind'))
- )
- op.create_index('tagkind_name', 'tagkind', ['name'], unique=True)
- op.create_table('manifestchild',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('child_manifest_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['child_manifest_id'], ['manifest.id'], name=op.f('fk_manifestchild_child_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestchild_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestchild_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestchild'))
- )
- op.create_index('manifestchild_child_manifest_id', 'manifestchild', ['child_manifest_id'], unique=False)
- op.create_index('manifestchild_manifest_id', 'manifestchild', ['manifest_id'], unique=False)
- op.create_index('manifestchild_manifest_id_child_manifest_id', 'manifestchild', ['manifest_id', 'child_manifest_id'], unique=True)
- op.create_index('manifestchild_repository_id', 'manifestchild', ['repository_id'], unique=False)
- op.create_index('manifestchild_repository_id_child_manifest_id', 'manifestchild', ['repository_id', 'child_manifest_id'], unique=False)
- op.create_index('manifestchild_repository_id_manifest_id', 'manifestchild', ['repository_id', 'manifest_id'], unique=False)
- op.create_index('manifestchild_repository_id_manifest_id_child_manifest_id', 'manifestchild', ['repository_id', 'manifest_id', 'child_manifest_id'], unique=False)
- op.create_table('tag',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=True),
- sa.Column('lifetime_start_ms', sa.BigInteger(), nullable=False),
- sa.Column('lifetime_end_ms', sa.BigInteger(), nullable=True),
- sa.Column('hidden', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.Column('reversion', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.Column('tag_kind_id', sa.Integer(), nullable=False),
- sa.Column('linked_tag_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['linked_tag_id'], ['tag.id'], name=op.f('fk_tag_linked_tag_id_tag')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tag_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tag_repository_id_repository')),
- sa.ForeignKeyConstraint(['tag_kind_id'], ['tagkind.id'], name=op.f('fk_tag_tag_kind_id_tagkind')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tag'))
- )
- op.create_index('tag_lifetime_end_ms', 'tag', ['lifetime_end_ms'], unique=False)
- op.create_index('tag_linked_tag_id', 'tag', ['linked_tag_id'], unique=False)
- op.create_index('tag_manifest_id', 'tag', ['manifest_id'], unique=False)
- op.create_index('tag_repository_id', 'tag', ['repository_id'], unique=False)
- op.create_index('tag_repository_id_name', 'tag', ['repository_id', 'name'], unique=False)
- op.create_index('tag_repository_id_name_hidden', 'tag', ['repository_id', 'name', 'hidden'], unique=False)
- op.create_index('tag_repository_id_name_lifetime_end_ms', 'tag', ['repository_id', 'name', 'lifetime_end_ms'], unique=True)
- op.create_index('tag_repository_id_name_tag_kind_id', 'tag', ['repository_id', 'name', 'tag_kind_id'], unique=False)
- op.create_index('tag_tag_kind_id', 'tag', ['tag_kind_id'], unique=False)
- # ### end Alembic commands ###
-
- op.bulk_insert(tables.tagkind,
- [
- {'name': 'tag'},
- ])
-
- # ### population of test data ### #
- tester.populate_table('tag', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('tag_kind_id', tester.TestDataType.Foreign('tagkind')),
- ('name', tester.TestDataType.String),
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('lifetime_start_ms', tester.TestDataType.BigInteger),
- ])
-
- tester.populate_table('manifestchild', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('child_manifest_id', tester.TestDataType.Foreign('manifest')),
- ])
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('tag')
- op.drop_table('manifestchild')
- op.drop_table('tagkind')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py b/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py
deleted file mode 100644
index 70e0a21d7..000000000
--- a/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""Remove unique from TagManifestToManifest
-
-Revision ID: 13411de1c0ff
-Revises: 654e6df88b71
-Create Date: 2018-08-19 23:30:24.969549
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '13411de1c0ff'
-down_revision = '654e6df88b71'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # Note: Because of a restriction in MySQL, we cannot simply remove the index and re-add
- # it without the unique=False, nor can we simply alter the index. To make it work, we'd have to
- # remove the primary key on the field, so instead we simply drop the table entirely and
- # recreate it with the modified index. The backfill will re-fill this in.
- op.drop_table('tagmanifesttomanifest')
-
- op.create_table('tagmanifesttomanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('broken', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifesttomanifest_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifesttomanifest_tag_manifest_id_tagmanifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifesttomanifest'))
- )
- op.create_index('tagmanifesttomanifest_broken', 'tagmanifesttomanifest', ['broken'], unique=False)
- op.create_index('tagmanifesttomanifest_manifest_id', 'tagmanifesttomanifest', ['manifest_id'], unique=False)
- op.create_index('tagmanifesttomanifest_tag_manifest_id', 'tagmanifesttomanifest', ['tag_manifest_id'], unique=True)
-
- tester.populate_table('tagmanifesttomanifest', [
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- pass
diff --git a/data/migrations/versions/13da56878560_migrate_registry_namespaces_to_.py b/data/migrations/versions/13da56878560_migrate_registry_namespaces_to_.py
new file mode 100644
index 000000000..30ac75c96
--- /dev/null
+++ b/data/migrations/versions/13da56878560_migrate_registry_namespaces_to_.py
@@ -0,0 +1,24 @@
+"""Migrate registry namespaces to reference a user.
+
+Revision ID: 13da56878560
+Revises: 51d04d0e7e6f
+Create Date: 2014-09-18 13:56:45.130455
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '13da56878560'
+down_revision = '51d04d0e7e6f'
+
+from alembic import op
+import sqlalchemy as sa
+
+from data.database import Repository, User
+
+def upgrade(tables):
+ # Add the namespace_user column, allowing it to be nullable
+ op.add_column('repository', sa.Column('namespace_user_id', sa.Integer(), sa.ForeignKey('user.id')))
+
+
+def downgrade(tables):
+ op.drop_column('repository', 'namespace_user_id')
diff --git a/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py b/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py
new file mode 100644
index 000000000..5e8d21211
--- /dev/null
+++ b/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py
@@ -0,0 +1,30 @@
+"""Add build queue item reference to the repositorybuild table
+
+Revision ID: 14fe12ade3df
+Revises: 5ad999136045
+Create Date: 2015-02-12 16:11:57.814645
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '14fe12ade3df'
+down_revision = '5ad999136045'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuild', sa.Column('queue_item_id', sa.Integer(), nullable=True))
+ op.create_index('repositorybuild_queue_item_id', 'repositorybuild', ['queue_item_id'], unique=False)
+ op.create_foreign_key(op.f('fk_repositorybuild_queue_item_id_queueitem'), 'repositorybuild', 'queueitem', ['queue_item_id'], ['id'])
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(op.f('fk_repositorybuild_queue_item_id_queueitem'), 'repositorybuild', type_='foreignkey')
+ op.drop_index('repositorybuild_queue_item_id', table_name='repositorybuild')
+ op.drop_column('repositorybuild', 'queue_item_id')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py b/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py
deleted file mode 100644
index 489303dde..000000000
--- a/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Add maximum build queue count setting to user table
-
-Revision ID: 152bb29a1bb3
-Revises: 7367229b38d9
-Create Date: 2018-02-20 13:34:34.902415
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '152bb29a1bb3'
-down_revision = 'cbc8177760d9'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('user', sa.Column('maximum_queued_builds_count', sa.Integer(), nullable=True))
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_column('user', 'maximum_queued_builds_count', tester.TestDataType.Integer)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_column('user', 'maximum_queued_builds_count')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py b/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py
deleted file mode 100644
index 6eca834fa..000000000
--- a/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Make BlodUpload byte_count not nullable
-
-Revision ID: 152edccba18c
-Revises: c91c564aad34
-Create Date: 2018-02-23 12:41:25.571835
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '152edccba18c'
-down_revision = 'c91c564aad34'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(),
- nullable=False)
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(),
- nullable=True)
diff --git a/data/migrations/versions/154f2befdfbe_add_enabled_column_to_the_user_system.py b/data/migrations/versions/154f2befdfbe_add_enabled_column_to_the_user_system.py
new file mode 100644
index 000000000..9e6532197
--- /dev/null
+++ b/data/migrations/versions/154f2befdfbe_add_enabled_column_to_the_user_system.py
@@ -0,0 +1,26 @@
+"""Add enabled column to the user system
+
+Revision ID: 154f2befdfbe
+Revises: 41f4587c84ae
+Create Date: 2015-05-11 17:02:43.507847
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '154f2befdfbe'
+down_revision = '41f4587c84ae'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('user', sa.Column('enabled', sa.Boolean(), nullable=False, default=True, server_default="1"))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('user', 'enabled')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py b/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py
new file mode 100644
index 000000000..2f6c60706
--- /dev/null
+++ b/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py
@@ -0,0 +1,35 @@
+"""add metadata field to external logins
+
+Revision ID: 1594a74a74ca
+Revises: f42b0ea7a4d
+Create Date: 2014-09-04 18:17:35.205698
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1594a74a74ca'
+down_revision = 'f42b0ea7a4d'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('federatedlogin', sa.Column('metadata_json', sa.Text(), nullable=False))
+ ### end Alembic commands ###
+
+ op.bulk_insert(tables.loginservice,
+ [
+ {'id':4, 'name':'google'},
+ ])
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('federatedlogin', 'metadata_json')
+ ### end Alembic commands ###
+
+ op.execute(
+ (tables.loginservice.delete()
+ .where(tables.loginservice.c.name == op.inline_literal('google')))
+ )
diff --git a/data/migrations/versions/1783530bee68_add_logentry2_table_quay_io_only.py b/data/migrations/versions/1783530bee68_add_logentry2_table_quay_io_only.py
deleted file mode 100644
index ffe5d9176..000000000
--- a/data/migrations/versions/1783530bee68_add_logentry2_table_quay_io_only.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""Add LogEntry2 table - QUAY.IO ONLY
-
-Revision ID: 1783530bee68
-Revises: 5b7503aada1b
-Create Date: 2018-05-17 16:32:28.532264
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '1783530bee68'
-down_revision = '5b7503aada1b'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('logentry2',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=False),
- sa.Column('account_id', sa.Integer(), nullable=False),
- sa.Column('performer_id', sa.Integer(), nullable=True),
- sa.Column('repository_id', sa.Integer(), nullable=True),
- sa.Column('datetime', sa.DateTime(), nullable=False),
- sa.Column('ip', sa.String(length=255), nullable=True),
- sa.Column('metadata_json', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['kind_id'], ['logentrykind.id'], name=op.f('fk_logentry2_kind_id_logentrykind')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry2'))
- )
- op.create_index('logentry2_account_id', 'logentry2', ['account_id'], unique=False)
- op.create_index('logentry2_account_id_datetime', 'logentry2', ['account_id', 'datetime'], unique=False)
- op.create_index('logentry2_datetime', 'logentry2', ['datetime'], unique=False)
- op.create_index('logentry2_kind_id', 'logentry2', ['kind_id'], unique=False)
- op.create_index('logentry2_performer_id', 'logentry2', ['performer_id'], unique=False)
- op.create_index('logentry2_performer_id_datetime', 'logentry2', ['performer_id', 'datetime'], unique=False)
- op.create_index('logentry2_repository_id', 'logentry2', ['repository_id'], unique=False)
- op.create_index('logentry2_repository_id_datetime', 'logentry2', ['repository_id', 'datetime'], unique=False)
- op.create_index('logentry2_repository_id_datetime_kind_id', 'logentry2', ['repository_id', 'datetime', 'kind_id'], unique=False)
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('logentry2')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py b/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py
deleted file mode 100644
index 27f1aafa6..000000000
--- a/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Add automatic disable of build triggers
-
-Revision ID: 17aff2e1354e
-Revises: 61cadbacb9fc
-Create Date: 2017-10-18 15:58:03.971526
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '17aff2e1354e'
-down_revision = '61cadbacb9fc'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('repositorybuildtrigger', sa.Column('successive_failure_count', sa.Integer(), server_default='0', nullable=False))
- op.add_column('repositorybuildtrigger', sa.Column('successive_internal_error_count', sa.Integer(), server_default='0', nullable=False))
- # ### end Alembic commands ###
-
- op.bulk_insert(
- tables.disablereason,
- [
- {'id': 2, 'name': 'successive_build_failures'},
- {'id': 3, 'name': 'successive_build_internal_errors'},
- ],
- )
-
- # ### population of test data ### #
- tester.populate_column('repositorybuildtrigger', 'successive_failure_count', tester.TestDataType.Integer)
- tester.populate_column('repositorybuildtrigger', 'successive_internal_error_count', tester.TestDataType.Integer)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_column('repositorybuildtrigger', 'successive_internal_error_count')
- op.drop_column('repositorybuildtrigger', 'successive_failure_count')
- # ### end Alembic commands ###
-
- op.execute(tables
- .disablereason
- .delete()
- .where(tables.disablereason.c.name == op.inline_literal('successive_internal_error_count')))
-
- op.execute(tables
- .disablereason
- .delete()
- .where(tables.disablereason.c.name == op.inline_literal('successive_failure_count')))
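Editor's note: the deleted downgrade above removes `disablereason` rows named `successive_internal_error_count` and `successive_failure_count`, which do not match the names inserted by its upgrade (`successive_build_failures`, `successive_build_internal_errors`). For reference only, a sketch of a name-matched cleanup:

    # Sketch: delete the same rows that upgrade() inserted.
    for name in ('successive_build_failures', 'successive_build_internal_errors'):
        op.execute(tables.disablereason.delete()
                   .where(tables.disablereason.c.name == op.inline_literal(name)))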
diff --git a/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py b/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py
new file mode 100644
index 000000000..9371941f8
--- /dev/null
+++ b/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py
@@ -0,0 +1,23 @@
+"""add uuid field to user
+
+Revision ID: 17f11e265e13
+Revises: 313d297811c4
+Create Date: 2014-11-11 14:32:54.866188
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '17f11e265e13'
+down_revision = '313d297811c4'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+
+def upgrade(tables):
+ op.add_column('user', sa.Column('uuid', sa.String(length=36), nullable=True))
+
+
+def downgrade(tables):
+ op.drop_column('user', 'uuid')
diff --git a/data/migrations/versions/1c3decf6b9c4_add_revert_tag_log_entry_kind.py b/data/migrations/versions/1c3decf6b9c4_add_revert_tag_log_entry_kind.py
new file mode 100644
index 000000000..6e2bccb68
--- /dev/null
+++ b/data/migrations/versions/1c3decf6b9c4_add_revert_tag_log_entry_kind.py
@@ -0,0 +1,29 @@
+"""Add revert_tag log entry kind
+
+Revision ID: 1c3decf6b9c4
+Revises: 4ce2169efd3b
+Create Date: 2015-04-16 17:14:11.154856
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1c3decf6b9c4'
+down_revision = '4ce2169efd3b'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'id': 47, 'name':'revert_tag'},
+ ])
+
+
+def downgrade(tables):
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('revert_tag')))
+
+ )
\ No newline at end of file
diff --git a/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py b/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py
new file mode 100644
index 000000000..44ea6f5ec
--- /dev/null
+++ b/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py
@@ -0,0 +1,23 @@
+"""backfill user uuids
+
+Revision ID: 1c5b738283a5
+Revises: 2fb36d4be80d
+Create Date: 2014-11-20 18:22:03.418215
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1c5b738283a5'
+down_revision = '2fb36d4be80d'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from util.migrate.backfill_user_uuids import backfill_user_uuids
+
+def upgrade(tables):
+ backfill_user_uuids()
+
+
+def downgrade(tables):
+ pass
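Editor's note: the backfill itself lives in `util.migrate.backfill_user_uuids` and is not shown in this patch. A rough sketch of the kind of batched backfill such a helper might perform (the `User` model and its fields are assumptions for illustration, not taken from the diff):

    import uuid
    from data.database import User  # assumed peewee model with a nullable `uuid` field

    def backfill_user_uuids_sketch(batch_size=100):
        # Assign a UUID to every user row that lacks one, in small batches so the
        # migration avoids holding long transactions on large tables.
        while True:
            batch = list(User.select().where(User.uuid >> None).limit(batch_size))
            if not batch:
                break
            for user in batch:
                user.uuid = str(uuid.uuid4())
                user.save()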
diff --git a/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py b/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py
new file mode 100644
index 000000000..a7942b7d4
--- /dev/null
+++ b/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py
@@ -0,0 +1,37 @@
+"""Actually remove the column access_token_id
+
+Revision ID: 1d2d86d09fcd
+Revises: 14fe12ade3df
+Create Date: 2015-02-12 16:27:30.260797
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1d2d86d09fcd'
+down_revision = '14fe12ade3df'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from sqlalchemy.exc import InternalError
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ try:
+ op.drop_constraint(u'fk_logentry_access_token_id_accesstoken', 'logentry', type_='foreignkey')
+ op.drop_index('logentry_access_token_id', table_name='logentry')
+ op.drop_column('logentry', 'access_token_id')
+ except InternalError:
+ pass
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ try:
+ op.add_column('logentry', sa.Column('access_token_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
+ op.create_foreign_key(u'fk_logentry_access_token_id_accesstoken', 'logentry', 'accesstoken', ['access_token_id'], ['id'])
+ op.create_index('logentry_access_token_id', 'logentry', ['access_token_id'], unique=False)
+ except InternalError:
+ pass
+ ### end Alembic commands ###
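Editor's note: catching `InternalError` above makes the drop tolerant of databases where the column or constraint is already gone. An alternative sketch that inspects the schema first instead of relying on the exception (standard SQLAlchemy inspection calls; this is not how the migration is actually written, and the snippet is meant to live inside upgrade()):

    from sqlalchemy import inspect

    def column_exists(table_name, column_name):
        # Reflect the live schema from the migration's connection.
        inspector = inspect(op.get_bind())
        return any(col['name'] == column_name for col in inspector.get_columns(table_name))

    # e.g. only drop when the column is still present:
    # if column_exists('logentry', 'access_token_id'):
    #     op.drop_column('logentry', 'access_token_id')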
diff --git a/data/migrations/versions/1f116e06b68_add_gitlab_trigger_type.py b/data/migrations/versions/1f116e06b68_add_gitlab_trigger_type.py
new file mode 100644
index 000000000..40d9a9399
--- /dev/null
+++ b/data/migrations/versions/1f116e06b68_add_gitlab_trigger_type.py
@@ -0,0 +1,25 @@
+"""Add gitlab trigger type
+
+Revision ID: 1f116e06b68
+Revises: 313179799c8b
+Create Date: 2015-05-03 10:45:06.257913
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1f116e06b68'
+down_revision = '313179799c8b'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.bulk_insert(tables.buildtriggerservice, [{'id': 4, 'name': 'gitlab'}])
+
+
+def downgrade(tables):
+ op.execute(
+ tables.buildtriggerservice.delete()
+ .where(tables.buildtriggerservice.c.name == op.inline_literal('gitlab'))
+ )
diff --git a/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py b/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py
new file mode 100644
index 000000000..8185c1118
--- /dev/null
+++ b/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py
@@ -0,0 +1,54 @@
+"""Remove fields from image table that were migrated to imagestorage.
+
+Revision ID: 201d55b38649
+Revises: 5a07499ce53f
+Create Date: 2014-06-12 19:48:53.861115
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '201d55b38649'
+down_revision = '5a07499ce53f'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('buildtriggerservice_name', table_name='buildtriggerservice')
+ op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=True)
+ op.drop_column('image', 'comment')
+ op.drop_column('image', 'checksum')
+ op.drop_column('image', 'image_size')
+ op.drop_column('image', 'command')
+ op.drop_column('image', 'created')
+ op.drop_index('logentrykind_name', table_name='logentrykind')
+ op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=True)
+ op.drop_index('notificationkind_name', table_name='notificationkind')
+ op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=True)
+ op.drop_index('role_name', table_name='role')
+ op.create_index('role_name', 'role', ['name'], unique=True)
+ op.drop_index('visibility_name', table_name='visibility')
+ op.create_index('visibility_name', 'visibility', ['name'], unique=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('visibility_name', table_name='visibility')
+ op.create_index('visibility_name', 'visibility', ['name'], unique=False)
+ op.drop_index('role_name', table_name='role')
+ op.create_index('role_name', 'role', ['name'], unique=False)
+ op.drop_index('notificationkind_name', table_name='notificationkind')
+ op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=False)
+ op.drop_index('logentrykind_name', table_name='logentrykind')
+ op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=False)
+ op.add_column('image', sa.Column('created', sa.DateTime(), nullable=True))
+ op.add_column('image', sa.Column('command', sa.Text(), nullable=True))
+ op.add_column('image', sa.Column('image_size', sa.BigInteger(), nullable=True))
+ op.add_column('image', sa.Column('checksum', sa.String(length=255), nullable=True))
+ op.add_column('image', sa.Column('comment', sa.Text(), nullable=True))
+ op.drop_index('buildtriggerservice_name', table_name='buildtriggerservice')
+ op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=False)
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/204abf14783d_add_log_entry_kind_for_verbs.py b/data/migrations/versions/204abf14783d_add_log_entry_kind_for_verbs.py
new file mode 100644
index 000000000..981c46087
--- /dev/null
+++ b/data/migrations/versions/204abf14783d_add_log_entry_kind_for_verbs.py
@@ -0,0 +1,28 @@
+"""Add log entry kind for verbs
+
+Revision ID: 204abf14783d
+Revises: 2430f55c41d5
+Create Date: 2014-10-29 15:38:06.100915
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '204abf14783d'
+down_revision = '2430f55c41d5'
+
+from alembic import op
+import sqlalchemy as sa
+
+def upgrade(tables):
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'id': 46, 'name':'repo_verb'},
+ ])
+
+
+def downgrade(tables):
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('repo_verb')))
+
+ )
diff --git a/data/migrations/versions/2088f2b81010_add_stars.py b/data/migrations/versions/2088f2b81010_add_stars.py
new file mode 100644
index 000000000..ad4ccdf2b
--- /dev/null
+++ b/data/migrations/versions/2088f2b81010_add_stars.py
@@ -0,0 +1,40 @@
+"""add stars
+
+Revision ID: 2088f2b81010
+Revises: 707d5191eda

+Create Date: 2014-12-02 17:45:00.707498
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2088f2b81010'
+down_revision = '707d5191eda'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ op.create_table('star',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_star_repository_id_repository')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_star_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_star'))
+ )
+ with op.batch_alter_table('star', schema=None) as batch_op:
+ batch_op.create_index('star_repository_id', ['repository_id'], unique=False)
+ batch_op.create_index('star_user_id', ['user_id'], unique=False)
+ batch_op.create_index('star_user_id_repository_id', ['user_id', 'repository_id'], unique=True)
+
+def downgrade(tables):
+ op.drop_constraint('fk_star_repository_id_repository', 'star', type_='foreignkey')
+ op.drop_constraint('fk_star_user_id_user', 'star', type_='foreignkey')
+ with op.batch_alter_table('star', schema=None) as batch_op:
+ batch_op.drop_index('star_user_id_repository_id')
+ batch_op.drop_index('star_user_id')
+ batch_op.drop_index('star_repository_id')
+
+ op.drop_table('star')
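Editor's note: in the downgrade above, the two foreign keys are dropped before their indexes because MySQL refuses to drop an index that still backs a foreign key constraint, while `batch_alter_table` groups the index operations so backends that cannot alter tables in place (notably SQLite) can fall back to Alembic's copy-and-recreate strategy. A compressed sketch of that ordering for a single column, shown only to make the dependency explicit:

    # Sketch: drop the FK first, then the index that backs it, then the table.
    op.drop_constraint('fk_star_user_id_user', 'star', type_='foreignkey')
    op.drop_index('star_user_id', table_name='star')
    op.drop_table('star')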
diff --git a/data/migrations/versions/214350b6a8b1_add_private_key_to_build_triggers.py b/data/migrations/versions/214350b6a8b1_add_private_key_to_build_triggers.py
new file mode 100644
index 000000000..dc7e052bc
--- /dev/null
+++ b/data/migrations/versions/214350b6a8b1_add_private_key_to_build_triggers.py
@@ -0,0 +1,26 @@
+"""add private key to build triggers
+
+Revision ID: 214350b6a8b1
+Revises: 67eb43c778b
+Create Date: 2015-03-19 14:23:52.604505
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '214350b6a8b1'
+down_revision = '67eb43c778b'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuildtrigger', sa.Column('private_key', sa.Text(), nullable=True))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('repositorybuildtrigger', 'private_key')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py b/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py
deleted file mode 100644
index 9b9bb1978..000000000
--- a/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Add last_accessed field to User table
-
-Revision ID: 224ce4c72c2f
-Revises: b547bc139ad8
-Create Date: 2018-03-12 22:44:07.070490
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '224ce4c72c2f'
-down_revision = 'b547bc139ad8'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('user', sa.Column('last_accessed', sa.DateTime(), nullable=True))
- op.create_index('user_last_accessed', 'user', ['last_accessed'], unique=False)
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_column('user', 'last_accessed', tester.TestDataType.DateTime)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('user_last_accessed', table_name='user')
- op.drop_column('user', 'last_accessed')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py b/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py
new file mode 100644
index 000000000..2f6ff722b
--- /dev/null
+++ b/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py
@@ -0,0 +1,25 @@
+"""mysql max index lengths
+
+Revision ID: 228d1af6af1c
+Revises: 5b84373e5db
+Create Date: 2015-01-06 14:35:24.651424
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '228d1af6af1c'
+down_revision = '5b84373e5db'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ op.drop_index('queueitem_queue_name', table_name='queueitem')
+ op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False, mysql_length=767)
+
+ op.drop_index('image_ancestors', table_name='image')
+ op.create_index('image_ancestors', 'image', ['ancestors'], unique=False, mysql_length=767)
+
+def downgrade(tables):
+ pass
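Editor's note: `mysql_length=767` caps the indexed prefix at 767 bytes, the InnoDB key-prefix limit for the older COMPACT/REDUNDANT row formats, so long VARCHAR/TEXT columns such as `queueitem.queue_name` can still be indexed. The option also accepts a per-column mapping when only some columns of a composite index need truncating; a small sketch (this composite index and its second column are illustrative, not part of the patch):

    # Sketch: per-column prefix lengths for a composite index.
    op.create_index(
        'queueitem_queue_name_available',
        'queueitem',
        ['queue_name', 'available'],
        unique=False,
        mysql_length={'queue_name': 767},  # only the long varchar needs a prefix
    )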
diff --git a/data/migrations/versions/2430f55c41d5_calculate_uncompressed_sizes_for_all_.py b/data/migrations/versions/2430f55c41d5_calculate_uncompressed_sizes_for_all_.py
new file mode 100644
index 000000000..20d6fb094
--- /dev/null
+++ b/data/migrations/versions/2430f55c41d5_calculate_uncompressed_sizes_for_all_.py
@@ -0,0 +1,22 @@
+"""Calculate uncompressed sizes for all images
+
+Revision ID: 2430f55c41d5
+Revises: 3b4d3a4461dc
+Create Date: 2014-10-07 14:50:04.660315
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2430f55c41d5'
+down_revision = '3b4d3a4461dc'
+
+from alembic import op
+import sqlalchemy as sa
+from util.migrate.uncompressedsize import backfill_sizes_from_data
+
+
+def upgrade(tables):
+ backfill_sizes_from_data()
+
+def downgrade(tables):
+ pass
diff --git a/data/migrations/versions/246df01a6d51_add_index_to_retries_remaining.py b/data/migrations/versions/246df01a6d51_add_index_to_retries_remaining.py
new file mode 100644
index 000000000..41488b17b
--- /dev/null
+++ b/data/migrations/versions/246df01a6d51_add_index_to_retries_remaining.py
@@ -0,0 +1,26 @@
+"""Add index to retries_remaining
+
+Revision ID: 246df01a6d51
+Revises: 5232a5610a0a
+Create Date: 2015-08-04 17:59:42.262877
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '246df01a6d51'
+down_revision = '5232a5610a0a'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('queueitem_retries_remaining', 'queueitem', ['retries_remaining'], unique=False)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('queueitem_retries_remaining', table_name='queueitem')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/2b2529fd23ff_add_aggregate_size_column.py b/data/migrations/versions/2b2529fd23ff_add_aggregate_size_column.py
new file mode 100644
index 000000000..32a6e7f2f
--- /dev/null
+++ b/data/migrations/versions/2b2529fd23ff_add_aggregate_size_column.py
@@ -0,0 +1,26 @@
+"""Add aggregate size column
+
+Revision ID: 2b2529fd23ff
+Revises: 2088f2b81010
+Create Date: 2015-03-16 17:36:53.321458
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2b2529fd23ff'
+down_revision = '2088f2b81010'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('imagestorage', sa.Column('aggregate_size', sa.BigInteger(), nullable=True))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('imagestorage', 'aggregate_size')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/2b4dc0818a5e_add_a_unique_index_to_prevent_deadlocks_.py b/data/migrations/versions/2b4dc0818a5e_add_a_unique_index_to_prevent_deadlocks_.py
new file mode 100644
index 000000000..8efe0c123
--- /dev/null
+++ b/data/migrations/versions/2b4dc0818a5e_add_a_unique_index_to_prevent_deadlocks_.py
@@ -0,0 +1,26 @@
+"""Add a unique index to prevent deadlocks with tags.
+
+Revision ID: 2b4dc0818a5e
+Revises: 2b2529fd23ff
+Create Date: 2015-03-20 23:37:10.558179
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2b4dc0818a5e'
+down_revision = '2b2529fd23ff'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('repositorytag_repository_id_name_lifetime_end_ts', 'repositorytag', ['repository_id', 'name', 'lifetime_end_ts'], unique=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('repositorytag_repository_id_name_lifetime_end_ts', table_name='repositorytag')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/2bf8af5bad95_add_keystone_login_service.py b/data/migrations/versions/2bf8af5bad95_add_keystone_login_service.py
new file mode 100644
index 000000000..440e7ec98
--- /dev/null
+++ b/data/migrations/versions/2bf8af5bad95_add_keystone_login_service.py
@@ -0,0 +1,26 @@
+"""Add keystone login service
+
+Revision ID: 2bf8af5bad95
+Revises: 154f2befdfbe
+Create Date: 2015-06-29 21:19:13.053165
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2bf8af5bad95'
+down_revision = '154f2befdfbe'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.bulk_insert(tables.loginservice, [{'id': 6, 'name': 'keystone'}])
+
+
+def downgrade(tables):
+ op.execute(
+ tables.loginservice.delete()
+ .where(tables.loginservice.c.name == op.inline_literal('keystone'))
+ )
+
diff --git a/data/migrations/versions/2e09ad97b06c_add_missing_tag_manifest_table.py b/data/migrations/versions/2e09ad97b06c_add_missing_tag_manifest_table.py
new file mode 100644
index 000000000..9659ba2ae
--- /dev/null
+++ b/data/migrations/versions/2e09ad97b06c_add_missing_tag_manifest_table.py
@@ -0,0 +1,35 @@
+"""Add missing tag manifest table
+
+Revision ID: 2e09ad97b06c
+Revises: 2bf8af5bad95
+Create Date: 2015-07-22 16:10:42.549566
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2e09ad97b06c'
+down_revision = '2bf8af5bad95'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('tagmanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('tag_id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('json_data', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['tag_id'], ['repositorytag.id'], name=op.f('fk_tagmanifest_tag_id_repositorytag')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifest'))
+ )
+ op.create_index('tagmanifest_digest', 'tagmanifest', ['digest'], unique=True)
+ op.create_index('tagmanifest_tag_id', 'tagmanifest', ['tag_id'], unique=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('tagmanifest')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/2fb36d4be80d_remove_the_namespace_column.py b/data/migrations/versions/2fb36d4be80d_remove_the_namespace_column.py
new file mode 100644
index 000000000..19b9582f3
--- /dev/null
+++ b/data/migrations/versions/2fb36d4be80d_remove_the_namespace_column.py
@@ -0,0 +1,30 @@
+"""remove the namespace column.
+
+Revision ID: 2fb36d4be80d
+Revises: 17f11e265e13
+Create Date: 2014-09-30 17:31:33.308490
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2fb36d4be80d'
+down_revision = '17f11e265e13'
+
+from alembic import op
+import sqlalchemy as sa
+
+import re
+from app import app
+
+
+NAMESPACE_EXTRACTOR = re.compile(r'^([a-z]+/)([a-z0-9_]+)(/.*$)')
+
+
+def upgrade(tables):
+ op.create_index('repository_namespace_user_id', 'repository', ['namespace_user_id'], unique=False)
+ op.drop_column('repository', 'namespace')
+
+
+def downgrade(tables):
+ op.add_column('repository', sa.Column('namespace', sa.String(length=255)))
+ op.drop_index('repository_namespace_user_id', table_name='repository')
diff --git a/data/migrations/versions/30c044b75632_add_repositoryactioncount_table.py b/data/migrations/versions/30c044b75632_add_repositoryactioncount_table.py
new file mode 100644
index 000000000..8df45958e
--- /dev/null
+++ b/data/migrations/versions/30c044b75632_add_repositoryactioncount_table.py
@@ -0,0 +1,36 @@
+"""Add RepositoryActionCount table
+
+Revision ID: 30c044b75632
+Revises: 2b4dc0818a5e
+Create Date: 2015-04-13 13:21:18.159602
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '30c044b75632'
+down_revision = '2b4dc0818a5e'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('repositoryactioncount',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('count', sa.Integer(), nullable=False),
+ sa.Column('date', sa.Date(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositoryactioncount_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositoryactioncount'))
+ )
+ op.create_index('repositoryactioncount_date', 'repositoryactioncount', ['date'], unique=False)
+ op.create_index('repositoryactioncount_repository_id', 'repositoryactioncount', ['repository_id'], unique=False)
+ op.create_index('repositoryactioncount_repository_id_date', 'repositoryactioncount', ['repository_id', 'date'], unique=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('repositoryactioncount')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/31288f79df53_make_resource_key_nullable.py b/data/migrations/versions/31288f79df53_make_resource_key_nullable.py
new file mode 100644
index 000000000..e52795ce6
--- /dev/null
+++ b/data/migrations/versions/31288f79df53_make_resource_key_nullable.py
@@ -0,0 +1,30 @@
+"""make resource_key nullable
+
+Revision ID: 31288f79df53
+Revises: 214350b6a8b1
+Create Date: 2015-03-23 14:34:04.816295
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '31288f79df53'
+down_revision = '214350b6a8b1'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column('repositorybuild', 'resource_key',
+ existing_type=mysql.VARCHAR(length=255),
+ nullable=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column('repositorybuild', 'resource_key',
+ existing_type=mysql.VARCHAR(length=255),
+ nullable=False)
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/313179799c8b_add_bitbucket_build_trigger_type.py b/data/migrations/versions/313179799c8b_add_bitbucket_build_trigger_type.py
new file mode 100644
index 000000000..7f4d67cad
--- /dev/null
+++ b/data/migrations/versions/313179799c8b_add_bitbucket_build_trigger_type.py
@@ -0,0 +1,25 @@
+"""Add bitbucket build trigger type
+
+Revision ID: 313179799c8b
+Revises: 37c47a7af956
+Create Date: 2015-04-30 15:52:33.388825
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '313179799c8b'
+down_revision = '37c47a7af956'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.bulk_insert(tables.buildtriggerservice, [{'id': 3, 'name': 'bitbucket'}])
+
+
+def downgrade(tables):
+ op.execute(
+ tables.buildtriggerservice.delete()
+ .where(tables.buildtriggerservice.c.name == op.inline_literal('bitbucket'))
+ )
diff --git a/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py b/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py
new file mode 100644
index 000000000..2ed6bd2f5
--- /dev/null
+++ b/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py
@@ -0,0 +1,26 @@
+"""Add an index to the docker_image_id field
+
+Revision ID: 313d297811c4
+Revises: 204abf14783d
+Create Date: 2014-11-13 12:40:57.414787
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '313d297811c4'
+down_revision = '204abf14783d'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('image_docker_image_id', 'image', ['docker_image_id'], unique=False)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('image_docker_image_id', table_name='image')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py b/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py
new file mode 100644
index 000000000..d6bdcb35e
--- /dev/null
+++ b/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py
@@ -0,0 +1,141 @@
+"""Prepare the database for the new notifications system
+
+Revision ID: 325a4d7c79d9
+Revises: 4b7ef0c7bdb2
+Create Date: 2014-07-31 13:08:18.667393
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '325a4d7c79d9'
+down_revision = '4b7ef0c7bdb2'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('externalnotificationmethod',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('externalnotificationmethod_name', 'externalnotificationmethod', ['name'], unique=True)
+ op.bulk_insert(tables.externalnotificationmethod,
+ [
+ {'id':1, 'name':'quay_notification'},
+ {'id':2, 'name':'email'},
+ {'id':3, 'name':'webhook'},
+ ])
+ op.create_table('externalnotificationevent',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('externalnotificationevent_name', 'externalnotificationevent', ['name'], unique=True)
+ op.bulk_insert(tables.externalnotificationevent,
+ [
+ {'id':1, 'name':'repo_push'},
+ {'id':2, 'name':'build_queued'},
+ {'id':3, 'name':'build_start'},
+ {'id':4, 'name':'build_success'},
+ {'id':5, 'name':'build_failure'},
+ ])
+ op.create_table('repositoryauthorizedemail',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('email', sa.String(length=255), nullable=False),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('confirmed', sa.Boolean(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repositoryauthorizedemail_code', 'repositoryauthorizedemail', ['code'], unique=True)
+ op.create_index('repositoryauthorizedemail_email_repository_id', 'repositoryauthorizedemail', ['email', 'repository_id'], unique=True)
+ op.create_index('repositoryauthorizedemail_repository_id', 'repositoryauthorizedemail', ['repository_id'], unique=False)
+ op.create_table('repositorynotification',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('event_id', sa.Integer(), nullable=False),
+ sa.Column('method_id', sa.Integer(), nullable=False),
+ sa.Column('config_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['event_id'], ['externalnotificationevent.id'], ),
+ sa.ForeignKeyConstraint(['method_id'], ['externalnotificationmethod.id'], ),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repositorynotification_event_id', 'repositorynotification', ['event_id'], unique=False)
+ op.create_index('repositorynotification_method_id', 'repositorynotification', ['method_id'], unique=False)
+ op.create_index('repositorynotification_repository_id', 'repositorynotification', ['repository_id'], unique=False)
+ op.create_index('repositorynotification_uuid', 'repositorynotification', ['uuid'], unique=False)
+ op.add_column(u'notification', sa.Column('dismissed', sa.Boolean(), nullable=False))
+
+ # Manually add the new notificationkind types
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'id':5, 'name':'repo_push'},
+ {'id':6, 'name':'build_queued'},
+ {'id':7, 'name':'build_start'},
+ {'id':8, 'name':'build_success'},
+ {'id':9, 'name':'build_failure'},
+ ])
+
+ # Manually add the new logentrykind types
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'id':39, 'name':'add_repo_notification'},
+ {'id':40, 'name':'delete_repo_notification'},
+ ])
+
+
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column(u'notification', 'dismissed')
+ op.drop_table('repositorynotification')
+ op.drop_table('repositoryauthorizedemail')
+ op.drop_table('externalnotificationevent')
+ op.drop_table('externalnotificationmethod')
+
+ # Manually remove the notificationkind and logentrykind types
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('repo_push')))
+
+ )
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('build_queued')))
+
+ )
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('build_start')))
+
+ )
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('build_success')))
+
+ )
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('build_failure')))
+
+ )
+
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('add_repo_notification')))
+
+ )
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('delete_repo_notification')))
+
+ )
+ ### end Alembic commands ###
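Editor's note: the downgrade above repeats the same delete-by-name statement seven times. A compact sketch of the equivalent cleanup expressed as loops (same behavior, shown only as a shorter formulation):

    # Sketch: identical deletes to the downgrade above, written as loops.
    for name in ('repo_push', 'build_queued', 'build_start', 'build_success', 'build_failure'):
        op.execute(tables.notificationkind.delete()
                   .where(tables.notificationkind.c.name == op.inline_literal(name)))

    for name in ('add_repo_notification', 'delete_repo_notification'):
        op.execute(tables.logentrykind.delete()
                   .where(tables.logentrykind.c.name == op.inline_literal(name)))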
diff --git a/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py b/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py
deleted file mode 100644
index 2b73b8afa..000000000
--- a/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py
+++ /dev/null
@@ -1,129 +0,0 @@
-"""repo mirror columns
-
-Revision ID: 34c8ef052ec9
-Revises: c059b952ed76
-Create Date: 2019-10-07 13:11:20.424715
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '34c8ef052ec9'
-down_revision = 'cc6778199cdb'
-
-from alembic import op
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-from datetime import datetime
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-from peewee import ForeignKeyField, DateTimeField, BooleanField
-from data.database import (BaseModel, RepoMirrorType, RepoMirrorStatus, RepoMirrorRule, uuid_generator,
- QuayUserField, Repository, IntegerField, JSONField)
-from data.fields import EnumField as ClientEnumField, CharField, EncryptedCharField
-
-import logging
-
-logger = logging.getLogger(__name__)
-
-BATCH_SIZE = 10
-
-
-# Original model
-class RepoMirrorConfig(BaseModel):
- """
- Represents a repository to be mirrored and any additional configuration
- required to perform the mirroring.
- """
- repository = ForeignKeyField(Repository, index=True, unique=True, backref='mirror')
- creation_date = DateTimeField(default=datetime.utcnow)
- is_enabled = BooleanField(default=True)
-
- # Mirror Configuration
- mirror_type = ClientEnumField(RepoMirrorType, default=RepoMirrorType.PULL)
- internal_robot = QuayUserField(allows_robots=True, null=True, backref='mirrorpullrobot',
- robot_null_delete=True)
- external_reference = CharField()
- external_registry = CharField()
- external_namespace = CharField()
- external_repository = CharField()
- external_registry_username = EncryptedCharField(max_length=2048, null=True)
- external_registry_password = EncryptedCharField(max_length=2048, null=True)
- external_registry_config = JSONField(default={})
-
- # Worker Queuing
- sync_interval = IntegerField() # seconds between syncs
- sync_start_date = DateTimeField(null=True) # next start time
- sync_expiration_date = DateTimeField(null=True) # max duration
- sync_retries_remaining = IntegerField(default=3)
- sync_status = ClientEnumField(RepoMirrorStatus, default=RepoMirrorStatus.NEVER_RUN)
- sync_transaction_id = CharField(default=uuid_generator, max_length=36)
-
- # Tag-Matching Rules
- root_rule = ForeignKeyField(RepoMirrorRule)
-
-
-def _iterate(model_class, clause):
- while True:
- has_rows = False
- for row in list(model_class.select().where(clause).limit(BATCH_SIZE)):
- has_rows = True
- yield row
-
- if not has_rows:
- break
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
- logger.info('Migrating to external_reference from existing columns')
-
- op.add_column('repomirrorconfig', sa.Column('external_reference', sa.Text(), nullable=True))
-
- from app import app
- if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
- for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_reference >> None)):
- repo = '%s/%s/%s' % (repo_mirror.external_registry, repo_mirror.external_namespace, repo_mirror.external_repository)
- logger.info('migrating %s' % repo)
- repo_mirror.external_reference = repo
- repo_mirror.save()
-
- op.drop_column('repomirrorconfig', 'external_registry')
- op.drop_column('repomirrorconfig', 'external_namespace')
- op.drop_column('repomirrorconfig', 'external_repository')
-
- op.alter_column('repomirrorconfig', 'external_reference', nullable=False, existing_type=sa.Text())
-
-
- tester.populate_column('repomirrorconfig', 'external_reference', tester.TestDataType.String)
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
- '''
- This will downgrade existing data but may not exactly match previous data structure. If the
- external_reference does not have three parts (registry, namespace, repository) then a failed
- value is inserted.
- '''
-
- op.add_column('repomirrorconfig', sa.Column('external_registry', sa.String(length=255), nullable=True))
- op.add_column('repomirrorconfig', sa.Column('external_namespace', sa.String(length=255), nullable=True))
- op.add_column('repomirrorconfig', sa.Column('external_repository', sa.String(length=255), nullable=True))
-
- from app import app
- if app.config.get('SETUP_COMPLETE', False):
- logger.info('Restoring columns from external_reference')
- for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_registry >> None)):
- logger.info('Restoring %s' % repo_mirror.external_reference)
- parts = repo_mirror.external_reference.split('/', 2)
- repo_mirror.external_registry = parts[0] if len(parts) >= 1 else 'DOWNGRADE-FAILED'
- repo_mirror.external_namespace = parts[1] if len(parts) >= 2 else 'DOWNGRADE-FAILED'
- repo_mirror.external_repository = parts[2] if len(parts) >= 3 else 'DOWNGRADE-FAILED'
- repo_mirror.save()
-
- op.drop_column('repomirrorconfig', 'external_reference')
-
- op.alter_column('repomirrorconfig', 'external_registry', nullable=False, existing_type=sa.String(length=255))
- op.alter_column('repomirrorconfig', 'external_namespace', nullable=False, existing_type=sa.String(length=255))
- op.alter_column('repomirrorconfig', 'external_repository', nullable=False, existing_type=sa.String(length=255))
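Editor's note: the deleted `_iterate` helper above drains matching rows in batches of `BATCH_SIZE`, and it terminates only because the loop body mutates each row out of the WHERE clause (setting `external_reference` removes the row from the `>> None` filter). A generic sketch of that contract (model and field names hypothetical):

    # Sketch: batched drain loop; the caller MUST mutate each yielded row so it no
    # longer matches `clause`, otherwise the generator re-selects the same batch forever.
    def iterate_in_batches(model_class, clause, batch_size=10):
        while True:
            rows = list(model_class.select().where(clause).limit(batch_size))
            if not rows:
                return
            for row in rows:
                yield row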
diff --git a/data/migrations/versions/34fd69f63809_add_support_for_build_log_migration.py b/data/migrations/versions/34fd69f63809_add_support_for_build_log_migration.py
new file mode 100644
index 000000000..a731d0158
--- /dev/null
+++ b/data/migrations/versions/34fd69f63809_add_support_for_build_log_migration.py
@@ -0,0 +1,26 @@
+"""Add support for build log migration.
+
+Revision ID: 34fd69f63809
+Revises: 4a0c94399f38
+Create Date: 2014-09-12 11:50:09.217777
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '34fd69f63809'
+down_revision = '4a0c94399f38'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuild', sa.Column('logs_archived', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('repositorybuild', 'logs_archived')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/37c47a7af956_add_custom_git_trigger_type_to_database.py b/data/migrations/versions/37c47a7af956_add_custom_git_trigger_type_to_database.py
new file mode 100644
index 000000000..ef2f9efa3
--- /dev/null
+++ b/data/migrations/versions/37c47a7af956_add_custom_git_trigger_type_to_database.py
@@ -0,0 +1,25 @@
+"""add custom-git trigger type to database
+
+Revision ID: 37c47a7af956
+Revises: 3fee6f979c2a
+Create Date: 2015-04-24 14:50:26.275516
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '37c47a7af956'
+down_revision = '3fee6f979c2a'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.bulk_insert(tables.buildtriggerservice, [{'id': 2, 'name': 'custom-git'}])
+
+
+def downgrade(tables):
+ op.execute(
+ tables.buildtriggerservice.delete()
+ .where(tables.buildtriggerservice.c.name == op.inline_literal('custom-git'))
+ )
diff --git a/data/migrations/versions/3b4d3a4461dc_add_support_for_squashed_images.py b/data/migrations/versions/3b4d3a4461dc_add_support_for_squashed_images.py
new file mode 100644
index 000000000..87d668fa3
--- /dev/null
+++ b/data/migrations/versions/3b4d3a4461dc_add_support_for_squashed_images.py
@@ -0,0 +1,49 @@
+"""Add support for squashed images
+
+Revision ID: 3b4d3a4461dc
+Revises: b1d41e2071b
+Create Date: 2014-10-07 14:49:13.105746
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3b4d3a4461dc'
+down_revision = 'b1d41e2071b'
+
+from alembic import op
+import sqlalchemy as sa
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('imagestoragetransformation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragetransformation'))
+ )
+ op.create_index('imagestoragetransformation_name', 'imagestoragetransformation', ['name'], unique=True)
+ op.bulk_insert(tables.imagestoragetransformation,
+ [
+ {'id':1, 'name':'squash'},
+ ])
+ op.create_table('derivedimagestorage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('source_id', sa.Integer(), nullable=True),
+ sa.Column('derivative_id', sa.Integer(), nullable=False),
+ sa.Column('transformation_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['derivative_id'], ['imagestorage.id'], name=op.f('fk_derivedimagestorage_derivative_id_imagestorage')),
+ sa.ForeignKeyConstraint(['source_id'], ['imagestorage.id'], name=op.f('fk_derivedimagestorage_source_id_imagestorage')),
+ sa.ForeignKeyConstraint(['transformation_id'], ['imagestoragetransformation.id'], name=op.f('fk_dis_transformation_id_ist')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedimagestorage'))
+ )
+ op.create_index('derivedimagestorage_derivative_id', 'derivedimagestorage', ['derivative_id'], unique=False)
+ op.create_index('derivedimagestorage_source_id', 'derivedimagestorage', ['source_id'], unique=False)
+ op.create_index('derivedimagestorage_source_id_transformation_id', 'derivedimagestorage', ['source_id', 'transformation_id'], unique=True)
+ op.create_index('derivedimagestorage_transformation_id', 'derivedimagestorage', ['transformation_id'], unique=False)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('derivedimagestorage')
+ op.drop_table('imagestoragetransformation')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/3e2d38b52a75_add_access_token_kinds_type.py b/data/migrations/versions/3e2d38b52a75_add_access_token_kinds_type.py
new file mode 100644
index 000000000..53d0ae9df
--- /dev/null
+++ b/data/migrations/versions/3e2d38b52a75_add_access_token_kinds_type.py
@@ -0,0 +1,44 @@
+"""Add access token kinds type
+
+Revision ID: 3e2d38b52a75
+Revises: 1d2d86d09fcd
+Create Date: 2015-02-17 12:03:26.422485
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3e2d38b52a75'
+down_revision = '1d2d86d09fcd'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('accesstokenkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_accesstokenkind'))
+ )
+ op.create_index('accesstokenkind_name', 'accesstokenkind', ['name'], unique=True)
+ op.add_column(u'accesstoken', sa.Column('kind_id', sa.Integer(), nullable=True))
+ op.create_index('accesstoken_kind_id', 'accesstoken', ['kind_id'], unique=False)
+ op.create_foreign_key(op.f('fk_accesstoken_kind_id_accesstokenkind'), 'accesstoken', 'accesstokenkind', ['kind_id'], ['id'])
+ ### end Alembic commands ###
+
+ op.bulk_insert(tables.accesstokenkind,
+ [
+ {'id': 1, 'name':'build-worker'},
+ {'id': 2, 'name':'pushpull-token'},
+ ])
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(op.f('fk_accesstoken_kind_id_accesstokenkind'), 'accesstoken', type_='foreignkey')
+ op.drop_index('accesstoken_kind_id', table_name='accesstoken')
+ op.drop_column(u'accesstoken', 'kind_id')
+ op.drop_index('accesstokenkind_name', table_name='accesstokenkind')
+ op.drop_table('accesstokenkind')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py b/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py
deleted file mode 100644
index 87e6f8890..000000000
--- a/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Add severity and media_type to global messages
-
-Revision ID: 3e8cc74a1e7b
-Revises: fc47c1ec019f
-Create Date: 2017-01-17 16:22:28.584237
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '3e8cc74a1e7b'
-down_revision = 'fc47c1ec019f'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('messages', sa.Column('media_type_id', sa.Integer(), nullable=False, server_default='1'))
- op.add_column('messages', sa.Column('severity', sa.String(length=255), nullable=False, server_default='info'))
- op.alter_column('messages', 'uuid',
- existing_type=mysql.VARCHAR(length=36),
- server_default='',
- nullable=False)
- op.create_index('messages_media_type_id', 'messages', ['media_type_id'], unique=False)
- op.create_index('messages_severity', 'messages', ['severity'], unique=False)
- op.create_index('messages_uuid', 'messages', ['uuid'], unique=False)
- op.create_foreign_key(op.f('fk_messages_media_type_id_mediatype'), 'messages', 'mediatype', ['media_type_id'], ['id'])
- # ### end Alembic commands ###
-
- op.bulk_insert(tables.mediatype,
- [
- {'name': 'text/markdown'},
- ])
-
- # ### population of test data ### #
- tester.populate_column('messages', 'media_type_id', tester.TestDataType.Foreign('mediatype'))
- tester.populate_column('messages', 'severity', lambda: 'info')
- tester.populate_column('messages', 'uuid', tester.TestDataType.UUID)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_constraint(op.f('fk_messages_media_type_id_mediatype'), 'messages', type_='foreignkey')
- op.drop_index('messages_uuid', table_name='messages')
- op.drop_index('messages_severity', table_name='messages')
- op.drop_index('messages_media_type_id', table_name='messages')
- op.alter_column('messages', 'uuid',
- existing_type=mysql.VARCHAR(length=36),
- nullable=True)
- op.drop_column('messages', 'severity')
- op.drop_column('messages', 'media_type_id')
- # ### end Alembic commands ###
-
- op.execute(tables
- .mediatype
- .delete()
- .where(tables.
- mediatype.c.name == op.inline_literal('text/markdown')))
diff --git a/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py b/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py
new file mode 100644
index 000000000..4a1e2fe9d
--- /dev/null
+++ b/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py
@@ -0,0 +1,26 @@
+"""Backfill the namespace_user fields.
+
+Revision ID: 3f4fe1194671
+Revises: 6f2ecf5afcf
+Create Date: 2014-09-24 14:29:45.192179
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3f4fe1194671'
+down_revision = '6f2ecf5afcf'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ conn = op.get_bind()
+ user_table_name_escaped = conn.dialect.identifier_preparer.format_table(tables['user'])
+ conn.execute('update repository set namespace_user_id = (select id from {0} where {0}.username = repository.namespace) where namespace_user_id is NULL'.format(user_table_name_escaped))
+ op.create_index('repository_namespace_user_id_name', 'repository', ['namespace_user_id', 'name'], unique=True)
+
+
+def downgrade(tables):
+ op.drop_constraint('fk_repository_namespace_user_id_user', table_name='repository', type_='foreignkey')
+ op.drop_index('repository_namespace_user_id_name', table_name='repository')
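Editor's note: the raw UPDATE above has to splice the quoted table name into the SQL because identifiers cannot be bound parameters; `format_table` supplies dialect-correct quoting for the reserved word `user` (backticks on MySQL, double quotes on PostgreSQL). A minimal sketch of that quoting step in isolation (sketch only, meant to run inside a migration):

    # Sketch: dialect-aware quoting of a reserved identifier before string interpolation.
    conn = op.get_bind()
    preparer = conn.dialect.identifier_preparer
    quoted_user = preparer.quote_identifier('user')   # `user` on MySQL, "user" on PostgreSQL
    conn.execute('SELECT count(*) FROM {0}'.format(quoted_user))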
diff --git a/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py b/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py
new file mode 100644
index 000000000..2574271ef
--- /dev/null
+++ b/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py
@@ -0,0 +1,30 @@
+"""make auth_token nullable
+
+Revision ID: 3fee6f979c2a
+Revises: 31288f79df53
+Create Date: 2015-03-27 11:11:24.046996
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3fee6f979c2a'
+down_revision = '31288f79df53'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column('repositorybuildtrigger', 'auth_token',
+ existing_type=mysql.VARCHAR(length=255),
+ nullable=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column('repositorybuildtrigger', 'auth_token',
+ existing_type=mysql.VARCHAR(length=255),
+ nullable=False)
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/41f4587c84ae_add_jwt_authentication_login_service.py b/data/migrations/versions/41f4587c84ae_add_jwt_authentication_login_service.py
new file mode 100644
index 000000000..f15875b83
--- /dev/null
+++ b/data/migrations/versions/41f4587c84ae_add_jwt_authentication_login_service.py
@@ -0,0 +1,28 @@
+"""Add JWT Authentication login service
+
+Revision ID: 41f4587c84ae
+Revises: 1f116e06b68
+Create Date: 2015-06-02 16:13:02.636590
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '41f4587c84ae'
+down_revision = '1f116e06b68'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.bulk_insert(tables.loginservice,
+ [
+ {'id': 5, 'name':'jwtauthn'},
+ ])
+
+
+def downgrade(tables):
+ op.execute(
+ (tables.loginservice.delete()
+ .where(tables.loginservice.c.name == op.inline_literal('jwtauthn')))
+ )
diff --git a/data/migrations/versions/437ee6269a9d_migrate_bitbucket_services_to_webhooks.py b/data/migrations/versions/437ee6269a9d_migrate_bitbucket_services_to_webhooks.py
new file mode 100644
index 000000000..9ebf6bcd2
--- /dev/null
+++ b/data/migrations/versions/437ee6269a9d_migrate_bitbucket_services_to_webhooks.py
@@ -0,0 +1,24 @@
+"""Migrate BitBucket services to webhooks
+
+Revision ID: 437ee6269a9d
+Revises: 2e09ad97b06c
+Create Date: 2015-07-21 14:03:44.964200
+
+"""
+
+from util.migrate.migratebitbucketservices import run_bitbucket_migration
+
+# revision identifiers, used by Alembic.
+revision = '437ee6269a9d'
+down_revision = '2e09ad97b06c'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ run_bitbucket_migration()
+
+
+def downgrade(tables):
+ pass
diff --git a/data/migrations/versions/43e943c0639f_add_log_kind_for_regenerating_robot_.py b/data/migrations/versions/43e943c0639f_add_log_kind_for_regenerating_robot_.py
new file mode 100644
index 000000000..983528b06
--- /dev/null
+++ b/data/migrations/versions/43e943c0639f_add_log_kind_for_regenerating_robot_.py
@@ -0,0 +1,28 @@
+"""add log kind for regenerating robot tokens
+
+Revision ID: 43e943c0639f
+Revises: 82297d834ad
+Create Date: 2014-08-25 17:14:42.784518
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '43e943c0639f'
+down_revision = '82297d834ad'
+
+from alembic import op
+import sqlalchemy as sa
+
+def upgrade(tables):
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'id': 41, 'name':'regenerate_robot_token'},
+ ])
+
+
+def downgrade(tables):
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('regenerate_robot_token')))
+
+ )
diff --git a/data/migrations/versions/45fd8b9869d4_add_notification_type.py b/data/migrations/versions/45fd8b9869d4_add_notification_type.py
deleted file mode 100644
index 66f5c0870..000000000
--- a/data/migrations/versions/45fd8b9869d4_add_notification_type.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""add_notification_type
-
-Revision ID: 45fd8b9869d4
-Revises: 94836b099894
-Create Date: 2016-12-01 12:02:19.724528
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '45fd8b9869d4'
-down_revision = '94836b099894'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.bulk_insert(tables.notificationkind,
- [
- {'name': 'build_cancelled'},
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.execute(tables
- .notificationkind
- .delete()
- .where(tables.
- notificationkind.c.name == op.inline_literal('build_cancelled')))
diff --git a/data/migrations/versions/47670cbeced_migrate_existing_webhooks_to_.py b/data/migrations/versions/47670cbeced_migrate_existing_webhooks_to_.py
new file mode 100644
index 000000000..eaa687c73
--- /dev/null
+++ b/data/migrations/versions/47670cbeced_migrate_existing_webhooks_to_.py
@@ -0,0 +1,31 @@
+"""Migrate existing webhooks to notifications.
+
+Revision ID: 47670cbeced
+Revises: 325a4d7c79d9
+Create Date: 2014-07-31 13:49:38.332807
+Hand Edited By Joseph Schorr
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '47670cbeced'
+down_revision = '325a4d7c79d9'
+
+from alembic import op, context
+import sqlalchemy as sa
+
+def get_id(query):
+ conn = op.get_bind()
+ return list(conn.execute(query, ()).fetchall())[0][0]
+
+def upgrade(tables):
+ conn = op.get_bind()
+ event_id = get_id('Select id From externalnotificationevent Where name=\'repo_push\' Limit 1')
+ method_id = get_id('Select id From externalnotificationmethod Where name=\'webhook\' Limit 1')
+ conn.execute('Insert Into repositorynotification (uuid, repository_id, event_id, method_id, config_json) Select public_id, repository_id, %s, %s, parameters FROM webhook' % (event_id, method_id))
+
+def downgrade(tables):
+ conn = op.get_bind()
+ event_id = get_id('Select id From externalnotificationevent Where name=\'repo_push\' Limit 1')
+ method_id = get_id('Select id From externalnotificationmethod Where name=\'webhook\' Limit 1')
+ conn.execute('Insert Into webhook (public_id, repository_id, parameters) Select uuid, repository_id, config_json FROM repositorynotification Where event_id=%s And method_id=%s' % (event_id, method_id))
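Editor's note: the hand-edited migration above interpolates `event_id` and `method_id` into the INSERT ... SELECT with `%`; both values come from the database and are integers, but a bound-parameter formulation avoids the interpolation entirely. A sketch of the upgrade statement with named parameters (equivalent intent, not the wording of the patch):

    # Sketch: the same INSERT ... SELECT using bound parameters via sqlalchemy.text.
    from sqlalchemy import text

    conn.execute(
        text('INSERT INTO repositorynotification (uuid, repository_id, event_id, method_id, config_json) '
             'SELECT public_id, repository_id, :event_id, :method_id, parameters FROM webhook'),
        event_id=event_id, method_id=method_id)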
diff --git a/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py b/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py
deleted file mode 100644
index da8476f8a..000000000
--- a/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Add index on logs_archived on repositorybuild
-
-Revision ID: 481623ba00ba
-Revises: b9045731c4de
-Create Date: 2019-02-15 16:09:47.326805
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '481623ba00ba'
-down_revision = 'b9045731c4de'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_index('repositorybuild_logs_archived', 'repositorybuild', ['logs_archived'], unique=False)
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('repositorybuild_logs_archived', table_name='repositorybuild')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py b/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py
new file mode 100644
index 000000000..6b4160b19
--- /dev/null
+++ b/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py
@@ -0,0 +1,39 @@
+"""add new notification kinds
+
+Revision ID: 4a0c94399f38
+Revises: 1594a74a74ca
+Create Date: 2014-08-28 16:17:01.898269
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '4a0c94399f38'
+down_revision = '1594a74a74ca'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ op.bulk_insert(tables.externalnotificationmethod,
+ [
+ {'id':4, 'name':'flowdock'},
+ {'id':5, 'name':'hipchat'},
+ {'id':6, 'name':'slack'},
+ ])
+
+def downgrade(tables):
+ op.execute(
+ (tables.externalnotificationmethod.delete()
+ .where(tables.externalnotificationmethod.c.name == op.inline_literal('flowdock')))
+ )
+
+ op.execute(
+ (tables.externalnotificationmethod.delete()
+ .where(tables.externalnotificationmethod.c.name == op.inline_literal('hipchat')))
+ )
+
+ op.execute(
+ (tables.externalnotificationmethod.delete()
+ .where(tables.externalnotificationmethod.c.name == op.inline_literal('slack')))
+ )
diff --git a/data/migrations/versions/4b7ef0c7bdb2_add_the_maintenance_notification_type.py b/data/migrations/versions/4b7ef0c7bdb2_add_the_maintenance_notification_type.py
new file mode 100644
index 000000000..9f48ca6c6
--- /dev/null
+++ b/data/migrations/versions/4b7ef0c7bdb2_add_the_maintenance_notification_type.py
@@ -0,0 +1,28 @@
+"""Add the maintenance notification type.
+
+Revision ID: 4b7ef0c7bdb2
+Revises: bcdde200a1b
+Create Date: 2014-06-27 19:09:56.387534
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '4b7ef0c7bdb2'
+down_revision = 'bcdde200a1b'
+
+from alembic import op
+import sqlalchemy as sa
+
+def upgrade(tables):
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'id':4, 'name':'maintenance'},
+ ])
+
+
+def downgrade(tables):
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('maintenance')))
+
+ )
diff --git a/data/migrations/versions/4ce2169efd3b_add_reversion_column_to_the_tags_table.py b/data/migrations/versions/4ce2169efd3b_add_reversion_column_to_the_tags_table.py
new file mode 100644
index 000000000..19a1d6ba6
--- /dev/null
+++ b/data/migrations/versions/4ce2169efd3b_add_reversion_column_to_the_tags_table.py
@@ -0,0 +1,26 @@
+"""Add reversion column to the tags table
+
+Revision ID: 4ce2169efd3b
+Revises: 30c044b75632
+Create Date: 2015-04-16 17:10:16.039835
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '4ce2169efd3b'
+down_revision = '30c044b75632'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorytag', sa.Column('reversion', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('repositorytag', 'reversion')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/4ef04c61fcf9_allow_tags_to_be_marked_as_hidden.py b/data/migrations/versions/4ef04c61fcf9_allow_tags_to_be_marked_as_hidden.py
new file mode 100644
index 000000000..e4fc1ea5e
--- /dev/null
+++ b/data/migrations/versions/4ef04c61fcf9_allow_tags_to_be_marked_as_hidden.py
@@ -0,0 +1,26 @@
+"""Allow tags to be marked as hidden.
+
+Revision ID: 4ef04c61fcf9
+Revises: 509d2857566f
+Create Date: 2015-02-18 16:34:16.586129
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '4ef04c61fcf9'
+down_revision = '509d2857566f'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorytag', sa.Column('hidden', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('repositorytag', 'hidden')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py b/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py
new file mode 100644
index 000000000..bc8373655
--- /dev/null
+++ b/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py
@@ -0,0 +1,28 @@
+"""Add brute force prevention metadata to the user table.
+
+Revision ID: 4fdb65816b8d
+Revises: 43e943c0639f
+Create Date: 2014-09-03 12:35:33.722435
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '4fdb65816b8d'
+down_revision = '43e943c0639f'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('user', sa.Column('invalid_login_attempts', sa.Integer(), nullable=False, server_default="0"))
+ op.add_column('user', sa.Column('last_invalid_login', sa.DateTime(), nullable=False))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('user', 'last_invalid_login')
+ op.drop_column('user', 'invalid_login_attempts')
+ ### end Alembic commands ###
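
A detail worth noting in the hunk above: `invalid_login_attempts` is added as NOT NULL with server_default="0", so the database can backfill rows already present in the populated user table instead of rejecting the constraint. A minimal sketch of that idiom, with a hypothetical column name:

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        # Hypothetical column: the server_default backfills existing rows so the
        # NOT NULL constraint can be satisfied on a table that already has data.
        op.add_column('user', sa.Column('failed_pull_count', sa.Integer(),
                                        nullable=False, server_default="0"))

    def downgrade():
        op.drop_column('user', 'failed_pull_count')
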
diff --git a/data/migrations/versions/509d2857566f_track_the_lifetime_start_and_end_for_.py b/data/migrations/versions/509d2857566f_track_the_lifetime_start_and_end_for_.py
new file mode 100644
index 000000000..a13ec00d1
--- /dev/null
+++ b/data/migrations/versions/509d2857566f_track_the_lifetime_start_and_end_for_.py
@@ -0,0 +1,36 @@
+"""Track the lifetime start and end for tags to allow the state of a repository to be rewound.
+
+Revision ID: 509d2857566f
+Revises: 3e2d38b52a75
+Create Date: 2015-02-13 14:35:38.939049
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '509d2857566f'
+down_revision = '3e2d38b52a75'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorytag', sa.Column('lifetime_end_ts', sa.Integer(), nullable=True))
+ op.add_column('repositorytag', sa.Column('lifetime_start_ts', sa.Integer(), nullable=False, server_default="0"))
+ op.create_index('repositorytag_lifetime_end_ts', 'repositorytag', ['lifetime_end_ts'], unique=False)
+ op.drop_index('repositorytag_repository_id_name', table_name='repositorytag')
+ op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=False)
+ op.add_column('user', sa.Column('removed_tag_expiration_s', sa.Integer(), nullable=False, server_default="1209600"))
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('user', 'removed_tag_expiration_s')
+ op.drop_index('repositorytag_repository_id_name', table_name='repositorytag')
+ op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=True)
+ op.drop_index('repositorytag_lifetime_end_ts', table_name='repositorytag')
+ op.drop_column('repositorytag', 'lifetime_start_ts')
+ op.drop_column('repositorytag', 'lifetime_end_ts')
+ ### end Alembic commands ###
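
The hunk above also relaxes `repositorytag_repository_id_name` from a unique to a non-unique index, which Alembic expresses as a drop followed by a recreate; the downgrade reverses it. A minimal sketch of that drop-and-recreate idiom, using a hypothetical table and index:

    from alembic import op

    def upgrade():
        # Hypothetical index 'items_owner_id_name' on table 'items': recreate it
        # without the uniqueness constraint so duplicate names become legal.
        op.drop_index('items_owner_id_name', table_name='items')
        op.create_index('items_owner_id_name', 'items', ['owner_id', 'name'], unique=False)

    def downgrade():
        op.drop_index('items_owner_id_name', table_name='items')
        op.create_index('items_owner_id_name', 'items', ['owner_id', 'name'], unique=True)
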
diff --git a/data/migrations/versions/51d04d0e7e6f_email_invites_for_joining_a_team.py b/data/migrations/versions/51d04d0e7e6f_email_invites_for_joining_a_team.py
new file mode 100644
index 000000000..c18335adb
--- /dev/null
+++ b/data/migrations/versions/51d04d0e7e6f_email_invites_for_joining_a_team.py
@@ -0,0 +1,78 @@
+"""Email invites for joining a team.
+
+Revision ID: 51d04d0e7e6f
+Revises: 34fd69f63809
+Create Date: 2014-09-15 23:51:35.478232
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '51d04d0e7e6f'
+down_revision = '34fd69f63809'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('teammemberinvite',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('email', sa.String(length=255), nullable=True),
+ sa.Column('team_id', sa.Integer(), nullable=False),
+ sa.Column('inviter_id', sa.Integer(), nullable=False),
+ sa.Column('invite_token', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['inviter_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('teammemberinvite_inviter_id', 'teammemberinvite', ['inviter_id'], unique=False)
+ op.create_index('teammemberinvite_team_id', 'teammemberinvite', ['team_id'], unique=False)
+ op.create_index('teammemberinvite_user_id', 'teammemberinvite', ['user_id'], unique=False)
+ ### end Alembic commands ###
+
+ # Manually add the new logentrykind types
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'id':42, 'name':'org_invite_team_member'},
+ {'id':43, 'name':'org_team_member_invite_accepted'},
+ {'id':44, 'name':'org_team_member_invite_declined'},
+ {'id':45, 'name':'org_delete_team_member_invite'},
+ ])
+
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'id':10, 'name':'org_team_invite'},
+ ])
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('org_invite_team_member')))
+ )
+
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('org_team_member_invite_accepted')))
+ )
+
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('org_team_member_invite_declined')))
+ )
+
+ op.execute(
+ (tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('org_delete_team_member_invite')))
+ )
+
+ op.execute(
+ (tables.notificationkind.delete()
+ .where(tables.notificationkind.c.name == op.inline_literal('org_team_invite')))
+ )
+
+ op.drop_table('teammemberinvite')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/5232a5610a0a_add_logentry_repo_datetime_kind_index.py b/data/migrations/versions/5232a5610a0a_add_logentry_repo_datetime_kind_index.py
new file mode 100644
index 000000000..c8bff8bbd
--- /dev/null
+++ b/data/migrations/versions/5232a5610a0a_add_logentry_repo_datetime_kind_index.py
@@ -0,0 +1,26 @@
+"""Add LogEntry repo-datetime-kind index
+
+Revision ID: 5232a5610a0a
+Revises: 437ee6269a9d
+Create Date: 2015-07-31 13:25:41.877733
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5232a5610a0a'
+down_revision = '437ee6269a9d'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('logentry_repository_id_datetime_kind_id', 'logentry', ['repository_id', 'datetime', 'kind_id'], unique=False)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('logentry_repository_id_datetime_kind_id', table_name='logentry')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/5248ddf35167_repository_mirror.py b/data/migrations/versions/5248ddf35167_repository_mirror.py
deleted file mode 100644
index 8bb806105..000000000
--- a/data/migrations/versions/5248ddf35167_repository_mirror.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""Repository Mirror
-
-Revision ID: 5248ddf35167
-Revises: b918abdbee43
-Create Date: 2019-06-25 16:22:36.310532
-
-"""
-
-revision = '5248ddf35167'
-down_revision = 'b918abdbee43'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.create_table('repomirrorrule',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('creation_date', sa.DateTime(), nullable=False),
- sa.Column('rule_type', sa.Integer(), nullable=False),
- sa.Column('rule_value', sa.Text(), nullable=False),
- sa.Column('left_child_id', sa.Integer(), nullable=True),
- sa.Column('right_child_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['left_child_id'], ['repomirrorrule.id'], name=op.f('fk_repomirrorrule_left_child_id_repomirrorrule')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repomirrorrule_repository_id_repository')),
- sa.ForeignKeyConstraint(['right_child_id'], ['repomirrorrule.id'], name=op.f('fk_repomirrorrule_right_child_id_repomirrorrule')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repomirrorrule')))
- op.create_index('repomirrorrule_left_child_id', 'repomirrorrule', ['left_child_id'], unique=False)
- op.create_index('repomirrorrule_repository_id', 'repomirrorrule', ['repository_id'], unique=False)
- op.create_index('repomirrorrule_right_child_id', 'repomirrorrule', ['right_child_id'], unique=False)
- op.create_index('repomirrorrule_rule_type', 'repomirrorrule', ['rule_type'], unique=False)
- op.create_index('repomirrorrule_uuid', 'repomirrorrule', ['uuid'], unique=True)
-
- op.create_table('repomirrorconfig',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('creation_date', sa.DateTime(), nullable=False),
- sa.Column('is_enabled', sa.Boolean(), nullable=False),
- sa.Column('mirror_type', sa.Integer(), nullable=False),
- sa.Column('internal_robot_id', sa.Integer(), nullable=False),
- sa.Column('external_registry', sa.String(length=255), nullable=False),
- sa.Column('external_namespace', sa.String(length=255), nullable=False),
- sa.Column('external_repository', sa.String(length=255), nullable=False),
- sa.Column('external_registry_username', sa.String(length=2048), nullable=True),
- sa.Column('external_registry_password', sa.String(length=2048), nullable=True),
- sa.Column('external_registry_config', sa.Text(), nullable=False),
- sa.Column('sync_interval', sa.Integer(), nullable=False, server_default='60'),
- sa.Column('sync_start_date', sa.DateTime(), nullable=True),
- sa.Column('sync_expiration_date', sa.DateTime(), nullable=True),
- sa.Column('sync_retries_remaining', sa.Integer(), nullable=False, server_default='3'),
- sa.Column('sync_status', sa.Integer(), nullable=False),
- sa.Column('sync_transaction_id', sa.String(length=36), nullable=True),
- sa.Column('root_rule_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repomirrorconfig_repository_id_repository')),
- sa.ForeignKeyConstraint(['root_rule_id'], ['repomirrorrule.id'], name=op.f('fk_repomirrorconfig_root_rule_id_repomirrorrule')),
- sa.ForeignKeyConstraint(['internal_robot_id'], ['user.id'], name=op.f('fk_repomirrorconfig_internal_robot_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repomirrorconfig'))
- )
- op.create_index('repomirrorconfig_mirror_type', 'repomirrorconfig', ['mirror_type'], unique=False)
- op.create_index('repomirrorconfig_repository_id', 'repomirrorconfig', ['repository_id'], unique=True)
- op.create_index('repomirrorconfig_root_rule_id', 'repomirrorconfig', ['root_rule_id'], unique=False)
- op.create_index('repomirrorconfig_sync_status', 'repomirrorconfig', ['sync_status'], unique=False)
- op.create_index('repomirrorconfig_sync_transaction_id', 'repomirrorconfig', ['sync_transaction_id'], unique=False)
- op.create_index('repomirrorconfig_internal_robot_id', 'repomirrorconfig', ['internal_robot_id'], unique=False)
-
- op.add_column(u'repository', sa.Column('state', sa.Integer(), nullable=False, server_default='0'))
- op.create_index('repository_state', 'repository', ['state'], unique=False)
-
- op.bulk_insert(tables.logentrykind,
- [
- {'name': 'repo_mirror_enabled'},
- {'name': 'repo_mirror_disabled'},
- {'name': 'repo_mirror_config_changed'},
- {'name': 'repo_mirror_sync_started'},
- {'name': 'repo_mirror_sync_failed'},
- {'name': 'repo_mirror_sync_success'},
- {'name': 'repo_mirror_sync_now_requested'},
- {'name': 'repo_mirror_sync_tag_success'},
- {'name': 'repo_mirror_sync_tag_failed'},
- {'name': 'repo_mirror_sync_test_success'},
- {'name': 'repo_mirror_sync_test_failed'},
- {'name': 'repo_mirror_sync_test_started'},
- {'name': 'change_repo_state'}
- ])
-
-
- tester.populate_table('repomirrorrule', [
- ('uuid', tester.TestDataType.String),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('creation_date', tester.TestDataType.DateTime),
- ('rule_type', tester.TestDataType.Integer),
- ('rule_value', tester.TestDataType.String),
- ])
-
- tester.populate_table('repomirrorconfig', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('creation_date', tester.TestDataType.DateTime),
- ('is_enabled', tester.TestDataType.Boolean),
- ('mirror_type', tester.TestDataType.Constant(1)),
- ('internal_robot_id', tester.TestDataType.Foreign('user')),
- ('external_registry', tester.TestDataType.String),
- ('external_namespace', tester.TestDataType.String),
- ('external_repository', tester.TestDataType.String),
- ('external_registry_username', tester.TestDataType.String),
- ('external_registry_password', tester.TestDataType.String),
- ('external_registry_config', tester.TestDataType.JSON),
- ('sync_start_date', tester.TestDataType.DateTime),
- ('sync_expiration_date', tester.TestDataType.DateTime),
- ('sync_retries_remaining', tester.TestDataType.Integer),
- ('sync_status', tester.TestDataType.Constant(0)),
- ('sync_transaction_id', tester.TestDataType.String),
- ('root_rule_id', tester.TestDataType.Foreign('repomirrorrule')),
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_column(u'repository', 'state')
-
- op.drop_table('repomirrorconfig')
-
- op.drop_table('repomirrorrule')
-
- for logentrykind in [
- 'repo_mirror_enabled',
- 'repo_mirror_disabled',
- 'repo_mirror_config_changed',
- 'repo_mirror_sync_started',
- 'repo_mirror_sync_failed',
- 'repo_mirror_sync_success',
- 'repo_mirror_sync_now_requested',
- 'repo_mirror_sync_tag_success',
- 'repo_mirror_sync_tag_failed',
- 'repo_mirror_sync_test_success',
- 'repo_mirror_sync_test_failed',
- 'repo_mirror_sync_test_started',
- 'change_repo_state'
- ]:
- op.execute(tables.logentrykind.delete()
- .where(tables.logentrykind.c.name == op.inline_literal(logentrykind)))
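
The downgrade in the removed repository-mirror migration above deletes its thirteen seeded logentrykind rows one statement at a time inside a loop. As an alternative sketch (not the form used in the removed file), the same cleanup can be issued as a single delete with an IN clause over a lightweight table() definition:

    from alembic import op
    import sqlalchemy as sa
    from sqlalchemy.sql import table, column

    logentrykind = table('logentrykind', column('name', sa.String))

    # Names mirror the rows seeded by the repository-mirror migration above.
    MIRROR_LOG_KINDS = [
        'repo_mirror_enabled', 'repo_mirror_disabled', 'repo_mirror_config_changed',
        'repo_mirror_sync_started', 'repo_mirror_sync_failed', 'repo_mirror_sync_success',
        'repo_mirror_sync_now_requested', 'repo_mirror_sync_tag_success',
        'repo_mirror_sync_tag_failed', 'repo_mirror_sync_test_success',
        'repo_mirror_sync_test_failed', 'repo_mirror_sync_test_started',
        'change_repo_state',
    ]

    def downgrade():
        # One DELETE ... WHERE name IN (...) instead of thirteen separate deletes.
        op.execute(logentrykind.delete()
                   .where(logentrykind.c.name.in_(MIRROR_LOG_KINDS)))
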
diff --git a/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py b/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py
deleted file mode 100644
index e0b61814b..000000000
--- a/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Remove reference to subdir
-
-Revision ID: 53e2ac668296
-Revises: ed01e313d3cb
-Create Date: 2017-03-28 15:01:31.073382
-
-"""
-
-# revision identifiers, used by Alembic.
-import json
-
-import logging
-from alembic.script.revision import RevisionError
-from alembic.util import CommandError
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-revision = '53e2ac668296'
-down_revision = 'ed01e313d3cb'
-
-log = logging.getLogger(__name__)
-
-
-def run_migration(migrate_function, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- conn = op.get_bind()
- triggers = conn.execute("SELECT id, config FROM repositorybuildtrigger")
- for trigger in triggers:
- config = json.dumps(migrate_function(json.loads(trigger[1])))
- try:
- conn.execute("UPDATE repositorybuildtrigger SET config=%s WHERE id=%s", config, trigger[0])
- except(RevisionError, CommandError) as e:
- log.warning("Failed to update build trigger %s with exception: ", trigger[0], e)
-
-
-def upgrade(tables, tester, progress_reporter):
- run_migration(delete_subdir, progress_reporter)
-
-
-def downgrade(tables, tester, progress_reporter):
- run_migration(add_subdir, progress_reporter)
-
-
-def delete_subdir(config):
- """ Remove subdir from config """
- if not config:
- return config
- if 'subdir' in config:
- del config['subdir']
-
- return config
-
-
-def add_subdir(config):
- """ Add subdir back into config """
- if not config:
- return config
- if 'context' in config:
- config['subdir'] = config['context']
-
- return config
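
Because `delete_subdir` and `add_subdir` in the removed file are pure dictionary transforms, they can be exercised without touching the database. A small self-contained sketch (the functions are restated here for illustration and mirror the file above):

    def delete_subdir(config):
        # Mirrors the upgrade transform: drop the obsolete 'subdir' key.
        if config and 'subdir' in config:
            del config['subdir']
        return config

    def add_subdir(config):
        # Mirrors the downgrade transform: restore 'subdir' from 'context'.
        if config and 'context' in config:
            config['subdir'] = config['context']
        return config

    if __name__ == '__main__':
        assert delete_subdir({'subdir': 'web', 'context': 'web'}) == {'context': 'web'}
        assert add_subdir({'context': 'web'}) == {'context': 'web', 'subdir': 'web'}
        print('trigger-config transforms behave as expected')
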
diff --git a/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py b/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py
deleted file mode 100644
index efe900ad7..000000000
--- a/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""Add NamespaceGeoRestriction table
-
-Revision ID: 54492a68a3cf
-Revises: c00a1f15968b
-Create Date: 2018-12-05 15:12:14.201116
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '54492a68a3cf'
-down_revision = 'c00a1f15968b'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('namespacegeorestriction',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('namespace_id', sa.Integer(), nullable=False),
- sa.Column('added', sa.DateTime(), nullable=False),
- sa.Column('description', sa.String(length=255), nullable=False),
- sa.Column('unstructured_json', sa.Text(), nullable=False),
- sa.Column('restricted_region_iso_code', sa.String(length=255), nullable=False),
- sa.ForeignKeyConstraint(['namespace_id'], ['user.id'], name=op.f('fk_namespacegeorestriction_namespace_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_namespacegeorestriction'))
- )
- op.create_index('namespacegeorestriction_namespace_id', 'namespacegeorestriction', ['namespace_id'], unique=False)
- op.create_index('namespacegeorestriction_namespace_id_restricted_region_iso_code', 'namespacegeorestriction', ['namespace_id', 'restricted_region_iso_code'], unique=True)
- op.create_index('namespacegeorestriction_restricted_region_iso_code', 'namespacegeorestriction', ['restricted_region_iso_code'], unique=False)
- # ### end Alembic commands ###
-
- tester.populate_table('namespacegeorestriction', [
- ('namespace_id', tester.TestDataType.Foreign('user')),
- ('added', tester.TestDataType.DateTime),
- ('description', tester.TestDataType.String),
- ('unstructured_json', tester.TestDataType.JSON),
- ('restricted_region_iso_code', tester.TestDataType.String),
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('namespacegeorestriction')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/5a07499ce53f_set_up_initial_database.py b/data/migrations/versions/5a07499ce53f_set_up_initial_database.py
new file mode 100644
index 000000000..c1d082066
--- /dev/null
+++ b/data/migrations/versions/5a07499ce53f_set_up_initial_database.py
@@ -0,0 +1,518 @@
+"""Set up initial database
+
+Revision ID: 5a07499ce53f
+Revises: None
+Create Date: 2014-05-13 11:26:51.808426
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5a07499ce53f'
+down_revision = None
+
+from alembic import op
+import sqlalchemy as sa
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('loginservice',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('loginservice_name', 'loginservice', ['name'], unique=True)
+
+ op.bulk_insert(tables.loginservice,
+ [
+ {'id':1, 'name':'github'},
+ {'id':2, 'name':'quayrobot'},
+ {'id':3, 'name':'ldap'},
+ ])
+
+ op.create_table('imagestorage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('checksum', sa.String(length=255), nullable=True),
+ sa.Column('created', sa.DateTime(), nullable=True),
+ sa.Column('comment', sa.Text(), nullable=True),
+ sa.Column('command', sa.Text(), nullable=True),
+ sa.Column('image_size', sa.BigInteger(), nullable=True),
+ sa.Column('uploading', sa.Boolean(), nullable=True),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_table('queueitem',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('queue_name', sa.String(length=1024), nullable=False),
+ sa.Column('body', sa.Text(), nullable=False),
+ sa.Column('available_after', sa.DateTime(), nullable=False),
+ sa.Column('available', sa.Boolean(), nullable=False),
+ sa.Column('processing_expires', sa.DateTime(), nullable=True),
+ sa.Column('retries_remaining', sa.Integer(), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
+ op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
+ op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
+ op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False, mysql_length=767)
+ op.create_table('role',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('role_name', 'role', ['name'], unique=False)
+
+ op.bulk_insert(tables.role,
+ [
+ {'id':1, 'name':'admin'},
+ {'id':2, 'name':'write'},
+ {'id':3, 'name':'read'},
+ ])
+
+ op.create_table('logentrykind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=False)
+
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'id':1, 'name':'account_change_plan'},
+ {'id':2, 'name':'account_change_cc'},
+ {'id':3, 'name':'account_change_password'},
+ {'id':4, 'name':'account_convert'},
+
+ {'id':5, 'name':'create_robot'},
+ {'id':6, 'name':'delete_robot'},
+
+ {'id':7, 'name':'create_repo'},
+ {'id':8, 'name':'push_repo'},
+ {'id':9, 'name':'pull_repo'},
+ {'id':10, 'name':'delete_repo'},
+ {'id':11, 'name':'create_tag'},
+ {'id':12, 'name':'move_tag'},
+ {'id':13, 'name':'delete_tag'},
+ {'id':14, 'name':'add_repo_permission'},
+ {'id':15, 'name':'change_repo_permission'},
+ {'id':16, 'name':'delete_repo_permission'},
+ {'id':17, 'name':'change_repo_visibility'},
+ {'id':18, 'name':'add_repo_accesstoken'},
+ {'id':19, 'name':'delete_repo_accesstoken'},
+ {'id':20, 'name':'add_repo_webhook'},
+ {'id':21, 'name':'delete_repo_webhook'},
+ {'id':22, 'name':'set_repo_description'},
+
+ {'id':23, 'name':'build_dockerfile'},
+
+ {'id':24, 'name':'org_create_team'},
+ {'id':25, 'name':'org_delete_team'},
+ {'id':26, 'name':'org_add_team_member'},
+ {'id':27, 'name':'org_remove_team_member'},
+ {'id':28, 'name':'org_set_team_description'},
+ {'id':29, 'name':'org_set_team_role'},
+
+ {'id':30, 'name':'create_prototype_permission'},
+ {'id':31, 'name':'modify_prototype_permission'},
+ {'id':32, 'name':'delete_prototype_permission'},
+
+ {'id':33, 'name':'setup_repo_trigger'},
+ {'id':34, 'name':'delete_repo_trigger'},
+
+ {'id':35, 'name':'create_application'},
+ {'id':36, 'name':'update_application'},
+ {'id':37, 'name':'delete_application'},
+ {'id':38, 'name':'reset_application_client_secret'},
+ ])
+
+ op.create_table('notificationkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=False)
+
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'id':1, 'name':'password_required'},
+ {'id':2, 'name':'over_private_usage'},
+ {'id':3, 'name':'expiring_license'},
+ ])
+
+ op.create_table('teamrole',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('teamrole_name', 'teamrole', ['name'], unique=False)
+
+ op.bulk_insert(tables.teamrole,
+ [
+ {'id':1, 'name':'admin'},
+ {'id':2, 'name':'creator'},
+ {'id':3, 'name':'member'},
+ ])
+
+ op.create_table('visibility',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('visibility_name', 'visibility', ['name'], unique=False)
+
+ op.bulk_insert(tables.visibility,
+ [
+ {'id':1, 'name':'public'},
+ {'id':2, 'name':'private'},
+ ])
+
+ op.create_table('user',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('username', sa.String(length=255), nullable=False),
+ sa.Column('password_hash', sa.String(length=255), nullable=True),
+ sa.Column('email', sa.String(length=255), nullable=False),
+ sa.Column('verified', sa.Boolean(), nullable=False),
+ sa.Column('stripe_id', sa.String(length=255), nullable=True),
+ sa.Column('organization', sa.Boolean(), nullable=False),
+ sa.Column('robot', sa.Boolean(), nullable=False),
+ sa.Column('invoice_email', sa.Boolean(), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('user_email', 'user', ['email'], unique=True)
+ op.create_index('user_organization', 'user', ['organization'], unique=False)
+ op.create_index('user_robot', 'user', ['robot'], unique=False)
+ op.create_index('user_stripe_id', 'user', ['stripe_id'], unique=False)
+ op.create_index('user_username', 'user', ['username'], unique=True)
+ op.create_table('buildtriggerservice',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=False)
+
+ op.bulk_insert(tables.buildtriggerservice,
+ [
+ {'id':1, 'name':'github'},
+ ])
+
+ op.create_table('federatedlogin',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('service_id', sa.Integer(), nullable=False),
+ sa.Column('service_ident', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['service_id'], ['loginservice.id'], ),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('federatedlogin_service_id', 'federatedlogin', ['service_id'], unique=False)
+ op.create_index('federatedlogin_service_id_service_ident', 'federatedlogin', ['service_id', 'service_ident'], unique=True)
+ op.create_index('federatedlogin_service_id_user_id', 'federatedlogin', ['service_id', 'user_id'], unique=True)
+ op.create_index('federatedlogin_user_id', 'federatedlogin', ['user_id'], unique=False)
+ op.create_table('oauthapplication',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('client_id', sa.String(length=255), nullable=False),
+ sa.Column('client_secret', sa.String(length=255), nullable=False),
+ sa.Column('redirect_uri', sa.String(length=255), nullable=False),
+ sa.Column('application_uri', sa.String(length=255), nullable=False),
+ sa.Column('organization_id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('description', sa.Text(), nullable=False),
+ sa.Column('gravatar_email', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['organization_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('oauthapplication_client_id', 'oauthapplication', ['client_id'], unique=False)
+ op.create_index('oauthapplication_organization_id', 'oauthapplication', ['organization_id'], unique=False)
+ op.create_table('notification',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('target_id', sa.Integer(), nullable=False),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['kind_id'], ['notificationkind.id'], ),
+ sa.ForeignKeyConstraint(['target_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('notification_created', 'notification', ['created'], unique=False)
+ op.create_index('notification_kind_id', 'notification', ['kind_id'], unique=False)
+ op.create_index('notification_target_id', 'notification', ['target_id'], unique=False)
+ op.create_index('notification_uuid', 'notification', ['uuid'], unique=False)
+ op.create_table('emailconfirmation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('pw_reset', sa.Boolean(), nullable=False),
+ sa.Column('new_email', sa.String(length=255), nullable=True),
+ sa.Column('email_confirm', sa.Boolean(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('emailconfirmation_code', 'emailconfirmation', ['code'], unique=True)
+ op.create_index('emailconfirmation_user_id', 'emailconfirmation', ['user_id'], unique=False)
+ op.create_table('team',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('organization_id', sa.Integer(), nullable=False),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.Column('description', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['organization_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['role_id'], ['teamrole.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('team_name', 'team', ['name'], unique=False)
+ op.create_index('team_name_organization_id', 'team', ['name', 'organization_id'], unique=True)
+ op.create_index('team_organization_id', 'team', ['organization_id'], unique=False)
+ op.create_index('team_role_id', 'team', ['role_id'], unique=False)
+ op.create_table('repository',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('namespace', sa.String(length=255), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('visibility_id', sa.Integer(), nullable=False),
+ sa.Column('description', sa.Text(), nullable=True),
+ sa.Column('badge_token', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['visibility_id'], ['visibility.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repository_namespace_name', 'repository', ['namespace', 'name'], unique=True)
+ op.create_index('repository_visibility_id', 'repository', ['visibility_id'], unique=False)
+ op.create_table('accesstoken',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('friendly_name', sa.String(length=255), nullable=True),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.Column('temporary', sa.Boolean(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('accesstoken_code', 'accesstoken', ['code'], unique=True)
+ op.create_index('accesstoken_repository_id', 'accesstoken', ['repository_id'], unique=False)
+ op.create_index('accesstoken_role_id', 'accesstoken', ['role_id'], unique=False)
+ op.create_table('repositorypermission',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('team_id', sa.Integer(), nullable=True),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repositorypermission_repository_id', 'repositorypermission', ['repository_id'], unique=False)
+ op.create_index('repositorypermission_role_id', 'repositorypermission', ['role_id'], unique=False)
+ op.create_index('repositorypermission_team_id', 'repositorypermission', ['team_id'], unique=False)
+ op.create_index('repositorypermission_team_id_repository_id', 'repositorypermission', ['team_id', 'repository_id'], unique=True)
+ op.create_index('repositorypermission_user_id', 'repositorypermission', ['user_id'], unique=False)
+ op.create_index('repositorypermission_user_id_repository_id', 'repositorypermission', ['user_id', 'repository_id'], unique=True)
+ op.create_table('oauthaccesstoken',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('application_id', sa.Integer(), nullable=False),
+ sa.Column('authorized_user_id', sa.Integer(), nullable=False),
+ sa.Column('scope', sa.String(length=255), nullable=False),
+ sa.Column('access_token', sa.String(length=255), nullable=False),
+ sa.Column('token_type', sa.String(length=255), nullable=False),
+ sa.Column('expires_at', sa.DateTime(), nullable=False),
+ sa.Column('refresh_token', sa.String(length=255), nullable=True),
+ sa.Column('data', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['application_id'], ['oauthapplication.id'], ),
+ sa.ForeignKeyConstraint(['authorized_user_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('oauthaccesstoken_access_token', 'oauthaccesstoken', ['access_token'], unique=False)
+ op.create_index('oauthaccesstoken_application_id', 'oauthaccesstoken', ['application_id'], unique=False)
+ op.create_index('oauthaccesstoken_authorized_user_id', 'oauthaccesstoken', ['authorized_user_id'], unique=False)
+ op.create_index('oauthaccesstoken_refresh_token', 'oauthaccesstoken', ['refresh_token'], unique=False)
+ op.create_index('oauthaccesstoken_uuid', 'oauthaccesstoken', ['uuid'], unique=False)
+ op.create_table('teammember',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('team_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('teammember_team_id', 'teammember', ['team_id'], unique=False)
+ op.create_index('teammember_user_id', 'teammember', ['user_id'], unique=False)
+ op.create_index('teammember_user_id_team_id', 'teammember', ['user_id', 'team_id'], unique=True)
+ op.create_table('webhook',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('public_id', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('parameters', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('webhook_public_id', 'webhook', ['public_id'], unique=True)
+ op.create_index('webhook_repository_id', 'webhook', ['repository_id'], unique=False)
+ op.create_table('oauthauthorizationcode',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('application_id', sa.Integer(), nullable=False),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('scope', sa.String(length=255), nullable=False),
+ sa.Column('data', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['application_id'], ['oauthapplication.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('oauthauthorizationcode_application_id', 'oauthauthorizationcode', ['application_id'], unique=False)
+ op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=False)
+ op.create_table('image',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('docker_image_id', sa.String(length=255), nullable=False),
+ sa.Column('checksum', sa.String(length=255), nullable=True),
+ sa.Column('created', sa.DateTime(), nullable=True),
+ sa.Column('comment', sa.Text(), nullable=True),
+ sa.Column('command', sa.Text(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('image_size', sa.BigInteger(), nullable=True),
+ sa.Column('ancestors', sa.String(length=60535), nullable=True),
+ sa.Column('storage_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('image_ancestors', 'image', ['ancestors'], unique=False, mysql_length=767)
+ op.create_index('image_repository_id', 'image', ['repository_id'], unique=False)
+ op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=True)
+ op.create_index('image_storage_id', 'image', ['storage_id'], unique=False)
+ op.create_table('permissionprototype',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('org_id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('activating_user_id', sa.Integer(), nullable=True),
+ sa.Column('delegate_user_id', sa.Integer(), nullable=True),
+ sa.Column('delegate_team_id', sa.Integer(), nullable=True),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['activating_user_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['delegate_team_id'], ['team.id'], ),
+ sa.ForeignKeyConstraint(['delegate_user_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['org_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('permissionprototype_activating_user_id', 'permissionprototype', ['activating_user_id'], unique=False)
+ op.create_index('permissionprototype_delegate_team_id', 'permissionprototype', ['delegate_team_id'], unique=False)
+ op.create_index('permissionprototype_delegate_user_id', 'permissionprototype', ['delegate_user_id'], unique=False)
+ op.create_index('permissionprototype_org_id', 'permissionprototype', ['org_id'], unique=False)
+ op.create_index('permissionprototype_org_id_activating_user_id', 'permissionprototype', ['org_id', 'activating_user_id'], unique=False)
+ op.create_index('permissionprototype_role_id', 'permissionprototype', ['role_id'], unique=False)
+ op.create_table('repositorytag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('image_id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['image_id'], ['image.id'], ),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repositorytag_image_id', 'repositorytag', ['image_id'], unique=False)
+ op.create_index('repositorytag_repository_id', 'repositorytag', ['repository_id'], unique=False)
+ op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=True)
+ op.create_table('logentry',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('account_id', sa.Integer(), nullable=False),
+ sa.Column('performer_id', sa.Integer(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=True),
+ sa.Column('access_token_id', sa.Integer(), nullable=True),
+ sa.Column('datetime', sa.DateTime(), nullable=False),
+ sa.Column('ip', sa.String(length=255), nullable=True),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['access_token_id'], ['accesstoken.id'], ),
+ sa.ForeignKeyConstraint(['account_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['kind_id'], ['logentrykind.id'], ),
+ sa.ForeignKeyConstraint(['performer_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('logentry_access_token_id', 'logentry', ['access_token_id'], unique=False)
+ op.create_index('logentry_account_id', 'logentry', ['account_id'], unique=False)
+ op.create_index('logentry_datetime', 'logentry', ['datetime'], unique=False)
+ op.create_index('logentry_kind_id', 'logentry', ['kind_id'], unique=False)
+ op.create_index('logentry_performer_id', 'logentry', ['performer_id'], unique=False)
+ op.create_index('logentry_repository_id', 'logentry', ['repository_id'], unique=False)
+ op.create_table('repositorybuildtrigger',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('service_id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('connected_user_id', sa.Integer(), nullable=False),
+ sa.Column('auth_token', sa.String(length=255), nullable=False),
+ sa.Column('config', sa.Text(), nullable=False),
+ sa.Column('write_token_id', sa.Integer(), nullable=True),
+ sa.Column('pull_robot_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['connected_user_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['pull_robot_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.ForeignKeyConstraint(['service_id'], ['buildtriggerservice.id'], ),
+ sa.ForeignKeyConstraint(['write_token_id'], ['accesstoken.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repositorybuildtrigger_connected_user_id', 'repositorybuildtrigger', ['connected_user_id'], unique=False)
+ op.create_index('repositorybuildtrigger_pull_robot_id', 'repositorybuildtrigger', ['pull_robot_id'], unique=False)
+ op.create_index('repositorybuildtrigger_repository_id', 'repositorybuildtrigger', ['repository_id'], unique=False)
+ op.create_index('repositorybuildtrigger_service_id', 'repositorybuildtrigger', ['service_id'], unique=False)
+ op.create_index('repositorybuildtrigger_write_token_id', 'repositorybuildtrigger', ['write_token_id'], unique=False)
+ op.create_table('repositorybuild',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('access_token_id', sa.Integer(), nullable=False),
+ sa.Column('resource_key', sa.String(length=255), nullable=False),
+ sa.Column('job_config', sa.Text(), nullable=False),
+ sa.Column('phase', sa.String(length=255), nullable=False),
+ sa.Column('started', sa.DateTime(), nullable=False),
+ sa.Column('display_name', sa.String(length=255), nullable=False),
+ sa.Column('trigger_id', sa.Integer(), nullable=True),
+ sa.Column('pull_robot_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['access_token_id'], ['accesstoken.id'], ),
+ sa.ForeignKeyConstraint(['pull_robot_id'], ['user.id'], ),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.ForeignKeyConstraint(['trigger_id'], ['repositorybuildtrigger.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('repositorybuild_access_token_id', 'repositorybuild', ['access_token_id'], unique=False)
+ op.create_index('repositorybuild_pull_robot_id', 'repositorybuild', ['pull_robot_id'], unique=False)
+ op.create_index('repositorybuild_repository_id', 'repositorybuild', ['repository_id'], unique=False)
+ op.create_index('repositorybuild_resource_key', 'repositorybuild', ['resource_key'], unique=False)
+ op.create_index('repositorybuild_trigger_id', 'repositorybuild', ['trigger_id'], unique=False)
+ op.create_index('repositorybuild_uuid', 'repositorybuild', ['uuid'], unique=False)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('repositorybuild')
+ op.drop_table('repositorybuildtrigger')
+ op.drop_table('logentry')
+ op.drop_table('repositorytag')
+ op.drop_table('permissionprototype')
+ op.drop_table('image')
+ op.drop_table('oauthauthorizationcode')
+ op.drop_table('webhook')
+ op.drop_table('teammember')
+ op.drop_table('oauthaccesstoken')
+ op.drop_table('repositorypermission')
+ op.drop_table('accesstoken')
+ op.drop_table('repository')
+ op.drop_table('team')
+ op.drop_table('emailconfirmation')
+ op.drop_table('notification')
+ op.drop_table('oauthapplication')
+ op.drop_table('federatedlogin')
+ op.drop_table('buildtriggerservice')
+ op.drop_table('user')
+ op.drop_table('visibility')
+ op.drop_table('teamrole')
+ op.drop_table('notificationkind')
+ op.drop_table('logentrykind')
+ op.drop_table('role')
+ op.drop_table('queueitem')
+ op.drop_table('imagestorage')
+ op.drop_table('loginservice')
+ ### end Alembic commands ###
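
The initial-schema migration above creates referenced tables (loginservice, role, user, repository, and so on) before the tables that point at them, and its downgrade drops tables in the reverse order so no foreign key is left dangling. A two-table sketch of that ordering, with hypothetical table names:

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        # Parent first: nothing references 'account' yet.
        op.create_table('account',
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('name', sa.String(length=255), nullable=False),
            sa.PrimaryKeyConstraint('id'))
        # Child second: its foreign key requires 'account' to exist already.
        op.create_table('apikey',
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('account_id', sa.Integer(), nullable=False),
            sa.Column('token', sa.String(length=255), nullable=False),
            sa.ForeignKeyConstraint(['account_id'], ['account.id']),
            sa.PrimaryKeyConstraint('id'))

    def downgrade():
        # Reverse order: drop the referencing table before the referenced one.
        op.drop_table('apikey')
        op.drop_table('account')
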
diff --git a/data/migrations/versions/5ad999136045_add_signature_storage.py b/data/migrations/versions/5ad999136045_add_signature_storage.py
new file mode 100644
index 000000000..f306c58b8
--- /dev/null
+++ b/data/migrations/versions/5ad999136045_add_signature_storage.py
@@ -0,0 +1,55 @@
+"""Add signature storage
+
+Revision ID: 5ad999136045
+Revises: 228d1af6af1c
+Create Date: 2015-02-05 15:01:54.989573
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5ad999136045'
+down_revision = '228d1af6af1c'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('imagestoragesignaturekind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignaturekind'))
+ )
+ op.create_index('imagestoragesignaturekind_name', 'imagestoragesignaturekind', ['name'], unique=True)
+ op.create_table('imagestoragesignature',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('storage_id', sa.Integer(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('signature', sa.Text(), nullable=True),
+ sa.Column('uploading', sa.Boolean(), nullable=True),
+ sa.ForeignKeyConstraint(['kind_id'], ['imagestoragesignaturekind.id'], name=op.f('fk_imagestoragesignature_kind_id_imagestoragesignaturekind')),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestoragesignature_storage_id_imagestorage')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignature'))
+ )
+ op.create_index('imagestoragesignature_kind_id', 'imagestoragesignature', ['kind_id'], unique=False)
+ op.create_index('imagestoragesignature_kind_id_storage_id', 'imagestoragesignature', ['kind_id', 'storage_id'], unique=True)
+ op.create_index('imagestoragesignature_storage_id', 'imagestoragesignature', ['storage_id'], unique=False)
+ ### end Alembic commands ###
+
+ op.bulk_insert(tables.imagestoragetransformation,
+ [
+ {'id': 2, 'name':'aci'},
+ ])
+
+ op.bulk_insert(tables.imagestoragesignaturekind,
+ [
+ {'id': 1, 'name':'gpg2'},
+ ])
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('imagestoragesignature')
+ op.drop_table('imagestoragesignaturekind')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py b/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py
deleted file mode 100644
index 89b469d6b..000000000
--- a/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Cleanup old robots
-
-Revision ID: 5b7503aada1b
-Revises: 224ce4c72c2f
-Create Date: 2018-05-09 17:18:52.230504
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '5b7503aada1b'
-down_revision = '224ce4c72c2f'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-from util.migrate.cleanup_old_robots import cleanup_old_robots
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- cleanup_old_robots()
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # Nothing to do.
- pass
diff --git a/data/migrations/versions/5b84373e5db_convert_slack_webhook_data.py b/data/migrations/versions/5b84373e5db_convert_slack_webhook_data.py
new file mode 100644
index 000000000..52f431b74
--- /dev/null
+++ b/data/migrations/versions/5b84373e5db_convert_slack_webhook_data.py
@@ -0,0 +1,24 @@
+"""Convert slack webhook data
+
+Revision ID: 5b84373e5db
+Revises: 1c5b738283a5
+Create Date: 2014-12-16 12:02:55.167744
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5b84373e5db'
+down_revision = '1c5b738283a5'
+
+from alembic import op
+import sqlalchemy as sa
+
+from util.migrate.migrateslackwebhook import run_slackwebhook_migration
+
+
+def upgrade(tables):
+ run_slackwebhook_migration()
+
+
+def downgrade(tables):
+ pass
diff --git a/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py b/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py
deleted file mode 100644
index 46a2c3cec..000000000
--- a/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""Remove 'oci' tables not used by CNR. The rest will be migrated and renamed.
-
-Revision ID: 5cbbfc95bac7
-Revises: 1783530bee68
-Create Date: 2018-05-23 17:28:40.114433
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '5cbbfc95bac7'
-down_revision = '1783530bee68'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-from util.migrate import UTF8LongText, UTF8CharField
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('derivedimage')
- op.drop_table('manifestlabel')
- op.drop_table('blobplacementlocationpreference')
- op.drop_table('blobuploading')
- op.drop_table('bittorrentpieces')
- op.drop_table('manifestlayerdockerv1')
- op.drop_table('manifestlayerscan')
- op.drop_table('manifestlayer')
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table(
- 'manifestlayer',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('manifest_index', sa.BigInteger(), nullable=False),
- sa.Column('metadata_json', UTF8LongText, nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestlayer_blob_id_blob')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlayer_manifest_id_manifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayer'))
- )
- op.create_index('manifestlayer_manifest_index', 'manifestlayer', ['manifest_index'], unique=False)
- op.create_index('manifestlayer_manifest_id_manifest_index', 'manifestlayer', ['manifest_id', 'manifest_index'], unique=True)
- op.create_index('manifestlayer_manifest_id', 'manifestlayer', ['manifest_id'], unique=False)
- op.create_index('manifestlayer_blob_id', 'manifestlayer', ['blob_id'], unique=False)
-
- op.create_table(
- 'manifestlayerscan',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('layer_id', sa.Integer(), nullable=False),
- sa.Column('scannable', sa.Boolean(), nullable=False),
- sa.Column('scanned_by', UTF8CharField(length=255), nullable=False),
- sa.ForeignKeyConstraint(['layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerscan_layer_id_manifestlayer')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerscan'))
- )
-
- op.create_index('manifestlayerscan_layer_id', 'manifestlayerscan', ['layer_id'], unique=True)
-
- op.create_table(
- 'bittorrentpieces',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('pieces', UTF8LongText, nullable=False),
- sa.Column('piece_length', sa.BigInteger(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_bittorrentpieces_blob_id_blob')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_bittorrentpieces'))
- )
-
- op.create_index('bittorrentpieces_blob_id_piece_length', 'bittorrentpieces', ['blob_id', 'piece_length'], unique=True)
- op.create_index('bittorrentpieces_blob_id', 'bittorrentpieces', ['blob_id'], unique=False)
-
- op.create_table(
- 'blobuploading',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.Column('byte_count', sa.BigInteger(), nullable=False),
- sa.Column('uncompressed_byte_count', sa.BigInteger(), nullable=True),
- sa.Column('chunk_count', sa.BigInteger(), nullable=False),
- sa.Column('storage_metadata', UTF8LongText, nullable=True),
- sa.Column('sha_state', UTF8LongText, nullable=True),
- sa.Column('piece_sha_state', UTF8LongText, nullable=True),
- sa.Column('piece_hashes', UTF8LongText, nullable=True),
- sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobuploading_location_id_blobplacementlocation')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobuploading_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobuploading'))
- )
-
- op.create_index('blobuploading_uuid', 'blobuploading', ['uuid'], unique=True)
- op.create_index('blobuploading_repository_id_uuid', 'blobuploading', ['repository_id', 'uuid'], unique=True)
- op.create_index('blobuploading_repository_id', 'blobuploading', ['repository_id'], unique=False)
- op.create_index('blobuploading_location_id', 'blobuploading', ['location_id'], unique=False)
- op.create_index('blobuploading_created', 'blobuploading', ['created'], unique=False)
-
- op.create_table(
- 'manifestlayerdockerv1',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_layer_id', sa.Integer(), nullable=False),
- sa.Column('image_id', UTF8CharField(length=255), nullable=False),
- sa.Column('checksum', UTF8CharField(length=255), nullable=False),
- sa.Column('compat_json', UTF8LongText, nullable=False),
- sa.ForeignKeyConstraint(['manifest_layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerdockerv1_manifest_layer_id_manifestlayer')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerdockerv1'))
- )
-
- op.create_index('manifestlayerdockerv1_manifest_layer_id', 'manifestlayerdockerv1', ['manifest_layer_id'], unique=False)
- op.create_index('manifestlayerdockerv1_image_id', 'manifestlayerdockerv1', ['image_id'], unique=False)
-
- op.create_table(
- 'manifestlabel',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('annotated_id', sa.Integer(), nullable=False),
- sa.Column('label_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['annotated_id'], ['manifest.id'], name=op.f('fk_manifestlabel_annotated_id_manifest')),
- sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_manifestlabel_label_id_label')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlabel_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlabel'))
- )
-
- op.create_index('manifestlabel_repository_id_annotated_id_label_id', 'manifestlabel', ['repository_id', 'annotated_id', 'label_id'], unique=True)
- op.create_index('manifestlabel_repository_id', 'manifestlabel', ['repository_id'], unique=False)
- op.create_index('manifestlabel_label_id', 'manifestlabel', ['label_id'], unique=False)
- op.create_index('manifestlabel_annotated_id', 'manifestlabel', ['annotated_id'], unique=False)
-
- op.create_table(
- 'blobplacementlocationpreference',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacementlocpref_locid_blobplacementlocation')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_blobplacementlocationpreference_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocationpreference'))
- )
- op.create_index('blobplacementlocationpreference_user_id', 'blobplacementlocationpreference', ['user_id'], unique=False)
- op.create_index('blobplacementlocationpreference_location_id', 'blobplacementlocationpreference', ['location_id'], unique=False)
-
-
- op.create_table(
- 'derivedimage',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('source_manifest_id', sa.Integer(), nullable=False),
- sa.Column('derived_manifest_json', UTF8LongText, nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('uniqueness_hash', sa.String(length=255), nullable=False),
- sa.Column('signature_blob_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_derivedimage_blob_id_blob')),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_derivedimage_media_type_id_mediatype')),
- sa.ForeignKeyConstraint(['signature_blob_id'], ['blob.id'], name=op.f('fk_derivedimage_signature_blob_id_blob')),
- sa.ForeignKeyConstraint(['source_manifest_id'], ['manifest.id'], name=op.f('fk_derivedimage_source_manifest_id_manifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedimage'))
- )
- op.create_index('derivedimage_uuid', 'derivedimage', ['uuid'], unique=True)
- op.create_index('derivedimage_uniqueness_hash', 'derivedimage', ['uniqueness_hash'], unique=True)
- op.create_index('derivedimage_source_manifest_id_media_type_id_uniqueness_hash', 'derivedimage', ['source_manifest_id', 'media_type_id', 'uniqueness_hash'], unique=True)
- op.create_index('derivedimage_source_manifest_id_blob_id', 'derivedimage', ['source_manifest_id', 'blob_id'], unique=True)
- op.create_index('derivedimage_source_manifest_id', 'derivedimage', ['source_manifest_id'], unique=False)
- op.create_index('derivedimage_signature_blob_id', 'derivedimage', ['signature_blob_id'], unique=False)
- op.create_index('derivedimage_media_type_id', 'derivedimage', ['media_type_id'], unique=False)
- op.create_index('derivedimage_blob_id', 'derivedimage', ['blob_id'], unique=False)
- # ### end Alembic commands ###
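The deleted revisions in this series all rebind op to ProgressWrapper(original_op, progress_reporter), so schema operations can report progress while still delegating to the normal Alembic op. The real class lives in data/migrations/progress.py and is not included in this diff; under that assumption, a minimal sketch of the delegation idea (the update_progress call on the monitor is hypothetical):

class ProgressWrapperSketch(object):
    """Hedged sketch of the proxy pattern: forward every attribute lookup to
    the wrapped Alembic op, reporting the operation name before delegating.
    The real ProgressWrapper may differ in detail."""

    def __init__(self, delegate_module, progress_monitor):
        self._delegate = delegate_module
        self._monitor = progress_monitor

    def __getattr__(self, attr_name):
        delegate_attr = getattr(self._delegate, attr_name)
        if not callable(delegate_attr):
            return delegate_attr

        def reporting_call(*args, **kwargs):
            # Tell the monitor which operation is running (e.g. 'drop_table'),
            # then run the real Alembic operation unchanged.
            self._monitor.update_progress(attr_name)
            return delegate_attr(*args, **kwargs)

        return reporting_call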
diff --git a/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py b/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py
deleted file mode 100644
index a0df295dc..000000000
--- a/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""Backfill new appr tables
-
-Revision ID: 5d463ea1e8a8
-Revises: 610320e9dacf
-Create Date: 2018-07-08 10:01:19.756126
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '5d463ea1e8a8'
-down_revision = '610320e9dacf'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from util.migrate.table_ops import copy_table_contents
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- conn = op.get_bind()
-
- copy_table_contents('blob', 'apprblob', conn)
- copy_table_contents('manifest', 'apprmanifest', conn)
- copy_table_contents('manifestlist', 'apprmanifestlist', conn)
- copy_table_contents('blobplacement', 'apprblobplacement', conn)
- copy_table_contents('manifestblob', 'apprmanifestblob', conn)
- copy_table_contents('manifestlistmanifest', 'apprmanifestlistmanifest', conn)
- copy_table_contents('tag', 'apprtag', conn)
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- pass
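The backfill above leans entirely on copy_table_contents from util/migrate/table_ops.py, which this hunk does not show. Conceptually it is a bulk INSERT ... SELECT from each table into its identically shaped appr-prefixed twin (the twins are created by revision 610320e9dacf below). A hedged sketch under that assumption:

import sqlalchemy as sa

def copy_table_contents_sketch(source_table, dest_table, conn):
    """Illustrative stand-in for copy_table_contents: copy every row,
    primary keys included, with a single INSERT ... SELECT. Assumes the two
    tables have identical column layouts. Table names are trusted constants,
    since SQL identifiers cannot be passed as bound parameters."""
    conn.execute(sa.text(
        'INSERT INTO {dest} SELECT * FROM {source}'.format(
            dest=dest_table, source=source_table)))

# Used the way the migration uses the real helper, e.g.:
#   copy_table_contents_sketch('tag', 'apprtag', op.get_bind())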
diff --git a/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py b/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py
deleted file mode 100644
index 99c365260..000000000
--- a/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py
+++ /dev/null
@@ -1,206 +0,0 @@
-"""Add new Appr-specific tables
-
-Revision ID: 610320e9dacf
-Revises: 5cbbfc95bac7
-Create Date: 2018-05-24 16:46:13.514562
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '610320e9dacf'
-down_revision = '5cbbfc95bac7'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-from util.migrate.table_ops import copy_table_contents
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('apprblobplacementlocation',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprblobplacementlocation'))
- )
- op.create_index('apprblobplacementlocation_name', 'apprblobplacementlocation', ['name'], unique=True)
- op.create_table('apprtagkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprtagkind'))
- )
- op.create_index('apprtagkind_name', 'apprtagkind', ['name'], unique=True)
- op.create_table('apprblob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('size', sa.BigInteger(), nullable=False),
- sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprblob_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprblob'))
- )
- op.create_index('apprblob_digest', 'apprblob', ['digest'], unique=True)
- op.create_index('apprblob_media_type_id', 'apprblob', ['media_type_id'], unique=False)
- op.create_table('apprmanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('manifest_json', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprmanifest_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifest'))
- )
- op.create_index('apprmanifest_digest', 'apprmanifest', ['digest'], unique=True)
- op.create_index('apprmanifest_media_type_id', 'apprmanifest', ['media_type_id'], unique=False)
- op.create_table('apprmanifestlist',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('manifest_list_json', sa.Text(), nullable=False),
- sa.Column('schema_version', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprmanifestlist_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifestlist'))
- )
- op.create_index('apprmanifestlist_digest', 'apprmanifestlist', ['digest'], unique=True)
- op.create_index('apprmanifestlist_media_type_id', 'apprmanifestlist', ['media_type_id'], unique=False)
- op.create_table('apprblobplacement',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['apprblob.id'], name=op.f('fk_apprblobplacement_blob_id_apprblob')),
- sa.ForeignKeyConstraint(['location_id'], ['apprblobplacementlocation.id'], name=op.f('fk_apprblobplacement_location_id_apprblobplacementlocation')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprblobplacement'))
- )
- op.create_index('apprblobplacement_blob_id', 'apprblobplacement', ['blob_id'], unique=False)
- op.create_index('apprblobplacement_blob_id_location_id', 'apprblobplacement', ['blob_id', 'location_id'], unique=True)
- op.create_index('apprblobplacement_location_id', 'apprblobplacement', ['location_id'], unique=False)
- op.create_table('apprmanifestblob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['apprblob.id'], name=op.f('fk_apprmanifestblob_blob_id_apprblob')),
- sa.ForeignKeyConstraint(['manifest_id'], ['apprmanifest.id'], name=op.f('fk_apprmanifestblob_manifest_id_apprmanifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifestblob'))
- )
- op.create_index('apprmanifestblob_blob_id', 'apprmanifestblob', ['blob_id'], unique=False)
- op.create_index('apprmanifestblob_manifest_id', 'apprmanifestblob', ['manifest_id'], unique=False)
- op.create_index('apprmanifestblob_manifest_id_blob_id', 'apprmanifestblob', ['manifest_id', 'blob_id'], unique=True)
- op.create_table('apprmanifestlistmanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_list_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('operating_system', sa.String(length=255), nullable=True),
- sa.Column('architecture', sa.String(length=255), nullable=True),
- sa.Column('platform_json', sa.Text(), nullable=True),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['manifest_id'], ['apprmanifest.id'], name=op.f('fk_apprmanifestlistmanifest_manifest_id_apprmanifest')),
- sa.ForeignKeyConstraint(['manifest_list_id'], ['apprmanifestlist.id'], name=op.f('fk_apprmanifestlistmanifest_manifest_list_id_apprmanifestlist')),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprmanifestlistmanifest_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifestlistmanifest'))
- )
- op.create_index('apprmanifestlistmanifest_manifest_id', 'apprmanifestlistmanifest', ['manifest_id'], unique=False)
- op.create_index('apprmanifestlistmanifest_manifest_list_id', 'apprmanifestlistmanifest', ['manifest_list_id'], unique=False)
- op.create_index('apprmanifestlistmanifest_manifest_list_id_media_type_id', 'apprmanifestlistmanifest', ['manifest_list_id', 'media_type_id'], unique=False)
- op.create_index('apprmanifestlistmanifest_manifest_list_id_operating_system_arch', 'apprmanifestlistmanifest', ['manifest_list_id', 'operating_system', 'architecture', 'media_type_id'], unique=False)
- op.create_index('apprmanifestlistmanifest_media_type_id', 'apprmanifestlistmanifest', ['media_type_id'], unique=False)
- op.create_table('apprtag',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_list_id', sa.Integer(), nullable=True),
- sa.Column('lifetime_start', sa.BigInteger(), nullable=False),
- sa.Column('lifetime_end', sa.BigInteger(), nullable=True),
- sa.Column('hidden', sa.Boolean(), nullable=False),
- sa.Column('reverted', sa.Boolean(), nullable=False),
- sa.Column('protected', sa.Boolean(), nullable=False),
- sa.Column('tag_kind_id', sa.Integer(), nullable=False),
- sa.Column('linked_tag_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['linked_tag_id'], ['apprtag.id'], name=op.f('fk_apprtag_linked_tag_id_apprtag')),
- sa.ForeignKeyConstraint(['manifest_list_id'], ['apprmanifestlist.id'], name=op.f('fk_apprtag_manifest_list_id_apprmanifestlist')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_apprtag_repository_id_repository')),
- sa.ForeignKeyConstraint(['tag_kind_id'], ['apprtagkind.id'], name=op.f('fk_apprtag_tag_kind_id_apprtagkind')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_apprtag'))
- )
- op.create_index('apprtag_lifetime_end', 'apprtag', ['lifetime_end'], unique=False)
- op.create_index('apprtag_linked_tag_id', 'apprtag', ['linked_tag_id'], unique=False)
- op.create_index('apprtag_manifest_list_id', 'apprtag', ['manifest_list_id'], unique=False)
- op.create_index('apprtag_repository_id', 'apprtag', ['repository_id'], unique=False)
- op.create_index('apprtag_repository_id_name', 'apprtag', ['repository_id', 'name'], unique=False)
- op.create_index('apprtag_repository_id_name_hidden', 'apprtag', ['repository_id', 'name', 'hidden'], unique=False)
- op.create_index('apprtag_repository_id_name_lifetime_end', 'apprtag', ['repository_id', 'name', 'lifetime_end'], unique=True)
- op.create_index('apprtag_tag_kind_id', 'apprtag', ['tag_kind_id'], unique=False)
- # ### end Alembic commands ###
-
- conn = op.get_bind()
- copy_table_contents('blobplacementlocation', 'apprblobplacementlocation', conn)
- copy_table_contents('tagkind', 'apprtagkind', conn)
-
- # ### population of test data ### #
-
- tester.populate_table('apprmanifest', [
- ('digest', tester.TestDataType.String),
- ('media_type_id', tester.TestDataType.Foreign('mediatype')),
- ('manifest_json', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('apprmanifestlist', [
- ('digest', tester.TestDataType.String),
- ('manifest_list_json', tester.TestDataType.JSON),
- ('schema_version', tester.TestDataType.String),
- ('media_type_id', tester.TestDataType.Foreign('mediatype')),
- ])
-
- tester.populate_table('apprmanifestlistmanifest', [
- ('manifest_list_id', tester.TestDataType.Foreign('apprmanifestlist')),
- ('manifest_id', tester.TestDataType.Foreign('apprmanifest')),
- ('operating_system', tester.TestDataType.String),
- ('architecture', tester.TestDataType.String),
- ('platform_json', tester.TestDataType.JSON),
- ('media_type_id', tester.TestDataType.Foreign('mediatype')),
- ])
-
- tester.populate_table('apprblob', [
- ('digest', tester.TestDataType.String),
- ('media_type_id', tester.TestDataType.Foreign('mediatype')),
- ('size', tester.TestDataType.BigInteger),
- ('uncompressed_size', tester.TestDataType.BigInteger),
- ])
-
- tester.populate_table('apprmanifestblob', [
- ('manifest_id', tester.TestDataType.Foreign('apprmanifest')),
- ('blob_id', tester.TestDataType.Foreign('apprblob')),
- ])
-
- tester.populate_table('apprtag', [
- ('name', tester.TestDataType.String),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('manifest_list_id', tester.TestDataType.Foreign('apprmanifestlist')),
- ('lifetime_start', tester.TestDataType.Integer),
- ('hidden', tester.TestDataType.Boolean),
- ('reverted', tester.TestDataType.Boolean),
- ('protected', tester.TestDataType.Boolean),
- ('tag_kind_id', tester.TestDataType.Foreign('apprtagkind')),
- ])
-
- tester.populate_table('apprblobplacement', [
- ('blob_id', tester.TestDataType.Foreign('apprmanifestblob')),
- ('location_id', tester.TestDataType.Foreign('apprblobplacementlocation')),
- ])
-
- # ### end population of test data ### #
-
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('apprtag')
- op.drop_table('apprmanifestlistmanifest')
- op.drop_table('apprmanifestblob')
- op.drop_table('apprblobplacement')
- op.drop_table('apprmanifestlist')
- op.drop_table('apprmanifest')
- op.drop_table('apprblob')
- op.drop_table('apprtagkind')
- op.drop_table('apprblobplacementlocation')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py b/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py
deleted file mode 100644
index 1dbb1e7a4..000000000
--- a/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""Add ability for build triggers to be disabled
-
-Revision ID: 61cadbacb9fc
-Revises: b4c2d45bc132
-Create Date: 2017-10-18 12:07:26.190901
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '61cadbacb9fc'
-down_revision = 'b4c2d45bc132'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('disablereason',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_disablereason'))
- )
- op.create_index('disablereason_name', 'disablereason', ['name'], unique=True)
-
- op.bulk_insert(
- tables.disablereason,
- [
- {'id': 1, 'name': 'user_toggled'},
- ],
- )
-
- op.bulk_insert(tables.logentrykind, [
- {'name': 'toggle_repo_trigger'},
- ])
-
- op.add_column(u'repositorybuildtrigger', sa.Column('disabled_reason_id', sa.Integer(), nullable=True))
- op.add_column(u'repositorybuildtrigger', sa.Column('enabled', sa.Boolean(), nullable=False, server_default=sa.sql.expression.true()))
- op.create_index('repositorybuildtrigger_disabled_reason_id', 'repositorybuildtrigger', ['disabled_reason_id'], unique=False)
- op.create_foreign_key(op.f('fk_repositorybuildtrigger_disabled_reason_id_disablereason'), 'repositorybuildtrigger', 'disablereason', ['disabled_reason_id'], ['id'])
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_column('repositorybuildtrigger', 'disabled_reason_id', tester.TestDataType.Foreign('disablereason'))
- tester.populate_column('repositorybuildtrigger', 'enabled', tester.TestDataType.Boolean)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_constraint(op.f('fk_repositorybuildtrigger_disabled_reason_id_disablereason'), 'repositorybuildtrigger', type_='foreignkey')
- op.drop_index('repositorybuildtrigger_disabled_reason_id', table_name='repositorybuildtrigger')
- op.drop_column(u'repositorybuildtrigger', 'enabled')
- op.drop_column(u'repositorybuildtrigger', 'disabled_reason_id')
- op.drop_table('disablereason')
- # ### end Alembic commands ###
-
- op.execute(tables
- .logentrykind
- .delete()
- .where(tables.logentrykind.c.name == op.inline_literal('toggle_repo_trigger')))
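One detail worth calling out in the dropped revision above: the new enabled column is added as NOT NULL with server_default=sa.sql.expression.true(). The server default is what lets the ALTER succeed on a repositorybuildtrigger table that already contains rows, since the database backfills every existing trigger to TRUE itself. The same idiom on a hypothetical table, for illustration only:

import sqlalchemy as sa
from alembic import op

def add_enabled_flag():
    # Adding a NOT NULL column to a populated table fails unless the database
    # has a value to give existing rows; server_default supplies it.
    # 'widget' is a hypothetical table used only for this sketch.
    op.add_column(
        'widget',
        sa.Column('enabled', sa.Boolean(), nullable=False,
                  server_default=sa.sql.expression.true()))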
diff --git a/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py b/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py
deleted file mode 100644
index b7d17207f..000000000
--- a/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Change manifest_bytes to a UTF8 text field
-
-Revision ID: 654e6df88b71
-Revises: eafdeadcebc7
-Create Date: 2018-08-15 09:58:46.109277
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '654e6df88b71'
-down_revision = 'eafdeadcebc7'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-from util.migrate import UTF8LongText
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column('manifest', 'manifest_bytes', existing_type=sa.Text(), type_=UTF8LongText())
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column('manifest', 'manifest_bytes', existing_type=UTF8LongText(), type_=sa.Text())
diff --git a/data/migrations/versions/67eb43c778b_add_index_for_repository_datetime_to_.py b/data/migrations/versions/67eb43c778b_add_index_for_repository_datetime_to_.py
new file mode 100644
index 000000000..00ff374e4
--- /dev/null
+++ b/data/migrations/versions/67eb43c778b_add_index_for_repository_datetime_to_.py
@@ -0,0 +1,26 @@
+"""add index for repository+datetime to logentry
+
+Revision ID: 67eb43c778b
+Revises: 1c3decf6b9c4
+Create Date: 2015-04-19 16:00:39.126289
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '67eb43c778b'
+down_revision = '1c3decf6b9c4'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('logentry_repository_id_datetime', 'logentry', ['repository_id', 'datetime'], unique=False)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('logentry_repository_id_datetime', table_name='logentry')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py b/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
deleted file mode 100644
index aae5325b9..000000000
--- a/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""Add TagToRepositoryTag table
-
-Revision ID: 67f0abd172ae
-Revises: 10f45ee2310b
-Create Date: 2018-10-30 11:31:06.615488
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '67f0abd172ae'
-down_revision = '10f45ee2310b'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('tagtorepositorytag',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('tag_id', sa.Integer(), nullable=False),
- sa.Column('repository_tag_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tagtorepositorytag_repository_id_repository')),
- sa.ForeignKeyConstraint(['repository_tag_id'], ['repositorytag.id'], name=op.f('fk_tagtorepositorytag_repository_tag_id_repositorytag')),
- sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name=op.f('fk_tagtorepositorytag_tag_id_tag')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagtorepositorytag'))
- )
- op.create_index('tagtorepositorytag_repository_id', 'tagtorepositorytag', ['repository_id'], unique=False)
- op.create_index('tagtorepositorytag_repository_tag_id', 'tagtorepositorytag', ['repository_tag_id'], unique=True)
- op.create_index('tagtorepositorytag_tag_id', 'tagtorepositorytag', ['tag_id'], unique=True)
- # ### end Alembic commands ###
-
- tester.populate_table('tagtorepositorytag', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('tag_id', tester.TestDataType.Foreign('tag')),
- ('repository_tag_id', tester.TestDataType.Foreign('repositorytag')),
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('tagtorepositorytag')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py b/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py
deleted file mode 100644
index 789ba4fa4..000000000
--- a/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Change LogEntry to use a BigInteger as its primary key
-
-Revision ID: 6c21e2cfb8b6
-Revises: d17c695859ea
-Create Date: 2018-07-27 16:30:02.877346
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '6c21e2cfb8b6'
-down_revision = 'd17c695859ea'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column(
- table_name='logentry',
- column_name='id',
- nullable=False,
- autoincrement=True,
- type_=sa.BigInteger(),
- )
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column(
- table_name='logentry',
- column_name='id',
- nullable=False,
- autoincrement=True,
- type_=sa.Integer(),
- )
diff --git a/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py b/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py
deleted file mode 100644
index 99ee1e77c..000000000
--- a/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""Add user prompt support
-
-Revision ID: 6c7014e84a5e
-Revises: c156deb8845d
-Create Date: 2016-10-31 16:26:31.447705
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '6c7014e84a5e'
-down_revision = 'c156deb8845d'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.create_table('userpromptkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_userpromptkind'))
- )
- op.create_index('userpromptkind_name', 'userpromptkind', ['name'], unique=False)
- op.create_table('userprompt',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['kind_id'], ['userpromptkind.id'], name=op.f('fk_userprompt_kind_id_userpromptkind')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_userprompt_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_userprompt'))
- )
- op.create_index('userprompt_kind_id', 'userprompt', ['kind_id'], unique=False)
- op.create_index('userprompt_user_id', 'userprompt', ['user_id'], unique=False)
- op.create_index('userprompt_user_id_kind_id', 'userprompt', ['user_id', 'kind_id'], unique=True)
- ### end Alembic commands ###
-
- op.bulk_insert(tables.userpromptkind,
- [
- {'name':'confirm_username'},
- ])
-
- # ### population of test data ### #
- tester.populate_table('userprompt', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('kind_id', tester.TestDataType.Foreign('userpromptkind')),
- ])
- # ### end population of test data ### #
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('userprompt')
- op.drop_table('userpromptkind')
- ### end Alembic commands ###
diff --git a/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py b/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py
deleted file mode 100644
index 47ecf1cb1..000000000
--- a/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""Add LogEntry3 table
-
-Revision ID: 6ec8726c0ace
-Revises: 54492a68a3cf
-Create Date: 2019-01-03 13:41:02.897957
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '6ec8726c0ace'
-down_revision = '54492a68a3cf'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('logentry3',
- sa.Column('id', sa.BigInteger(), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=False),
- sa.Column('account_id', sa.Integer(), nullable=False),
- sa.Column('performer_id', sa.Integer(), nullable=True),
- sa.Column('repository_id', sa.Integer(), nullable=True),
- sa.Column('datetime', sa.DateTime(), nullable=False),
- sa.Column('ip', sa.String(length=255), nullable=True),
- sa.Column('metadata_json', sa.Text(), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry3'))
- )
- op.create_index('logentry3_account_id_datetime', 'logentry3', ['account_id', 'datetime'], unique=False)
- op.create_index('logentry3_datetime', 'logentry3', ['datetime'], unique=False)
- op.create_index('logentry3_performer_id_datetime', 'logentry3', ['performer_id', 'datetime'], unique=False)
- op.create_index('logentry3_repository_id_datetime_kind_id', 'logentry3', ['repository_id', 'datetime', 'kind_id'], unique=False)
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('logentry3')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/6f2ecf5afcf_add_the_uncompressed_size_to_image_.py b/data/migrations/versions/6f2ecf5afcf_add_the_uncompressed_size_to_image_.py
new file mode 100644
index 000000000..0022ae128
--- /dev/null
+++ b/data/migrations/versions/6f2ecf5afcf_add_the_uncompressed_size_to_image_.py
@@ -0,0 +1,25 @@
+"""add the uncompressed size to image storage
+
+Revision ID: 6f2ecf5afcf
+Revises: 13da56878560
+Create Date: 2014-09-22 14:39:13.470566
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '6f2ecf5afcf'
+down_revision = '13da56878560'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('imagestorage', sa.Column('uncompressed_size', sa.BigInteger(), nullable=True))
+ ### end Alembic commands ###
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('imagestorage', 'uncompressed_size')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py b/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
deleted file mode 100644
index 43459af40..000000000
--- a/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
+++ /dev/null
@@ -1,287 +0,0 @@
-"""Backfill new encrypted fields
-
-Revision ID: 703298a825c2
-Revises: c13c8052f7a6
-Create Date: 2019-08-19 16:07:48.109889
-
-"""
-# revision identifiers, used by Alembic.
-revision = '703298a825c2'
-down_revision = 'c13c8052f7a6'
-
-import logging
-import uuid
-
-from datetime import datetime
-
-from peewee import (JOIN, IntegrityError, DateTimeField, CharField, ForeignKeyField,
- BooleanField, TextField, IntegerField)
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-import sqlalchemy as sa
-
-from data.database import (BaseModel, User, Repository, AccessTokenKind, Role,
- random_string_generator, QuayUserField, BuildTriggerService,
- uuid_generator, DisableReason)
-from data.fields import Credential, DecryptedValue, EncryptedCharField, EncryptedTextField, EnumField, CredentialField
-from data.model.token import ACCESS_TOKEN_NAME_PREFIX_LENGTH
-from data.model.appspecifictoken import TOKEN_NAME_PREFIX_LENGTH as AST_TOKEN_NAME_PREFIX_LENGTH
-from data.model.oauth import ACCESS_TOKEN_PREFIX_LENGTH as OAUTH_ACCESS_TOKEN_PREFIX_LENGTH
-from data.model.oauth import AUTHORIZATION_CODE_PREFIX_LENGTH
-
-BATCH_SIZE = 10
-
-logger = logging.getLogger(__name__)
-
-def _iterate(model_class, clause):
- while True:
- has_rows = False
- for row in list(model_class.select().where(clause).limit(BATCH_SIZE)):
- has_rows = True
- yield row
-
- if not has_rows:
- break
-
-
-def _decrypted(value):
- if value is None:
- return None
-
- assert isinstance(value, basestring)
- return DecryptedValue(value)
-
-
-# NOTE: As per standard migrations involving Peewee models, we copy them here, as they will change
-# after this call.
-class AccessToken(BaseModel):
- code = CharField(default=random_string_generator(length=64), unique=True, index=True)
- token_name = CharField(default=random_string_generator(length=32), unique=True, index=True)
- token_code = EncryptedCharField(default_token_length=32)
-
-class RobotAccountToken(BaseModel):
- robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
- token = EncryptedCharField(default_token_length=64)
- fully_migrated = BooleanField(default=False)
-
-class RepositoryBuildTrigger(BaseModel):
- uuid = CharField(default=uuid_generator, index=True)
- auth_token = CharField(null=True)
- private_key = TextField(null=True)
-
- secure_auth_token = EncryptedCharField(null=True)
- secure_private_key = EncryptedTextField(null=True)
- fully_migrated = BooleanField(default=False)
-
-class AppSpecificAuthToken(BaseModel):
- token_name = CharField(index=True, unique=True, default=random_string_generator(60))
- token_secret = EncryptedCharField(default_token_length=60)
- token_code = CharField(default=random_string_generator(length=120), unique=True, index=True)
-
-class OAuthAccessToken(BaseModel):
- token_name = CharField(index=True, unique=True)
- token_code = CredentialField()
- access_token = CharField(index=True)
-
-class OAuthAuthorizationCode(BaseModel):
- code = CharField(index=True, unique=True, null=True)
- code_name = CharField(index=True, unique=True)
- code_credential = CredentialField()
-
-class OAuthApplication(BaseModel):
- secure_client_secret = EncryptedCharField(default_token_length=40, null=True)
- fully_migrated = BooleanField(default=False)
- client_secret = CharField(default=random_string_generator(length=40))
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
- from app import app
- if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
- # Empty all access token names to fix the bug where we put the wrong name and code
- # in for some tokens.
- AccessToken.update(token_name=None).where(AccessToken.token_name >> None).execute()
-
- # AccessToken.
- logger.info('Backfilling encrypted credentials for access tokens')
- for access_token in _iterate(AccessToken, ((AccessToken.token_name >> None) |
- (AccessToken.token_name == ''))):
- logger.info('Backfilling encrypted credentials for access token %s', access_token.id)
- assert access_token.code is not None
- assert access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
- assert access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]
-
- token_name = access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
- token_code = _decrypted(access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:])
-
- (AccessToken
- .update(token_name=token_name, token_code=token_code)
- .where(AccessToken.id == access_token.id, AccessToken.code == access_token.code)
- .execute())
-
- assert AccessToken.select().where(AccessToken.token_name >> None).count() == 0
-
- # Robots.
- logger.info('Backfilling encrypted credentials for robots')
- while True:
- has_row = False
- query = (User
- .select()
- .join(RobotAccountToken, JOIN.LEFT_OUTER)
- .where(User.robot == True, RobotAccountToken.id >> None)
- .limit(BATCH_SIZE))
-
- for robot_user in query:
- logger.info('Backfilling encrypted credentials for robot %s', robot_user.id)
- has_row = True
- try:
- RobotAccountToken.create(robot_account=robot_user,
- token=_decrypted(robot_user.email),
- fully_migrated=False)
- except IntegrityError:
- break
-
- if not has_row:
- break
-
- # RepositoryBuildTrigger
- logger.info('Backfilling encrypted credentials for repo build triggers')
- for repo_build_trigger in _iterate(RepositoryBuildTrigger,
- (RepositoryBuildTrigger.fully_migrated == False)):
- logger.info('Backfilling encrypted credentials for repo build trigger %s',
- repo_build_trigger.id)
-
- (RepositoryBuildTrigger
- .update(secure_auth_token=_decrypted(repo_build_trigger.auth_token),
- secure_private_key=_decrypted(repo_build_trigger.private_key),
- fully_migrated=True)
- .where(RepositoryBuildTrigger.id == repo_build_trigger.id,
- RepositoryBuildTrigger.uuid == repo_build_trigger.uuid)
- .execute())
-
- assert (RepositoryBuildTrigger
- .select()
- .where(RepositoryBuildTrigger.fully_migrated == False)
- .count()) == 0
-
- # AppSpecificAuthToken
- logger.info('Backfilling encrypted credentials for app specific auth tokens')
- for token in _iterate(AppSpecificAuthToken, ((AppSpecificAuthToken.token_name >> None) |
- (AppSpecificAuthToken.token_name == '') |
- (AppSpecificAuthToken.token_secret >> None))):
- logger.info('Backfilling encrypted credentials for app specific auth %s',
- token.id)
- assert token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:]
-
- token_name = token.token_code[:AST_TOKEN_NAME_PREFIX_LENGTH]
- token_secret = _decrypted(token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:])
- assert token_name
- assert token_secret
-
- (AppSpecificAuthToken
- .update(token_name=token_name,
- token_secret=token_secret)
- .where(AppSpecificAuthToken.id == token.id,
- AppSpecificAuthToken.token_code == token.token_code)
- .execute())
-
- assert (AppSpecificAuthToken
- .select()
- .where(AppSpecificAuthToken.token_name >> None)
- .count()) == 0
-
- # OAuthAccessToken
- logger.info('Backfilling credentials for OAuth access tokens')
- for token in _iterate(OAuthAccessToken, ((OAuthAccessToken.token_name >> None) |
- (OAuthAccessToken.token_name == ''))):
- logger.info('Backfilling credentials for OAuth access token %s', token.id)
- token_name = token.access_token[:OAUTH_ACCESS_TOKEN_PREFIX_LENGTH]
- token_code = Credential.from_string(token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:])
- assert token_name
- assert token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:]
-
- (OAuthAccessToken
- .update(token_name=token_name,
- token_code=token_code)
- .where(OAuthAccessToken.id == token.id,
- OAuthAccessToken.access_token == token.access_token)
- .execute())
-
- assert (OAuthAccessToken
- .select()
- .where(OAuthAccessToken.token_name >> None)
- .count()) == 0
-
- # OAuthAuthorizationCode
- logger.info('Backfilling credentials for OAuth auth code')
- for code in _iterate(OAuthAuthorizationCode, ((OAuthAuthorizationCode.code_name >> None) |
- (OAuthAuthorizationCode.code_name == ''))):
- logger.info('Backfilling credentials for OAuth auth code %s', code.id)
- user_code = code.code or random_string_generator(AUTHORIZATION_CODE_PREFIX_LENGTH * 2)()
- code_name = user_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
- code_credential = Credential.from_string(user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:])
- assert code_name
- assert user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
-
- (OAuthAuthorizationCode
- .update(code_name=code_name, code_credential=code_credential)
- .where(OAuthAuthorizationCode.id == code.id)
- .execute())
-
- assert (OAuthAuthorizationCode
- .select()
- .where(OAuthAuthorizationCode.code_name >> None)
- .count()) == 0
-
- # OAuthApplication
- logger.info('Backfilling secret for OAuth applications')
- for app in _iterate(OAuthApplication, OAuthApplication.fully_migrated == False):
- logger.info('Backfilling secret for OAuth application %s', app.id)
- client_secret = app.client_secret or str(uuid.uuid4())
- secure_client_secret = _decrypted(client_secret)
-
- (OAuthApplication
- .update(secure_client_secret=secure_client_secret, fully_migrated=True)
- .where(OAuthApplication.id == app.id, OAuthApplication.fully_migrated == False)
- .execute())
-
- assert (OAuthApplication
- .select()
- .where(OAuthApplication.fully_migrated == False)
- .count()) == 0
-
- # Adjust existing fields to be nullable.
- op.alter_column('accesstoken', 'code', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('oauthaccesstoken', 'access_token', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('oauthauthorizationcode', 'code', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('appspecificauthtoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
-
- # Adjust new fields to be non-nullable.
- op.alter_column('accesstoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
- op.alter_column('accesstoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
-
- op.alter_column('appspecificauthtoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
- op.alter_column('appspecificauthtoken', 'token_secret', nullable=False, existing_type=sa.String(length=255))
-
- op.alter_column('oauthaccesstoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
- op.alter_column('oauthaccesstoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
-
- op.alter_column('oauthauthorizationcode', 'code_name', nullable=False, existing_type=sa.String(length=255))
- op.alter_column('oauthauthorizationcode', 'code_credential', nullable=False, existing_type=sa.String(length=255))
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
- op.alter_column('accesstoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('accesstoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
-
- op.alter_column('appspecificauthtoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('appspecificauthtoken', 'token_secret', nullable=True, existing_type=sa.String(length=255))
-
- op.alter_column('oauthaccesstoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('oauthaccesstoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
-
- op.alter_column('oauthauthorizationcode', 'code_name', nullable=True, existing_type=sa.String(length=255))
- op.alter_column('oauthauthorizationcode', 'code_credential', nullable=True, existing_type=sa.String(length=255))
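The _iterate helper in the deleted backfill above is the core idiom of this migration: select a small batch of not-yet-migrated rows, update them so they no longer match the predicate, and loop until the query comes back empty; the trailing assert ... count() == 0 then proves convergence before the columns are flipped to non-nullable. A condensed restatement of that loop (the Peewee model and field names are placeholders, not the real Quay schema):

from peewee import BooleanField, CharField, Model, SqliteDatabase

db = SqliteDatabase(':memory:')  # stand-in database for the sketch

class LegacyToken(Model):
    secret = CharField()
    fully_migrated = BooleanField(default=False)

    class Meta:
        database = db

def backfill_in_batches(batch_size=10):
    """Mirror of the _iterate()/update() pattern: grab a batch of unmigrated
    rows, flip each one so it drops out of the predicate, repeat."""
    while True:
        batch = list(
            LegacyToken.select()
            .where(LegacyToken.fully_migrated == False)
            .limit(batch_size))
        if not batch:
            break
        for row in batch:
            (LegacyToken
             .update(fully_migrated=True)
             .where(LegacyToken.id == row.id)
             .execute())

    # Same convergence check the real migration performs before altering columns.
    assert LegacyToken.select().where(LegacyToken.fully_migrated == False).count() == 0

# Usage for the sketch: db.connect(); db.create_tables([LegacyToken]); backfill_in_batches()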
diff --git a/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py b/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py
new file mode 100644
index 000000000..9b2110df7
--- /dev/null
+++ b/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py
@@ -0,0 +1,34 @@
+"""Change build queue reference from foreign key to an id.
+
+Revision ID: 707d5191eda
+Revises: 4ef04c61fcf9
+Create Date: 2015-02-23 12:36:33.814528
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '707d5191eda'
+down_revision = '4ef04c61fcf9'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuild', sa.Column('queue_id', sa.String(length=255), nullable=True))
+ op.create_index('repositorybuild_queue_id', 'repositorybuild', ['queue_id'], unique=False)
+ op.drop_constraint(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', type_='foreignkey')
+ op.drop_index('repositorybuild_queue_item_id', table_name='repositorybuild')
+ op.drop_column('repositorybuild', 'queue_item_id')
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuild', sa.Column('queue_item_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
+ op.create_foreign_key(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', 'queueitem', ['queue_item_id'], ['id'])
+ op.create_index('repositorybuild_queue_item_id', 'repositorybuild', ['queue_item_id'], unique=False)
+ op.drop_index('repositorybuild_queue_id', table_name='repositorybuild')
+ op.drop_column('repositorybuild', 'queue_id')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py b/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py
deleted file mode 100644
index b5fb97d63..000000000
--- a/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Add support for app specific tokens
-
-Revision ID: 7367229b38d9
-Revises: d8989249f8f6
-Create Date: 2017-12-12 13:15:42.419764
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '7367229b38d9'
-down_revision = 'd8989249f8f6'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-from util.migrate import UTF8CharField
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('appspecificauthtoken',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('title', UTF8CharField(length=255), nullable=False),
- sa.Column('token_code', sa.String(length=255), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.Column('expiration', sa.DateTime(), nullable=True),
- sa.Column('last_accessed', sa.DateTime(), nullable=True),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_appspecificauthtoken_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_appspecificauthtoken'))
- )
- op.create_index('appspecificauthtoken_token_code', 'appspecificauthtoken', ['token_code'], unique=True)
- op.create_index('appspecificauthtoken_user_id', 'appspecificauthtoken', ['user_id'], unique=False)
- op.create_index('appspecificauthtoken_user_id_expiration', 'appspecificauthtoken', ['user_id', 'expiration'], unique=False)
- op.create_index('appspecificauthtoken_uuid', 'appspecificauthtoken', ['uuid'], unique=False)
- # ### end Alembic commands ###
-
- op.bulk_insert(tables.logentrykind, [
- {'name': 'create_app_specific_token'},
- {'name': 'revoke_app_specific_token'},
- ])
-
- # ### population of test data ### #
- tester.populate_table('appspecificauthtoken', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('uuid', tester.TestDataType.UUID),
- ('title', tester.TestDataType.UTF8Char),
- ('token_code', tester.TestDataType.String),
- ('created', tester.TestDataType.DateTime),
- ('expiration', tester.TestDataType.DateTime),
- ('last_accessed', tester.TestDataType.DateTime),
- ])
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('appspecificauthtoken')
- # ### end Alembic commands ###
-
- op.execute(tables
- .logentrykind
- .delete()
- .where(tables.
- logentrykind.name == op.inline_literal('create_app_specific_token')))
-
- op.execute(tables
- .logentrykind
- .delete()
- .where(tables.
- logentrykind.name == op.inline_literal('revoke_app_specific_token')))
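A pattern that recurs in several of the removed revisions and is visible again just above: upgrade seeds enum-like rows with op.bulk_insert, and downgrade deletes exactly those rows using op.inline_literal, largely so the constant is rendered directly into the SQL and the statement also works in Alembic's offline --sql mode. A minimal sketch of the pair; the table construct here is built locally for illustration, whereas the real migrations receive it pre-built on the tables argument:

import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column, table

logentrykind = table(
    'logentrykind',
    column('name', sa.String),
)

def seed_kinds():
    # Insert the new log-entry kinds during upgrade.
    op.bulk_insert(logentrykind, [
        {'name': 'create_app_specific_token'},
        {'name': 'revoke_app_specific_token'},
    ])

def unseed_kinds():
    # Remove only the rows this migration added, with the name inlined
    # as a literal rather than a bound parameter.
    for kind_name in ('create_app_specific_token', 'revoke_app_specific_token'):
        op.execute(
            logentrykind
            .delete()
            .where(logentrykind.c.name == op.inline_literal(kind_name)))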
diff --git a/data/migrations/versions/7a525c68eb13_add_oci_app_models.py b/data/migrations/versions/7a525c68eb13_add_oci_app_models.py
deleted file mode 100644
index 7cade6854..000000000
--- a/data/migrations/versions/7a525c68eb13_add_oci_app_models.py
+++ /dev/null
@@ -1,340 +0,0 @@
-"""Add OCI/App models
-
-Revision ID: 7a525c68eb13
-Revises: e2894a3a3c19
-Create Date: 2017-01-24 16:25:52.170277
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '7a525c68eb13'
-down_revision = 'e2894a3a3c19'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-from sqlalchemy.sql import table, column
-from util.migrate import UTF8LongText, UTF8CharField
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.create_table(
- 'tagkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagkind'))
- )
- op.create_index('tagkind_name', 'tagkind', ['name'], unique=True)
-
- op.create_table(
- 'blobplacementlocation',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocation'))
- )
- op.create_index('blobplacementlocation_name', 'blobplacementlocation', ['name'], unique=True)
-
- op.create_table(
- 'blob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('size', sa.BigInteger(), nullable=False),
- sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_blob_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blob'))
- )
- op.create_index('blob_digest', 'blob', ['digest'], unique=True)
- op.create_index('blob_media_type_id', 'blob', ['media_type_id'], unique=False)
-
- op.create_table(
- 'blobplacementlocationpreference',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacementlocpref_locid_blobplacementlocation')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_blobplacementlocationpreference_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocationpreference'))
- )
- op.create_index('blobplacementlocationpreference_location_id', 'blobplacementlocationpreference', ['location_id'], unique=False)
- op.create_index('blobplacementlocationpreference_user_id', 'blobplacementlocationpreference', ['user_id'], unique=False)
-
- op.create_table(
- 'manifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('manifest_json', UTF8LongText, nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifest_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifest'))
- )
- op.create_index('manifest_digest', 'manifest', ['digest'], unique=True)
- op.create_index('manifest_media_type_id', 'manifest', ['media_type_id'], unique=False)
-
- op.create_table(
- 'manifestlist',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('manifest_list_json', UTF8LongText, nullable=False),
- sa.Column('schema_version', UTF8CharField(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlist_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlist'))
- )
- op.create_index('manifestlist_digest', 'manifestlist', ['digest'], unique=True)
- op.create_index('manifestlist_media_type_id', 'manifestlist', ['media_type_id'], unique=False)
-
- op.create_table(
- 'bittorrentpieces',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('pieces', UTF8LongText, nullable=False),
- sa.Column('piece_length', sa.BigInteger(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_bittorrentpieces_blob_id_blob')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_bittorrentpieces'))
- )
- op.create_index('bittorrentpieces_blob_id', 'bittorrentpieces', ['blob_id'], unique=False)
- op.create_index('bittorrentpieces_blob_id_piece_length', 'bittorrentpieces', ['blob_id', 'piece_length'], unique=True)
-
- op.create_table(
- 'blobplacement',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_blobplacement_blob_id_blob')),
- sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacement_location_id_blobplacementlocation')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacement'))
- )
- op.create_index('blobplacement_blob_id', 'blobplacement', ['blob_id'], unique=False)
- op.create_index('blobplacement_blob_id_location_id', 'blobplacement', ['blob_id', 'location_id'], unique=True)
- op.create_index('blobplacement_location_id', 'blobplacement', ['location_id'], unique=False)
-
- op.create_table(
- 'blobuploading',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.Column('byte_count', sa.BigInteger(), nullable=False),
- sa.Column('uncompressed_byte_count', sa.BigInteger(), nullable=True),
- sa.Column('chunk_count', sa.BigInteger(), nullable=False),
- sa.Column('storage_metadata', UTF8LongText, nullable=True),
- sa.Column('sha_state', UTF8LongText, nullable=True),
- sa.Column('piece_sha_state', UTF8LongText, nullable=True),
- sa.Column('piece_hashes', UTF8LongText, nullable=True),
- sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobuploading_location_id_blobplacementlocation')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobuploading_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobuploading'))
- )
- op.create_index('blobuploading_created', 'blobuploading', ['created'], unique=False)
- op.create_index('blobuploading_location_id', 'blobuploading', ['location_id'], unique=False)
- op.create_index('blobuploading_repository_id', 'blobuploading', ['repository_id'], unique=False)
- op.create_index('blobuploading_repository_id_uuid', 'blobuploading', ['repository_id', 'uuid'], unique=True)
- op.create_index('blobuploading_uuid', 'blobuploading', ['uuid'], unique=True)
-
- op.create_table(
- 'derivedimage',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('source_manifest_id', sa.Integer(), nullable=False),
- sa.Column('derived_manifest_json', UTF8LongText, nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('uniqueness_hash', sa.String(length=255), nullable=False),
- sa.Column('signature_blob_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_derivedimage_blob_id_blob')),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_derivedimage_media_type_id_mediatype')),
- sa.ForeignKeyConstraint(['signature_blob_id'], ['blob.id'], name=op.f('fk_derivedimage_signature_blob_id_blob')),
- sa.ForeignKeyConstraint(['source_manifest_id'], ['manifest.id'], name=op.f('fk_derivedimage_source_manifest_id_manifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedimage'))
- )
- op.create_index('derivedimage_blob_id', 'derivedimage', ['blob_id'], unique=False)
- op.create_index('derivedimage_media_type_id', 'derivedimage', ['media_type_id'], unique=False)
- op.create_index('derivedimage_signature_blob_id', 'derivedimage', ['signature_blob_id'], unique=False)
- op.create_index('derivedimage_source_manifest_id', 'derivedimage', ['source_manifest_id'], unique=False)
- op.create_index('derivedimage_source_manifest_id_blob_id', 'derivedimage', ['source_manifest_id', 'blob_id'], unique=True)
- op.create_index('derivedimage_source_manifest_id_media_type_id_uniqueness_hash', 'derivedimage', ['source_manifest_id', 'media_type_id', 'uniqueness_hash'], unique=True)
- op.create_index('derivedimage_uniqueness_hash', 'derivedimage', ['uniqueness_hash'], unique=True)
- op.create_index('derivedimage_uuid', 'derivedimage', ['uuid'], unique=True)
-
- op.create_table(
- 'manifestblob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestblob_blob_id_blob')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestblob_manifest_id_manifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestblob'))
- )
- op.create_index('manifestblob_blob_id', 'manifestblob', ['blob_id'], unique=False)
- op.create_index('manifestblob_manifest_id', 'manifestblob', ['manifest_id'], unique=False)
- op.create_index('manifestblob_manifest_id_blob_id', 'manifestblob', ['manifest_id', 'blob_id'], unique=True)
-
- op.create_table(
- 'manifestlabel',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('annotated_id', sa.Integer(), nullable=False),
- sa.Column('label_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['annotated_id'], ['manifest.id'], name=op.f('fk_manifestlabel_annotated_id_manifest')),
- sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_manifestlabel_label_id_label')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlabel_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlabel'))
- )
- op.create_index('manifestlabel_annotated_id', 'manifestlabel', ['annotated_id'], unique=False)
- op.create_index('manifestlabel_label_id', 'manifestlabel', ['label_id'], unique=False)
- op.create_index('manifestlabel_repository_id', 'manifestlabel', ['repository_id'], unique=False)
- op.create_index('manifestlabel_repository_id_annotated_id_label_id', 'manifestlabel', ['repository_id', 'annotated_id', 'label_id'], unique=True)
-
- op.create_table(
- 'manifestlayer',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('manifest_index', sa.BigInteger(), nullable=False),
- sa.Column('metadata_json', UTF8LongText, nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestlayer_blob_id_blob')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlayer_manifest_id_manifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayer'))
- )
- op.create_index('manifestlayer_blob_id', 'manifestlayer', ['blob_id'], unique=False)
- op.create_index('manifestlayer_manifest_id', 'manifestlayer', ['manifest_id'], unique=False)
- op.create_index('manifestlayer_manifest_id_manifest_index', 'manifestlayer', ['manifest_id', 'manifest_index'], unique=True)
- op.create_index('manifestlayer_manifest_index', 'manifestlayer', ['manifest_index'], unique=False)
-
- op.create_table(
- 'manifestlistmanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_list_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('operating_system', UTF8CharField(length=255), nullable=True),
- sa.Column('architecture', UTF8CharField(length=255), nullable=True),
- sa.Column('platform_json', UTF8LongText, nullable=True),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlistmanifest_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_manifestlistmanifest_manifest_list_id_manifestlist')),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlistmanifest_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlistmanifest'))
- )
- op.create_index('manifestlistmanifest_manifest_id', 'manifestlistmanifest', ['manifest_id'], unique=False)
- op.create_index('manifestlistmanifest_manifest_list_id', 'manifestlistmanifest', ['manifest_list_id'], unique=False)
- op.create_index('manifestlistmanifest_manifest_listid_os_arch_mtid', 'manifestlistmanifest', ['manifest_list_id', 'operating_system', 'architecture', 'media_type_id'], unique=False)
- op.create_index('manifestlistmanifest_manifest_listid_mtid', 'manifestlistmanifest', ['manifest_list_id', 'media_type_id'], unique=False)
- op.create_index('manifestlistmanifest_media_type_id', 'manifestlistmanifest', ['media_type_id'], unique=False)
-
- op.create_table(
- 'tag',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', UTF8CharField(length=190), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_list_id', sa.Integer(), nullable=True),
- sa.Column('lifetime_start', sa.BigInteger(), nullable=False),
- sa.Column('lifetime_end', sa.BigInteger(), nullable=True),
- sa.Column('hidden', sa.Boolean(), nullable=False),
- sa.Column('reverted', sa.Boolean(), nullable=False),
- sa.Column('protected', sa.Boolean(), nullable=False),
- sa.Column('tag_kind_id', sa.Integer(), nullable=False),
- sa.Column('linked_tag_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['linked_tag_id'], ['tag.id'], name=op.f('fk_tag_linked_tag_id_tag')),
- sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_tag_manifest_list_id_manifestlist')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tag_repository_id_repository')),
- sa.ForeignKeyConstraint(['tag_kind_id'], ['tagkind.id'], name=op.f('fk_tag_tag_kind_id_tagkind')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tag'))
- )
- op.create_index('tag_lifetime_end', 'tag', ['lifetime_end'], unique=False)
- op.create_index('tag_linked_tag_id', 'tag', ['linked_tag_id'], unique=False)
- op.create_index('tag_manifest_list_id', 'tag', ['manifest_list_id'], unique=False)
- op.create_index('tag_repository_id', 'tag', ['repository_id'], unique=False)
- op.create_index('tag_repository_id_name_hidden', 'tag', ['repository_id', 'name', 'hidden'], unique=False)
- op.create_index('tag_repository_id_name_lifetime_end', 'tag', ['repository_id', 'name', 'lifetime_end'], unique=True)
- op.create_index('tag_repository_id_name', 'tag', ['repository_id', 'name'], unique=False)
- op.create_index('tag_tag_kind_id', 'tag', ['tag_kind_id'], unique=False)
-
- op.create_table(
- 'manifestlayerdockerv1',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_layer_id', sa.Integer(), nullable=False),
- sa.Column('image_id', UTF8CharField(length=255), nullable=False),
- sa.Column('checksum', UTF8CharField(length=255), nullable=False),
- sa.Column('compat_json', UTF8LongText, nullable=False),
- sa.ForeignKeyConstraint(['manifest_layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerdockerv1_manifest_layer_id_manifestlayer')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerdockerv1'))
- )
- op.create_index('manifestlayerdockerv1_image_id', 'manifestlayerdockerv1', ['image_id'], unique=False)
- op.create_index('manifestlayerdockerv1_manifest_layer_id', 'manifestlayerdockerv1', ['manifest_layer_id'], unique=False)
-
- op.create_table(
- 'manifestlayerscan',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('layer_id', sa.Integer(), nullable=False),
- sa.Column('scannable', sa.Boolean(), nullable=False),
- sa.Column('scanned_by', UTF8CharField(length=255), nullable=False),
- sa.ForeignKeyConstraint(['layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerscan_layer_id_manifestlayer')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerscan'))
- )
- op.create_index('manifestlayerscan_layer_id', 'manifestlayerscan', ['layer_id'], unique=True)
-
- blobplacementlocation_table = table('blobplacementlocation',
- column('id', sa.Integer()),
- column('name', sa.String()),
- )
-
- op.bulk_insert(
- blobplacementlocation_table,
- [
- {'name': 'local_eu'},
- {'name': 'local_us'},
- ],
- )
-
- op.bulk_insert(
- tables.mediatype,
- [
- {'name': 'application/vnd.cnr.blob.v0.tar+gzip'},
- {'name': 'application/vnd.cnr.package-manifest.helm.v0.json'},
- {'name': 'application/vnd.cnr.package-manifest.kpm.v0.json'},
- {'name': 'application/vnd.cnr.package-manifest.docker-compose.v0.json'},
- {'name': 'application/vnd.cnr.package.kpm.v0.tar+gzip'},
- {'name': 'application/vnd.cnr.package.helm.v0.tar+gzip'},
- {'name': 'application/vnd.cnr.package.docker-compose.v0.tar+gzip'},
- {'name': 'application/vnd.cnr.manifests.v0.json'},
- {'name': 'application/vnd.cnr.manifest.list.v0.json'},
- ],
- )
-
- tagkind_table = table('tagkind',
- column('id', sa.Integer()),
- column('name', sa.String()),
- )
-
- op.bulk_insert(
- tagkind_table,
- [
- {'id': 1, 'name': 'tag'},
- {'id': 2, 'name': 'release'},
- {'id': 3, 'name': 'channel'},
- ]
- )
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_table('manifestlayerscan')
- op.drop_table('manifestlayerdockerv1')
- op.drop_table('tag')
- op.drop_table('manifestlistmanifest')
- op.drop_table('manifestlayer')
- op.drop_table('manifestlabel')
- op.drop_table('manifestblob')
- op.drop_table('derivedimage')
- op.drop_table('blobuploading')
- op.drop_table('blobplacement')
- op.drop_table('bittorrentpieces')
- op.drop_table('manifestlist')
- op.drop_table('manifest')
- op.drop_table('blobplacementlocationpreference')
- op.drop_table('blob')
- op.drop_table('tagkind')
- op.drop_table('blobplacementlocation')
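
The upgrade above seeds its lookup tables (blobplacementlocation, tagkind) by declaring lightweight table handles with sqlalchemy.sql.table/column and handing them to op.bulk_insert, rather than importing the full models. A minimal standalone sketch of that Alembic pattern, using a hypothetical lookupkind table:

# Minimal sketch of the bulk-insert pattern above (hypothetical 'lookupkind' table).
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from alembic import op


def upgrade():
    # Declare only the columns needed for the INSERT to be rendered.
    lookupkind = table(
        'lookupkind',
        column('id', sa.Integer()),
        column('name', sa.String()),
    )

    # A single multi-row INSERT is emitted against the handle above.
    op.bulk_insert(
        lookupkind,
        [
            {'id': 1, 'name': 'primary'},
            {'id': 2, 'name': 'secondary'},
        ],
    )
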
diff --git a/data/migrations/versions/82297d834ad_add_us_west_location.py b/data/migrations/versions/82297d834ad_add_us_west_location.py
new file mode 100644
index 000000000..b939a939e
--- /dev/null
+++ b/data/migrations/versions/82297d834ad_add_us_west_location.py
@@ -0,0 +1,29 @@
+"""add US West location
+
+Revision ID: 82297d834ad
+Revises: 47670cbeced
+Create Date: 2014-08-15 13:35:23.834079
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '82297d834ad'
+down_revision = '47670cbeced'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ op.bulk_insert(tables.imagestoragelocation,
+ [
+ {'id':8, 'name':'s3_us_west_1'},
+ ])
+
+
+def downgrade(tables):
+    op.execute(
+        tables.imagestoragelocation
+        .delete()
+        .where(tables.imagestoragelocation.c.name == op.inline_literal('s3_us_west_1'))
+    )
diff --git a/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py b/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py
deleted file mode 100644
index ac177cd9f..000000000
--- a/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Add disabled datetime to trigger
-
-Revision ID: 87fbbc224f10
-Revises: 17aff2e1354e
-Create Date: 2017-10-24 14:06:37.658705
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '87fbbc224f10'
-down_revision = '17aff2e1354e'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('repositorybuildtrigger', sa.Column('disabled_datetime', sa.DateTime(), nullable=True))
- op.create_index('repositorybuildtrigger_disabled_datetime', 'repositorybuildtrigger', ['disabled_datetime'], unique=False)
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_column('repositorybuildtrigger', 'disabled_datetime', tester.TestDataType.DateTime)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('repositorybuildtrigger_disabled_datetime', table_name='repositorybuildtrigger')
- op.drop_column('repositorybuildtrigger', 'disabled_datetime')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py b/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py
deleted file mode 100644
index 49797c6ae..000000000
--- a/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""Add V2_2 data models for Manifest, ManifestBlob and ManifestLegacyImage
-
-Revision ID: 9093adccc784
-Revises: 6c21e2cfb8b6
-Create Date: 2018-08-06 16:07:50.222749
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '9093adccc784'
-down_revision = '6c21e2cfb8b6'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('manifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('manifest_bytes', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifest_media_type_id_mediatype')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifest_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifest'))
- )
- op.create_index('manifest_digest', 'manifest', ['digest'], unique=False)
- op.create_index('manifest_media_type_id', 'manifest', ['media_type_id'], unique=False)
- op.create_index('manifest_repository_id', 'manifest', ['repository_id'], unique=False)
- op.create_index('manifest_repository_id_digest', 'manifest', ['repository_id', 'digest'], unique=True)
- op.create_index('manifest_repository_id_media_type_id', 'manifest', ['repository_id', 'media_type_id'], unique=False)
- op.create_table('manifestblob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('blob_index', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['imagestorage.id'], name=op.f('fk_manifestblob_blob_id_imagestorage')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestblob_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestblob_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestblob'))
- )
- op.create_index('manifestblob_blob_id', 'manifestblob', ['blob_id'], unique=False)
- op.create_index('manifestblob_manifest_id', 'manifestblob', ['manifest_id'], unique=False)
- op.create_index('manifestblob_manifest_id_blob_id', 'manifestblob', ['manifest_id', 'blob_id'], unique=True)
- op.create_index('manifestblob_manifest_id_blob_index', 'manifestblob', ['manifest_id', 'blob_index'], unique=True)
- op.create_index('manifestblob_repository_id', 'manifestblob', ['repository_id'], unique=False)
- op.create_table('manifestlabel',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('label_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_manifestlabel_label_id_label')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlabel_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlabel_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlabel'))
- )
- op.create_index('manifestlabel_label_id', 'manifestlabel', ['label_id'], unique=False)
- op.create_index('manifestlabel_manifest_id', 'manifestlabel', ['manifest_id'], unique=False)
- op.create_index('manifestlabel_manifest_id_label_id', 'manifestlabel', ['manifest_id', 'label_id'], unique=True)
- op.create_index('manifestlabel_repository_id', 'manifestlabel', ['repository_id'], unique=False)
- op.create_table('manifestlegacyimage',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('image_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['image_id'], ['image.id'], name=op.f('fk_manifestlegacyimage_image_id_image')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlegacyimage_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlegacyimage_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlegacyimage'))
- )
- op.create_index('manifestlegacyimage_image_id', 'manifestlegacyimage', ['image_id'], unique=False)
- op.create_index('manifestlegacyimage_manifest_id', 'manifestlegacyimage', ['manifest_id'], unique=True)
- op.create_index('manifestlegacyimage_repository_id', 'manifestlegacyimage', ['repository_id'], unique=False)
- op.create_table('tagmanifesttomanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('broken', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifesttomanifest_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifesttomanifest_tag_manifest_id_tagmanifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifesttomanifest'))
- )
- op.create_index('tagmanifesttomanifest_broken', 'tagmanifesttomanifest', ['broken'], unique=False)
- op.create_index('tagmanifesttomanifest_manifest_id', 'tagmanifesttomanifest', ['manifest_id'], unique=True)
- op.create_index('tagmanifesttomanifest_tag_manifest_id', 'tagmanifesttomanifest', ['tag_manifest_id'], unique=True)
- op.create_table('tagmanifestlabelmap',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=True),
- sa.Column('label_id', sa.Integer(), nullable=False),
- sa.Column('tag_manifest_label_id', sa.Integer(), nullable=False),
- sa.Column('manifest_label_id', sa.Integer(), nullable=True),
- sa.Column('broken_manifest', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_tagmanifestlabelmap_label_id_label')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifestlabelmap_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['manifest_label_id'], ['manifestlabel.id'], name=op.f('fk_tagmanifestlabelmap_manifest_label_id_manifestlabel')),
- sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifestlabelmap_tag_manifest_id_tagmanifest')),
- sa.ForeignKeyConstraint(['tag_manifest_label_id'], ['tagmanifestlabel.id'], name=op.f('fk_tagmanifestlabelmap_tag_manifest_label_id_tagmanifestlabel')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifestlabelmap'))
- )
- op.create_index('tagmanifestlabelmap_broken_manifest', 'tagmanifestlabelmap', ['broken_manifest'], unique=False)
- op.create_index('tagmanifestlabelmap_label_id', 'tagmanifestlabelmap', ['label_id'], unique=False)
- op.create_index('tagmanifestlabelmap_manifest_id', 'tagmanifestlabelmap', ['manifest_id'], unique=False)
- op.create_index('tagmanifestlabelmap_manifest_label_id', 'tagmanifestlabelmap', ['manifest_label_id'], unique=False)
- op.create_index('tagmanifestlabelmap_tag_manifest_id', 'tagmanifestlabelmap', ['tag_manifest_id'], unique=False)
- op.create_index('tagmanifestlabelmap_tag_manifest_label_id', 'tagmanifestlabelmap', ['tag_manifest_label_id'], unique=False)
- # ### end Alembic commands ###
-
- for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
- op.bulk_insert(tables.mediatype,
- [
- {'name': media_type},
- ])
-
- # ### population of test data ### #
- tester.populate_table('manifest', [
- ('digest', tester.TestDataType.String),
- ('manifest_bytes', tester.TestDataType.JSON),
- ('media_type_id', tester.TestDataType.Foreign('mediatype')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ])
-
- tester.populate_table('manifestblob', [
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('blob_id', tester.TestDataType.Foreign('imagestorage')),
- ('blob_index', tester.TestDataType.Integer),
- ])
-
- tester.populate_table('manifestlabel', [
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('label_id', tester.TestDataType.Foreign('label')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ])
-
- tester.populate_table('manifestlegacyimage', [
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('image_id', tester.TestDataType.Foreign('image')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ])
-
- tester.populate_table('tagmanifesttomanifest', [
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
- ])
-
- tester.populate_table('tagmanifestlabelmap', [
- ('manifest_id', tester.TestDataType.Foreign('manifest')),
- ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
- ('tag_manifest_label_id', tester.TestDataType.Foreign('tagmanifestlabel')),
- ('manifest_label_id', tester.TestDataType.Foreign('manifestlabel')),
- ('label_id', tester.TestDataType.Foreign('label')),
- ])
-
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
- op.execute(tables
- .mediatype
- .delete()
-               .where(tables.mediatype.c.name ==
-                      op.inline_literal(media_type)))
-
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('tagmanifestlabelmap')
- op.drop_table('tagmanifesttomanifest')
- op.drop_table('manifestlegacyimage')
- op.drop_table('manifestlabel')
- op.drop_table('manifestblob')
- op.drop_table('manifest')
- # ### end Alembic commands ###
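
The new manifest table above is addressed by a unique (repository_id, digest) index, so a manifest can be resolved by digest within one repository with a single indexed lookup. A rough sketch of the kind of query that index serves, written in SQLAlchemy Core against an abridged mirror of the columns created above (connection handling is left out):

# Sketch of the lookup served by the unique manifest_repository_id_digest index above.
import sqlalchemy as sa

metadata = sa.MetaData()

# Abridged mirror of the columns created by the migration above.
manifest = sa.Table(
    'manifest', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('repository_id', sa.Integer, nullable=False),
    sa.Column('digest', sa.String(255), nullable=False),
    sa.Column('media_type_id', sa.Integer, nullable=False),
    sa.Column('manifest_bytes', sa.Text, nullable=False),
)


def lookup_manifest(conn, repository_id, digest):
    # The unique (repository_id, digest) index guarantees at most one matching row.
    query = manifest.select().where(
        sa.and_(
            manifest.c.repository_id == repository_id,
            manifest.c.digest == digest,
        )
    )
    return conn.execute(query).first()
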
diff --git a/data/migrations/versions/94836b099894_create_new_notification_type.py b/data/migrations/versions/94836b099894_create_new_notification_type.py
deleted file mode 100644
index 6bc780d01..000000000
--- a/data/migrations/versions/94836b099894_create_new_notification_type.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Create new notification type
-
-Revision ID: 94836b099894
-Revises: faf752bd2e0a
-Create Date: 2016-11-30 10:29:51.519278
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '94836b099894'
-down_revision = 'faf752bd2e0a'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.bulk_insert(tables.externalnotificationevent,
- [
- {'name': 'build_cancelled'},
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.execute(tables
- .externalnotificationevent
- .delete()
-               .where(tables.externalnotificationevent.c.name ==
-                      op.inline_literal('build_cancelled')))
diff --git a/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py b/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py
new file mode 100644
index 000000000..a0726bf3b
--- /dev/null
+++ b/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py
@@ -0,0 +1,29 @@
+"""Allow the namespace column to be nullable.
+
+Revision ID: 9a1087b007d
+Revises: 3f4fe1194671
+Create Date: 2014-10-01 16:11:21.277226
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '9a1087b007d'
+down_revision = '3f4fe1194671'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.drop_index('repository_namespace_name', table_name='repository')
+ op.alter_column('repository', 'namespace', nullable=True, existing_type=sa.String(length=255),
+ server_default=sa.text('NULL'))
+
+
+def downgrade(tables):
+ conn = op.get_bind()
+ user_table_name_escaped = conn.dialect.identifier_preparer.format_table(tables['user'])
+ conn.execute('update repository set namespace = (select username from {0} where {0}.id = repository.namespace_user_id) where namespace is NULL'.format(user_table_name_escaped))
+
+ op.create_index('repository_namespace_name', 'repository', ['namespace', 'name'], unique=True)
+ op.alter_column('repository', 'namespace', nullable=False, existing_type=sa.String(length=255))
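
The downgrade above back-fills repository.namespace from the owning user's username with a hand-built SQL string, escaping the reserved user table name through the dialect's identifier preparer. For reference, the same correlated update can be expressed in SQLAlchemy Core of that era, which handles the quoting itself; the table definitions below are abridged assumptions, not the real models:

# Sketch of the namespace back-fill as a correlated UPDATE in SQLAlchemy Core
# (old-style select([...]) / as_scalar() API, matching the era of this migration).
import sqlalchemy as sa

metadata = sa.MetaData()

user = sa.Table(
    'user', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('username', sa.String(255)),
)
repository = sa.Table(
    'repository', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('namespace', sa.String(255)),
    sa.Column('namespace_user_id', sa.Integer),
)


def backfill_namespace(conn):
    # Correlated scalar subquery: the username of the repository's namespace user.
    username = (
        sa.select([user.c.username])
        .where(user.c.id == repository.c.namespace_user_id)
        .as_scalar()
    )
    conn.execute(
        repository.update()
        .where(repository.c.namespace.is_(None))
        .values(namespace=username)
    )
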
diff --git a/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py b/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py
deleted file mode 100644
index c4c6b3f33..000000000
--- a/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""back fill build expand_config
-
-Revision ID: a6c463dfb9fe
-Revises: b4df55dea4b3
-Create Date: 2017-03-17 10:00:19.739858
-
-"""
-
-# revision identifiers, used by Alembic.
-import json
-import os
-
-from app import app
-from peewee import *
-from data.database import BaseModel
-
-revision = 'a6c463dfb9fe'
-down_revision = 'b4df55dea4b3'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-
-class RepositoryBuildTrigger(BaseModel):
- config = TextField(default='{}')
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- if not app.config.get('SETUP_COMPLETE', False):
- return
-
-    repositoryBuildTriggers = RepositoryBuildTrigger.select()
-    for repositoryBuildTrigger in repositoryBuildTriggers:
- config = json.loads(repositoryBuildTrigger.config)
- repositoryBuildTrigger.config = json.dumps(get_config_expand(config))
- repositoryBuildTrigger.save()
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- if not app.config.get('SETUP_COMPLETE', False):
- return
-
-    repositoryBuildTriggers = RepositoryBuildTrigger.select()
-    for repositoryBuildTrigger in repositoryBuildTriggers:
- config = json.loads(repositoryBuildTrigger.config)
- repositoryBuildTrigger.config = json.dumps(get_config_expand(config))
- repositoryBuildTrigger.save()
-
-
-def create_context(current_subdir):
- if current_subdir == "":
- current_subdir = os.path.sep + current_subdir
-
- if current_subdir[len(current_subdir) - 1] != os.path.sep:
- current_subdir += os.path.sep
-
- context, _ = os.path.split(current_subdir)
- return context
-
-
-def create_dockerfile_path(current_subdir):
- if current_subdir == "":
- current_subdir = os.path.sep + current_subdir
-
- if current_subdir[len(current_subdir) - 1] != os.path.sep:
- current_subdir += os.path.sep
-
- return current_subdir + "Dockerfile"
-
-
-def get_config_expand(config):
- """ A function to transform old records into new records """
- if not config:
- return config
-
- # skip records that have been updated
- if "context" in config or "dockerfile_path" in config:
- return config
-
- config_expand = {}
- if "subdir" in config:
- config_expand = dict(config)
- config_expand["context"] = create_context(config["subdir"])
- config_expand["dockerfile_path"] = create_dockerfile_path(config["subdir"])
-
- return config_expand
-
-
-def get_config_contract(config):
- """ A function to delete context and dockerfile_path from config """
- if not config:
- return config
-
- if "context" in config:
- del config["context"]
-
- if "dockerfile_path" in config:
- del config["dockerfile_path"]
-
- return config
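
get_config_expand above rewrites legacy trigger configs that only carry a "subdir" into the newer "context"/"dockerfile_path" form, leaving empty or already-migrated configs untouched, while get_config_contract strips the two derived keys again. A small worked example of the mapping (the build_source key is just an illustrative extra field, and a POSIX path separator is assumed):

# Worked example of the transformation performed by get_config_expand above.
old_config = {'subdir': 'web', 'build_source': 'git@example.com:org/repo.git'}

expanded = {
    'subdir': 'web',
    'build_source': 'git@example.com:org/repo.git',  # unrelated keys are carried over
    'context': 'web',                                # create_context('web')
    'dockerfile_path': 'web/Dockerfile',             # create_dockerfile_path('web')
}

# An empty subdir resolves to the repository root:
#   create_context('') == '/'          create_dockerfile_path('') == '/Dockerfile'
# Configs already containing 'context' or 'dockerfile_path' are returned unchanged.
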
diff --git a/data/migrations/versions/b1d41e2071b_add_an_index_to_the_uuid_in_the_image_.py b/data/migrations/versions/b1d41e2071b_add_an_index_to_the_uuid_in_the_image_.py
new file mode 100644
index 000000000..71a9df794
--- /dev/null
+++ b/data/migrations/versions/b1d41e2071b_add_an_index_to_the_uuid_in_the_image_.py
@@ -0,0 +1,22 @@
+"""Add an index to the uuid in the image storage table.
+
+Revision ID: b1d41e2071b
+Revises: 9a1087b007d
+Create Date: 2014-10-06 18:42:10.021235
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b1d41e2071b'
+down_revision = '9a1087b007d'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=True)
+
+
+def downgrade(tables):
+ op.drop_index('imagestorage_uuid', table_name='imagestorage')
diff --git a/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py b/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py
deleted file mode 100644
index d9c53f10c..000000000
--- a/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""Add deleted namespace table
-
-Revision ID: b4c2d45bc132
-Revises: 152edccba18c
-Create Date: 2018-02-27 11:43:02.329941
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'b4c2d45bc132'
-down_revision = '152edccba18c'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('deletednamespace',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('namespace_id', sa.Integer(), nullable=False),
- sa.Column('marked', sa.DateTime(), nullable=False),
- sa.Column('original_username', sa.String(length=255), nullable=False),
- sa.Column('original_email', sa.String(length=255), nullable=False),
- sa.Column('queue_id', sa.String(length=255), nullable=True),
- sa.ForeignKeyConstraint(['namespace_id'], ['user.id'], name=op.f('fk_deletednamespace_namespace_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_deletednamespace'))
- )
- op.create_index('deletednamespace_namespace_id', 'deletednamespace', ['namespace_id'], unique=True)
- op.create_index('deletednamespace_original_email', 'deletednamespace', ['original_email'], unique=False)
- op.create_index('deletednamespace_original_username', 'deletednamespace', ['original_username'], unique=False)
- op.create_index('deletednamespace_queue_id', 'deletednamespace', ['queue_id'], unique=False)
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_table('deletednamespace', [
- ('namespace_id', tester.TestDataType.Foreign('user')),
- ('marked', tester.TestDataType.DateTime),
- ('original_username', tester.TestDataType.UTF8Char),
- ('original_email', tester.TestDataType.String),
- ('queue_id', tester.TestDataType.Foreign('queueitem')),
- ])
- # ### end population of test data ### #
-
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('deletednamespace')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/b4df55dea4b3_add_repository_kind.py b/data/migrations/versions/b4df55dea4b3_add_repository_kind.py
deleted file mode 100644
index d96dd8c43..000000000
--- a/data/migrations/versions/b4df55dea4b3_add_repository_kind.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""add repository kind
-
-Revision ID: b4df55dea4b3
-Revises: b8ae68ad3e52
-Create Date: 2017-03-19 12:59:41.484430
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'b4df55dea4b3'
-down_revision = 'b8ae68ad3e52'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.create_table(
- 'repositorykind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorykind'))
- )
- op.create_index('repositorykind_name', 'repositorykind', ['name'], unique=True)
-
- op.bulk_insert(
- tables.repositorykind,
- [
- {'id': 1, 'name': 'image'},
- {'id': 2, 'name': 'application'},
- ],
- )
-
- op.add_column(u'repository', sa.Column('kind_id', sa.Integer(), nullable=False, server_default='1'))
- op.create_index('repository_kind_id', 'repository', ['kind_id'], unique=False)
- op.create_foreign_key(op.f('fk_repository_kind_id_repositorykind'), 'repository', 'repositorykind', ['kind_id'], ['id'])
-
- # ### population of test data ### #
- tester.populate_column('repository', 'kind_id', tester.TestDataType.Foreign('repositorykind'))
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_constraint(op.f('fk_repository_kind_id_repositorykind'), 'repository', type_='foreignkey')
- op.drop_index('repository_kind_id', table_name='repository')
- op.drop_column(u'repository', 'kind_id')
- op.drop_table('repositorykind')
diff --git a/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py b/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py
deleted file mode 100644
index 1d26fa2d9..000000000
--- a/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""Add RobotAccountMetadata table
-
-Revision ID: b547bc139ad8
-Revises: 0cf50323c78b
-Create Date: 2018-03-09 15:50:48.298880
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'b547bc139ad8'
-down_revision = '0cf50323c78b'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from util.migrate import UTF8CharField
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('robotaccountmetadata',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('robot_account_id', sa.Integer(), nullable=False),
- sa.Column('description', UTF8CharField(length=255), nullable=False),
- sa.Column('unstructured_json', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['robot_account_id'], ['user.id'], name=op.f('fk_robotaccountmetadata_robot_account_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_robotaccountmetadata'))
- )
- op.create_index('robotaccountmetadata_robot_account_id', 'robotaccountmetadata', ['robot_account_id'], unique=True)
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_table('robotaccountmetadata', [
- ('robot_account_id', tester.TestDataType.Foreign('user')),
- ('description', tester.TestDataType.UTF8Char),
- ('unstructured_json', tester.TestDataType.JSON),
- ])
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('robotaccountmetadata')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py
deleted file mode 100644
index d76c8e018..000000000
--- a/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""Change BlobUpload fields to BigIntegers to allow layers > 8GB
-
-Revision ID: b8ae68ad3e52
-Revises: 7a525c68eb13
-Create Date: 2017-02-27 11:26:49.182349
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'b8ae68ad3e52'
-down_revision = '7a525c68eb13'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.alter_column('blobupload', 'byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
- op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
-
- # ### population of test data ### #
- tester.populate_column('blobupload', 'byte_count', tester.TestDataType.BigInteger)
- tester.populate_column('blobupload', 'uncompressed_byte_count', tester.TestDataType.BigInteger)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### population of test data ### #
- tester.populate_column('blobupload', 'byte_count', tester.TestDataType.Integer)
- tester.populate_column('blobupload', 'uncompressed_byte_count', tester.TestDataType.Integer)
- # ### end population of test data ### #
-
- op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
- op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
diff --git a/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py b/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py
deleted file mode 100644
index b85ae3514..000000000
--- a/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Add lifetime end indexes to tag tables
-
-Revision ID: b9045731c4de
-Revises: e184af42242d
-Create Date: 2019-02-14 17:18:40.474310
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'b9045731c4de'
-down_revision = 'e184af42242d'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_index('repositorytag_repository_id_lifetime_end_ts', 'repositorytag', ['repository_id', 'lifetime_end_ts'], unique=False)
- op.create_index('tag_repository_id_lifetime_end_ms', 'tag', ['repository_id', 'lifetime_end_ms'], unique=False)
-
- op.create_index('repositorytag_repository_id_lifetime_start_ts', 'repositorytag', ['repository_id', 'lifetime_start_ts'], unique=False)
- op.create_index('tag_repository_id_lifetime_start_ms', 'tag', ['repository_id', 'lifetime_start_ms'], unique=False)
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('tag_repository_id_lifetime_end_ms', table_name='tag')
- op.drop_index('repositorytag_repository_id_lifetime_end_ts', table_name='repositorytag')
-
- op.drop_index('tag_repository_id_lifetime_start_ms', table_name='tag')
- op.drop_index('repositorytag_repository_id_lifetime_start_ts', table_name='repositorytag')
- # ### end Alembic commands ###
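
The composite (repository_id, lifetime_end_ms / lifetime_end_ts) indexes above exist to make per-repository lifetime scans cheap. A rough sketch of the query shape they serve, assuming (as the nullable column suggests) that still-live tags carry a NULL lifetime end:

# Sketch of the per-repository expiry scan served by the indexes added above.
import sqlalchemy as sa

metadata = sa.MetaData()

# Abridged 'tag' columns relevant to the (repository_id, lifetime_end_ms) index.
tag = sa.Table(
    'tag', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('repository_id', sa.Integer, nullable=False),
    sa.Column('lifetime_end_ms', sa.BigInteger, nullable=True),
)


def expired_tags(conn, repository_id, now_ms):
    query = tag.select().where(
        sa.and_(
            tag.c.repository_id == repository_id,
            tag.c.lifetime_end_ms.isnot(None),   # live tags have no end timestamp
            tag.c.lifetime_end_ms <= now_ms,
        )
    )
    return conn.execute(query).fetchall()
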
diff --git a/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py b/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py
deleted file mode 100644
index 3968abd32..000000000
--- a/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""Run full tag backfill
-
-Revision ID: b918abdbee43
-Revises: 481623ba00ba
-Create Date: 2019-03-14 13:38:03.411609
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'b918abdbee43'
-down_revision = '481623ba00ba'
-
-import logging.config
-
-from app import app
-from peewee import JOIN, fn
-
-from workers.tagbackfillworker import backfill_tag
-from data.database import RepositoryTag, Repository, User, TagToRepositoryTag
-from util.log import logfile_path
-
-logger = logging.getLogger(__name__)
-
-
-def upgrade(tables, tester, progress_reporter):
- if not app.config.get('SETUP_COMPLETE', False):
- return
-
- start_id = 0
- end_id = 1000
- size = 1000
-
- max_id = RepositoryTag.select(fn.Max(RepositoryTag.id)).scalar()
- if max_id is None:
- return
-
- logger.info("Found maximum ID %s" % max_id)
-
- while True:
- if start_id > max_id:
- break
-
- logger.info('Checking tag range %s - %s', start_id, end_id)
- r = list(RepositoryTag
- .select()
- .join(Repository)
- .switch(RepositoryTag)
- .join(TagToRepositoryTag, JOIN.LEFT_OUTER)
- .where(TagToRepositoryTag.id >> None)
- .where(RepositoryTag.hidden == False,
- RepositoryTag.id >= start_id,
- RepositoryTag.id < end_id))
-
- if len(r) < 1000 and size < 100000:
- size *= 2
-
- start_id = end_id
- end_id = start_id + size
-
- if not len(r):
- continue
-
- logger.info('Found %s tags to backfill', len(r))
- for index, t in enumerate(r):
- logger.info("Backfilling tag %s of %s", index, len(r))
- backfill_tag(t)
-
-
-def downgrade(tables, tester, progress_reporter):
- # Nothing to do.
- pass
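
The backfill above walks RepositoryTag by ID ranges rather than by offset pagination, doubling the window whenever a batch comes back under-filled (capped at 100000) so that sparse stretches of IDs are crossed quickly without ever materializing huge result sets. The scanning logic in isolation, as a generic sketch:

# Generic sketch of the adaptive ID-range batching used by the backfill above.
def scan_in_ranges(fetch_range, max_id, initial_size=1000, max_size=100000):
    """fetch_range(start_id, end_id) must return the rows with start_id <= id < end_id."""
    start_id = 0
    size = initial_size
    end_id = size

    while start_id <= max_id:
        rows = fetch_range(start_id, end_id)

        # Grow the window while batches stay under-filled, up to a hard cap.
        if len(rows) < initial_size and size < max_size:
            size *= 2

        start_id = end_id
        end_id = start_id + size

        for row in rows:
            yield row

Each yielded row would then be handed to a per-row worker, as backfill_tag is in the migration above.
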
diff --git a/data/migrations/versions/bcdde200a1b_add_placements_and_locations_to_the_db.py b/data/migrations/versions/bcdde200a1b_add_placements_and_locations_to_the_db.py
new file mode 100644
index 000000000..9fc433126
--- /dev/null
+++ b/data/migrations/versions/bcdde200a1b_add_placements_and_locations_to_the_db.py
@@ -0,0 +1,55 @@
+"""Add placements and locations to the db.
+
+Revision ID: bcdde200a1b
+Revises: 201d55b38649
+Create Date: 2014-06-18 13:32:42.907922
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'bcdde200a1b'
+down_revision = '201d55b38649'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('imagestoragelocation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('imagestoragelocation_name', 'imagestoragelocation', ['name'], unique=True)
+
+ op.bulk_insert(tables.imagestoragelocation,
+ [
+ {'id':1, 'name':'s3_us_east_1'},
+ {'id':2, 'name':'s3_eu_west_1'},
+ {'id':3, 'name':'s3_ap_southeast_1'},
+ {'id':4, 'name':'s3_ap_southeast_2'},
+ {'id':5, 'name':'s3_ap_northeast_1'},
+ {'id':6, 'name':'s3_sa_east_1'},
+ {'id':7, 'name':'local'},
+ ])
+
+ op.create_table('imagestorageplacement',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('storage_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], ),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('imagestorageplacement_location_id', 'imagestorageplacement', ['location_id'], unique=False)
+ op.create_index('imagestorageplacement_storage_id', 'imagestorageplacement', ['storage_id'], unique=False)
+ op.create_index('imagestorageplacement_storage_id_location_id', 'imagestorageplacement', ['storage_id', 'location_id'], unique=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('imagestorageplacement')
+ op.drop_table('imagestoragelocation')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py b/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py
deleted file mode 100644
index 62c0aba44..000000000
--- a/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Add TeamSync table
-
-Revision ID: be8d1c402ce0
-Revises: a6c463dfb9fe
-Create Date: 2017-02-23 13:34:52.356812
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'be8d1c402ce0'
-down_revision = 'a6c463dfb9fe'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from util.migrate import UTF8LongText
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.create_table('teamsync',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('team_id', sa.Integer(), nullable=False),
- sa.Column('transaction_id', sa.String(length=255), nullable=False),
- sa.Column('last_updated', sa.DateTime(), nullable=True),
- sa.Column('service_id', sa.Integer(), nullable=False),
- sa.Column('config', UTF8LongText(), nullable=False),
- sa.ForeignKeyConstraint(['service_id'], ['loginservice.id'], name=op.f('fk_teamsync_service_id_loginservice')),
- sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_teamsync_team_id_team')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_teamsync'))
- )
- op.create_index('teamsync_last_updated', 'teamsync', ['last_updated'], unique=False)
- op.create_index('teamsync_service_id', 'teamsync', ['service_id'], unique=False)
- op.create_index('teamsync_team_id', 'teamsync', ['team_id'], unique=True)
- ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_table('teamsync', [
- ('team_id', tester.TestDataType.Foreign('team')),
- ('transaction_id', tester.TestDataType.String),
- ('last_updated', tester.TestDataType.DateTime),
- ('service_id', tester.TestDataType.Foreign('loginservice')),
- ('config', tester.TestDataType.JSON),
- ])
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('teamsync')
- ### end Alembic commands ###
diff --git a/data/migrations/versions/c00a1f15968b_add_schema2_media_types.py b/data/migrations/versions/c00a1f15968b_add_schema2_media_types.py
deleted file mode 100644
index 2d2a050df..000000000
--- a/data/migrations/versions/c00a1f15968b_add_schema2_media_types.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
-
-"""Add schema2 media types
-
-Revision ID: c00a1f15968b
-Revises: 67f0abd172ae
-Create Date: 2018-11-13 09:20:21.968503
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c00a1f15968b'
-down_revision = '67f0abd172ae'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
- op.bulk_insert(tables.mediatype,
- [
- {'name': media_type},
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
- op.execute(tables
- .mediatype
- .delete()
-               .where(tables.mediatype.c.name ==
-                      op.inline_literal(media_type)))
diff --git a/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py b/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py
deleted file mode 100644
index 4854630bf..000000000
--- a/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""Remove unencrypted fields and data
-
-Revision ID: c059b952ed76
-Revises: 703298a825c2
-Create Date: 2019-08-19 16:31:00.952773
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c059b952ed76'
-down_revision = '703298a825c2'
-
-import uuid
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-from data.database import FederatedLogin, User, RobotAccountToken
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('oauthaccesstoken_refresh_token', table_name='oauthaccesstoken')
- op.drop_column(u'oauthaccesstoken', 'refresh_token')
-
- op.drop_column('accesstoken', 'code')
-
- op.drop_column('appspecificauthtoken', 'token_code')
-
- op.drop_column('oauthaccesstoken', 'access_token')
- op.drop_column('oauthapplication', 'client_secret')
-
- op.drop_column('oauthauthorizationcode', 'code')
-
- op.drop_column('repositorybuildtrigger', 'private_key')
- op.drop_column('repositorybuildtrigger', 'auth_token')
- # ### end Alembic commands ###
-
- # Overwrite all plaintext robot credentials.
- from app import app
- if app.config.get('SETUP_COMPLETE', False) or tester.is_testing:
- while True:
- try:
- robot_account_token = RobotAccountToken.get(fully_migrated=False)
- robot_account = robot_account_token.robot_account
-
- robot_account.email = str(uuid.uuid4())
- robot_account.save()
-
- federated_login = FederatedLogin.get(user=robot_account)
- federated_login.service_ident = 'robot:%s' % robot_account.id
- federated_login.save()
-
- robot_account_token.fully_migrated = True
- robot_account_token.save()
- except RobotAccountToken.DoesNotExist:
- break
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column(u'oauthaccesstoken', sa.Column('refresh_token', sa.String(length=255), nullable=True))
- op.create_index('oauthaccesstoken_refresh_token', 'oauthaccesstoken', ['refresh_token'], unique=False)
-
- op.add_column('repositorybuildtrigger', sa.Column('auth_token', sa.String(length=255), nullable=True))
- op.add_column('repositorybuildtrigger', sa.Column('private_key', sa.Text(), nullable=True))
-
- op.add_column('oauthauthorizationcode', sa.Column('code', sa.String(length=255), nullable=True))
- op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=True)
-
- op.add_column('oauthapplication', sa.Column('client_secret', sa.String(length=255), nullable=True))
- op.add_column('oauthaccesstoken', sa.Column('access_token', sa.String(length=255), nullable=True))
-
- op.create_index('oauthaccesstoken_access_token', 'oauthaccesstoken', ['access_token'], unique=False)
-
- op.add_column('appspecificauthtoken', sa.Column('token_code', sa.String(length=255), nullable=True))
- op.create_index('appspecificauthtoken_token_code', 'appspecificauthtoken', ['token_code'], unique=True)
-
- op.add_column('accesstoken', sa.Column('code', sa.String(length=255), nullable=True))
- op.create_index('accesstoken_code', 'accesstoken', ['code'], unique=True)
- # ### end Alembic commands ###
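
The data pass in the upgrade above repeatedly fetches one RobotAccountToken that is not yet fully_migrated, scrubs the robot's plaintext credentials, and flags the row, looping until the query raises DoesNotExist. The same scrub-until-done loop as a generic sketch, with a hypothetical peewee model standing in for the real tables:

# Generic sketch of the scrub-until-done loop above (hypothetical 'Credential' model).
import uuid

from peewee import BooleanField, CharField, Model, SqliteDatabase

db = SqliteDatabase(':memory:')


class Credential(Model):
    secret = CharField()
    fully_migrated = BooleanField(default=False)

    class Meta:
        database = db


def scrub_all():
    while True:
        try:
            # Grab any one row that still needs work.
            credential = Credential.get(fully_migrated=False)
        except Credential.DoesNotExist:
            break

        # Overwrite the sensitive value, then flag the row so it is never picked again.
        credential.secret = str(uuid.uuid4())
        credential.fully_migrated = True
        credential.save()
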
diff --git a/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py b/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py
deleted file mode 100644
index 15ecabd00..000000000
--- a/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""Add new fields and tables for encrypted tokens
-
-Revision ID: c13c8052f7a6
-Revises: 5248ddf35167
-Create Date: 2019-08-19 15:59:36.269155
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c13c8052f7a6'
-down_revision = '5248ddf35167'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('robotaccounttoken',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('robot_account_id', sa.Integer(), nullable=False),
- sa.Column('token', sa.String(length=255), nullable=False),
- sa.Column('fully_migrated', sa.Boolean(), nullable=False, server_default='0'),
- sa.ForeignKeyConstraint(['robot_account_id'], ['user.id'], name=op.f('fk_robotaccounttoken_robot_account_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_robotaccounttoken'))
- )
- op.create_index('robotaccounttoken_robot_account_id', 'robotaccounttoken', ['robot_account_id'], unique=True)
-
- op.add_column(u'accesstoken', sa.Column('token_code', sa.String(length=255), nullable=True))
- op.add_column(u'accesstoken', sa.Column('token_name', sa.String(length=255), nullable=True))
- op.create_index('accesstoken_token_name', 'accesstoken', ['token_name'], unique=True)
-
- op.add_column(u'appspecificauthtoken', sa.Column('token_name', sa.String(length=255), nullable=True))
- op.add_column(u'appspecificauthtoken', sa.Column('token_secret', sa.String(length=255), nullable=True))
- op.create_index('appspecificauthtoken_token_name', 'appspecificauthtoken', ['token_name'], unique=True)
-
- op.add_column(u'emailconfirmation', sa.Column('verification_code', sa.String(length=255), nullable=True))
-
- op.add_column(u'oauthaccesstoken', sa.Column('token_code', sa.String(length=255), nullable=True))
- op.add_column(u'oauthaccesstoken', sa.Column('token_name', sa.String(length=255), nullable=True))
- op.create_index('oauthaccesstoken_token_name', 'oauthaccesstoken', ['token_name'], unique=True)
-
- op.add_column(u'oauthapplication', sa.Column('secure_client_secret', sa.String(length=255), nullable=True))
- op.add_column(u'oauthapplication', sa.Column('fully_migrated', sa.Boolean(), server_default='0', nullable=False))
-
- op.add_column(u'oauthauthorizationcode', sa.Column('code_credential', sa.String(length=255), nullable=True))
- op.add_column(u'oauthauthorizationcode', sa.Column('code_name', sa.String(length=255), nullable=True))
- op.create_index('oauthauthorizationcode_code_name', 'oauthauthorizationcode', ['code_name'], unique=True)
- op.drop_index('oauthauthorizationcode_code', table_name='oauthauthorizationcode')
- op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=True)
-
- op.add_column(u'repositorybuildtrigger', sa.Column('secure_auth_token', sa.String(length=255), nullable=True))
- op.add_column(u'repositorybuildtrigger', sa.Column('secure_private_key', sa.Text(), nullable=True))
- op.add_column(u'repositorybuildtrigger', sa.Column('fully_migrated', sa.Boolean(), server_default='0', nullable=False))
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_table('robotaccounttoken', [
- ('robot_account_id', tester.TestDataType.Foreign('user')),
- ('token', tester.TestDataType.Token),
- ('fully_migrated', tester.TestDataType.Boolean),
- ])
-
- tester.populate_column('accesstoken', 'code', tester.TestDataType.Token)
-
- tester.populate_column('appspecificauthtoken', 'token_code', tester.TestDataType.Token)
-
- tester.populate_column('emailconfirmation', 'verification_code', tester.TestDataType.Token)
-
- tester.populate_column('oauthaccesstoken', 'token_code', tester.TestDataType.Token)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_column(u'repositorybuildtrigger', 'secure_private_key')
- op.drop_column(u'repositorybuildtrigger', 'secure_auth_token')
-
- op.drop_index('oauthauthorizationcode_code', table_name='oauthauthorizationcode')
- op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=False)
- op.drop_index('oauthauthorizationcode_code_name', table_name='oauthauthorizationcode')
- op.drop_column(u'oauthauthorizationcode', 'code_name')
- op.drop_column(u'oauthauthorizationcode', 'code_credential')
-
- op.drop_column(u'oauthapplication', 'secure_client_secret')
-
- op.drop_index('oauthaccesstoken_token_name', table_name='oauthaccesstoken')
- op.drop_column(u'oauthaccesstoken', 'token_name')
- op.drop_column(u'oauthaccesstoken', 'token_code')
-
- op.drop_column(u'emailconfirmation', 'verification_code')
-
- op.drop_index('appspecificauthtoken_token_name', table_name='appspecificauthtoken')
- op.drop_column(u'appspecificauthtoken', 'token_secret')
- op.drop_column(u'appspecificauthtoken', 'token_name')
-
- op.drop_index('accesstoken_token_name', table_name='accesstoken')
- op.drop_column(u'accesstoken', 'token_name')
- op.drop_column(u'accesstoken', 'token_code')
-
- op.drop_table('robotaccounttoken')
- # ### end Alembic commands ###
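
The c13c8052f7a6 revision above follows an expand/backfill shape: the new credential columns (`token_name`, `token_code`, `secure_client_secret`, and so on) are added as nullable alongside the legacy plaintext fields, a `fully_migrated` flag records which rows the application has rewritten, and only a later revision drops the old columns. The following is a minimal standalone Alembic sketch of that pattern, not part of the deleted files; the `example_token` table, its columns, and the revision identifiers are hypothetical names used purely for illustration.

# Hypothetical revision identifiers for this illustration only.
revision = 'example0001'
down_revision = None

from alembic import op
import sqlalchemy as sa


def upgrade():
    # Expand: add the new credential columns as nullable so existing rows remain valid.
    op.add_column('example_token', sa.Column('token_name', sa.String(length=255), nullable=True))
    op.add_column('example_token', sa.Column('token_secret', sa.String(length=255), nullable=True))
    op.create_index('example_token_token_name', 'example_token', ['token_name'], unique=True)

    # Track which rows the application-side backfill has rewritten into the new columns.
    op.add_column('example_token',
                  sa.Column('fully_migrated', sa.Boolean(), nullable=False, server_default='0'))


def downgrade():
    # Contract in reverse order.
    op.drop_column('example_token', 'fully_migrated')
    op.drop_index('example_token_token_name', table_name='example_token')
    op.drop_column('example_token', 'token_secret')
    op.drop_column('example_token', 'token_name')

The deleted revisions differ from this sketch mainly in plumbing: they wrap `op` in ProgressWrapper for progress reporting and seed test data through the `tester` helper, as seen in the hunks above.
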
diff --git a/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py b/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py
deleted file mode 100644
index 3277f5ae6..000000000
--- a/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py
+++ /dev/null
@@ -1,1254 +0,0 @@
-"""Reset our migrations with a required update
-
-Revision ID: c156deb8845d
-Revises: None
-Create Date: 2016-11-08 11:58:11.110762
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c156deb8845d'
-down_revision = None
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from util.migrate import UTF8LongText, UTF8CharField
-from datetime import datetime
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- now = datetime.now().strftime("'%Y-%m-%d %H:%M:%S'")
-
- op.create_table('accesstokenkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_accesstokenkind'))
- )
- op.create_index('accesstokenkind_name', 'accesstokenkind', ['name'], unique=True)
- op.create_table('buildtriggerservice',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_buildtriggerservice'))
- )
- op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=True)
- op.create_table('externalnotificationevent',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_externalnotificationevent'))
- )
- op.create_index('externalnotificationevent_name', 'externalnotificationevent', ['name'], unique=True)
- op.create_table('externalnotificationmethod',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_externalnotificationmethod'))
- )
- op.create_index('externalnotificationmethod_name', 'externalnotificationmethod', ['name'], unique=True)
- op.create_table('imagestorage',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('checksum', sa.String(length=255), nullable=True),
- sa.Column('image_size', sa.BigInteger(), nullable=True),
- sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
- sa.Column('uploading', sa.Boolean(), nullable=True),
- sa.Column('cas_path', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.Column('content_checksum', sa.String(length=255), nullable=True),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestorage'))
- )
- op.create_index('imagestorage_content_checksum', 'imagestorage', ['content_checksum'], unique=False)
- op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=True)
- op.create_table('imagestoragelocation',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragelocation'))
- )
- op.create_index('imagestoragelocation_name', 'imagestoragelocation', ['name'], unique=True)
- op.create_table('imagestoragesignaturekind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignaturekind'))
- )
- op.create_index('imagestoragesignaturekind_name', 'imagestoragesignaturekind', ['name'], unique=True)
- op.create_table('imagestoragetransformation',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragetransformation'))
- )
- op.create_index('imagestoragetransformation_name', 'imagestoragetransformation', ['name'], unique=True)
- op.create_table('labelsourcetype',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('mutable', sa.Boolean(), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_labelsourcetype'))
- )
- op.create_index('labelsourcetype_name', 'labelsourcetype', ['name'], unique=True)
- op.create_table('logentrykind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_logentrykind'))
- )
- op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=True)
- op.create_table('loginservice',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_loginservice'))
- )
- op.create_index('loginservice_name', 'loginservice', ['name'], unique=True)
- op.create_table('mediatype',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_mediatype'))
- )
- op.create_index('mediatype_name', 'mediatype', ['name'], unique=True)
- op.create_table('messages',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('content', sa.Text(), nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=True),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_messages'))
- )
- op.create_table('notificationkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_notificationkind'))
- )
- op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=True)
- op.create_table('quayregion',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_quayregion'))
- )
- op.create_index('quayregion_name', 'quayregion', ['name'], unique=True)
- op.create_table('quayservice',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_quayservice'))
- )
- op.create_index('quayservice_name', 'quayservice', ['name'], unique=True)
- op.create_table('queueitem',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('queue_name', sa.String(length=1024), nullable=False),
- sa.Column('body', sa.Text(), nullable=False),
- sa.Column('available_after', sa.DateTime(), nullable=False),
- sa.Column('available', sa.Boolean(), nullable=False),
- sa.Column('processing_expires', sa.DateTime(), nullable=True),
- sa.Column('retries_remaining', sa.Integer(), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_queueitem'))
- )
- op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
- op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
- op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
- op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False, mysql_length=767)
- op.create_index('queueitem_retries_remaining', 'queueitem', ['retries_remaining'], unique=False)
- op.create_table('role',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_role'))
- )
- op.create_index('role_name', 'role', ['name'], unique=True)
- op.create_table('servicekeyapproval',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('approver_id', sa.Integer(), nullable=True),
- sa.Column('approval_type', sa.String(length=255), nullable=False),
- sa.Column('approved_date', sa.DateTime(), nullable=False),
- sa.Column('notes', UTF8LongText(), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_servicekeyapproval'))
- )
- op.create_index('servicekeyapproval_approval_type', 'servicekeyapproval', ['approval_type'], unique=False)
- op.create_index('servicekeyapproval_approver_id', 'servicekeyapproval', ['approver_id'], unique=False)
- op.create_table('teamrole',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_teamrole'))
- )
- op.create_index('teamrole_name', 'teamrole', ['name'], unique=False)
- op.create_table('user',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=True),
- sa.Column('username', sa.String(length=255), nullable=False),
- sa.Column('password_hash', sa.String(length=255), nullable=True),
- sa.Column('email', sa.String(length=255), nullable=False),
- sa.Column('verified', sa.Boolean(), nullable=False),
- sa.Column('stripe_id', sa.String(length=255), nullable=True),
- sa.Column('organization', sa.Boolean(), nullable=False),
- sa.Column('robot', sa.Boolean(), nullable=False),
- sa.Column('invoice_email', sa.Boolean(), nullable=False),
- sa.Column('invalid_login_attempts', sa.Integer(), nullable=False, server_default='0'),
- sa.Column('last_invalid_login', sa.DateTime(), nullable=False),
- sa.Column('removed_tag_expiration_s', sa.Integer(), nullable=False, server_default='1209600'),
- sa.Column('enabled', sa.Boolean(), nullable=False, server_default=sa.sql.expression.true()),
- sa.Column('invoice_email_address', sa.String(length=255), nullable=True),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_user'))
- )
- op.create_index('user_email', 'user', ['email'], unique=True)
- op.create_index('user_invoice_email_address', 'user', ['invoice_email_address'], unique=False)
- op.create_index('user_organization', 'user', ['organization'], unique=False)
- op.create_index('user_robot', 'user', ['robot'], unique=False)
- op.create_index('user_stripe_id', 'user', ['stripe_id'], unique=False)
- op.create_index('user_username', 'user', ['username'], unique=True)
- op.create_table('visibility',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_visibility'))
- )
- op.create_index('visibility_name', 'visibility', ['name'], unique=True)
- op.create_table('emailconfirmation',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('code', sa.String(length=255), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('pw_reset', sa.Boolean(), nullable=False),
- sa.Column('new_email', sa.String(length=255), nullable=True),
- sa.Column('email_confirm', sa.Boolean(), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_emailconfirmation_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_emailconfirmation'))
- )
- op.create_index('emailconfirmation_code', 'emailconfirmation', ['code'], unique=True)
- op.create_index('emailconfirmation_user_id', 'emailconfirmation', ['user_id'], unique=False)
- op.create_table('federatedlogin',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('service_id', sa.Integer(), nullable=False),
- sa.Column('service_ident', sa.String(length=255), nullable=False),
- sa.Column('metadata_json', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['service_id'], ['loginservice.id'], name=op.f('fk_federatedlogin_service_id_loginservice')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_federatedlogin_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_federatedlogin'))
- )
- op.create_index('federatedlogin_service_id', 'federatedlogin', ['service_id'], unique=False)
- op.create_index('federatedlogin_service_id_service_ident', 'federatedlogin', ['service_id', 'service_ident'], unique=True)
- op.create_index('federatedlogin_service_id_user_id', 'federatedlogin', ['service_id', 'user_id'], unique=True)
- op.create_index('federatedlogin_user_id', 'federatedlogin', ['user_id'], unique=False)
- op.create_table('imagestorageplacement',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('storage_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_imagestorageplacement_location_id_imagestoragelocation')),
- sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestorageplacement_storage_id_imagestorage')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestorageplacement'))
- )
- op.create_index('imagestorageplacement_location_id', 'imagestorageplacement', ['location_id'], unique=False)
- op.create_index('imagestorageplacement_storage_id', 'imagestorageplacement', ['storage_id'], unique=False)
- op.create_index('imagestorageplacement_storage_id_location_id', 'imagestorageplacement', ['storage_id', 'location_id'], unique=True)
- op.create_table('imagestoragesignature',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('storage_id', sa.Integer(), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=False),
- sa.Column('signature', sa.Text(), nullable=True),
- sa.Column('uploading', sa.Boolean(), nullable=True),
- sa.ForeignKeyConstraint(['kind_id'], ['imagestoragesignaturekind.id'], name=op.f('fk_imagestoragesignature_kind_id_imagestoragesignaturekind')),
- sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestoragesignature_storage_id_imagestorage')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignature'))
- )
- op.create_index('imagestoragesignature_kind_id', 'imagestoragesignature', ['kind_id'], unique=False)
- op.create_index('imagestoragesignature_kind_id_storage_id', 'imagestoragesignature', ['kind_id', 'storage_id'], unique=True)
- op.create_index('imagestoragesignature_storage_id', 'imagestoragesignature', ['storage_id'], unique=False)
- op.create_table('label',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('key', UTF8CharField(length=255), nullable=False),
- sa.Column('value', UTF8LongText(), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('source_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_label_media_type_id_mediatype')),
- sa.ForeignKeyConstraint(['source_type_id'], ['labelsourcetype.id'], name=op.f('fk_label_source_type_id_labelsourcetype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_label'))
- )
- op.create_index('label_key', 'label', ['key'], unique=False)
- op.create_index('label_media_type_id', 'label', ['media_type_id'], unique=False)
- op.create_index('label_source_type_id', 'label', ['source_type_id'], unique=False)
- op.create_index('label_uuid', 'label', ['uuid'], unique=True)
- op.create_table('logentry',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=False),
- sa.Column('account_id', sa.Integer(), nullable=False),
- sa.Column('performer_id', sa.Integer(), nullable=True),
- sa.Column('repository_id', sa.Integer(), nullable=True),
- sa.Column('datetime', sa.DateTime(), nullable=False),
- sa.Column('ip', sa.String(length=255), nullable=True),
- sa.Column('metadata_json', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['kind_id'], ['logentrykind.id'], name=op.f('fk_logentry_kind_id_logentrykind')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry'))
- )
- op.create_index('logentry_account_id', 'logentry', ['account_id'], unique=False)
- op.create_index('logentry_account_id_datetime', 'logentry', ['account_id', 'datetime'], unique=False)
- op.create_index('logentry_datetime', 'logentry', ['datetime'], unique=False)
- op.create_index('logentry_kind_id', 'logentry', ['kind_id'], unique=False)
- op.create_index('logentry_performer_id', 'logentry', ['performer_id'], unique=False)
- op.create_index('logentry_performer_id_datetime', 'logentry', ['performer_id', 'datetime'], unique=False)
- op.create_index('logentry_repository_id', 'logentry', ['repository_id'], unique=False)
- op.create_index('logentry_repository_id_datetime', 'logentry', ['repository_id', 'datetime'], unique=False)
- op.create_index('logentry_repository_id_datetime_kind_id', 'logentry', ['repository_id', 'datetime', 'kind_id'], unique=False)
- op.create_table('notification',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=False),
- sa.Column('target_id', sa.Integer(), nullable=False),
- sa.Column('metadata_json', sa.Text(), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.Column('dismissed', sa.Boolean(), nullable=False),
- sa.Column('lookup_path', sa.String(length=255), nullable=True),
- sa.ForeignKeyConstraint(['kind_id'], ['notificationkind.id'], name=op.f('fk_notification_kind_id_notificationkind')),
- sa.ForeignKeyConstraint(['target_id'], ['user.id'], name=op.f('fk_notification_target_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_notification'))
- )
- op.create_index('notification_created', 'notification', ['created'], unique=False)
- op.create_index('notification_kind_id', 'notification', ['kind_id'], unique=False)
- op.create_index('notification_lookup_path', 'notification', ['lookup_path'], unique=False)
- op.create_index('notification_target_id', 'notification', ['target_id'], unique=False)
- op.create_index('notification_uuid', 'notification', ['uuid'], unique=False)
- op.create_table('oauthapplication',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('client_id', sa.String(length=255), nullable=False),
- sa.Column('client_secret', sa.String(length=255), nullable=False),
- sa.Column('redirect_uri', sa.String(length=255), nullable=False),
- sa.Column('application_uri', sa.String(length=255), nullable=False),
- sa.Column('organization_id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('description', sa.Text(), nullable=False),
- sa.Column('gravatar_email', sa.String(length=255), nullable=True),
- sa.ForeignKeyConstraint(['organization_id'], ['user.id'], name=op.f('fk_oauthapplication_organization_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_oauthapplication'))
- )
- op.create_index('oauthapplication_client_id', 'oauthapplication', ['client_id'], unique=False)
- op.create_index('oauthapplication_organization_id', 'oauthapplication', ['organization_id'], unique=False)
- op.create_table('quayrelease',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('service_id', sa.Integer(), nullable=False),
- sa.Column('version', sa.String(length=255), nullable=False),
- sa.Column('region_id', sa.Integer(), nullable=False),
- sa.Column('reverted', sa.Boolean(), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['region_id'], ['quayregion.id'], name=op.f('fk_quayrelease_region_id_quayregion')),
- sa.ForeignKeyConstraint(['service_id'], ['quayservice.id'], name=op.f('fk_quayrelease_service_id_quayservice')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_quayrelease'))
- )
- op.create_index('quayrelease_created', 'quayrelease', ['created'], unique=False)
- op.create_index('quayrelease_region_id', 'quayrelease', ['region_id'], unique=False)
- op.create_index('quayrelease_service_id', 'quayrelease', ['service_id'], unique=False)
- op.create_index('quayrelease_service_id_region_id_created', 'quayrelease', ['service_id', 'region_id', 'created'], unique=False)
- op.create_index('quayrelease_service_id_version_region_id', 'quayrelease', ['service_id', 'version', 'region_id'], unique=True)
- op.create_table('repository',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('namespace_user_id', sa.Integer(), nullable=True),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('visibility_id', sa.Integer(), nullable=False),
- sa.Column('description', sa.Text(), nullable=True),
- sa.Column('badge_token', sa.String(length=255), nullable=False),
- sa.ForeignKeyConstraint(['namespace_user_id'], ['user.id'], name=op.f('fk_repository_namespace_user_id_user')),
- sa.ForeignKeyConstraint(['visibility_id'], ['visibility.id'], name=op.f('fk_repository_visibility_id_visibility')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repository'))
- )
- op.create_index('repository_namespace_user_id', 'repository', ['namespace_user_id'], unique=False)
- op.create_index('repository_namespace_user_id_name', 'repository', ['namespace_user_id', 'name'], unique=True)
- op.create_index('repository_visibility_id', 'repository', ['visibility_id'], unique=False)
- op.create_table('servicekey',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('kid', sa.String(length=255), nullable=False),
- sa.Column('service', sa.String(length=255), nullable=False),
- sa.Column('jwk', UTF8LongText(), nullable=False),
- sa.Column('metadata', UTF8LongText(), nullable=False),
- sa.Column('created_date', sa.DateTime(), nullable=False),
- sa.Column('expiration_date', sa.DateTime(), nullable=True),
- sa.Column('rotation_duration', sa.Integer(), nullable=True),
- sa.Column('approval_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['approval_id'], ['servicekeyapproval.id'], name=op.f('fk_servicekey_approval_id_servicekeyapproval')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_servicekey'))
- )
- op.create_index('servicekey_approval_id', 'servicekey', ['approval_id'], unique=False)
- op.create_index('servicekey_kid', 'servicekey', ['kid'], unique=True)
- op.create_index('servicekey_service', 'servicekey', ['service'], unique=False)
- op.create_table('team',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('organization_id', sa.Integer(), nullable=False),
- sa.Column('role_id', sa.Integer(), nullable=False),
- sa.Column('description', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['organization_id'], ['user.id'], name=op.f('fk_team_organization_id_user')),
- sa.ForeignKeyConstraint(['role_id'], ['teamrole.id'], name=op.f('fk_team_role_id_teamrole')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_team'))
- )
- op.create_index('team_name', 'team', ['name'], unique=False)
- op.create_index('team_name_organization_id', 'team', ['name', 'organization_id'], unique=True)
- op.create_index('team_organization_id', 'team', ['organization_id'], unique=False)
- op.create_index('team_role_id', 'team', ['role_id'], unique=False)
- op.create_table('torrentinfo',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('storage_id', sa.Integer(), nullable=False),
- sa.Column('piece_length', sa.Integer(), nullable=False),
- sa.Column('pieces', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_torrentinfo_storage_id_imagestorage')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_torrentinfo'))
- )
- op.create_index('torrentinfo_storage_id', 'torrentinfo', ['storage_id'], unique=False)
- op.create_index('torrentinfo_storage_id_piece_length', 'torrentinfo', ['storage_id', 'piece_length'], unique=True)
- op.create_table('userregion',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_userregion_location_id_imagestoragelocation')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_userregion_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_userregion'))
- )
- op.create_index('userregion_location_id', 'userregion', ['location_id'], unique=False)
- op.create_index('userregion_user_id', 'userregion', ['user_id'], unique=False)
- op.create_table('accesstoken',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('friendly_name', sa.String(length=255), nullable=True),
- sa.Column('code', sa.String(length=255), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.Column('role_id', sa.Integer(), nullable=False),
- sa.Column('temporary', sa.Boolean(), nullable=False),
- sa.Column('kind_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['kind_id'], ['accesstokenkind.id'], name=op.f('fk_accesstoken_kind_id_accesstokenkind')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_accesstoken_repository_id_repository')),
- sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_accesstoken_role_id_role')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_accesstoken'))
- )
- op.create_index('accesstoken_code', 'accesstoken', ['code'], unique=True)
- op.create_index('accesstoken_kind_id', 'accesstoken', ['kind_id'], unique=False)
- op.create_index('accesstoken_repository_id', 'accesstoken', ['repository_id'], unique=False)
- op.create_index('accesstoken_role_id', 'accesstoken', ['role_id'], unique=False)
- op.create_table('blobupload',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('byte_count', sa.Integer(), nullable=False),
- sa.Column('sha_state', sa.Text(), nullable=True),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.Column('storage_metadata', sa.Text(), nullable=True),
- sa.Column('chunk_count', sa.Integer(), nullable=False, server_default='0'),
- sa.Column('uncompressed_byte_count', sa.Integer(), nullable=True),
- sa.Column('created', sa.DateTime(), nullable=False, server_default=sa.text(now)),
- sa.Column('piece_sha_state', UTF8LongText(), nullable=True),
- sa.Column('piece_hashes', UTF8LongText(), nullable=True),
- sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_blobupload_location_id_imagestoragelocation')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobupload_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobupload'))
- )
- op.create_index('blobupload_created', 'blobupload', ['created'], unique=False)
- op.create_index('blobupload_location_id', 'blobupload', ['location_id'], unique=False)
- op.create_index('blobupload_repository_id', 'blobupload', ['repository_id'], unique=False)
- op.create_index('blobupload_repository_id_uuid', 'blobupload', ['repository_id', 'uuid'], unique=True)
- op.create_index('blobupload_uuid', 'blobupload', ['uuid'], unique=True)
- op.create_table('image',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('docker_image_id', sa.String(length=255), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('ancestors', sa.String(length=60535), nullable=True),
- sa.Column('storage_id', sa.Integer(), nullable=True),
- sa.Column('created', sa.DateTime(), nullable=True),
- sa.Column('comment', UTF8LongText(), nullable=True),
- sa.Column('command', sa.Text(), nullable=True),
- sa.Column('aggregate_size', sa.BigInteger(), nullable=True),
- sa.Column('v1_json_metadata', UTF8LongText(), nullable=True),
- sa.Column('v1_checksum', sa.String(length=255), nullable=True),
- sa.Column('security_indexed', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.Column('security_indexed_engine', sa.Integer(), nullable=False, server_default='-1'),
- sa.Column('parent_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_image_repository_id_repository')),
- sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_image_storage_id_imagestorage')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_image'))
- )
- op.create_index('image_ancestors', 'image', ['ancestors'], unique=False, mysql_length=767)
- op.create_index('image_docker_image_id', 'image', ['docker_image_id'], unique=False)
- op.create_index('image_parent_id', 'image', ['parent_id'], unique=False)
- op.create_index('image_repository_id', 'image', ['repository_id'], unique=False)
- op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=True)
- op.create_index('image_security_indexed', 'image', ['security_indexed'], unique=False)
- op.create_index('image_security_indexed_engine', 'image', ['security_indexed_engine'], unique=False)
- op.create_index('image_security_indexed_engine_security_indexed', 'image', ['security_indexed_engine', 'security_indexed'], unique=False)
- op.create_index('image_storage_id', 'image', ['storage_id'], unique=False)
- op.create_table('oauthaccesstoken',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('application_id', sa.Integer(), nullable=False),
- sa.Column('authorized_user_id', sa.Integer(), nullable=False),
- sa.Column('scope', sa.String(length=255), nullable=False),
- sa.Column('access_token', sa.String(length=255), nullable=False),
- sa.Column('token_type', sa.String(length=255), nullable=False),
- sa.Column('expires_at', sa.DateTime(), nullable=False),
- sa.Column('refresh_token', sa.String(length=255), nullable=True),
- sa.Column('data', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['application_id'], ['oauthapplication.id'], name=op.f('fk_oauthaccesstoken_application_id_oauthapplication')),
- sa.ForeignKeyConstraint(['authorized_user_id'], ['user.id'], name=op.f('fk_oauthaccesstoken_authorized_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_oauthaccesstoken'))
- )
- op.create_index('oauthaccesstoken_access_token', 'oauthaccesstoken', ['access_token'], unique=False)
- op.create_index('oauthaccesstoken_application_id', 'oauthaccesstoken', ['application_id'], unique=False)
- op.create_index('oauthaccesstoken_authorized_user_id', 'oauthaccesstoken', ['authorized_user_id'], unique=False)
- op.create_index('oauthaccesstoken_refresh_token', 'oauthaccesstoken', ['refresh_token'], unique=False)
- op.create_index('oauthaccesstoken_uuid', 'oauthaccesstoken', ['uuid'], unique=False)
- op.create_table('oauthauthorizationcode',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('application_id', sa.Integer(), nullable=False),
- sa.Column('code', sa.String(length=255), nullable=False),
- sa.Column('scope', sa.String(length=255), nullable=False),
- sa.Column('data', sa.Text(), nullable=False),
- sa.ForeignKeyConstraint(['application_id'], ['oauthapplication.id'], name=op.f('fk_oauthauthorizationcode_application_id_oauthapplication')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_oauthauthorizationcode'))
- )
- op.create_index('oauthauthorizationcode_application_id', 'oauthauthorizationcode', ['application_id'], unique=False)
- op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=False)
- op.create_table('permissionprototype',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('org_id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('activating_user_id', sa.Integer(), nullable=True),
- sa.Column('delegate_user_id', sa.Integer(), nullable=True),
- sa.Column('delegate_team_id', sa.Integer(), nullable=True),
- sa.Column('role_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['activating_user_id'], ['user.id'], name=op.f('fk_permissionprototype_activating_user_id_user')),
- sa.ForeignKeyConstraint(['delegate_team_id'], ['team.id'], name=op.f('fk_permissionprototype_delegate_team_id_team')),
- sa.ForeignKeyConstraint(['delegate_user_id'], ['user.id'], name=op.f('fk_permissionprototype_delegate_user_id_user')),
- sa.ForeignKeyConstraint(['org_id'], ['user.id'], name=op.f('fk_permissionprototype_org_id_user')),
- sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_permissionprototype_role_id_role')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_permissionprototype'))
- )
- op.create_index('permissionprototype_activating_user_id', 'permissionprototype', ['activating_user_id'], unique=False)
- op.create_index('permissionprototype_delegate_team_id', 'permissionprototype', ['delegate_team_id'], unique=False)
- op.create_index('permissionprototype_delegate_user_id', 'permissionprototype', ['delegate_user_id'], unique=False)
- op.create_index('permissionprototype_org_id', 'permissionprototype', ['org_id'], unique=False)
- op.create_index('permissionprototype_org_id_activating_user_id', 'permissionprototype', ['org_id', 'activating_user_id'], unique=False)
- op.create_index('permissionprototype_role_id', 'permissionprototype', ['role_id'], unique=False)
- op.create_table('repositoryactioncount',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('count', sa.Integer(), nullable=False),
- sa.Column('date', sa.Date(), nullable=False),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositoryactioncount_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositoryactioncount'))
- )
- op.create_index('repositoryactioncount_date', 'repositoryactioncount', ['date'], unique=False)
- op.create_index('repositoryactioncount_repository_id', 'repositoryactioncount', ['repository_id'], unique=False)
- op.create_index('repositoryactioncount_repository_id_date', 'repositoryactioncount', ['repository_id', 'date'], unique=True)
- op.create_table('repositoryauthorizedemail',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('email', sa.String(length=255), nullable=False),
- sa.Column('code', sa.String(length=255), nullable=False),
- sa.Column('confirmed', sa.Boolean(), nullable=False),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositoryauthorizedemail_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositoryauthorizedemail'))
- )
- op.create_index('repositoryauthorizedemail_code', 'repositoryauthorizedemail', ['code'], unique=True)
- op.create_index('repositoryauthorizedemail_email_repository_id', 'repositoryauthorizedemail', ['email', 'repository_id'], unique=True)
- op.create_index('repositoryauthorizedemail_repository_id', 'repositoryauthorizedemail', ['repository_id'], unique=False)
- op.create_table('repositorynotification',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('event_id', sa.Integer(), nullable=False),
- sa.Column('method_id', sa.Integer(), nullable=False),
- sa.Column('title', sa.String(length=255), nullable=True),
- sa.Column('config_json', sa.Text(), nullable=False),
- sa.Column('event_config_json', UTF8LongText(), nullable=False),
- sa.ForeignKeyConstraint(['event_id'], ['externalnotificationevent.id'], name=op.f('fk_repositorynotification_event_id_externalnotificationevent')),
- sa.ForeignKeyConstraint(['method_id'], ['externalnotificationmethod.id'], name=op.f('fk_repositorynotification_method_id_externalnotificationmethod')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorynotification_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorynotification'))
- )
- op.create_index('repositorynotification_event_id', 'repositorynotification', ['event_id'], unique=False)
- op.create_index('repositorynotification_method_id', 'repositorynotification', ['method_id'], unique=False)
- op.create_index('repositorynotification_repository_id', 'repositorynotification', ['repository_id'], unique=False)
- op.create_index('repositorynotification_uuid', 'repositorynotification', ['uuid'], unique=False)
- op.create_table('repositorypermission',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('team_id', sa.Integer(), nullable=True),
- sa.Column('user_id', sa.Integer(), nullable=True),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('role_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorypermission_repository_id_repository')),
- sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_repositorypermission_role_id_role')),
- sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_repositorypermission_team_id_team')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_repositorypermission_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorypermission'))
- )
- op.create_index('repositorypermission_repository_id', 'repositorypermission', ['repository_id'], unique=False)
- op.create_index('repositorypermission_role_id', 'repositorypermission', ['role_id'], unique=False)
- op.create_index('repositorypermission_team_id', 'repositorypermission', ['team_id'], unique=False)
- op.create_index('repositorypermission_team_id_repository_id', 'repositorypermission', ['team_id', 'repository_id'], unique=True)
- op.create_index('repositorypermission_user_id', 'repositorypermission', ['user_id'], unique=False)
- op.create_index('repositorypermission_user_id_repository_id', 'repositorypermission', ['user_id', 'repository_id'], unique=True)
- op.create_table('star',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('created', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_star_repository_id_repository')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_star_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_star'))
- )
- op.create_index('star_repository_id', 'star', ['repository_id'], unique=False)
- op.create_index('star_user_id', 'star', ['user_id'], unique=False)
- op.create_index('star_user_id_repository_id', 'star', ['user_id', 'repository_id'], unique=True)
- op.create_table('teammember',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('team_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_teammember_team_id_team')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_teammember_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_teammember'))
- )
- op.create_index('teammember_team_id', 'teammember', ['team_id'], unique=False)
- op.create_index('teammember_user_id', 'teammember', ['user_id'], unique=False)
- op.create_index('teammember_user_id_team_id', 'teammember', ['user_id', 'team_id'], unique=True)
- op.create_table('teammemberinvite',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=True),
- sa.Column('email', sa.String(length=255), nullable=True),
- sa.Column('team_id', sa.Integer(), nullable=False),
- sa.Column('inviter_id', sa.Integer(), nullable=False),
- sa.Column('invite_token', sa.String(length=255), nullable=False),
- sa.ForeignKeyConstraint(['inviter_id'], ['user.id'], name=op.f('fk_teammemberinvite_inviter_id_user')),
- sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_teammemberinvite_team_id_team')),
- sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_teammemberinvite_user_id_user')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_teammemberinvite'))
- )
- op.create_index('teammemberinvite_inviter_id', 'teammemberinvite', ['inviter_id'], unique=False)
- op.create_index('teammemberinvite_team_id', 'teammemberinvite', ['team_id'], unique=False)
- op.create_index('teammemberinvite_user_id', 'teammemberinvite', ['user_id'], unique=False)
- op.create_table('derivedstorageforimage',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('source_image_id', sa.Integer(), nullable=False),
- sa.Column('derivative_id', sa.Integer(), nullable=False),
- sa.Column('transformation_id', sa.Integer(), nullable=False),
- sa.Column('uniqueness_hash', sa.String(length=255), nullable=True),
- sa.ForeignKeyConstraint(['derivative_id'], ['imagestorage.id'], name=op.f('fk_derivedstorageforimage_derivative_id_imagestorage')),
- sa.ForeignKeyConstraint(['source_image_id'], ['image.id'], name=op.f('fk_derivedstorageforimage_source_image_id_image')),
- sa.ForeignKeyConstraint(['transformation_id'], ['imagestoragetransformation.id'], name=op.f('fk_derivedstorageforimage_transformation_constraint')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedstorageforimage'))
- )
- op.create_index('derivedstorageforimage_derivative_id', 'derivedstorageforimage', ['derivative_id'], unique=False)
- op.create_index('derivedstorageforimage_source_image_id', 'derivedstorageforimage', ['source_image_id'], unique=False)
- op.create_index('uniqueness_index', 'derivedstorageforimage', ['source_image_id', 'transformation_id', 'uniqueness_hash'], unique=True)
- op.create_index('derivedstorageforimage_transformation_id', 'derivedstorageforimage', ['transformation_id'], unique=False)
- op.create_table('repositorybuildtrigger',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('service_id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('connected_user_id', sa.Integer(), nullable=False),
- sa.Column('auth_token', sa.String(length=255), nullable=True),
- sa.Column('private_key', sa.Text(), nullable=True),
- sa.Column('config', sa.Text(), nullable=False),
- sa.Column('write_token_id', sa.Integer(), nullable=True),
- sa.Column('pull_robot_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['connected_user_id'], ['user.id'], name=op.f('fk_repositorybuildtrigger_connected_user_id_user')),
- sa.ForeignKeyConstraint(['pull_robot_id'], ['user.id'], name=op.f('fk_repositorybuildtrigger_pull_robot_id_user')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorybuildtrigger_repository_id_repository')),
- sa.ForeignKeyConstraint(['service_id'], ['buildtriggerservice.id'], name=op.f('fk_repositorybuildtrigger_service_id_buildtriggerservice')),
- sa.ForeignKeyConstraint(['write_token_id'], ['accesstoken.id'], name=op.f('fk_repositorybuildtrigger_write_token_id_accesstoken')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorybuildtrigger'))
- )
- op.create_index('repositorybuildtrigger_connected_user_id', 'repositorybuildtrigger', ['connected_user_id'], unique=False)
- op.create_index('repositorybuildtrigger_pull_robot_id', 'repositorybuildtrigger', ['pull_robot_id'], unique=False)
- op.create_index('repositorybuildtrigger_repository_id', 'repositorybuildtrigger', ['repository_id'], unique=False)
- op.create_index('repositorybuildtrigger_service_id', 'repositorybuildtrigger', ['service_id'], unique=False)
- op.create_index('repositorybuildtrigger_write_token_id', 'repositorybuildtrigger', ['write_token_id'], unique=False)
- op.create_table('repositorytag',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('image_id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('lifetime_start_ts', sa.Integer(), nullable=False, server_default='0'),
- sa.Column('lifetime_end_ts', sa.Integer(), nullable=True),
- sa.Column('hidden', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.Column('reversion', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.ForeignKeyConstraint(['image_id'], ['image.id'], name=op.f('fk_repositorytag_image_id_image')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorytag_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorytag'))
- )
- op.create_index('repositorytag_image_id', 'repositorytag', ['image_id'], unique=False)
- op.create_index('repositorytag_lifetime_end_ts', 'repositorytag', ['lifetime_end_ts'], unique=False)
- op.create_index('repositorytag_repository_id', 'repositorytag', ['repository_id'], unique=False)
- op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=False)
- op.create_index('repositorytag_repository_id_name_lifetime_end_ts', 'repositorytag', ['repository_id', 'name', 'lifetime_end_ts'], unique=True)
- op.create_table('repositorybuild',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('uuid', sa.String(length=255), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('access_token_id', sa.Integer(), nullable=False),
- sa.Column('resource_key', sa.String(length=255), nullable=True),
- sa.Column('job_config', sa.Text(), nullable=False),
- sa.Column('phase', sa.String(length=255), nullable=False),
- sa.Column('started', sa.DateTime(), nullable=False),
- sa.Column('display_name', sa.String(length=255), nullable=False),
- sa.Column('trigger_id', sa.Integer(), nullable=True),
- sa.Column('pull_robot_id', sa.Integer(), nullable=True),
- sa.Column('logs_archived', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
- sa.Column('queue_id', sa.String(length=255), nullable=True),
- sa.ForeignKeyConstraint(['access_token_id'], ['accesstoken.id'], name=op.f('fk_repositorybuild_access_token_id_accesstoken')),
- sa.ForeignKeyConstraint(['pull_robot_id'], ['user.id'], name=op.f('fk_repositorybuild_pull_robot_id_user')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorybuild_repository_id_repository')),
- sa.ForeignKeyConstraint(['trigger_id'], ['repositorybuildtrigger.id'], name=op.f('fk_repositorybuild_trigger_id_repositorybuildtrigger')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorybuild'))
- )
- op.create_index('repositorybuild_access_token_id', 'repositorybuild', ['access_token_id'], unique=False)
- op.create_index('repositorybuild_pull_robot_id', 'repositorybuild', ['pull_robot_id'], unique=False)
- op.create_index('repositorybuild_queue_id', 'repositorybuild', ['queue_id'], unique=False)
- op.create_index('repositorybuild_repository_id', 'repositorybuild', ['repository_id'], unique=False)
- op.create_index('repositorybuild_repository_id_started_phase', 'repositorybuild', ['repository_id', 'started', 'phase'], unique=False)
- op.create_index('repositorybuild_resource_key', 'repositorybuild', ['resource_key'], unique=False)
- op.create_index('repositorybuild_started', 'repositorybuild', ['started'], unique=False)
- op.create_index('repositorybuild_started_logs_archived_phase', 'repositorybuild', ['started', 'logs_archived', 'phase'], unique=False)
- op.create_index('repositorybuild_trigger_id', 'repositorybuild', ['trigger_id'], unique=False)
- op.create_index('repositorybuild_uuid', 'repositorybuild', ['uuid'], unique=False)
- op.create_table('tagmanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('tag_id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('json_data', UTF8LongText(), nullable=False),
- sa.ForeignKeyConstraint(['tag_id'], ['repositorytag.id'], name=op.f('fk_tagmanifest_tag_id_repositorytag')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifest'))
- )
- op.create_index('tagmanifest_digest', 'tagmanifest', ['digest'], unique=False)
- op.create_index('tagmanifest_tag_id', 'tagmanifest', ['tag_id'], unique=True)
- op.create_table('tagmanifestlabel',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('annotated_id', sa.Integer(), nullable=False),
- sa.Column('label_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['annotated_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifestlabel_annotated_id_tagmanifest')),
- sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_tagmanifestlabel_label_id_label')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tagmanifestlabel_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifestlabel'))
- )
- op.create_index('tagmanifestlabel_annotated_id', 'tagmanifestlabel', ['annotated_id'], unique=False)
- op.create_index('tagmanifestlabel_annotated_id_label_id', 'tagmanifestlabel', ['annotated_id', 'label_id'], unique=True)
- op.create_index('tagmanifestlabel_label_id', 'tagmanifestlabel', ['label_id'], unique=False)
- op.create_index('tagmanifestlabel_repository_id', 'tagmanifestlabel', ['repository_id'], unique=False)
-
- op.bulk_insert(tables.accesstokenkind,
- [
- {'name':'build-worker'},
- {'name':'pushpull-token'},
- ])
-
- op.bulk_insert(tables.buildtriggerservice,
- [
- {'name':'github'},
- {'name':'gitlab'},
- {'name':'bitbucket'},
- {'name':'custom-git'},
- ])
-
- op.bulk_insert(tables.externalnotificationevent,
- [
- {'name':'build_failure'},
- {'name':'build_queued'},
- {'name':'build_start'},
- {'name':'build_success'},
- {'name':'repo_push'},
- {'name':'vulnerability_found'},
- ])
-
- op.bulk_insert(tables.externalnotificationmethod,
- [
- {'name':'email'},
- {'name':'flowdock'},
- {'name':'hipchat'},
- {'name':'quay_notification'},
- {'name':'slack'},
- {'name':'webhook'},
- ])
-
- op.bulk_insert(tables.imagestoragelocation,
- [
- {'name':'s3_us_east_1'},
- {'name':'s3_eu_west_1'},
- {'name':'s3_ap_southeast_1'},
- {'name':'s3_ap_southeast_2'},
- {'name':'s3_ap_northeast_1'},
- {'name':'s3_sa_east_1'},
- {'name':'local'},
- {'name':'s3_us_west_1'},
- ])
-
- op.bulk_insert(tables.imagestoragesignaturekind,
- [
- {'name':'gpg2'},
- ])
-
- op.bulk_insert(tables.imagestoragetransformation,
- [
- {'name':'squash'},
- {'name':'aci'},
- ])
-
- op.bulk_insert(tables.labelsourcetype,
- [
- {'name':'manifest', 'mutable': False},
- {'name':'api', 'mutable': True},
- {'name':'internal', 'mutable': False},
- ])
-
- op.bulk_insert(tables.logentrykind,
- [
- {'name':'account_change_cc'},
- {'name':'account_change_password'},
- {'name':'account_change_plan'},
- {'name':'account_convert'},
- {'name':'add_repo_accesstoken'},
- {'name':'add_repo_notification'},
- {'name':'add_repo_permission'},
- {'name':'add_repo_webhook'},
- {'name':'build_dockerfile'},
- {'name':'change_repo_permission'},
- {'name':'change_repo_visibility'},
- {'name':'create_application'},
- {'name':'create_prototype_permission'},
- {'name':'create_repo'},
- {'name':'create_robot'},
- {'name':'create_tag'},
- {'name':'delete_application'},
- {'name':'delete_prototype_permission'},
- {'name':'delete_repo'},
- {'name':'delete_repo_accesstoken'},
- {'name':'delete_repo_notification'},
- {'name':'delete_repo_permission'},
- {'name':'delete_repo_trigger'},
- {'name':'delete_repo_webhook'},
- {'name':'delete_robot'},
- {'name':'delete_tag'},
- {'name':'manifest_label_add'},
- {'name':'manifest_label_delete'},
- {'name':'modify_prototype_permission'},
- {'name':'move_tag'},
- {'name':'org_add_team_member'},
- {'name':'org_create_team'},
- {'name':'org_delete_team'},
- {'name':'org_delete_team_member_invite'},
- {'name':'org_invite_team_member'},
- {'name':'org_remove_team_member'},
- {'name':'org_set_team_description'},
- {'name':'org_set_team_role'},
- {'name':'org_team_member_invite_accepted'},
- {'name':'org_team_member_invite_declined'},
- {'name':'pull_repo'},
- {'name':'push_repo'},
- {'name':'regenerate_robot_token'},
- {'name':'repo_verb'},
- {'name':'reset_application_client_secret'},
- {'name':'revert_tag'},
- {'name':'service_key_approve'},
- {'name':'service_key_create'},
- {'name':'service_key_delete'},
- {'name':'service_key_extend'},
- {'name':'service_key_modify'},
- {'name':'service_key_rotate'},
- {'name':'setup_repo_trigger'},
- {'name':'set_repo_description'},
- {'name':'take_ownership'},
- {'name':'update_application'},
- ])
-
- op.bulk_insert(tables.loginservice,
- [
- {'name':'github'},
- {'name':'quayrobot'},
- {'name':'ldap'},
- {'name':'google'},
- {'name':'keystone'},
- {'name':'dex'},
- {'name':'jwtauthn'},
- ])
-
- op.bulk_insert(tables.mediatype,
- [
- {'name':'text/plain'},
- {'name':'application/json'},
- ])
-
- op.bulk_insert(tables.notificationkind,
- [
- {'name':'build_failure'},
- {'name':'build_queued'},
- {'name':'build_start'},
- {'name':'build_success'},
- {'name':'expiring_license'},
- {'name':'maintenance'},
- {'name':'org_team_invite'},
- {'name':'over_private_usage'},
- {'name':'password_required'},
- {'name':'repo_push'},
- {'name':'service_key_submitted'},
- {'name':'vulnerability_found'},
- ])
-
- op.bulk_insert(tables.role,
- [
- {'name':'admin'},
- {'name':'write'},
- {'name':'read'},
- ])
-
- op.bulk_insert(tables.teamrole,
- [
- {'name':'admin'},
- {'name':'creator'},
- {'name':'member'},
- ])
-
- op.bulk_insert(tables.visibility,
- [
- {'name':'public'},
- {'name':'private'},
- ])
-
- # ### population of test data ### #
- tester.populate_table('user', [
- ('uuid', tester.TestDataType.UUID),
- ('username', tester.TestDataType.String),
- ('password_hash', tester.TestDataType.String),
- ('email', tester.TestDataType.String),
- ('verified', tester.TestDataType.Boolean),
- ('organization', tester.TestDataType.Boolean),
- ('robot', tester.TestDataType.Boolean),
- ('invoice_email', tester.TestDataType.Boolean),
- ('invalid_login_attempts', tester.TestDataType.Integer),
- ('last_invalid_login', tester.TestDataType.DateTime),
- ('removed_tag_expiration_s', tester.TestDataType.Integer),
- ('enabled', tester.TestDataType.Boolean),
- ('invoice_email_address', tester.TestDataType.String),
- ])
-
- tester.populate_table('repository', [
- ('namespace_user_id', tester.TestDataType.Foreign('user')),
- ('name', tester.TestDataType.String),
- ('visibility_id', tester.TestDataType.Foreign('visibility')),
- ('description', tester.TestDataType.String),
- ('badge_token', tester.TestDataType.String),
- ])
-
- tester.populate_table('emailconfirmation', [
- ('code', tester.TestDataType.String),
- ('user_id', tester.TestDataType.Foreign('user')),
- ('pw_reset', tester.TestDataType.Boolean),
- ('email_confirm', tester.TestDataType.Boolean),
- ('created', tester.TestDataType.DateTime),
- ])
-
- tester.populate_table('federatedlogin', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('service_id', tester.TestDataType.Foreign('loginservice')),
- ('service_ident', tester.TestDataType.String),
- ('metadata_json', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('imagestorage', [
- ('uuid', tester.TestDataType.UUID),
- ('checksum', tester.TestDataType.String),
- ('image_size', tester.TestDataType.BigInteger),
- ('uncompressed_size', tester.TestDataType.BigInteger),
- ('uploading', tester.TestDataType.Boolean),
- ('cas_path', tester.TestDataType.Boolean),
- ('content_checksum', tester.TestDataType.String),
- ])
-
- tester.populate_table('image', [
- ('docker_image_id', tester.TestDataType.UUID),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('ancestors', tester.TestDataType.String),
- ('storage_id', tester.TestDataType.Foreign('imagestorage')),
- ('security_indexed', tester.TestDataType.Boolean),
- ('security_indexed_engine', tester.TestDataType.Integer),
- ])
-
- tester.populate_table('imagestorageplacement', [
- ('storage_id', tester.TestDataType.Foreign('imagestorage')),
- ('location_id', tester.TestDataType.Foreign('imagestoragelocation')),
- ])
-
- tester.populate_table('messages', [
- ('content', tester.TestDataType.String),
- ('uuid', tester.TestDataType.UUID),
- ])
-
- tester.populate_table('queueitem', [
- ('queue_name', tester.TestDataType.String),
- ('body', tester.TestDataType.JSON),
- ('available_after', tester.TestDataType.DateTime),
- ('available', tester.TestDataType.Boolean),
- ('processing_expires', tester.TestDataType.DateTime),
- ('retries_remaining', tester.TestDataType.Integer),
- ])
-
- tester.populate_table('servicekeyapproval', [
- ('approver_id', tester.TestDataType.Foreign('user')),
- ('approval_type', tester.TestDataType.String),
- ('approved_date', tester.TestDataType.DateTime),
- ('notes', tester.TestDataType.String),
- ])
-
- tester.populate_table('servicekey', [
- ('name', tester.TestDataType.String),
- ('kid', tester.TestDataType.String),
- ('service', tester.TestDataType.String),
- ('jwk', tester.TestDataType.JSON),
- ('metadata', tester.TestDataType.JSON),
- ('created_date', tester.TestDataType.DateTime),
- ('approval_id', tester.TestDataType.Foreign('servicekeyapproval')),
- ])
-
- tester.populate_table('label', [
- ('uuid', tester.TestDataType.UUID),
- ('key', tester.TestDataType.UTF8Char),
- ('value', tester.TestDataType.JSON),
- ('media_type_id', tester.TestDataType.Foreign('mediatype')),
- ('source_type_id', tester.TestDataType.Foreign('labelsourcetype')),
- ])
-
- tester.populate_table('logentry', [
- ('kind_id', tester.TestDataType.Foreign('logentrykind')),
- ('account_id', tester.TestDataType.Foreign('user')),
- ('performer_id', tester.TestDataType.Foreign('user')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('datetime', tester.TestDataType.DateTime),
- ('ip', tester.TestDataType.String),
- ('metadata_json', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('notification', [
- ('uuid', tester.TestDataType.UUID),
- ('kind_id', tester.TestDataType.Foreign('notificationkind')),
- ('target_id', tester.TestDataType.Foreign('user')),
- ('metadata_json', tester.TestDataType.JSON),
- ('created', tester.TestDataType.DateTime),
- ('dismissed', tester.TestDataType.Boolean),
- ('lookup_path', tester.TestDataType.String),
- ])
-
- tester.populate_table('oauthapplication', [
- ('client_id', tester.TestDataType.String),
- ('client_secret', tester.TestDataType.String),
- ('redirect_uri', tester.TestDataType.String),
- ('application_uri', tester.TestDataType.String),
- ('organization_id', tester.TestDataType.Foreign('user')),
- ('name', tester.TestDataType.String),
- ('description', tester.TestDataType.String),
- ])
-
- tester.populate_table('team', [
- ('name', tester.TestDataType.String),
- ('organization_id', tester.TestDataType.Foreign('user')),
- ('role_id', tester.TestDataType.Foreign('teamrole')),
- ('description', tester.TestDataType.String),
- ])
-
- tester.populate_table('torrentinfo', [
- ('storage_id', tester.TestDataType.Foreign('imagestorage')),
- ('piece_length', tester.TestDataType.Integer),
- ('pieces', tester.TestDataType.String),
- ])
-
- tester.populate_table('userregion', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('location_id', tester.TestDataType.Foreign('imagestoragelocation')),
- ])
-
- tester.populate_table('accesstoken', [
- ('friendly_name', tester.TestDataType.String),
- ('code', tester.TestDataType.Token),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('created', tester.TestDataType.DateTime),
- ('role_id', tester.TestDataType.Foreign('role')),
- ('temporary', tester.TestDataType.Boolean),
- ('kind_id', tester.TestDataType.Foreign('accesstokenkind')),
- ])
-
- tester.populate_table('blobupload', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('uuid', tester.TestDataType.UUID),
- ('byte_count', tester.TestDataType.Integer),
- ('sha_state', tester.TestDataType.String),
- ('location_id', tester.TestDataType.Foreign('imagestoragelocation')),
- ('chunk_count', tester.TestDataType.Integer),
- ('created', tester.TestDataType.DateTime),
- ])
-
- tester.populate_table('oauthaccesstoken', [
- ('uuid', tester.TestDataType.UUID),
- ('application_id', tester.TestDataType.Foreign('oauthapplication')),
- ('authorized_user_id', tester.TestDataType.Foreign('user')),
- ('scope', tester.TestDataType.String),
- ('access_token', tester.TestDataType.Token),
- ('token_type', tester.TestDataType.String),
- ('expires_at', tester.TestDataType.DateTime),
- ('data', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('oauthauthorizationcode', [
- ('application_id', tester.TestDataType.Foreign('oauthapplication')),
- ('code', tester.TestDataType.Token),
- ('scope', tester.TestDataType.String),
- ('data', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('permissionprototype', [
- ('org_id', tester.TestDataType.Foreign('user')),
- ('uuid', tester.TestDataType.UUID),
- ('activating_user_id', tester.TestDataType.Foreign('user')),
- ('delegate_user_id', tester.TestDataType.Foreign('user')),
- ('role_id', tester.TestDataType.Foreign('role')),
- ])
-
- tester.populate_table('repositoryactioncount', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('count', tester.TestDataType.Integer),
- ('date', tester.TestDataType.Date),
- ])
-
- tester.populate_table('repositoryauthorizedemail', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('email', tester.TestDataType.String),
- ('code', tester.TestDataType.String),
- ('confirmed', tester.TestDataType.Boolean),
- ])
-
- tester.populate_table('repositorynotification', [
- ('uuid', tester.TestDataType.UUID),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('event_id', tester.TestDataType.Foreign('externalnotificationevent')),
- ('method_id', tester.TestDataType.Foreign('externalnotificationmethod')),
- ('title', tester.TestDataType.String),
- ('config_json', tester.TestDataType.JSON),
- ('event_config_json', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('repositorypermission', [
- ('team_id', tester.TestDataType.Foreign('team')),
- ('user_id', tester.TestDataType.Foreign('user')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('role_id', tester.TestDataType.Foreign('role')),
- ])
-
- tester.populate_table('star', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('created', tester.TestDataType.DateTime),
- ])
-
- tester.populate_table('teammember', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('team_id', tester.TestDataType.Foreign('team')),
- ])
-
- tester.populate_table('teammemberinvite', [
- ('user_id', tester.TestDataType.Foreign('user')),
- ('email', tester.TestDataType.String),
- ('team_id', tester.TestDataType.Foreign('team')),
- ('inviter_id', tester.TestDataType.Foreign('user')),
- ('invite_token', tester.TestDataType.String),
- ])
-
- tester.populate_table('derivedstorageforimage', [
- ('source_image_id', tester.TestDataType.Foreign('image')),
- ('derivative_id', tester.TestDataType.Foreign('imagestorage')),
- ('transformation_id', tester.TestDataType.Foreign('imagestoragetransformation')),
- ('uniqueness_hash', tester.TestDataType.String),
- ])
-
- tester.populate_table('repositorybuildtrigger', [
- ('uuid', tester.TestDataType.UUID),
- ('service_id', tester.TestDataType.Foreign('buildtriggerservice')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('connected_user_id', tester.TestDataType.Foreign('user')),
- ('auth_token', tester.TestDataType.String),
- ('config', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('repositorytag', [
- ('name', tester.TestDataType.String),
- ('image_id', tester.TestDataType.Foreign('image')),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('lifetime_start_ts', tester.TestDataType.Integer),
- ('hidden', tester.TestDataType.Boolean),
- ('reversion', tester.TestDataType.Boolean),
- ])
-
- tester.populate_table('repositorybuild', [
- ('uuid', tester.TestDataType.UUID),
- ('phase', tester.TestDataType.String),
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('access_token_id', tester.TestDataType.Foreign('accesstoken')),
- ('resource_key', tester.TestDataType.String),
- ('job_config', tester.TestDataType.JSON),
- ('started', tester.TestDataType.DateTime),
- ('display_name', tester.TestDataType.JSON),
- ('trigger_id', tester.TestDataType.Foreign('repositorybuildtrigger')),
- ('logs_archived', tester.TestDataType.Boolean),
- ])
-
- tester.populate_table('tagmanifest', [
- ('tag_id', tester.TestDataType.Foreign('repositorytag')),
- ('digest', tester.TestDataType.String),
- ('json_data', tester.TestDataType.JSON),
- ])
-
- tester.populate_table('tagmanifestlabel', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('annotated_id', tester.TestDataType.Foreign('tagmanifest')),
- ('label_id', tester.TestDataType.Foreign('label')),
- ])
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_table('tagmanifestlabel')
- op.drop_table('tagmanifest')
- op.drop_table('repositorybuild')
- op.drop_table('repositorytag')
- op.drop_table('repositorybuildtrigger')
- op.drop_table('derivedstorageforimage')
- op.drop_table('teammemberinvite')
- op.drop_table('teammember')
- op.drop_table('star')
- op.drop_table('repositorypermission')
- op.drop_table('repositorynotification')
- op.drop_table('repositoryauthorizedemail')
- op.drop_table('repositoryactioncount')
- op.drop_table('permissionprototype')
- op.drop_table('oauthauthorizationcode')
- op.drop_table('oauthaccesstoken')
- op.drop_table('image')
- op.drop_table('blobupload')
- op.drop_table('accesstoken')
- op.drop_table('userregion')
- op.drop_table('torrentinfo')
- op.drop_table('team')
- op.drop_table('servicekey')
- op.drop_table('repository')
- op.drop_table('quayrelease')
- op.drop_table('oauthapplication')
- op.drop_table('notification')
- op.drop_table('logentry')
- op.drop_table('label')
- op.drop_table('imagestoragesignature')
- op.drop_table('imagestorageplacement')
- op.drop_table('federatedlogin')
- op.drop_table('emailconfirmation')
- op.drop_table('visibility')
- op.drop_table('user')
- op.drop_table('teamrole')
- op.drop_table('servicekeyapproval')
- op.drop_table('role')
- op.drop_table('queueitem')
- op.drop_table('quayservice')
- op.drop_table('quayregion')
- op.drop_table('notificationkind')
- op.drop_table('messages')
- op.drop_table('mediatype')
- op.drop_table('loginservice')
- op.drop_table('logentrykind')
- op.drop_table('labelsourcetype')
- op.drop_table('imagestoragetransformation')
- op.drop_table('imagestoragesignaturekind')
- op.drop_table('imagestoragelocation')
- op.drop_table('imagestorage')
- op.drop_table('externalnotificationmethod')
- op.drop_table('externalnotificationevent')
- op.drop_table('buildtriggerservice')
- op.drop_table('accesstokenkind')
diff --git a/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py b/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py
deleted file mode 100644
index 8e0a8ab8c..000000000
--- a/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Backfill RepositorySearchScore table
-
-Revision ID: c3d4b7ebcdf7
-Revises: f30984525c86
-Create Date: 2017-04-13 12:01:59.572775
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c3d4b7ebcdf7'
-down_revision = 'f30984525c86'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # Add a 0 entry into the RepositorySearchScore table for each repository that isn't present
- conn = op.get_bind()
- conn.execute("insert into repositorysearchscore (repository_id, score) SELECT id, 0 FROM " +
- "repository WHERE id not in (select repository_id from repositorysearchscore)")
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- pass
diff --git a/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py b/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py
deleted file mode 100644
index dc1567bd5..000000000
--- a/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Drop checksum on ImageStorage
-
-Revision ID: c91c564aad34
-Revises: 152bb29a1bb3
-Create Date: 2018-02-21 12:17:52.405644
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c91c564aad34'
-down_revision = '152bb29a1bb3'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_column('imagestorage', 'checksum')
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.add_column('imagestorage', sa.Column('checksum', sa.String(length=255), nullable=True))
diff --git a/data/migrations/versions/cbc8177760d9_add_user_location_field.py b/data/migrations/versions/cbc8177760d9_add_user_location_field.py
deleted file mode 100644
index cbdc87706..000000000
--- a/data/migrations/versions/cbc8177760d9_add_user_location_field.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""Add user location field
-
-Revision ID: cbc8177760d9
-Revises: 7367229b38d9
-Create Date: 2018-02-02 17:39:16.589623
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'cbc8177760d9'
-down_revision = '7367229b38d9'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-from util.migrate import UTF8CharField
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.add_column('user', sa.Column('location', UTF8CharField(length=255), nullable=True))
-
- # ### population of test data ### #
- tester.populate_column('user', 'location', tester.TestDataType.UTF8Char)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_column('user', 'location')
diff --git a/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py b/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py
deleted file mode 100644
index a44704eec..000000000
--- a/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""repository mirror notification
-
-Revision ID: cc6778199cdb
-Revises: c059b952ed76
-Create Date: 2019-10-03 17:41:23.316914
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'cc6778199cdb'
-down_revision = 'c059b952ed76'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
- op.bulk_insert(tables.notificationkind,
- [
- {'name': 'repo_mirror_sync_started'},
- {'name': 'repo_mirror_sync_success'},
- {'name': 'repo_mirror_sync_failed'},
- ])
- op.bulk_insert(tables.externalnotificationevent,
- [
- {'name': 'repo_mirror_sync_started'},
- {'name': 'repo_mirror_sync_success'},
- {'name': 'repo_mirror_sync_failed'},
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
-
- op.execute(tables
- .notificationkind
- .delete()
- .where(tables.
- notificationkind.c.name == op.inline_literal('repo_mirror_sync_started')))
- op.execute(tables
- .notificationkind
- .delete()
- .where(tables.
- notificationkind.c.name == op.inline_literal('repo_mirror_sync_success')))
- op.execute(tables
- .notificationkind
- .delete()
- .where(tables.
- notificationkind.c.name == op.inline_literal('repo_mirror_sync_failed')))
-
- op.execute(tables
- .externalnotificationevent
- .delete()
- .where(tables.
- externalnotificationevent.c.name == op.inline_literal('repo_mirror_sync_started')))
- op.execute(tables
- .externalnotificationevent
- .delete()
- .where(tables.
- externalnotificationevent.c.name == op.inline_literal('repo_mirror_sync_success')))
- op.execute(tables
- .externalnotificationevent
- .delete()
- .where(tables.
- externalnotificationevent.c.name == op.inline_literal('repo_mirror_sync_failed')))
diff --git a/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py b/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py
deleted file mode 100644
index 9e847e8e2..000000000
--- a/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py
+++ /dev/null
@@ -1,192 +0,0 @@
-"""Delete old Appr tables
-
-Revision ID: d17c695859ea
-Revises: 5d463ea1e8a8
-Create Date: 2018-07-16 15:21:11.593040
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'd17c695859ea'
-down_revision = '5d463ea1e8a8'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.sql import table, column
-from util.migrate import UTF8LongText, UTF8CharField
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('tag')
- op.drop_table('manifestlistmanifest')
- op.drop_table('manifestlist')
- op.drop_table('manifestblob')
- op.drop_table('manifest')
- op.drop_table('blobplacement')
- op.drop_table('blob')
- op.drop_table('blobplacementlocation')
- op.drop_table('tagkind')
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table(
- 'tagkind',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tagkind'))
- )
- op.create_index('tagkind_name', 'tagkind', ['name'], unique=True)
-
- op.create_table(
- 'blobplacementlocation',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocation'))
- )
- op.create_index('blobplacementlocation_name', 'blobplacementlocation', ['name'], unique=True)
-
- op.create_table(
- 'blob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('size', sa.BigInteger(), nullable=False),
- sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_blob_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blob'))
- )
- op.create_index('blob_digest', 'blob', ['digest'], unique=True)
- op.create_index('blob_media_type_id', 'blob', ['media_type_id'], unique=False)
-
- op.create_table(
- 'manifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.Column('manifest_json', UTF8LongText, nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifest_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifest'))
- )
- op.create_index('manifest_digest', 'manifest', ['digest'], unique=True)
- op.create_index('manifest_media_type_id', 'manifest', ['media_type_id'], unique=False)
-
- op.create_table(
- 'manifestlist',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('digest', sa.String(length=255), nullable=False),
- sa.Column('manifest_list_json', UTF8LongText, nullable=False),
- sa.Column('schema_version', UTF8CharField(length=255), nullable=False),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlist_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlist'))
- )
- op.create_index('manifestlist_digest', 'manifestlist', ['digest'], unique=True)
- op.create_index('manifestlist_media_type_id', 'manifestlist', ['media_type_id'], unique=False)
-
- op.create_table(
- 'blobplacement',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.Column('location_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_blobplacement_blob_id_blob')),
- sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacement_location_id_blobplacementlocation')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacement'))
- )
- op.create_index('blobplacement_blob_id', 'blobplacement', ['blob_id'], unique=False)
- op.create_index('blobplacement_blob_id_location_id', 'blobplacement', ['blob_id', 'location_id'], unique=True)
- op.create_index('blobplacement_location_id', 'blobplacement', ['location_id'], unique=False)
-
- op.create_table(
- 'manifestblob',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('blob_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestblob_blob_id_blob')),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestblob_manifest_id_manifest')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestblob'))
- )
- op.create_index('manifestblob_blob_id', 'manifestblob', ['blob_id'], unique=False)
- op.create_index('manifestblob_manifest_id', 'manifestblob', ['manifest_id'], unique=False)
- op.create_index('manifestblob_manifest_id_blob_id', 'manifestblob', ['manifest_id', 'blob_id'], unique=True)
-
- op.create_table(
- 'manifestlistmanifest',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('manifest_list_id', sa.Integer(), nullable=False),
- sa.Column('manifest_id', sa.Integer(), nullable=False),
- sa.Column('operating_system', UTF8CharField(length=255), nullable=True),
- sa.Column('architecture', UTF8CharField(length=255), nullable=True),
- sa.Column('platform_json', UTF8LongText, nullable=True),
- sa.Column('media_type_id', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlistmanifest_manifest_id_manifest')),
- sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_manifestlistmanifest_manifest_list_id_manifestlist')),
- sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlistmanifest_media_type_id_mediatype')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlistmanifest'))
- )
- op.create_index('manifestlistmanifest_manifest_id', 'manifestlistmanifest', ['manifest_id'], unique=False)
- op.create_index('manifestlistmanifest_manifest_list_id', 'manifestlistmanifest', ['manifest_list_id'], unique=False)
- op.create_index('manifestlistmanifest_manifest_listid_os_arch_mtid', 'manifestlistmanifest', ['manifest_list_id', 'operating_system', 'architecture', 'media_type_id'], unique=False)
- op.create_index('manifestlistmanifest_manifest_listid_mtid', 'manifestlistmanifest', ['manifest_list_id', 'media_type_id'], unique=False)
- op.create_index('manifestlistmanifest_media_type_id', 'manifestlistmanifest', ['media_type_id'], unique=False)
-
- op.create_table(
- 'tag',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('name', UTF8CharField(length=190), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('manifest_list_id', sa.Integer(), nullable=True),
- sa.Column('lifetime_start', sa.BigInteger(), nullable=False),
- sa.Column('lifetime_end', sa.BigInteger(), nullable=True),
- sa.Column('hidden', sa.Boolean(), nullable=False),
- sa.Column('reverted', sa.Boolean(), nullable=False),
- sa.Column('protected', sa.Boolean(), nullable=False),
- sa.Column('tag_kind_id', sa.Integer(), nullable=False),
- sa.Column('linked_tag_id', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['linked_tag_id'], ['tag.id'], name=op.f('fk_tag_linked_tag_id_tag')),
- sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_tag_manifest_list_id_manifestlist')),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tag_repository_id_repository')),
- sa.ForeignKeyConstraint(['tag_kind_id'], ['tagkind.id'], name=op.f('fk_tag_tag_kind_id_tagkind')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_tag'))
- )
- op.create_index('tag_lifetime_end', 'tag', ['lifetime_end'], unique=False)
- op.create_index('tag_linked_tag_id', 'tag', ['linked_tag_id'], unique=False)
- op.create_index('tag_manifest_list_id', 'tag', ['manifest_list_id'], unique=False)
- op.create_index('tag_repository_id', 'tag', ['repository_id'], unique=False)
- op.create_index('tag_repository_id_name_hidden', 'tag', ['repository_id', 'name', 'hidden'], unique=False)
- op.create_index('tag_repository_id_name_lifetime_end', 'tag', ['repository_id', 'name', 'lifetime_end'], unique=True)
- op.create_index('tag_repository_id_name', 'tag', ['repository_id', 'name'], unique=False)
- op.create_index('tag_tag_kind_id', 'tag', ['tag_kind_id'], unique=False)
-
- # ### end Alembic commands ###
-
- blobplacementlocation_table = table('blobplacementlocation',
- column('id', sa.Integer()),
- column('name', sa.String()),
- )
-
- op.bulk_insert(
- blobplacementlocation_table,
- [
- {'name': 'local_eu'},
- {'name': 'local_us'},
- ],
- )
-
- tagkind_table = table('tagkind',
- column('id', sa.Integer()),
- column('name', sa.String()),
- )
-
- op.bulk_insert(
- tagkind_table,
- [
- {'id': 1, 'name': 'tag'},
- {'id': 2, 'name': 'release'},
- {'id': 3, 'name': 'channel'},
- ]
- )
\ No newline at end of file
diff --git a/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py b/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py
deleted file mode 100644
index 24a65b8a4..000000000
--- a/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Backfill state_id and make it unique
-
-Revision ID: d42c175b439a
-Revises: 3e8cc74a1e7b
-Create Date: 2017-01-18 15:11:01.635632
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'd42c175b439a'
-down_revision = '3e8cc74a1e7b'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # Backfill the queueitem table's state_id field with unique values for all entries which are
- # empty.
- conn = op.get_bind()
- conn.execute("update queueitem set state_id = id where state_id = ''")
-
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('queueitem_state_id', table_name='queueitem')
- op.create_index('queueitem_state_id', 'queueitem', ['state_id'], unique=True)
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('queueitem_state_id', table_name='queueitem')
- op.create_index('queueitem_state_id', 'queueitem', ['state_id'], unique=False)
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py b/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py
deleted file mode 100644
index 42ec883eb..000000000
--- a/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Add change_tag_expiration log type
-
-Revision ID: d8989249f8f6
-Revises: dc4af11a5f90
-Create Date: 2017-06-21 21:18:25.948689
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'd8989249f8f6'
-down_revision = 'dc4af11a5f90'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.bulk_insert(tables.logentrykind, [
- {'name': 'change_tag_expiration'},
- ])
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.execute(tables
- .logentrykind
- .delete()
- .where(tables.logentrykind.c.name == op.inline_literal('change_tag_expiration')))
diff --git a/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py
deleted file mode 100644
index dc8512026..000000000
--- a/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""add notification number of failures column
-
-Revision ID: dc4af11a5f90
-Revises: 53e2ac668296
-Create Date: 2017-05-16 17:24:02.630365
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'dc4af11a5f90'
-down_revision = '53e2ac668296'
-
-import sqlalchemy as sa
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.add_column('repositorynotification', sa.Column('number_of_failures',
- sa.Integer(),
- nullable=False,
- server_default='0'))
- op.bulk_insert(tables.logentrykind, [
- {'name': 'reset_repo_notification'},
- ])
-
- # ### population of test data ### #
- tester.populate_column('repositorynotification', 'number_of_failures', tester.TestDataType.Integer)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- op.drop_column('repositorynotification', 'number_of_failures')
- op.execute(tables
- .logentrykind
- .delete()
- .where(tables.logentrykind.c.name == op.inline_literal('reset_repo_notification')))
diff --git a/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py b/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py
deleted file mode 100644
index b4513ce6d..000000000
--- a/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Add missing index on UUID fields
-
-Revision ID: e184af42242d
-Revises: 6ec8726c0ace
-Create Date: 2019-02-14 16:35:47.768086
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'e184af42242d'
-down_revision = '6ec8726c0ace'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_index('permissionprototype_uuid', 'permissionprototype', ['uuid'], unique=False)
- op.create_index('repositorybuildtrigger_uuid', 'repositorybuildtrigger', ['uuid'], unique=False)
- op.create_index('user_uuid', 'user', ['uuid'], unique=False)
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('user_uuid', table_name='user')
- op.drop_index('repositorybuildtrigger_uuid', table_name='repositorybuildtrigger')
- op.drop_index('permissionprototype_uuid', table_name='permissionprototype')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py b/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py
deleted file mode 100644
index 13ed12ba5..000000000
--- a/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Add full text search indexing for repo name and description
-
-Revision ID: e2894a3a3c19
-Revises: d42c175b439a
-Create Date: 2017-01-11 13:55:54.890774
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'e2894a3a3c19'
-down_revision = 'd42c175b439a'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_index('repository_description__fulltext', 'repository', ['description'], unique=False, postgresql_using='gin', postgresql_ops={'description': 'gin_trgm_ops'}, mysql_prefix='FULLTEXT')
- op.create_index('repository_name__fulltext', 'repository', ['name'], unique=False, postgresql_using='gin', postgresql_ops={'name': 'gin_trgm_ops'}, mysql_prefix='FULLTEXT')
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('repository_name__fulltext', table_name='repository')
- op.drop_index('repository_description__fulltext', table_name='repository')
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py b/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
deleted file mode 100644
index e2e69d99f..000000000
--- a/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Remove blob_index from ManifestBlob table
-
-Revision ID: eafdeadcebc7
-Revises: 9093adccc784
-Create Date: 2018-08-07 15:57:54.001225
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'eafdeadcebc7'
-down_revision = '9093adccc784'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('manifestblob_manifest_id_blob_index', table_name='manifestblob')
- op.drop_column('manifestblob', 'blob_index')
- # ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('manifestblob', sa.Column('blob_index', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
- op.create_index('manifestblob_manifest_id_blob_index', 'manifestblob', ['manifest_id', 'blob_index'], unique=True)
- # ### end Alembic commands ###
diff --git a/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py b/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py
deleted file mode 100644
index 2a59ee4ec..000000000
--- a/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Add trust_enabled to repository
-
-Revision ID: ed01e313d3cb
-Revises: c3d4b7ebcdf7
-Create Date: 2017-04-14 17:38:03.319695
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'ed01e313d3cb'
-down_revision = 'c3d4b7ebcdf7'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.add_column('repository', sa.Column('trust_enabled', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
- ### end Alembic commands ###
- op.bulk_insert(tables.logentrykind, [
- {'name': 'change_repo_trust'},
- ])
-
- # ### population of test data ### #
- tester.populate_column('repository', 'trust_enabled', tester.TestDataType.Boolean)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.drop_column('repository', 'trust_enabled')
- ### end Alembic commands ###
-
- op.execute(tables
- .logentrykind
- .delete()
- .where(tables.
- logentrykind.name == op.inline_literal('change_repo_trust')))
diff --git a/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py b/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py
deleted file mode 100644
index f4a0d4045..000000000
--- a/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""Add RepositorySearchScore table
-
-Revision ID: f30984525c86
-Revises: be8d1c402ce0
-Create Date: 2017-04-04 14:30:13.270728
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'f30984525c86'
-down_revision = 'be8d1c402ce0'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.create_table('repositorysearchscore',
- sa.Column('id', sa.Integer(), nullable=False),
- sa.Column('repository_id', sa.Integer(), nullable=False),
- sa.Column('score', sa.BigInteger(), nullable=False),
- sa.Column('last_updated', sa.DateTime(), nullable=True),
- sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorysearchscore_repository_id_repository')),
- sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorysearchscore'))
- )
- op.create_index('repositorysearchscore_repository_id', 'repositorysearchscore', ['repository_id'], unique=True)
- op.create_index('repositorysearchscore_score', 'repositorysearchscore', ['score'], unique=False)
- ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_table('repositorysearchscore', [
- ('repository_id', tester.TestDataType.Foreign('repository')),
- ('score', tester.TestDataType.BigInteger),
- ('last_updated', tester.TestDataType.DateTime),
- ])
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('repositorysearchscore')
- ### end Alembic commands ###
diff --git a/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py b/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py
new file mode 100644
index 000000000..5b3f6c812
--- /dev/null
+++ b/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py
@@ -0,0 +1,33 @@
+"""Remove the old webhooks table.
+
+Revision ID: f42b0ea7a4d
+Revises: 4fdb65816b8d
+Create Date: 2014-09-03 13:43:23.391464
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'f42b0ea7a4d'
+down_revision = '4fdb65816b8d'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('webhook')
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('webhook',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('public_id', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('parameters', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py b/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py
deleted file mode 100644
index d801764c1..000000000
--- a/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""update queue item table indices
-
-Revision ID: f5167870dd66
-Revises: 45fd8b9869d4
-Create Date: 2016-12-08 17:26:20.333846
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'f5167870dd66'
-down_revision = '45fd8b9869d4'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.create_index('queueitem_processing_expires_available', 'queueitem', ['processing_expires', 'available'], unique=False)
- op.create_index('queueitem_pe_aafter_qname_rremaining_available', 'queueitem', ['processing_expires', 'available_after', 'queue_name', 'retries_remaining', 'available'], unique=False)
- op.create_index('queueitem_pexpires_aafter_rremaining_available', 'queueitem', ['processing_expires', 'available_after', 'retries_remaining', 'available'], unique=False)
- op.create_index('queueitem_processing_expires_queue_name_available', 'queueitem', ['processing_expires', 'queue_name', 'available'], unique=False)
- op.drop_index('queueitem_available', table_name='queueitem')
- op.drop_index('queueitem_available_after', table_name='queueitem')
- op.drop_index('queueitem_processing_expires', table_name='queueitem')
- op.drop_index('queueitem_retries_remaining', table_name='queueitem')
- ### end Alembic commands ###
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.create_index('queueitem_retries_remaining', 'queueitem', ['retries_remaining'], unique=False)
- op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
- op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
- op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
- op.drop_index('queueitem_processing_expires_queue_name_available', table_name='queueitem')
- op.drop_index('queueitem_pexpires_aafter_rremaining_available', table_name='queueitem')
- op.drop_index('queueitem_pe_aafter_qname_rremaining_available', table_name='queueitem')
- op.drop_index('queueitem_processing_expires_available', table_name='queueitem')
- ### end Alembic commands ###
diff --git a/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py b/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py
deleted file mode 100644
index 3e3b9b9a6..000000000
--- a/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""Add user metadata fields
-
-Revision ID: faf752bd2e0a
-Revises: 6c7014e84a5e
-Create Date: 2016-11-14 17:29:03.984665
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'faf752bd2e0a'
-down_revision = '6c7014e84a5e'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-
-from util.migrate import UTF8CharField
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.add_column('user', sa.Column('company', UTF8CharField(length=255), nullable=True))
- op.add_column('user', sa.Column('family_name', UTF8CharField(length=255), nullable=True))
- op.add_column('user', sa.Column('given_name', UTF8CharField(length=255), nullable=True))
- ### end Alembic commands ###
-
- op.bulk_insert(tables.userpromptkind,
- [
- {'name':'enter_name'},
- {'name':'enter_company'},
- ])
-
- # ### population of test data ### #
- tester.populate_column('user', 'company', tester.TestDataType.UTF8Char)
- tester.populate_column('user', 'family_name', tester.TestDataType.UTF8Char)
- tester.populate_column('user', 'given_name', tester.TestDataType.UTF8Char)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- ### commands auto generated by Alembic - please adjust! ###
- op.drop_column('user', 'given_name')
- op.drop_column('user', 'family_name')
- op.drop_column('user', 'company')
- ### end Alembic commands ###
-
- op.execute(
- (tables.userpromptkind.delete()
- .where(tables.userpromptkind.c.name == op.inline_literal('enter_name')))
- )
-
- op.execute(
- (tables.userpromptkind.delete()
- .where(tables.userpromptkind.c.name == op.inline_literal('enter_company')))
- )
diff --git a/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py b/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py
deleted file mode 100644
index dd0363ce3..000000000
--- a/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Add state_id field to QueueItem
-
-Revision ID: fc47c1ec019f
-Revises: f5167870dd66
-Create Date: 2017-01-12 15:44:23.643016
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'fc47c1ec019f'
-down_revision = 'f5167870dd66'
-
-from alembic import op as original_op
-from data.migrations.progress import ProgressWrapper
-import sqlalchemy as sa
-from sqlalchemy.dialects import mysql
-
-def upgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column('queueitem', sa.Column('state_id', sa.String(length=255), nullable=False, server_default=''))
- op.create_index('queueitem_state_id', 'queueitem', ['state_id'], unique=False)
- # ### end Alembic commands ###
-
- # ### population of test data ### #
- tester.populate_column('queueitem', 'state_id', tester.TestDataType.String)
- # ### end population of test data ### #
-
-
-def downgrade(tables, tester, progress_reporter):
- op = ProgressWrapper(original_op, progress_reporter)
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index('queueitem_state_id', table_name='queueitem')
- op.drop_column('queueitem', 'state_id')
- # ### end Alembic commands ###
diff --git a/data/migrationutil.py b/data/migrationutil.py
deleted file mode 100644
index a433605f5..000000000
--- a/data/migrationutil.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import os
-
-from abc import ABCMeta, abstractmethod, abstractproperty
-from collections import namedtuple
-from six import add_metaclass
-
-MigrationPhase = namedtuple('MigrationPhase', ['name', 'alembic_revision', 'flags'])
-
-
-@add_metaclass(ABCMeta)
-class DataMigration(object):
- @abstractproperty
- def alembic_migration_revision(self):
- """ Returns the alembic migration revision corresponding to the currently configured phase.
- """
-
- @abstractmethod
- def has_flag(self, flag):
- """ Returns true if the data migration's current phase has the given flag set. """
-
-
-class NullDataMigration(DataMigration):
- @property
- def alembic_migration_revision(self):
- return 'head'
-
- def has_flag(self, flag):
- raise NotImplementedError()
-
-
-class DefinedDataMigration(DataMigration):
- def __init__(self, name, env_var, phases):
- assert phases
-
- self.name = name
- self.phases = {phase.name: phase for phase in phases}
-
- # Add a synthetic phase for new installations that skips the entire migration.
- self.phases['new-installation'] = phases[-1]._replace(name='new-installation',
- alembic_revision='head')
-
- phase_name = os.getenv(env_var)
- if phase_name is None:
- msg = 'Missing env var `%s` for data migration `%s`. %s' % (env_var, self.name,
- self._error_suffix)
- raise Exception(msg)
-
- current_phase = self.phases.get(phase_name)
- if current_phase is None:
- msg = 'Unknown phase `%s` for data migration `%s`. %s' % (phase_name, self.name,
- self._error_suffix)
- raise Exception(msg)
-
- self.current_phase = current_phase
-
- @property
- def _error_suffix(self):
- message = 'Available values for this migration: %s. ' % (self.phases.keys())
- message += 'If this is a new installation, please use `new-installation`.'
- return message
-
- @property
- def alembic_migration_revision(self):
- assert self.current_phase
- return self.current_phase.alembic_revision
-
- def has_flag(self, flag):
- assert self.current_phase
- return flag in self.current_phase.flags
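(For orientation, since data/migrationutil.py is removed wholesale above: a minimal sketch of how the deleted DefinedDataMigration class was meant to be driven. The migration name, environment variable, phase names and revisions below are hypothetical and do not appear anywhere in this diff.)

import os

from data.migrationutil import DefinedDataMigration, MigrationPhase

# The phase is normally selected by the deployment via an environment variable;
# it is set here only so the sketch runs standalone.
os.environ.setdefault('MY_BACKFILL_PHASE', 'add-new-column')

example_migration = DefinedDataMigration(
  'my-backfill',          # hypothetical migration name
  'MY_BACKFILL_PHASE',    # env var that picks the current phase
  [
    MigrationPhase('add-new-column', 'abc123def456', ['read-old-data']),
    MigrationPhase('backfill-complete', 'def456abc123', []),
  ],
)

print(example_migration.alembic_migration_revision)  # 'abc123def456' for this phase
print(example_migration.has_flag('read-old-data'))   # True
# A fresh deployment would instead set MY_BACKFILL_PHASE=new-installation,
# which maps to the synthetic phase whose alembic revision is 'head'.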
diff --git a/data/model/__init__.py b/data/model/__init__.py
index 2c9260469..8c1214c54 100644
--- a/data/model/__init__.py
+++ b/data/model/__init__.py
@@ -1,30 +1,14 @@
-from data.database import db, db_transaction
+from data.database import db
class DataModelException(Exception):
pass
-class InvalidLabelKeyException(DataModelException):
- pass
-
-
-class InvalidMediaTypeException(DataModelException):
- pass
-
-
class BlobDoesNotExist(DataModelException):
pass
-class TorrentInfoDoesNotExist(DataModelException):
- pass
-
-
-class InvalidBlobUpload(DataModelException):
- pass
-
-
class InvalidEmailAddressException(DataModelException):
pass
@@ -45,6 +29,10 @@ class InvalidUsernameException(DataModelException):
pass
+class TooManyUsersException(DataModelException):
+ pass
+
+
class InvalidRepositoryBuildException(DataModelException):
pass
@@ -77,29 +65,6 @@ class InvalidTeamMemberException(DataModelException):
pass
-class InvalidManifestException(DataModelException):
- pass
-
-
-class ServiceKeyDoesNotExist(DataModelException):
- pass
-
-
-class ServiceKeyAlreadyApproved(DataModelException):
- pass
-
-
-class ServiceNameInvalid(DataModelException):
- pass
-
-
-class TagAlreadyCreatedException(DataModelException):
- pass
-
-class StaleTagException(DataModelException):
- pass
-
-
class TooManyLoginAttemptsException(Exception):
def __init__(self, message, retry_after):
super(TooManyLoginAttemptsException, self).__init__(message)
@@ -110,44 +75,17 @@ class Config(object):
def __init__(self):
self.app_config = None
self.store = None
- self.image_cleanup_callbacks = []
- self.repo_cleanup_callbacks = []
-
- def register_image_cleanup_callback(self, callback):
- self.image_cleanup_callbacks.append(callback)
-
- def register_repo_cleanup_callback(self, callback):
- self.repo_cleanup_callbacks.append(callback)
config = Config()
+def db_transaction():
+ return config.app_config['DB_TRANSACTION_FACTORY'](db)
+
+
# There MUST NOT be any circular dependencies between these subsections. If there are fix it by
# moving the minimal number of things to _basequery
-from data.model import (
- appspecifictoken,
- blob,
- build,
- gc,
- image,
- label,
- log,
- message,
- modelutil,
- notification,
- oauth,
- organization,
- permission,
- repositoryactioncount,
- repo_mirror,
- release,
- repo_mirror,
- repository,
- service_keys,
- storage,
- tag,
- team,
- token,
- user,
-)
+# TODO document the methods and modules for each one of the submodules below.
+from data.model import (blob, build, image, log, notification, oauth, organization, permission,
+ repository, storage, tag, team, token, user)
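(A small sketch of how the reshaped data.model module is consumed, assuming DB_TRANSACTION_FACTORY is configured to return a context manager such as peewee's db.transaction(); the call site and row objects below are purely illustrative and not part of this diff.)

from data import model

def update_rows_atomically(row_a, row_b):
  # Group several writes so they commit or roll back together.
  with model.db_transaction():
    row_a.save()
    row_b.delete_instance()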
diff --git a/data/model/_basequery.py b/data/model/_basequery.py
index 5fc1733e0..131f860e7 100644
--- a/data/model/_basequery.py
+++ b/data/model/_basequery.py
@@ -1,55 +1,15 @@
-import logging
+from peewee import JOIN_LEFT_OUTER
+from cachetools import lru_cache
-from peewee import fn, PeeweeException
-from cachetools.func import lru_cache
-
-from datetime import datetime, timedelta
-
-from data.model import DataModelException, config
-from data.readreplica import ReadOnlyModeException
from data.database import (Repository, User, Team, TeamMember, RepositoryPermission, TeamRole,
- Namespace, Visibility, ImageStorage, Image, RepositoryKind,
- db_for_update)
-
-logger = logging.getLogger(__name__)
-
-def reduce_as_tree(queries_to_reduce):
- """ This method will split a list of queries into halves recursively until we reach individual
- queries, at which point it will start unioning the queries, or the already unioned subqueries.
- This works around a bug in peewee SQL generation where reducing linearly generates a chain
- of queries that will exceed the recursion depth limit when it has around 80 queries.
- """
- mid = len(queries_to_reduce)/2
- left = queries_to_reduce[:mid]
- right = queries_to_reduce[mid:]
-
- to_reduce_right = right[0]
- if len(right) > 1:
- to_reduce_right = reduce_as_tree(right)
-
- if len(left) > 1:
- to_reduce_left = reduce_as_tree(left)
- elif len(left) == 1:
- to_reduce_left = left[0]
- else:
- return to_reduce_right
-
- return to_reduce_left.union_all(to_reduce_right)
+ Namespace, Visibility, db_for_update)
-def get_existing_repository(namespace_name, repository_name, for_update=False, kind_filter=None):
+def get_existing_repository(namespace_name, repository_name, for_update=False):
query = (Repository
.select(Repository, Namespace)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(Namespace.username == namespace_name,
- Repository.name == repository_name))
-
- if kind_filter:
- query = (query
- .switch(Repository)
- .join(RepositoryKind)
- .where(RepositoryKind.name == kind_filter))
-
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
if for_update:
query = db_for_update(query)
@@ -61,67 +21,49 @@ def get_public_repo_visibility():
return Visibility.get(name='public')
-def _lookup_team_role(name):
- return _lookup_team_roles()[name]
-
-
-@lru_cache(maxsize=1)
-def _lookup_team_roles():
- return {role.name:role for role in TeamRole.select()}
-
-
-def filter_to_repos_for_user(query, user_id=None, namespace=None, repo_kind='image',
- include_public=True, start_id=None):
- if not include_public and not user_id:
+def filter_to_repos_for_user(query, username=None, namespace=None, include_public=True):
+ if not include_public and not username:
return Repository.select().where(Repository.id == '-1')
- # Filter on the type of repository.
- if repo_kind is not None:
- try:
- query = query.where(Repository.kind == Repository.kind.get_id(repo_kind))
- except RepositoryKind.DoesNotExist:
- raise DataModelException('Unknown repository kind')
-
- # Add the start ID if necessary.
- if start_id is not None:
- query = query.where(Repository.id >= start_id)
-
- # Add a namespace filter if necessary.
- if namespace:
- query = query.where(Namespace.username == namespace)
-
- # Build a set of queries that, when unioned together, return the full set of visible repositories
- # for the filters specified.
- queries = []
-
- if include_public:
- queries.append(query.where(Repository.visibility == get_public_repo_visibility()))
-
- if user_id is not None:
+ where_clause = None
+ if username:
+ UserThroughTeam = User.alias()
+ Org = User.alias()
AdminTeam = Team.alias()
AdminTeamMember = TeamMember.alias()
+ AdminUser = User.alias()
- # Add repositories in which the user has permission.
- queries.append(query
- .switch(RepositoryPermission)
- .where(RepositoryPermission.user == user_id))
+ query = (query
+ .switch(RepositoryPermission)
+ .join(User, JOIN_LEFT_OUTER)
+ .switch(RepositoryPermission)
+ .join(Team, JOIN_LEFT_OUTER)
+ .join(TeamMember, JOIN_LEFT_OUTER)
+ .join(UserThroughTeam, JOIN_LEFT_OUTER, on=(UserThroughTeam.id == TeamMember.user))
+ .switch(Repository)
+ .join(Org, JOIN_LEFT_OUTER, on=(Repository.namespace_user == Org.id))
+ .join(AdminTeam, JOIN_LEFT_OUTER, on=(Org.id == AdminTeam.organization))
+ .join(TeamRole, JOIN_LEFT_OUTER, on=(AdminTeam.role == TeamRole.id))
+ .switch(AdminTeam)
+ .join(AdminTeamMember, JOIN_LEFT_OUTER, on=(AdminTeam.id == AdminTeamMember.team))
+ .join(AdminUser, JOIN_LEFT_OUTER, on=(AdminTeamMember.user == AdminUser.id)))
- # Add repositories in which the user is a member of a team that has permission.
- queries.append(query
- .switch(RepositoryPermission)
- .join(Team)
- .join(TeamMember)
- .where(TeamMember.user == user_id))
+ where_clause = ((User.username == username) | (UserThroughTeam.username == username) |
+ ((AdminUser.username == username) & (TeamRole.name == 'admin')))
- # Add repositories under namespaces in which the user is the org admin.
- queries.append(query
- .switch(Repository)
- .join(AdminTeam, on=(Repository.namespace_user == AdminTeam.organization))
- .join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
- .where(AdminTeam.role == _lookup_team_role('admin'))
- .where(AdminTeamMember.user == user_id))
+ if namespace:
+ where_clause = where_clause & (Namespace.username == namespace)
- return reduce(lambda l, r: l | r, queries)
+ # TODO(jschorr, jake): Figure out why the old join on Visibility was so darn slow and
+ # remove this hack.
+ if include_public:
+ new_clause = (Repository.visibility == get_public_repo_visibility())
+ if where_clause:
+ where_clause = where_clause | new_clause
+ else:
+ where_clause = new_clause
+
+ return query.where(where_clause)
def get_user_organizations(username):
@@ -133,66 +75,3 @@ def get_user_organizations(username):
.join(TeamMember)
.join(UserAlias, on=(UserAlias.id == TeamMember.user))
.where(User.organization == True, UserAlias.username == username))
-
-
-def calculate_image_aggregate_size(ancestors_str, image_size, parent_image):
- ancestors = ancestors_str.split('/')[1:-1]
- if not ancestors:
- return image_size
-
- if parent_image is None:
- raise DataModelException('Could not load parent image')
-
- ancestor_size = parent_image.aggregate_size
- if ancestor_size is not None:
- return ancestor_size + image_size
-
- # Fallback to a slower path if the parent doesn't have an aggregate size saved.
- # TODO: remove this code if/when we do a full backfill.
- ancestor_size = (ImageStorage
- .select(fn.Sum(ImageStorage.image_size))
- .join(Image)
- .where(Image.id << ancestors)
- .scalar())
- if ancestor_size is None:
- return None
-
- return ancestor_size + image_size
-
-
-def update_last_accessed(token_or_user):
- """ Updates the `last_accessed` field on the given token or user. If the existing field's value
- is within the configured threshold, the update is skipped. """
- if not config.app_config.get('FEATURE_USER_LAST_ACCESSED'):
- return
-
- threshold = timedelta(seconds=config.app_config.get('LAST_ACCESSED_UPDATE_THRESHOLD_S', 120))
- if (token_or_user.last_accessed is not None and
- datetime.utcnow() - token_or_user.last_accessed < threshold):
- # Skip updating, as we don't want to put undue pressure on the database.
- return
-
- model_class = token_or_user.__class__
- last_accessed = datetime.utcnow()
-
- try:
- (model_class
- .update(last_accessed=last_accessed)
- .where(model_class.id == token_or_user.id)
- .execute())
- token_or_user.last_accessed = last_accessed
- except ReadOnlyModeException:
- pass
- except PeeweeException as ex:
- # If there is any form of DB exception, only fail if strict logging is enabled.
- strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
- if strict_logging_disabled:
- data = {
- 'exception': ex,
- 'token_or_user': token_or_user.id,
- 'class': str(model_class),
- }
-
- logger.exception('update last_accessed for token/user failed', extra=data)
- else:
- raise
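
The removed reduce_as_tree helper above works around peewee's recursive SQL generation by unioning queries as a balanced tree rather than a linear chain. The sketch below illustrates the difference in nesting depth with a toy Node class standing in for a peewee query; its union_all() only tracks depth and is not the real peewee method.

class Node(object):
  """Toy stand-in for a peewee query; union_all() only tracks how deeply unions nest."""
  def __init__(self, depth=1):
    self.depth = depth

  def union_all(self, other):
    return Node(depth=max(self.depth, other.depth) + 1)


def reduce_linearly(nodes):
  result = nodes[0]
  for node in nodes[1:]:
    result = result.union_all(node)   # nesting grows by one per extra query
  return result


def reduce_as_tree(nodes):
  if len(nodes) == 1:
    return nodes[0]
  mid = len(nodes) // 2
  return reduce_as_tree(nodes[:mid]).union_all(reduce_as_tree(nodes[mid:]))


if __name__ == '__main__':
  queries = [Node() for _ in range(80)]
  print(reduce_linearly(queries).depth)   # 80: a chain this deep strains SQL generation
  print(reduce_as_tree(queries).depth)    # 8: the balanced tree keeps nesting logarithmic
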
diff --git a/data/model/appspecifictoken.py b/data/model/appspecifictoken.py
deleted file mode 100644
index c0ead9440..000000000
--- a/data/model/appspecifictoken.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import logging
-
-from datetime import datetime
-
-from active_migration import ActiveDataMigration, ERTMigrationFlags
-from data.database import AppSpecificAuthToken, User, random_string_generator
-from data.model import config
-from data.model._basequery import update_last_accessed
-from data.fields import DecryptedValue
-from util.timedeltastring import convert_to_timedelta
-from util.unicode import remove_unicode
-
-logger = logging.getLogger(__name__)
-
-TOKEN_NAME_PREFIX_LENGTH = 60
-MINIMUM_TOKEN_SUFFIX_LENGTH = 60
-
-
-def _default_expiration_duration():
- expiration_str = config.app_config.get('APP_SPECIFIC_TOKEN_EXPIRATION')
- return convert_to_timedelta(expiration_str) if expiration_str else None
-
-
-# Define a "unique" value so that callers can specify an expiration of None and *not* have it
-# use the default.
-_default_expiration_duration_opt = '__deo'
-
-def create_token(user, title, expiration=_default_expiration_duration_opt):
- """ Creates and returns an app specific token for the given user. If no expiration is specified
- (including `None`), then the default from config is used. """
- if expiration == _default_expiration_duration_opt:
- duration = _default_expiration_duration()
- expiration = duration + datetime.now() if duration else None
-
- token_code = random_string_generator(TOKEN_NAME_PREFIX_LENGTH + MINIMUM_TOKEN_SUFFIX_LENGTH)()
- token_name = token_code[:TOKEN_NAME_PREFIX_LENGTH]
- token_secret = token_code[TOKEN_NAME_PREFIX_LENGTH:]
-
- assert token_name
- assert token_secret
-
- # TODO(remove-unenc): Remove legacy handling.
- old_token_code = (token_code
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS)
- else None)
- return AppSpecificAuthToken.create(user=user,
- title=title,
- expiration=expiration,
- token_name=token_name,
- token_secret=DecryptedValue(token_secret),
- token_code=old_token_code)
-
-
-def list_tokens(user):
- """ Lists all tokens for the given user. """
- return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)
-
-
-def revoke_token(token):
- """ Revokes an app specific token by deleting it. """
- token.delete_instance()
-
-
-def revoke_token_by_uuid(uuid, owner):
- """ Revokes an app specific token by deleting it. """
- try:
- token = AppSpecificAuthToken.get(uuid=uuid, user=owner)
- except AppSpecificAuthToken.DoesNotExist:
- return None
-
- revoke_token(token)
- return token
-
-
-def get_expiring_tokens(user, soon):
- """ Returns all tokens owned by the given user that will be expiring "soon", where soon is defined
- by the soon parameter (a timedelta from now).
- """
- soon_datetime = datetime.now() + soon
- return (AppSpecificAuthToken
- .select()
- .where(AppSpecificAuthToken.user == user,
- AppSpecificAuthToken.expiration <= soon_datetime,
- AppSpecificAuthToken.expiration > datetime.now()))
-
-
-def gc_expired_tokens(expiration_window):
- """ Deletes all expired tokens outside of the expiration window. """
- (AppSpecificAuthToken
- .delete()
- .where(AppSpecificAuthToken.expiration < (datetime.now() - expiration_window))
- .execute())
-
-
-def get_token_by_uuid(uuid, owner=None):
- """ Looks up an unexpired app specific token with the given uuid. Returns it if found or
- None if none. If owner is specified, only tokens owned by the owner user will be
- returned.
- """
- try:
- query = (AppSpecificAuthToken
- .select()
- .where(AppSpecificAuthToken.uuid == uuid,
- ((AppSpecificAuthToken.expiration > datetime.now()) |
- (AppSpecificAuthToken.expiration >> None))))
- if owner is not None:
- query = query.where(AppSpecificAuthToken.user == owner)
-
- return query.get()
- except AppSpecificAuthToken.DoesNotExist:
- return None
-
-
-def access_valid_token(token_code):
- """ Looks up an unexpired app specific token with the given token code. If found, the token's
- last_accessed field is set to now and the token is returned. If not found, returns None.
- """
- token_code = remove_unicode(token_code)
-
- prefix = token_code[:TOKEN_NAME_PREFIX_LENGTH]
- if len(prefix) != TOKEN_NAME_PREFIX_LENGTH:
- return None
-
- suffix = token_code[TOKEN_NAME_PREFIX_LENGTH:]
-
- # Lookup the token by its prefix.
- try:
- token = (AppSpecificAuthToken
- .select(AppSpecificAuthToken, User)
- .join(User)
- .where(AppSpecificAuthToken.token_name == prefix,
- ((AppSpecificAuthToken.expiration > datetime.now()) |
- (AppSpecificAuthToken.expiration >> None)))
- .get())
-
- if not token.token_secret.matches(suffix):
- return None
-
- assert len(prefix) == TOKEN_NAME_PREFIX_LENGTH
- assert len(suffix) >= MINIMUM_TOKEN_SUFFIX_LENGTH
- update_last_accessed(token)
- return token
- except AppSpecificAuthToken.DoesNotExist:
- pass
-
- # TODO(remove-unenc): Remove legacy handling.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- try:
- token = (AppSpecificAuthToken
- .select(AppSpecificAuthToken, User)
- .join(User)
- .where(AppSpecificAuthToken.token_code == token_code,
- ((AppSpecificAuthToken.expiration > datetime.now()) |
- (AppSpecificAuthToken.expiration >> None)))
- .get())
-
- update_last_accessed(token)
- return token
- except AppSpecificAuthToken.DoesNotExist:
- return None
-
- return None
-
-
-def get_full_token_string(token):
- # TODO(remove-unenc): Remove legacy handling.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- if not token.token_name:
- return token.token_code
-
- assert token.token_name
- return '%s%s' % (token.token_name, token.token_secret.decrypt())
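
The deleted appspecifictoken module splits each token into a fixed-length name prefix, used for the database lookup, and a secret suffix that is verified against the stored (encrypted) value. The sketch below shows the same prefix/secret pattern with an in-memory dict and hashed secrets; the dict, hashing, and helper bodies are illustrative stand-ins, not Quay's DecryptedValue machinery.

import hashlib
import hmac
import os

TOKEN_NAME_PREFIX_LENGTH = 60
MINIMUM_TOKEN_SUFFIX_LENGTH = 60

_tokens_by_name = {}  # token_name -> sha256(token_secret)


def create_token():
  # Two concatenated sha256 hexdigests give a 128-character code: 60 for the name, 68 for the secret.
  code = (hashlib.sha256(os.urandom(64)).hexdigest() +
          hashlib.sha256(os.urandom(64)).hexdigest())
  token_name = code[:TOKEN_NAME_PREFIX_LENGTH]
  token_secret = code[TOKEN_NAME_PREFIX_LENGTH:]
  assert len(token_secret) >= MINIMUM_TOKEN_SUFFIX_LENGTH
  _tokens_by_name[token_name] = hashlib.sha256(token_secret.encode('utf-8')).hexdigest()
  return code


def access_valid_token(token_code):
  # Look up by the public prefix, then verify the secret suffix in constant time.
  prefix = token_code[:TOKEN_NAME_PREFIX_LENGTH]
  if len(prefix) != TOKEN_NAME_PREFIX_LENGTH:
    return None
  stored = _tokens_by_name.get(prefix)
  if stored is None:
    return None
  candidate = hashlib.sha256(token_code[TOKEN_NAME_PREFIX_LENGTH:].encode('utf-8')).hexdigest()
  return prefix if hmac.compare_digest(stored, candidate) else None
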
diff --git a/data/model/blob.py b/data/model/blob.py
index ac14891e8..4bad62584 100644
--- a/data/model/blob.py
+++ b/data/model/blob.py
@@ -1,237 +1,49 @@
-import logging
-
-from datetime import datetime
from uuid import uuid4
-from data.model import (tag, _basequery, BlobDoesNotExist, InvalidBlobUpload, db_transaction,
- storage as storage_model, InvalidImageException)
-from data.database import (Repository, Namespace, ImageStorage, Image, ImageStoragePlacement,
- BlobUpload, ImageStorageLocation, db_random_func)
-
-
-logger = logging.getLogger(__name__)
-
-
-def get_repository_blob_by_digest(repository, blob_digest):
- """ Find the content-addressable blob linked to the specified repository.
- """
- assert blob_digest
- try:
- storage = (ImageStorage
- .select(ImageStorage.uuid)
- .join(Image)
- .where(Image.repository == repository,
- ImageStorage.content_checksum == blob_digest,
- ImageStorage.uploading == False)
- .get())
-
- return storage_model.get_storage_by_uuid(storage.uuid)
- except (ImageStorage.DoesNotExist, InvalidImageException):
- raise BlobDoesNotExist('Blob does not exist with digest: {0}'.format(blob_digest))
+from data.model import tag, _basequery, BlobDoesNotExist, db_transaction
+from data.database import (Repository, Namespace, ImageStorage, Image, ImageStorageLocation,
+ ImageStoragePlacement)
def get_repo_blob_by_digest(namespace, repo_name, blob_digest):
""" Find the content-addressable blob linked to the specified repository.
"""
- assert blob_digest
- try:
- storage = (ImageStorage
- .select(ImageStorage.uuid)
- .join(Image)
- .join(Repository)
- .join(Namespace, on=(Namespace.id == Repository.namespace_user))
- .where(Repository.name == repo_name, Namespace.username == namespace,
- ImageStorage.content_checksum == blob_digest,
- ImageStorage.uploading == False)
- .get())
-
- return storage_model.get_storage_by_uuid(storage.uuid)
- except (ImageStorage.DoesNotExist, InvalidImageException):
+ placements = list(ImageStoragePlacement
+ .select(ImageStoragePlacement, ImageStorage, ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage)
+ .join(Image)
+ .join(Repository)
+ .join(Namespace)
+ .where(Repository.name == repo_name, Namespace.username == namespace,
+ ImageStorage.checksum == blob_digest))
+ if not placements:
raise BlobDoesNotExist('Blob does not exist with digest: {0}'.format(blob_digest))
+ found = placements[0].storage
+ found.locations = {placement.location.name for placement in placements}
-def store_blob_record_and_temp_link(namespace, repo_name, blob_digest, location_obj, byte_count,
- link_expiration_s, uncompressed_byte_count=None):
- repo = _basequery.get_existing_repository(namespace, repo_name)
- assert repo
+ return found
- return store_blob_record_and_temp_link_in_repo(repo.id, blob_digest, location_obj, byte_count,
- link_expiration_s, uncompressed_byte_count)
-
-
-def store_blob_record_and_temp_link_in_repo(repository_id, blob_digest, location_obj, byte_count,
- link_expiration_s, uncompressed_byte_count=None):
+def store_blob_record_and_temp_link(namespace, repo_name, blob_digest, location_name,
+ link_expiration_s):
""" Store a record of the blob and temporarily link it to the specified repository.
"""
- assert blob_digest
- assert byte_count is not None
-
- with db_transaction():
- try:
- storage = ImageStorage.get(content_checksum=blob_digest)
- save_changes = False
-
- if storage.image_size is None:
- storage.image_size = byte_count
- save_changes = True
-
- if storage.uncompressed_size is None and uncompressed_byte_count is not None:
- storage.uncompressed_size = uncompressed_byte_count
- save_changes = True
-
- if save_changes:
- storage.save()
-
- ImageStoragePlacement.get(storage=storage, location=location_obj)
- except ImageStorage.DoesNotExist:
- storage = ImageStorage.create(content_checksum=blob_digest, uploading=False,
- image_size=byte_count,
- uncompressed_size=uncompressed_byte_count)
- ImageStoragePlacement.create(storage=storage, location=location_obj)
- except ImageStoragePlacement.DoesNotExist:
- ImageStoragePlacement.create(storage=storage, location=location_obj)
-
- _temp_link_blob(repository_id, storage, link_expiration_s)
- return storage
-
-
-def temp_link_blob(repository_id, blob_digest, link_expiration_s):
- """ Temporarily links to the blob record from the given repository. If the blob record is not
- found, return None.
- """
- assert blob_digest
-
- with db_transaction():
- try:
- storage = ImageStorage.get(content_checksum=blob_digest)
- except ImageStorage.DoesNotExist:
- return None
-
- _temp_link_blob(repository_id, storage, link_expiration_s)
- return storage
-
-
-def _temp_link_blob(repository_id, storage, link_expiration_s):
- """ Note: Should *always* be called by a parent under a transaction. """
random_image_name = str(uuid4())
+  with db_transaction():
+ repo = _basequery.get_existing_repository(namespace, repo_name)
- # Create a temporary link into the repository, to be replaced by the v1 metadata later
- # and create a temporary tag to reference it
- image = Image.create(storage=storage, docker_image_id=random_image_name, repository=repository_id)
- tag.create_temporary_hidden_tag(repository_id, image, link_expiration_s)
-
-
-def get_stale_blob_upload(stale_timespan):
- """ Returns a random blob upload which was created before the stale timespan. """
- stale_threshold = datetime.now() - stale_timespan
-
- try:
- candidates = (BlobUpload
- .select()
- .where(BlobUpload.created <= stale_threshold)
- .limit(500)
- .distinct()
- .alias('candidates'))
-
- found = (BlobUpload
- .select(candidates.c.id)
- .from_(candidates)
- .order_by(db_random_func())
- .get())
- if not found:
- return None
-
- return (BlobUpload
- .select(BlobUpload, ImageStorageLocation)
- .join(ImageStorageLocation)
- .where(BlobUpload.id == found.id)
- .get())
- except BlobUpload.DoesNotExist:
- return None
-
-
-def get_blob_upload_by_uuid(upload_uuid):
- """ Loads the upload with the given UUID, if any. """
- try:
- return (BlobUpload
- .select()
- .where(BlobUpload.uuid == upload_uuid)
- .get())
- except BlobUpload.DoesNotExist:
- return None
-
-
-def get_blob_upload(namespace, repo_name, upload_uuid):
- """ Load the upload which is already in progress.
- """
- try:
- return (BlobUpload
- .select(BlobUpload, ImageStorageLocation)
- .join(ImageStorageLocation)
- .switch(BlobUpload)
- .join(Repository)
- .join(Namespace, on=(Namespace.id == Repository.namespace_user))
- .where(Repository.name == repo_name, Namespace.username == namespace,
- BlobUpload.uuid == upload_uuid)
- .get())
- except BlobUpload.DoesNotExist:
- raise InvalidBlobUpload()
-
-
-def initiate_upload(namespace, repo_name, uuid, location_name, storage_metadata):
- """ Initiates a blob upload for the repository with the given namespace and name,
- in a specific location. """
- repo = _basequery.get_existing_repository(namespace, repo_name)
- return initiate_upload_for_repo(repo, uuid, location_name, storage_metadata)
-
-
-def initiate_upload_for_repo(repo, uuid, location_name, storage_metadata):
- """ Initiates a blob upload for a specific repository object, in a specific location. """
- location = storage_model.get_image_location_for_name(location_name)
- return BlobUpload.create(repository=repo, location=location.id, uuid=uuid,
- storage_metadata=storage_metadata)
-
-
-def get_shared_blob(digest):
- """ Returns the ImageStorage blob with the given digest or, if not present,
- returns None. This method is *only* to be used for shared blobs that are
- globally accessible, such as the special empty gzipped tar layer that Docker
- no longer pushes to us.
- """
- assert digest
- try:
- return ImageStorage.get(content_checksum=digest, uploading=False)
- except ImageStorage.DoesNotExist:
- return None
-
-
-def get_or_create_shared_blob(digest, byte_data, storage):
- """ Returns the ImageStorage blob with the given digest or, if not present,
- adds a row and writes the given byte data to the storage engine.
- This method is *only* to be used for shared blobs that are globally
- accessible, such as the special empty gzipped tar layer that Docker
- no longer pushes to us.
- """
- assert digest
- assert byte_data is not None
- assert storage
-
- try:
- return ImageStorage.get(content_checksum=digest, uploading=False)
- except ImageStorage.DoesNotExist:
- record = ImageStorage.create(image_size=len(byte_data), content_checksum=digest,
- cas_path=True, uploading=True)
- preferred = storage.preferred_locations[0]
- location_obj = ImageStorageLocation.get(name=preferred)
try:
- storage.put_content([preferred], storage_model.get_layer_path(record), byte_data)
- ImageStoragePlacement.create(storage=record, location=location_obj)
+ storage = ImageStorage.get(checksum=blob_digest)
+ location = ImageStorageLocation.get(name=location_name)
+ ImageStoragePlacement.get(storage=storage, location=location)
+ except ImageStorage.DoesNotExist:
+ storage = ImageStorage.create(checksum=blob_digest)
+ except ImageStoragePlacement.DoesNotExist:
+ ImageStoragePlacement.create(storage=storage, location=location)
- record.uploading = False
- record.save()
- except:
- logger.exception('Exception when trying to write special layer %s', digest)
- record.delete_instance()
- raise
-
- return record
+ # Create a temporary link into the repository, to be replaced by the v1 metadata later
+ # and create a temporary tag to reference it
+ image = Image.create(storage=storage, docker_image_id=random_image_name, repository=repo)
+ tag.create_temporary_hidden_tag(repo, image, link_expiration_s)
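
The rewritten store_blob_record_and_temp_link deduplicates blobs by digest, records their storage locations, and links them to the repository only temporarily so that unreferenced uploads can be garbage collected later. The sketch below models that flow with plain dicts standing in for the ImageStorage, ImageStoragePlacement, and hidden-tag rows; the names and expiry handling are illustrative assumptions, not the real schema.

import time

_blobs = {}        # digest -> {'locations': set([...])}
_temp_links = []   # list of (repository, digest, expires_at)


def store_blob_record_and_temp_link(repository, digest, location_name, link_expiration_s):
  record = _blobs.setdefault(digest, {'locations': set()})  # content-addressed: dedupe by digest
  record['locations'].add(location_name)                    # placements behave like a set
  _temp_links.append((repository, digest, time.time() + link_expiration_s))
  return record


def gc_expired_links(now=None):
  """Drop links whose expiration has passed; blobs with no remaining links become collectable."""
  now = now if now is not None else time.time()
  remaining = [link for link in _temp_links if link[2] > now]
  removed = len(_temp_links) - len(remaining)
  _temp_links[:] = remaining
  return removed
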
diff --git a/data/model/build.py b/data/model/build.py
index 79e282509..70501c5dc 100644
--- a/data/model/build.py
+++ b/data/model/build.py
@@ -1,56 +1,32 @@
import json
+from peewee import JOIN_LEFT_OUTER
from datetime import timedelta, datetime
-from peewee import JOIN
-
-from active_migration import ActiveDataMigration, ERTMigrationFlags
from data.database import (BuildTriggerService, RepositoryBuildTrigger, Repository, Namespace, User,
- RepositoryBuild, BUILD_PHASE, db_random_func, UseThenDisconnect,
- TRIGGER_DISABLE_REASON)
+ RepositoryBuild, BUILD_PHASE, db_for_update)
from data.model import (InvalidBuildTriggerException, InvalidRepositoryBuildException,
- db_transaction, user as user_model, config)
-from data.fields import DecryptedValue
+ db_transaction, user as user_model)
PRESUMED_DEAD_BUILD_AGE = timedelta(days=15)
-PHASES_NOT_ALLOWED_TO_CANCEL_FROM = (BUILD_PHASE.PUSHING, BUILD_PHASE.COMPLETE,
- BUILD_PHASE.ERROR, BUILD_PHASE.INTERNAL_ERROR)
-
-ARCHIVABLE_BUILD_PHASES = [BUILD_PHASE.COMPLETE, BUILD_PHASE.ERROR, BUILD_PHASE.CANCELLED]
-def update_build_trigger(trigger, config, auth_token=None, write_token=None):
+def update_build_trigger(trigger, config, auth_token=None):
trigger.config = json.dumps(config or {})
-
- # TODO(remove-unenc): Remove legacy field.
if auth_token is not None:
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- trigger.auth_token = auth_token
-
- trigger.secure_auth_token = auth_token
-
- if write_token is not None:
- trigger.write_token = write_token
-
+ trigger.auth_token = auth_token
trigger.save()
def create_build_trigger(repo, service_name, auth_token, user, pull_robot=None, config=None):
+ config = config or {}
service = BuildTriggerService.get(name=service_name)
-
- # TODO(remove-unenc): Remove legacy field.
- old_auth_token = None
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- old_auth_token = auth_token
-
- secure_auth_token = DecryptedValue(auth_token) if auth_token else None
trigger = RepositoryBuildTrigger.create(repository=repo, service=service,
- auth_token=old_auth_token,
- secure_auth_token=secure_auth_token,
+ auth_token=auth_token,
connected_user=user,
pull_robot=pull_robot,
- config=json.dumps(config or {}))
+ config=json.dumps(config))
return trigger
@@ -63,7 +39,7 @@ def get_build_trigger(trigger_uuid):
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryBuildTrigger)
- .join(User, on=(RepositoryBuildTrigger.connected_user == User.id))
+ .join(User)
.where(RepositoryBuildTrigger.uuid == trigger_uuid)
.get())
except RepositoryBuildTrigger.DoesNotExist:
@@ -107,10 +83,10 @@ def _get_build_base_query():
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.switch(RepositoryBuild)
- .join(User, JOIN.LEFT_OUTER)
+ .join(User, JOIN_LEFT_OUTER)
.switch(RepositoryBuild)
- .join(RepositoryBuildTrigger, JOIN.LEFT_OUTER)
- .join(BuildTriggerService, JOIN.LEFT_OUTER)
+ .join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
+ .join(BuildTriggerService, JOIN_LEFT_OUTER)
.order_by(RepositoryBuild.started.desc()))
@@ -167,157 +143,31 @@ def get_pull_robot_name(trigger):
return trigger.pull_robot.username
-def _get_build_row(build_uuid):
- return RepositoryBuild.select().where(RepositoryBuild.uuid == build_uuid).get()
-
-
-def update_phase_then_close(build_uuid, phase):
- """ A function to change the phase of a build """
- with UseThenDisconnect(config.app_config):
+def cancel_repository_build(build, work_queue):
+ with db_transaction():
+ # Reload the build for update.
try:
- build = _get_build_row(build_uuid)
+ build = db_for_update(RepositoryBuild.select().where(RepositoryBuild.id == build.id)).get()
except RepositoryBuild.DoesNotExist:
return False
- # Can't update a cancelled build
- if build.phase == BUILD_PHASE.CANCELLED:
+ if build.phase != BUILD_PHASE.WAITING or not build.queue_id:
return False
- updated = (RepositoryBuild
- .update(phase=phase)
- .where(RepositoryBuild.id == build.id, RepositoryBuild.phase == build.phase)
- .execute())
-
- return updated > 0
-
-
-def create_cancel_build_in_queue(build_phase, build_queue_id, build_queue):
- """ A function to cancel a build before it leaves the queue """
-
- def cancel_build():
- cancelled = False
-
- if build_queue_id is not None:
- cancelled = build_queue.cancel(build_queue_id)
-
- if build_phase != BUILD_PHASE.WAITING:
+ # Try to cancel the queue item.
+ if not work_queue.cancel(build.queue_id):
return False
- return cancelled
-
- return cancel_build
+ # Delete the build row.
+ build.delete_instance()
+ return True
-def create_cancel_build_in_manager(build_phase, build_uuid, build_canceller):
- """ A function to cancel the build before it starts to push """
-
- def cancel_build():
- if build_phase in PHASES_NOT_ALLOWED_TO_CANCEL_FROM:
- return False
-
- return build_canceller.try_cancel_build(build_uuid)
-
- return cancel_build
-
-
-def cancel_repository_build(build, build_queue):
-  """ Tries to cancel the build; returns True if the request is successful and False
-      if it cannot be cancelled. """
- from app import build_canceller
- from buildman.jobutil.buildjob import BuildJobNotifier
-
- cancel_builds = [create_cancel_build_in_queue(build.phase, build.queue_id, build_queue),
- create_cancel_build_in_manager(build.phase, build.uuid, build_canceller), ]
- for cancelled in cancel_builds:
- if cancelled():
- updated = update_phase_then_close(build.uuid, BUILD_PHASE.CANCELLED)
- if updated:
- BuildJobNotifier(build.uuid).send_notification("build_cancelled")
-
- return updated
-
- return False
-
-
-def get_archivable_build():
+def archivable_buildlogs_query():
presumed_dead_date = datetime.utcnow() - PRESUMED_DEAD_BUILD_AGE
-
- candidates = (RepositoryBuild
- .select(RepositoryBuild.id)
- .where((RepositoryBuild.phase << ARCHIVABLE_BUILD_PHASES) |
- (RepositoryBuild.started < presumed_dead_date),
- RepositoryBuild.logs_archived == False)
- .limit(50)
- .alias('candidates'))
-
- try:
- found_id = (RepositoryBuild
- .select(candidates.c.id)
- .from_(candidates)
- .order_by(db_random_func())
- .get())
- return RepositoryBuild.get(id=found_id)
- except RepositoryBuild.DoesNotExist:
- return None
-
-
-def mark_build_archived(build_uuid):
- """ Mark a build as archived, and return True if we were the ones who actually
- updated the row. """
return (RepositoryBuild
- .update(logs_archived=True)
- .where(RepositoryBuild.uuid == build_uuid,
- RepositoryBuild.logs_archived == False)
- .execute()) > 0
-
-
-def toggle_build_trigger(trigger, enabled, reason=TRIGGER_DISABLE_REASON.USER_TOGGLED):
- """ Toggles the enabled status of a build trigger. """
- trigger.enabled = enabled
-
- if not enabled:
- trigger.disabled_reason = RepositoryBuildTrigger.disabled_reason.get_id(reason)
- trigger.disabled_datetime = datetime.utcnow()
-
- trigger.save()
-
-
-def update_trigger_disable_status(trigger, final_phase):
- """ Updates the disable status of the given build trigger. If the build trigger had a
- failure, then the counter is increased and, if we've reached the limit, the trigger is
-      automatically disabled. Otherwise, if the trigger succeeded, its counter is reset. This
- ensures that triggers that continue to error are eventually automatically disabled.
- """
- with db_transaction():
- try:
- trigger = RepositoryBuildTrigger.get(id=trigger.id)
- except RepositoryBuildTrigger.DoesNotExist:
- # Already deleted.
- return
-
- # If the build completed successfully, then reset the successive counters.
- if final_phase == BUILD_PHASE.COMPLETE:
- trigger.successive_failure_count = 0
- trigger.successive_internal_error_count = 0
- trigger.save()
- return
-
- # Otherwise, increment the counters and check for trigger disable.
- if final_phase == BUILD_PHASE.ERROR:
- trigger.successive_failure_count = trigger.successive_failure_count + 1
- trigger.successive_internal_error_count = 0
- elif final_phase == BUILD_PHASE.INTERNAL_ERROR:
- trigger.successive_internal_error_count = trigger.successive_internal_error_count + 1
-
- # Check if we need to disable the trigger.
- failure_threshold = config.app_config.get('SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD')
- error_threshold = config.app_config.get('SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD')
-
- if failure_threshold and trigger.successive_failure_count >= failure_threshold:
- toggle_build_trigger(trigger, False, TRIGGER_DISABLE_REASON.BUILD_FALURES)
- elif (error_threshold and
- trigger.successive_internal_error_count >= error_threshold):
- toggle_build_trigger(trigger, False, TRIGGER_DISABLE_REASON.INTERNAL_ERRORS)
- else:
- # Save the trigger changes.
- trigger.save()
+ .select()
+ .where((RepositoryBuild.phase == BUILD_PHASE.COMPLETE) |
+ (RepositoryBuild.phase == BUILD_PHASE.ERROR) |
+ (RepositoryBuild.started < presumed_dead_date),
+ RepositoryBuild.logs_archived == False))
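
The simplified cancel_repository_build above re-reads the build row under a row lock, re-checks that it is still in the WAITING phase, and only then cancels the queue item, so a build that has already been claimed is never cancelled. The sketch below mirrors that check-then-act pattern with an in-memory queue and a threading lock standing in for the database transaction and db_for_update; Build and WorkQueue here are illustrative stand-ins, not Quay classes.

import threading

PHASE_WAITING = 'waiting'
PHASE_CANCELLED = 'cancelled'


class Build(object):
  def __init__(self, queue_id):
    self.phase = PHASE_WAITING
    self.queue_id = queue_id


class WorkQueue(object):
  def __init__(self):
    self._items = set()
    self._lock = threading.Lock()

  def put(self, queue_id):
    with self._lock:
      self._items.add(queue_id)

  def cancel(self, queue_id):
    with self._lock:
      if queue_id in self._items:
        self._items.remove(queue_id)
        return True
      return False


def cancel_repository_build(build, work_queue, lock):
  with lock:                                     # stands in for the DB transaction + row lock
    if build.phase != PHASE_WAITING or not build.queue_id:
      return False                               # already running or never queued
    if not work_queue.cancel(build.queue_id):
      return False                               # item was claimed in the meantime
    build.phase = PHASE_CANCELLED
    return True
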
diff --git a/data/model/gc.py b/data/model/gc.py
deleted file mode 100644
index 7f898bec8..000000000
--- a/data/model/gc.py
+++ /dev/null
@@ -1,554 +0,0 @@
-import logging
-
-from data.model import config, db_transaction, storage, _basequery, tag as pre_oci_tag
-from data.model.oci import tag as oci_tag
-from data.database import Repository, db_for_update
-from data.database import ApprTag
-from data.database import (Tag, Manifest, ManifestBlob, ManifestChild, ManifestLegacyImage,
- ManifestLabel, Label, TagManifestLabel)
-from data.database import RepositoryTag, TagManifest, Image, DerivedStorageForImage
-from data.database import TagManifestToManifest, TagToRepositoryTag, TagManifestLabelMap
-
-logger = logging.getLogger(__name__)
-
-class _GarbageCollectorContext(object):
- def __init__(self, repository):
- self.repository = repository
- self.manifest_ids = set()
- self.label_ids = set()
- self.blob_ids = set()
- self.legacy_image_ids = set()
-
- def add_manifest_id(self, manifest_id):
- self.manifest_ids.add(manifest_id)
-
- def add_label_id(self, label_id):
- self.label_ids.add(label_id)
-
- def add_blob_id(self, blob_id):
- self.blob_ids.add(blob_id)
-
- def add_legacy_image_id(self, legacy_image_id):
- self.legacy_image_ids.add(legacy_image_id)
-
- def mark_label_id_removed(self, label_id):
- self.label_ids.remove(label_id)
-
- def mark_manifest_removed(self, manifest):
- self.manifest_ids.remove(manifest.id)
-
- def mark_legacy_image_removed(self, legacy_image):
- self.legacy_image_ids.remove(legacy_image.id)
-
- def mark_blob_id_removed(self, blob_id):
- self.blob_ids.remove(blob_id)
-
-
-def purge_repository(namespace_name, repository_name):
- """ Completely delete all traces of the repository. Will return True upon
- complete success, and False upon partial or total failure. Garbage
- collection is incremental and repeatable, so this return value does
- not need to be checked or responded to.
- """
- try:
- repo = _basequery.get_existing_repository(namespace_name, repository_name)
- except Repository.DoesNotExist:
- return False
-
- assert repo.name == repository_name
-
- # Delete the repository of all Appr-referenced entries.
-  # Note that new-model Tags must be deleted in *two* passes, as they can reference parent tags,
- # and MySQL is... particular... about such relationships when deleting.
- if repo.kind.name == 'application':
- ApprTag.delete().where(ApprTag.repository == repo, ~(ApprTag.linked_tag >> None)).execute()
- ApprTag.delete().where(ApprTag.repository == repo).execute()
- else:
- # GC to remove the images and storage.
- _purge_repository_contents(repo)
-
- # Ensure there are no additional tags, manifests, images or blobs in the repository.
- assert ApprTag.select().where(ApprTag.repository == repo).count() == 0
- assert Tag.select().where(Tag.repository == repo).count() == 0
- assert RepositoryTag.select().where(RepositoryTag.repository == repo).count() == 0
- assert Manifest.select().where(Manifest.repository == repo).count() == 0
- assert ManifestBlob.select().where(ManifestBlob.repository == repo).count() == 0
- assert Image.select().where(Image.repository == repo).count() == 0
-
- # Delete the rest of the repository metadata.
- try:
- # Make sure the repository still exists.
- fetched = _basequery.get_existing_repository(namespace_name, repository_name)
- except Repository.DoesNotExist:
- return False
-
- fetched.delete_instance(recursive=True, delete_nullable=False)
-
- # Run callbacks
- for callback in config.repo_cleanup_callbacks:
- callback(namespace_name, repository_name)
-
- return True
-
-
-def _chunk_iterate_for_deletion(query, chunk_size=10):
- """ Returns an iterator that loads the rows returned by the given query in chunks. Note that
- order is not guaranteed here, so this will only work (i.e. not return duplicates) if
- the rows returned are being deleted between calls.
- """
- while True:
- results = list(query.limit(chunk_size))
- if not results:
- raise StopIteration
-
- yield results
-
-
-def _purge_repository_contents(repo):
- """ Purges all the contents of a repository, removing all of its tags,
- manifests and images.
- """
- logger.debug('Purging repository %s', repo)
-
- # Purge via all the tags.
- while True:
- found = False
- for tags in _chunk_iterate_for_deletion(Tag.select().where(Tag.repository == repo)):
- logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
- found = True
- context = _GarbageCollectorContext(repo)
- for tag in tags:
- logger.debug('Deleting tag %s under repository %s', tag, repo)
- assert tag.repository_id == repo.id
- _purge_oci_tag(tag, context, allow_non_expired=True)
-
- _run_garbage_collection(context)
-
- if not found:
- break
-
- # TODO: remove this once we're fully on the OCI data model.
- while True:
- found = False
- repo_tag_query = RepositoryTag.select().where(RepositoryTag.repository == repo)
- for tags in _chunk_iterate_for_deletion(repo_tag_query):
- logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
- found = True
- context = _GarbageCollectorContext(repo)
-
- for tag in tags:
- logger.debug('Deleting tag %s under repository %s', tag, repo)
- assert tag.repository_id == repo.id
- _purge_pre_oci_tag(tag, context, allow_non_expired=True)
-
- _run_garbage_collection(context)
-
- if not found:
- break
-
- # Add all remaining images to a new context. We do this here to minimize the number of images
- # we need to load.
- while True:
- found_image = False
- image_context = _GarbageCollectorContext(repo)
- for image in Image.select().where(Image.repository == repo):
- found_image = True
- logger.debug('Deleting image %s under repository %s', image, repo)
- assert image.repository_id == repo.id
- image_context.add_legacy_image_id(image.id)
-
- _run_garbage_collection(image_context)
-
- if not found_image:
- break
-
-
-def garbage_collect_repo(repo):
- """ Performs garbage collection over the contents of a repository. """
- # Purge expired tags.
- had_changes = False
-
- for tags in _chunk_iterate_for_deletion(oci_tag.lookup_unrecoverable_tags(repo)):
- logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
- context = _GarbageCollectorContext(repo)
- for tag in tags:
- logger.debug('Deleting tag %s under repository %s', tag, repo)
- assert tag.repository_id == repo.id
- assert tag.lifetime_end_ms is not None
- _purge_oci_tag(tag, context)
-
- _run_garbage_collection(context)
- had_changes = True
-
- for tags in _chunk_iterate_for_deletion(pre_oci_tag.lookup_unrecoverable_tags(repo)):
- logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
- context = _GarbageCollectorContext(repo)
- for tag in tags:
- logger.debug('Deleting tag %s under repository %s', tag, repo)
- assert tag.repository_id == repo.id
- assert tag.lifetime_end_ts is not None
- _purge_pre_oci_tag(tag, context)
-
- _run_garbage_collection(context)
- had_changes = True
-
- return had_changes
-
-
-def _run_garbage_collection(context):
- """ Runs the garbage collection loop, deleting manifests, images, labels and blobs
- in an iterative fashion.
- """
- has_changes = True
-
- while has_changes:
- has_changes = False
-
- # GC all manifests encountered.
- for manifest_id in list(context.manifest_ids):
- if _garbage_collect_manifest(manifest_id, context):
- has_changes = True
-
- # GC all images encountered.
- for image_id in list(context.legacy_image_ids):
- if _garbage_collect_legacy_image(image_id, context):
- has_changes = True
-
- # GC all labels encountered.
- for label_id in list(context.label_ids):
- if _garbage_collect_label(label_id, context):
- has_changes = True
-
- # GC any blobs encountered.
- if context.blob_ids:
- storage_ids_removed = set(storage.garbage_collect_storage(context.blob_ids))
- for blob_removed_id in storage_ids_removed:
- context.mark_blob_id_removed(blob_removed_id)
- has_changes = True
-
-
-def _purge_oci_tag(tag, context, allow_non_expired=False):
- assert tag.repository_id == context.repository.id
-
- if not allow_non_expired:
- assert tag.lifetime_end_ms is not None
- assert tag.lifetime_end_ms <= oci_tag.get_epoch_timestamp_ms()
-
- # Add the manifest to be GCed.
- context.add_manifest_id(tag.manifest_id)
-
- with db_transaction():
- # Reload the tag and verify its lifetime_end_ms has not changed.
- try:
- reloaded_tag = db_for_update(Tag.select().where(Tag.id == tag.id)).get()
- except Tag.DoesNotExist:
- return False
-
- assert reloaded_tag.id == tag.id
- assert reloaded_tag.repository_id == context.repository.id
- if reloaded_tag.lifetime_end_ms != tag.lifetime_end_ms:
- return False
-
- # Delete mapping rows.
- TagToRepositoryTag.delete().where(TagToRepositoryTag.tag == tag).execute()
-
- # Delete the tag.
- tag.delete_instance()
-
-
-def _purge_pre_oci_tag(tag, context, allow_non_expired=False):
- assert tag.repository_id == context.repository.id
-
- if not allow_non_expired:
- assert tag.lifetime_end_ts is not None
- assert tag.lifetime_end_ts <= pre_oci_tag.get_epoch_timestamp()
-
- # If it exists, GC the tag manifest.
- try:
- tag_manifest = TagManifest.select().where(TagManifest.tag == tag).get()
- _garbage_collect_legacy_manifest(tag_manifest.id, context)
- except TagManifest.DoesNotExist:
- pass
-
- # Add the tag's legacy image to be GCed.
- context.add_legacy_image_id(tag.image_id)
-
- with db_transaction():
- # Reload the tag and verify its lifetime_end_ts has not changed.
- try:
- reloaded_tag = db_for_update(RepositoryTag.select().where(RepositoryTag.id == tag.id)).get()
- except RepositoryTag.DoesNotExist:
- return False
-
- assert reloaded_tag.id == tag.id
- assert reloaded_tag.repository_id == context.repository.id
- if reloaded_tag.lifetime_end_ts != tag.lifetime_end_ts:
- return False
-
- # Delete mapping rows.
- TagToRepositoryTag.delete().where(TagToRepositoryTag.repository_tag == reloaded_tag).execute()
-
- # Delete the tag.
- reloaded_tag.delete_instance()
-
-
-def _check_manifest_used(manifest_id):
- assert manifest_id is not None
-
- with db_transaction():
- # Check if the manifest is referenced by any other tag.
- try:
- Tag.select().where(Tag.manifest == manifest_id).get()
- return True
- except Tag.DoesNotExist:
- pass
-
- # Check if the manifest is referenced as a child of another manifest.
- try:
- ManifestChild.select().where(ManifestChild.child_manifest == manifest_id).get()
- return True
- except ManifestChild.DoesNotExist:
- pass
-
- return False
-
-
-def _garbage_collect_manifest(manifest_id, context):
- assert manifest_id is not None
-
- # Make sure the manifest isn't referenced.
- if _check_manifest_used(manifest_id):
- return False
-
- # Add the manifest's blobs to the context to be GCed.
- for manifest_blob in ManifestBlob.select().where(ManifestBlob.manifest == manifest_id):
- context.add_blob_id(manifest_blob.blob_id)
-
- # Retrieve the manifest's associated image, if any.
- try:
- legacy_image_id = ManifestLegacyImage.get(manifest=manifest_id).image_id
- context.add_legacy_image_id(legacy_image_id)
- except ManifestLegacyImage.DoesNotExist:
- legacy_image_id = None
-
- # Add child manifests to be GCed.
- for connector in ManifestChild.select().where(ManifestChild.manifest == manifest_id):
- context.add_manifest_id(connector.child_manifest_id)
-
- # Add the labels to be GCed.
- for manifest_label in ManifestLabel.select().where(ManifestLabel.manifest == manifest_id):
- context.add_label_id(manifest_label.label_id)
-
- # Delete the manifest.
- with db_transaction():
- try:
- manifest = Manifest.select().where(Manifest.id == manifest_id).get()
- except Manifest.DoesNotExist:
- return False
-
- assert manifest.id == manifest_id
- assert manifest.repository_id == context.repository.id
- if _check_manifest_used(manifest_id):
- return False
-
- # Delete any label mappings.
- (TagManifestLabelMap
- .delete()
- .where(TagManifestLabelMap.manifest == manifest_id)
- .execute())
-
- # Delete any mapping rows for the manifest.
- TagManifestToManifest.delete().where(TagManifestToManifest.manifest == manifest_id).execute()
-
- # Delete any label rows.
- ManifestLabel.delete().where(ManifestLabel.manifest == manifest_id,
- ManifestLabel.repository == context.repository).execute()
-
- # Delete any child manifest rows.
- ManifestChild.delete().where(ManifestChild.manifest == manifest_id,
- ManifestChild.repository == context.repository).execute()
-
- # Delete the manifest blobs for the manifest.
- ManifestBlob.delete().where(ManifestBlob.manifest == manifest_id,
- ManifestBlob.repository == context.repository).execute()
-
- # Delete the manifest legacy image row.
- if legacy_image_id:
- (ManifestLegacyImage
- .delete()
- .where(ManifestLegacyImage.manifest == manifest_id,
- ManifestLegacyImage.repository == context.repository)
- .execute())
-
- # Delete the manifest.
- manifest.delete_instance()
-
- context.mark_manifest_removed(manifest)
- return True
-
-
-def _garbage_collect_legacy_manifest(legacy_manifest_id, context):
- assert legacy_manifest_id is not None
-
- # Add the labels to be GCed.
- query = TagManifestLabel.select().where(TagManifestLabel.annotated == legacy_manifest_id)
- for manifest_label in query:
- context.add_label_id(manifest_label.label_id)
-
- # Delete the tag manifest.
- with db_transaction():
- try:
- tag_manifest = TagManifest.select().where(TagManifest.id == legacy_manifest_id).get()
- except TagManifest.DoesNotExist:
- return False
-
- assert tag_manifest.id == legacy_manifest_id
- assert tag_manifest.tag.repository_id == context.repository.id
-
- # Delete any label mapping rows.
- (TagManifestLabelMap
- .delete()
- .where(TagManifestLabelMap.tag_manifest == legacy_manifest_id)
- .execute())
-
- # Delete the label rows.
- TagManifestLabel.delete().where(TagManifestLabel.annotated == legacy_manifest_id).execute()
-
- # Delete the mapping row if it exists.
- try:
- tmt = (TagManifestToManifest
- .select()
- .where(TagManifestToManifest.tag_manifest == tag_manifest)
- .get())
- context.add_manifest_id(tmt.manifest_id)
- tmt.delete_instance()
- except TagManifestToManifest.DoesNotExist:
- pass
-
- # Delete the tag manifest.
- tag_manifest.delete_instance()
-
- return True
-
-
-def _check_image_used(legacy_image_id):
- assert legacy_image_id is not None
-
- with db_transaction():
- # Check if the image is referenced by a manifest.
- try:
- ManifestLegacyImage.select().where(ManifestLegacyImage.image == legacy_image_id).get()
- return True
- except ManifestLegacyImage.DoesNotExist:
- pass
-
- # Check if the image is referenced by a tag.
- try:
- RepositoryTag.select().where(RepositoryTag.image == legacy_image_id).get()
- return True
- except RepositoryTag.DoesNotExist:
- pass
-
- # Check if the image is referenced by another image.
- try:
- Image.select().where(Image.parent == legacy_image_id).get()
- return True
- except Image.DoesNotExist:
- pass
-
- return False
-
-
-def _garbage_collect_legacy_image(legacy_image_id, context):
- assert legacy_image_id is not None
-
- # Check if the image is referenced.
- if _check_image_used(legacy_image_id):
- return False
-
- # We have an unreferenced image. We can now delete it.
- # Grab any derived storage for the image.
- for derived in (DerivedStorageForImage
- .select()
- .where(DerivedStorageForImage.source_image == legacy_image_id)):
- context.add_blob_id(derived.derivative_id)
-
- try:
- image = Image.select().where(Image.id == legacy_image_id).get()
- except Image.DoesNotExist:
- return False
-
- assert image.repository_id == context.repository.id
-
- # Add the image's blob to be GCed.
- context.add_blob_id(image.storage_id)
-
- # If the image has a parent ID, add the parent for GC.
- if image.parent_id is not None:
- context.add_legacy_image_id(image.parent_id)
-
- # Delete the image.
- with db_transaction():
- if _check_image_used(legacy_image_id):
- return False
-
- try:
- image = Image.select().where(Image.id == legacy_image_id).get()
- except Image.DoesNotExist:
- return False
-
- assert image.id == legacy_image_id
- assert image.repository_id == context.repository.id
-
- # Delete any derived storage for the image.
- (DerivedStorageForImage
- .delete()
- .where(DerivedStorageForImage.source_image == legacy_image_id)
- .execute())
-
- # Delete the image itself.
- image.delete_instance()
-
- context.mark_legacy_image_removed(image)
-
- if config.image_cleanup_callbacks:
- for callback in config.image_cleanup_callbacks:
- callback([image])
-
- return True
-
-
-def _check_label_used(label_id):
- assert label_id is not None
-
- with db_transaction():
- # Check if the label is referenced by another manifest or tag manifest.
- try:
- ManifestLabel.select().where(ManifestLabel.label == label_id).get()
- return True
- except ManifestLabel.DoesNotExist:
- pass
-
- try:
- TagManifestLabel.select().where(TagManifestLabel.label == label_id).get()
- return True
- except TagManifestLabel.DoesNotExist:
- pass
-
- return False
-
-
-def _garbage_collect_label(label_id, context):
- assert label_id is not None
-
- # We can now delete the label.
- with db_transaction():
- if _check_label_used(label_id):
- return False
-
- result = Label.delete().where(Label.id == label_id).execute() == 1
-
- if result:
- context.mark_label_id_removed(label_id)
-
- return result
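
The deleted _run_garbage_collection helper loops until a pass makes no changes: deleting a manifest can make its blobs, labels, and legacy images collectable on the next pass, while anything still referenced is skipped. The sketch below captures that fixed-point loop with a plain reference map standing in for the foreign-key existence checks; the data structures are illustrative only.

def run_garbage_collection(candidates, references):
  """candidates: set of object ids to try to delete.
     references: dict mapping a live object id to the set of ids it keeps alive."""
  removed = set()
  has_changes = True
  while has_changes:
    has_changes = False
    for candidate in list(candidates):
      # Skip anything still referenced by a live holder.
      if any(candidate in deps for holder, deps in references.items() if holder not in removed):
        continue
      # "Delete" the object and queue whatever it was keeping alive for the next pass.
      removed.add(candidate)
      candidates.discard(candidate)
      candidates.update(references.pop(candidate, set()))
      has_changes = True
  return removed


# Example: removing a tag's manifest frees its image, but a blob shared with another
# manifest stays put.
# run_garbage_collection({'manifest1'},
#                        {'manifest1': {'blob1', 'image1'}, 'manifest2': {'blob1'}})
# -> {'manifest1', 'image1'}
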
diff --git a/data/model/health.py b/data/model/health.py
index b40cee025..dcef9022b 100644
--- a/data/model/health.py
+++ b/data/model/health.py
@@ -1,9 +1,12 @@
import logging
-from data.database import TeamRole, validate_database_url
+from data.database import TeamRole
+from util.config.validator import validate_database_url
+
logger = logging.getLogger(__name__)
+
def check_health(app_config):
# Attempt to connect to the database first. If the DB is not responding,
# using the validate_database_url will timeout quickly, as opposed to
@@ -11,12 +14,12 @@ def check_health(app_config):
# check).
try:
validate_database_url(app_config['DB_URI'], {}, connect_timeout=3)
- except Exception as ex:
- return (False, 'Could not connect to the database: %s' % ex.message)
+ except Exception:
+ logger.exception('Could not connect to the database')
+ return False
# We will connect to the db, check that it contains some team role kinds
try:
- okay = bool(list(TeamRole.select().limit(1)))
- return (okay, 'Could not connect to the database' if not okay else None)
- except Exception as ex:
- return (False, 'Could not connect to the database: %s' % ex.message)
+ return bool(list(TeamRole.select().limit(1)))
+ except:
+ return False
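
The removed check_health returned an (ok, message) pair so callers could report why the instance is unhealthy, while the added version collapses this to a bare boolean. A minimal sketch of the tuple-returning shape, with placeholder probe callables rather than the real database checks:

import logging

logger = logging.getLogger(__name__)


def _run_probe(name, probe):
  try:
    return probe()
  except Exception as ex:
    logger.exception('Health probe %s failed', name)
    return (False, '%s failed: %s' % (name, ex))


def check_health(probes):
  """probes: dict of name -> callable returning (ok, message)."""
  for name, probe in probes.items():
    ok, message = _run_probe(name, probe)
    if not ok:
      return (False, message)
  return (True, None)


# Example usage:
# check_health({'db': lambda: (True, None)})
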
diff --git a/data/model/image.py b/data/model/image.py
index 1c6f1b952..0da208b46 100644
--- a/data/model/image.py
+++ b/data/model/image.py
@@ -1,110 +1,39 @@
import logging
-import hashlib
-import json
-
-from collections import defaultdict
-from datetime import datetime
import dateutil.parser
-from peewee import JOIN, IntegrityError, fn
+from peewee import JOIN_LEFT_OUTER, fn
+from datetime import datetime
-from data.model import (DataModelException, db_transaction, _basequery, storage,
- InvalidImageException)
+from data.model import DataModelException, db_transaction, _basequery, storage
from data.database import (Image, Repository, ImageStoragePlacement, Namespace, ImageStorage,
- ImageStorageLocation, RepositoryPermission, DerivedStorageForImage,
- ImageStorageTransformation, User)
+ ImageStorageLocation, RepositoryPermission, db_for_update)
-from util.canonicaljson import canonicalize
logger = logging.getLogger(__name__)
-def _namespace_id_for_username(username):
- try:
- return User.get(username=username).id
- except User.DoesNotExist:
- return None
-
-
-def get_image_with_storage(docker_image_id, storage_uuid):
- """ Returns the image with the given docker image ID and storage uuid or None if none.
- """
- try:
- return (Image
- .select(Image, ImageStorage)
- .join(ImageStorage)
- .where(Image.docker_image_id == docker_image_id,
- ImageStorage.uuid == storage_uuid)
- .get())
- except Image.DoesNotExist:
- return None
-
def get_parent_images(namespace_name, repository_name, image_obj):
- """ Returns a list of parent Image objects starting with the most recent parent
- and ending with the base layer. The images in this query will include the storage.
- """
+  """ Returns a list of parent Image objects in chronological order. """
parents = image_obj.ancestors
   # Ancestors are in the format /<root id>/<next id>/.../<parent id>/, with each path section
# containing the database Id of the image row.
parent_db_ids = parents.strip('/').split('/')
+
if parent_db_ids == ['']:
return []
def filter_to_parents(query):
return query.where(Image.id << parent_db_ids)
- parents = _get_repository_images_and_storages(namespace_name, repository_name,
- filter_to_parents)
+ parents = get_repository_images_base(namespace_name, repository_name, filter_to_parents)
+
id_to_image = {unicode(image.id): image for image in parents}
- try:
- return [id_to_image[parent_id] for parent_id in reversed(parent_db_ids)]
- except KeyError as ke:
- logger.exception('Could not find an expected parent image for image %s', image_obj.id)
- raise DataModelException('Unknown parent image')
-
-def get_placements_for_images(images):
- """ Returns the placements for the given images, as a map from image storage ID to placements. """
- if not images:
- return {}
-
- query = (ImageStoragePlacement
- .select(ImageStoragePlacement, ImageStorageLocation, ImageStorage)
- .join(ImageStorageLocation)
- .switch(ImageStoragePlacement)
- .join(ImageStorage)
- .where(ImageStorage.id << [image.storage_id for image in images]))
-
- placement_map = defaultdict(list)
- for placement in query:
- placement_map[placement.storage.id].append(placement)
-
- return dict(placement_map)
-
-
-def get_image_and_placements(namespace_name, repo_name, docker_image_id):
- """ Returns the repo image (with a storage object) and storage placements for the image
-      or (None, None) if none found.
- """
- repo_image = get_repo_image_and_storage(namespace_name, repo_name, docker_image_id)
- if repo_image is None:
- return (None, None)
-
- query = (ImageStoragePlacement
- .select(ImageStoragePlacement, ImageStorageLocation)
- .join(ImageStorageLocation)
- .switch(ImageStoragePlacement)
- .join(ImageStorage)
- .where(ImageStorage.id == repo_image.storage_id))
-
- return repo_image, list(query)
+ return [id_to_image[parent_id] for parent_id in parent_db_ids]
def get_repo_image(namespace_name, repository_name, docker_image_id):
- """ Returns the repository image with the given Docker image ID or None if none.
- Does not include the storage object.
- """
def limit_to_image_id(query):
return query.where(Image.docker_image_id == docker_image_id).limit(1)
@@ -115,44 +44,17 @@ def get_repo_image(namespace_name, repository_name, docker_image_id):
return None
-def get_repo_image_and_storage(namespace_name, repository_name, docker_image_id):
- """ Returns the repository image with the given Docker image ID or None if none.
- Includes the storage object.
- """
+def get_repo_image_extended(namespace_name, repository_name, docker_image_id):
def limit_to_image_id(query):
- return query.where(Image.docker_image_id == docker_image_id)
+ return query.where(Image.docker_image_id == docker_image_id).limit(1)
- images = _get_repository_images_and_storages(namespace_name, repository_name, limit_to_image_id)
+ images = get_repository_images_base(namespace_name, repository_name, limit_to_image_id)
if not images:
return None
return images[0]
-def get_image_by_id(namespace_name, repository_name, docker_image_id):
- """ Returns the repository image with the given Docker image ID or raises if not found.
- Includes the storage object.
- """
- image = get_repo_image_and_storage(namespace_name, repository_name, docker_image_id)
- if not image:
- raise InvalidImageException('Unable to find image \'%s\' for repo \'%s/%s\'' %
- (docker_image_id, namespace_name, repository_name))
- return image
-
-
-def _get_repository_images_and_storages(namespace_name, repository_name, query_modifier):
- query = (Image
- .select(Image, ImageStorage)
- .join(ImageStorage)
- .switch(Image)
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(Repository.name == repository_name, Namespace.username == namespace_name))
-
- query = query_modifier(query)
- return query
-
-
def _get_repository_images(namespace_name, repository_name, query_modifier):
query = (Image
.select()
@@ -164,11 +66,55 @@ def _get_repository_images(namespace_name, repository_name, query_modifier):
return query
-def lookup_repository_images(repo, docker_image_ids):
+def get_repository_images_base(namespace_name, repository_name, query_modifier):
+ query = (ImageStoragePlacement
+ .select(ImageStoragePlacement, Image, ImageStorage, ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage, JOIN_LEFT_OUTER)
+ .join(Image)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.name == repository_name, Namespace.username == namespace_name))
+
+ query = query_modifier(query)
+
+ location_list = list(query)
+
+ images = {}
+ for location in location_list:
+ # Make sure we're always retrieving the same image object.
+ image = location.storage.image
+
+ # Set the storage to the one we got from the location, to prevent another query
+ image.storage = location.storage
+
+ if not image.id in images:
+ images[image.id] = image
+ image.storage.locations = set()
+ else:
+ image = images[image.id]
+
+ # Add the location to the image's locations set.
+ image.storage.locations.add(location.location.name)
+
+ return images.values()
+
+
+def lookup_repository_images(namespace_name, repository_name, docker_image_ids):
return (Image
- .select(Image, ImageStorage)
- .join(ImageStorage)
- .where(Image.repository == repo, Image.docker_image_id << docker_image_ids))
+ .select()
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ Image.docker_image_id << docker_image_ids))
+
+
+def get_matching_repository_images(namespace_name, repository_name, docker_image_ids):
+ def modify_query(query):
+ return query.where(Image.docker_image_id << docker_image_ids)
+
+ return get_repository_images_base(namespace_name, repository_name, modify_query)
def get_repository_images_without_placements(repo_obj, with_ancestor=None):
@@ -186,8 +132,15 @@ def get_repository_images_without_placements(repo_obj, with_ancestor=None):
def get_repository_images(namespace_name, repository_name):
- """ Returns all the repository images in the repository. Does not include storage objects. """
- return _get_repository_images(namespace_name, repository_name, lambda q: q)
+ return get_repository_images_base(namespace_name, repository_name, lambda q: q)
+
+
+def get_image_by_id(namespace_name, repository_name, docker_image_id):
+ image = get_repo_image_extended(namespace_name, repository_name, docker_image_id)
+ if not image:
+ raise DataModelException('Unable to find image \'%s\' for repo \'%s/%s\'' %
+ (docker_image_id, namespace_name, repository_name))
+ return image
def __translate_ancestry(old_ancestry, translations, repo_obj, username, preferred_location):
@@ -213,6 +166,8 @@ def __translate_ancestry(old_ancestry, translations, repo_obj, username, preferr
def _find_or_link_image(existing_image, repo_obj, username, translations, preferred_location):
+ # TODO(jake): This call is currently recursively done under a single transaction. Can we make
+  # it instead be done under a set of transactions?
with db_transaction():
# Check for an existing image, under the transaction, to make sure it doesn't already exist.
repo_image = get_repo_image(repo_obj.namespace_user.username, repo_obj.name,
@@ -231,23 +186,12 @@ def _find_or_link_image(existing_image, repo_obj, username, translations, prefer
username, preferred_location)
copied_storage = to_copy.storage
-
- translated_parent_id = None
- if new_image_ancestry != '/':
- translated_parent_id = int(new_image_ancestry.split('/')[-2])
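+ # Preload the set of placement locations onto the copied storage so callers of the
+ # newly linked image do not need to issue another placement query.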
+ copied_storage.locations = {placement.location.name
+ for placement in copied_storage.imagestorageplacement_set}
new_image = Image.create(docker_image_id=existing_image.docker_image_id,
- repository=repo_obj,
- storage=copied_storage,
- ancestors=new_image_ancestry,
- command=existing_image.command,
- created=existing_image.created,
- comment=existing_image.comment,
- v1_json_metadata=existing_image.v1_json_metadata,
- aggregate_size=existing_image.aggregate_size,
- parent=translated_parent_id,
- v1_checksum=existing_image.v1_checksum)
-
+ repository=repo_obj, storage=copied_storage,
+ ancestors=new_image_ancestry)
logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
translations[existing_image.id] = new_image.id
@@ -272,14 +216,13 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
.join(ImageStorage)
.switch(Image)
.join(Repository)
- .join(RepositoryPermission, JOIN.LEFT_OUTER)
+ .join(RepositoryPermission, JOIN_LEFT_OUTER)
.switch(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(ImageStorage.uploading == False,
Image.docker_image_id == docker_image_id))
- existing_image_query = _basequery.filter_to_repos_for_user(existing_image_query,
- _namespace_id_for_username(username))
+ existing_image_query = _basequery.filter_to_repos_for_user(existing_image_query, username)
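+ # filter_to_repos_for_user restricts the candidate images to repositories visible to the
+ # given user before we try to link against any of them.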
# If there is an existing image, we try to translate its ancestry and copy its storage.
new_image = None
@@ -304,7 +247,7 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
return repo_image
logger.debug('Creating new storage for docker id: %s', docker_image_id)
- new_storage = storage.create_v1_storage(preferred_location)
+ new_storage = storage.create_storage(preferred_location)
return Image.create(docker_image_id=docker_image_id,
repository=repo_obj, storage=new_storage,
@@ -312,205 +255,87 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
def set_image_metadata(docker_image_id, namespace_name, repository_name, created_date_str, comment,
- command, v1_json_metadata, parent=None):
- """ Sets metadata that is specific to how a binary piece of storage fits into the layer tree.
- """
+ command, parent=None):
with db_transaction():
+ query = (Image
+ .select(Image, ImageStorage)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Image)
+ .join(ImageStorage)
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ Image.docker_image_id == docker_image_id))
+
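+ # db_for_update is expected to fetch the row with a row-level lock (SELECT ... FOR UPDATE)
+ # on backends that support it, so the metadata update inside this transaction is not raced.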
try:
- fetched = (Image
- .select(Image, ImageStorage)
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .switch(Image)
- .join(ImageStorage)
- .where(Repository.name == repository_name, Namespace.username == namespace_name,
- Image.docker_image_id == docker_image_id)
- .get())
+ fetched = db_for_update(query).get()
except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository')
- fetched.created = datetime.now()
+ # We clean up any old checksum in case this is a retry after a failure
+ fetched.storage.checksum = None
+ fetched.storage.created = datetime.now()
+
if created_date_str is not None:
try:
- fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
+ fetched.storage.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
except:
# parse raises different exceptions, so we cannot use a specific kind of handler here.
pass
- # We cleanup any old checksum in case it's a retry after a fail
- fetched.v1_checksum = None
- fetched.comment = comment
- fetched.command = command
- fetched.v1_json_metadata = v1_json_metadata
+ fetched.storage.comment = comment
+ fetched.storage.command = command
if parent:
fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
- fetched.parent = parent
fetched.save()
+ fetched.storage.save()
return fetched
-def get_image(repo, docker_image_id):
+def set_image_size(docker_image_id, namespace_name, repository_name, image_size, uncompressed_size):
try:
- return (Image
- .select(Image, ImageStorage)
- .join(ImageStorage)
- .where(Image.docker_image_id == docker_image_id, Image.repository == repo)
- .get())
- except Image.DoesNotExist:
- return None
-
-
-def get_image_by_db_id(id):
- try:
- return Image.get(id=id)
- except Image.DoesNotExist:
- return None
-
-
-def synthesize_v1_image(repo, image_storage_id, storage_image_size, docker_image_id,
- created_date_str, comment, command, v1_json_metadata, parent_image=None):
- """ Find an existing image with this docker image id, and if none exists, write one with the
- specified metadata.
- """
- ancestors = '/'
- if parent_image is not None:
- ancestors = '{0}{1}/'.format(parent_image.ancestors, parent_image.id)
-
- created = None
- if created_date_str is not None:
- try:
- created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
- except:
- # parse raises different exceptions, so we cannot use a specific kind of handler here.
- pass
-
- # Get the aggregate size for the image.
- aggregate_size = _basequery.calculate_image_aggregate_size(ancestors, storage_image_size,
- parent_image)
-
- try:
- return Image.create(docker_image_id=docker_image_id, ancestors=ancestors, comment=comment,
- command=command, v1_json_metadata=v1_json_metadata, created=created,
- storage=image_storage_id, repository=repo, parent=parent_image,
- aggregate_size=aggregate_size)
- except IntegrityError:
- return Image.get(docker_image_id=docker_image_id, repository=repo)
-
-
-def ensure_image_locations(*names):
- with db_transaction():
- locations = ImageStorageLocation.select().where(ImageStorageLocation.name << names)
-
- insert_names = list(names)
-
- for location in locations:
- insert_names.remove(location.name)
-
- if not insert_names:
- return
-
- data = [{'name': name} for name in insert_names]
- ImageStorageLocation.insert_many(data).execute()
-
-
-def get_max_id_for_sec_scan():
- """ Gets the maximum id for a clair sec scan """
- return Image.select(fn.Max(Image.id)).scalar()
-
-
-def get_min_id_for_sec_scan(version):
- """ Gets the minimum id for a clair sec scan """
- return (Image
- .select(fn.Min(Image.id))
- .where(Image.security_indexed_engine < version)
- .scalar())
-
-
-def total_image_count():
- """ Returns the total number of images in DB """
- return Image.select().count()
-
-
-def get_image_pk_field():
- """ Returns the primary key for Image DB model """
- return Image.id
-
-
-def get_images_eligible_for_scan(clair_version):
- """ Returns a query that gives all images eligible for a clair scan """
- return (get_image_with_storage_and_parent_base()
- .where(Image.security_indexed_engine < clair_version)
- .where(ImageStorage.uploading == False))
-
-
-def get_image_with_storage_and_parent_base():
- Parent = Image.alias()
- ParentImageStorage = ImageStorage.alias()
-
- return (Image
- .select(Image, ImageStorage, Parent, ParentImageStorage)
- .join(ImageStorage)
- .switch(Image)
- .join(Parent, JOIN.LEFT_OUTER, on=(Image.parent == Parent.id))
- .join(ParentImageStorage, JOIN.LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage)))
-
-
-def set_secscan_status(image, indexed, version):
- return (Image
- .update(security_indexed=indexed, security_indexed_engine=version)
- .where(Image.id == image.id)
- .where((Image.security_indexed_engine != version) | (Image.security_indexed != indexed))
- .execute()) != 0
-
-
-def _get_uniqueness_hash(varying_metadata):
- if not varying_metadata:
- return None
-
- return hashlib.sha256(json.dumps(canonicalize(varying_metadata))).hexdigest()
-
-
-def find_or_create_derived_storage(source_image, transformation_name, preferred_location,
- varying_metadata=None):
- existing = find_derived_storage_for_image(source_image, transformation_name, varying_metadata)
- if existing is not None:
- return existing
-
- uniqueness_hash = _get_uniqueness_hash(varying_metadata)
- trans = ImageStorageTransformation.get(name=transformation_name)
- new_storage = storage.create_v1_storage(preferred_location)
-
- try:
- derived = DerivedStorageForImage.create(source_image=source_image, derivative=new_storage,
- transformation=trans, uniqueness_hash=uniqueness_hash)
- except IntegrityError:
- # Storage was created while this method executed. Just return the existing.
- ImageStoragePlacement.delete().where(ImageStoragePlacement.storage == new_storage).execute()
- new_storage.delete_instance()
- return find_derived_storage_for_image(source_image, transformation_name, varying_metadata)
-
- return derived
-
-
-def find_derived_storage_for_image(source_image, transformation_name, varying_metadata=None):
- uniqueness_hash = _get_uniqueness_hash(varying_metadata)
-
- try:
- found = (DerivedStorageForImage
- .select(ImageStorage, DerivedStorageForImage)
- .join(ImageStorage)
- .switch(DerivedStorageForImage)
- .join(ImageStorageTransformation)
- .where(DerivedStorageForImage.source_image == source_image,
- ImageStorageTransformation.name == transformation_name,
- DerivedStorageForImage.uniqueness_hash == uniqueness_hash)
+ image = (Image
+ .select(Image, ImageStorage)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Image)
+ .join(ImageStorage, JOIN_LEFT_OUTER)
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ Image.docker_image_id == docker_image_id)
.get())
- return found
- except DerivedStorageForImage.DoesNotExist:
+
+ except Image.DoesNotExist:
+ raise DataModelException('No image with specified id and repository')
+
+ image.storage.image_size = image_size
+ image.storage.uncompressed_size = uncompressed_size
+
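+ # ancestors is stored as '/id1/id2/.../', so stripping the empty first and last entries
+ # yields the list of ancestor image IDs.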
+ ancestors = image.ancestors.split('/')[1:-1]
+ if ancestors:
+ try:
+ # TODO(jschorr): Switch to this faster route once we have full ancestor aggregate_size
+ # parent_image = Image.get(Image.id == ancestors[-1])
+ # total_size = image_size + parent_image.storage.aggregate_size
+ total_size = (ImageStorage
+ .select(fn.Sum(ImageStorage.image_size))
+ .join(Image)
+ .where(Image.id << ancestors)
+ .scalar()) + image_size
+
+ image.storage.aggregate_size = total_size
+ except Image.DoesNotExist:
+ pass
+ else:
+ image.storage.aggregate_size = image_size
+
+ image.storage.save()
+
+ return image
+
+
+def get_image(repo, dockerfile_id):
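+ # Note: despite its name, dockerfile_id is matched against Image.docker_image_id below.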
+ try:
+ return Image.get(Image.docker_image_id == dockerfile_id, Image.repository == repo)
+ except Image.DoesNotExist:
return None
-
-
-def delete_derived_storage(derived_storage):
- derived_storage.derivative.delete_instance(recursive=True)
diff --git a/data/model/label.py b/data/model/label.py
deleted file mode 100644
index fce7479ba..000000000
--- a/data/model/label.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import logging
-
-from cachetools.func import lru_cache
-
-from data.database import (Label, TagManifestLabel, MediaType, LabelSourceType, db_transaction,
- ManifestLabel, TagManifestLabelMap, TagManifestToManifest)
-from data.model import InvalidLabelKeyException, InvalidMediaTypeException, DataModelException
-from data.text import prefix_search
-from util.validation import validate_label_key
-from util.validation import is_json
-
-logger = logging.getLogger(__name__)
-
-
-@lru_cache(maxsize=1)
-def get_label_source_types():
- source_type_map = {}
- for kind in LabelSourceType.select():
- source_type_map[kind.id] = kind.name
- source_type_map[kind.name] = kind.id
-
- return source_type_map
-
-
-@lru_cache(maxsize=1)
-def get_media_types():
- media_type_map = {}
- for kind in MediaType.select():
- media_type_map[kind.id] = kind.name
- media_type_map[kind.name] = kind.id
-
- return media_type_map
-
-
-def _get_label_source_type_id(name):
- kinds = get_label_source_types()
- return kinds[name]
-
-
-def _get_media_type_id(name):
- kinds = get_media_types()
- return kinds[name]
-
-
-def create_manifest_label(tag_manifest, key, value, source_type_name, media_type_name=None):
- """ Creates a new manifest label on a specific tag manifest. """
- if not key:
- raise InvalidLabelKeyException()
-
- # Note that we don't prevent invalid label names coming from the manifest to be stored, as Docker
- # does not currently prevent them from being put into said manifests.
- if not validate_label_key(key) and source_type_name != 'manifest':
- raise InvalidLabelKeyException()
-
- # Find the matching media type. If none specified, we infer.
- if media_type_name is None:
- media_type_name = 'text/plain'
- if is_json(value):
- media_type_name = 'application/json'
-
- media_type_id = _get_media_type_id(media_type_name)
- if media_type_id is None:
- raise InvalidMediaTypeException()
-
- source_type_id = _get_label_source_type_id(source_type_name)
-
- with db_transaction():
- label = Label.create(key=key, value=value, source_type=source_type_id, media_type=media_type_id)
- tag_manifest_label = TagManifestLabel.create(annotated=tag_manifest, label=label,
- repository=tag_manifest.tag.repository)
- try:
- mapping_row = TagManifestToManifest.get(tag_manifest=tag_manifest)
- if mapping_row.manifest:
- manifest_label = ManifestLabel.create(manifest=mapping_row.manifest, label=label,
- repository=tag_manifest.tag.repository)
- TagManifestLabelMap.create(manifest_label=manifest_label,
- tag_manifest_label=tag_manifest_label,
- label=label,
- manifest=mapping_row.manifest,
- tag_manifest=tag_manifest)
- except TagManifestToManifest.DoesNotExist:
- pass
-
- return label
-
-
-def list_manifest_labels(tag_manifest, prefix_filter=None):
- """ Lists all labels found on the given tag manifest. """
- query = (Label.select(Label, MediaType)
- .join(MediaType)
- .switch(Label)
- .join(LabelSourceType)
- .switch(Label)
- .join(TagManifestLabel)
- .where(TagManifestLabel.annotated == tag_manifest))
-
- if prefix_filter is not None:
- query = query.where(prefix_search(Label.key, prefix_filter))
-
- return query
-
-
-def get_manifest_label(label_uuid, tag_manifest):
- """ Retrieves the manifest label on the tag manifest with the given ID. """
- try:
- return (Label.select(Label, LabelSourceType)
- .join(LabelSourceType)
- .where(Label.uuid == label_uuid)
- .switch(Label)
- .join(TagManifestLabel)
- .where(TagManifestLabel.annotated == tag_manifest)
- .get())
- except Label.DoesNotExist:
- return None
-
-
-def delete_manifest_label(label_uuid, tag_manifest):
- """ Deletes the manifest label on the tag manifest with the given ID. """
-
- # Find the label itself.
- label = get_manifest_label(label_uuid, tag_manifest)
- if label is None:
- return None
-
- if not label.source_type.mutable:
- raise DataModelException('Cannot delete immutable label')
-
- # Delete the mapping records and label.
- (TagManifestLabelMap
- .delete()
- .where(TagManifestLabelMap.label == label)
- .execute())
-
- deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()
- if deleted_count != 1:
- logger.warning('More than a single label deleted for matching label %s', label_uuid)
-
- deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()
- if deleted_count != 1:
- logger.warning('More than a single label deleted for matching label %s', label_uuid)
-
- label.delete_instance(recursive=False)
- return label
diff --git a/data/model/log.py b/data/model/log.py
index e78ec4b1b..ad5713d6d 100644
--- a/data/model/log.py
+++ b/data/model/log.py
@@ -1,299 +1,125 @@
import json
-import logging
-from datetime import datetime, timedelta
-from calendar import timegm
-from cachetools.func import lru_cache
+from peewee import JOIN_LEFT_OUTER, SQL, fn
+from datetime import datetime, timedelta, date
+from cachetools import lru_cache
-from peewee import JOIN, fn, PeeweeException
+from data.database import LogEntry, LogEntryKind, User, db
-from data.database import LogEntryKind, User, RepositoryActionCount, db, LogEntry3
-from data.model import config, user, DataModelException
-
-logger = logging.getLogger(__name__)
-
-ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING = ['pull_repo']
-
-
-def _logs_query(selections, start_time=None, end_time=None, performer=None, repository=None,
- namespace=None, ignore=None, model=LogEntry3, id_range=None):
- """ Returns a query for selecting logs from the table, with various options and filters. """
- assert (start_time is not None and end_time is not None) or (id_range is not None)
- joined = (model.select(*selections).switch(model))
-
- if id_range is not None:
- joined = joined.where(model.id >= id_range[0], model.id <= id_range[1])
- else:
- joined = joined.where(model.datetime >= start_time, model.datetime < end_time)
+def _logs_query(selections, start_time, end_time, performer=None, repository=None, namespace=None):
+ joined = (LogEntry
+ .select(*selections)
+ .switch(LogEntry)
+ .where(LogEntry.datetime >= start_time, LogEntry.datetime < end_time))
if repository:
- joined = joined.where(model.repository == repository)
+ joined = joined.where(LogEntry.repository == repository)
if performer:
- joined = joined.where(model.performer == performer)
+ joined = joined.where(LogEntry.performer == performer)
- if namespace and not repository:
- namespace_user = user.get_user_or_org(namespace)
- if namespace_user is None:
- raise DataModelException('Invalid namespace requested')
-
- joined = joined.where(model.account == namespace_user.id)
-
- if ignore:
- kind_map = get_log_entry_kinds()
- ignore_ids = [kind_map[kind_name] for kind_name in ignore]
- joined = joined.where(~(model.kind << ignore_ids))
+ if namespace:
+ joined = joined.join(User).where(User.username == namespace)
return joined
-def _latest_logs_query(selections, performer=None, repository=None, namespace=None, ignore=None,
- model=LogEntry3, size=None):
- """ Returns a query for selecting the latest logs from the table, with various options and
- filters. """
- query = (model.select(*selections).switch(model))
-
- if repository:
- query = query.where(model.repository == repository)
-
- if performer:
- query = query.where(model.repository == repository)
-
- if namespace and not repository:
- namespace_user = user.get_user_or_org(namespace)
- if namespace_user is None:
- raise DataModelException('Invalid namespace requested')
-
- query = query.where(model.account == namespace_user.id)
-
- if ignore:
- kind_map = get_log_entry_kinds()
- ignore_ids = [kind_map[kind_name] for kind_name in ignore]
- query = query.where(~(model.kind << ignore_ids))
-
- query = query.order_by(model.datetime.desc(), model.id)
-
- if size:
- query = query.limit(size)
-
- return query
-
-
@lru_cache(maxsize=1)
def get_log_entry_kinds():
kind_map = {}
for kind in LogEntryKind.select():
kind_map[kind.id] = kind.name
- kind_map[kind.name] = kind.id
return kind_map
-def _get_log_entry_kind(name):
- kinds = get_log_entry_kinds()
- return kinds[name]
+def get_aggregated_logs(start_time, end_time, performer=None, repository=None, namespace=None):
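+ """ Returns the count of matching log entries, grouped by kind and by calendar day. """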
+ date = db.extract_date('day', LogEntry.datetime)
+ selections = [LogEntry.kind, date.alias('day'), fn.Count(LogEntry.id).alias('count')]
+ query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
+ return query.group_by(date, LogEntry.kind)
-def get_aggregated_logs(start_time, end_time, performer=None, repository=None, namespace=None,
- ignore=None, model=LogEntry3):
- """ Returns the count of logs, by kind and day, for the logs matching the given filters. """
- date = db.extract_date('day', model.datetime)
- selections = [model.kind, date.alias('day'), fn.Count(model.id).alias('count')]
- query = _logs_query(selections, start_time, end_time, performer, repository, namespace, ignore,
- model=model)
- return query.group_by(date, model.kind)
+def list_logs(start_time, end_time, performer=None, repository=None, namespace=None, page=None,
+ count=None):
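+ """ Lists the matching log entries, newest first, with the performing user joined in;
+ results are paginated when both page and count are supplied. """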
-
-def get_logs_query(start_time=None, end_time=None, performer=None, repository=None, namespace=None,
- ignore=None, model=LogEntry3, id_range=None):
- """ Returns the logs matching the given filters. """
Performer = User.alias()
- Account = User.alias()
- selections = [model, Performer]
+ selections = [LogEntry, Performer]
- if namespace is None and repository is None:
- selections.append(Account)
+ query = _logs_query(selections, start_time, end_time, performer, repository, namespace)
+ query = (query.switch(LogEntry)
+ .join(Performer, JOIN_LEFT_OUTER,
+ on=(LogEntry.performer == Performer.id).alias('performer')))
- query = _logs_query(selections, start_time, end_time, performer, repository, namespace, ignore,
- model=model, id_range=id_range)
- query = (query.switch(model).join(Performer, JOIN.LEFT_OUTER,
- on=(model.performer == Performer.id).alias('performer')))
+ if page and count:
+ query = query.paginate(page, count)
- if namespace is None and repository is None:
- query = (query.switch(model).join(Account, JOIN.LEFT_OUTER,
- on=(model.account == Account.id).alias('account')))
-
- return query
+ return list(query.order_by(LogEntry.datetime.desc()))
-def get_latest_logs_query(performer=None, repository=None, namespace=None, ignore=None,
- model=LogEntry3, size=None):
- """ Returns the latest logs matching the given filters. """
- Performer = User.alias()
- Account = User.alias()
- selections = [model, Performer]
-
- if namespace is None and repository is None:
- selections.append(Account)
-
- query = _latest_logs_query(selections, performer, repository, namespace, ignore, model=model,
- size=size)
- query = (query.switch(model).join(Performer, JOIN.LEFT_OUTER,
- on=(model.performer == Performer.id).alias('performer')))
-
- if namespace is None and repository is None:
- query = (query.switch(model).join(Account, JOIN.LEFT_OUTER,
- on=(model.account == Account.id).alias('account')))
-
- return query
-
-
-def _json_serialize(obj):
- if isinstance(obj, datetime):
- return timegm(obj.utctimetuple())
-
- return obj
-
-
-def log_action(kind_name, user_or_organization_name, performer=None, repository=None, ip=None,
- metadata={}, timestamp=None):
- """ Logs an entry in the LogEntry table. """
+def log_action(kind_name, user_or_organization_name, performer=None, repository=None,
+ ip=None, metadata={}, timestamp=None):
if not timestamp:
timestamp = datetime.today()
- account = None
- if user_or_organization_name is not None:
- account = User.get(User.username == user_or_organization_name).id
- else:
- account = config.app_config.get('SERVICE_LOG_ACCOUNT_ID')
- if account is None:
- account = user.get_minimum_user_id()
-
- if performer is not None:
- performer = performer.id
-
- if repository is not None:
- repository = repository.id
-
- kind = _get_log_entry_kind(kind_name)
- metadata_json = json.dumps(metadata, default=_json_serialize)
- log_data = {
- 'kind': kind,
- 'account': account,
- 'performer': performer,
- 'repository': repository,
- 'ip': ip,
- 'metadata_json': metadata_json,
- 'datetime': timestamp
- }
-
- try:
- LogEntry3.create(**log_data)
- except PeeweeException as ex:
- strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
- if strict_logging_disabled and kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING:
- logger.exception('log_action failed', extra=({'exception': ex}).update(log_data))
- else:
- raise
+ kind = LogEntryKind.get(LogEntryKind.name == kind_name)
+ account = User.get(User.username == user_or_organization_name)
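+ # Note: both lookups raise DoesNotExist if the kind or namespace is unknown; callers are
+ # expected to pass valid names.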
+ LogEntry.create(kind=kind, account=account, performer=performer,
+ repository=repository, ip=ip, metadata_json=json.dumps(metadata),
+ datetime=timestamp)
-def get_stale_logs_start_id(model):
- """ Gets the oldest log entry. """
- try:
- return (model.select(fn.Min(model.id)).tuples())[0][0]
- except IndexError:
- return None
-
-
-def get_stale_logs(start_id, end_id, model, cutoff_date):
- """ Returns all the logs with IDs between start_id and end_id inclusively. """
- return model.select().where((model.id >= start_id),
- (model.id <= end_id),
- model.datetime <= cutoff_date)
-
-
-def delete_stale_logs(start_id, end_id, model):
- """ Deletes all the logs with IDs between start_id and end_id. """
- model.delete().where((model.id >= start_id), (model.id <= end_id)).execute()
-
-
-def get_repository_action_counts(repo, start_date):
- """ Returns the daily aggregated action counts for the given repository, starting at the given
- start date.
+def _get_repository_events(repository, time_delta, time_delta_earlier, clause):
+ """ Returns a pair representing the count of the number of events for the given
+ repository in each of the specified time deltas. The date ranges are calculated by
+ taking the current time today and subtracting the time delta given. Since
+ we want to grab *two* ranges, we restrict the second range to be greater
+ than the first (i.e. referring to an earlier time), so we can conduct the
+ lookup in a single query. The clause is used to further filter the kind of
+ events being found.
"""
- return RepositoryActionCount.select().where(RepositoryActionCount.repository == repo,
- RepositoryActionCount.date >= start_date)
+ since = date.today() - time_delta
+ since_earlier = date.today() - time_delta_earlier
+
+ if since_earlier >= since:
+ raise ValueError('time_delta_earlier must be greater than time_delta')
+
+ # This uses a CASE WHEN inner clause to further filter the count.
+ formatted = since.strftime('%Y-%m-%d')
+ case_query = 'CASE WHEN datetime >= \'%s\' THEN 1 ELSE 0 END' % formatted
+
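+ # Summing the CASE expression counts events since `since`, while COUNT(*) counts all events
+ # since `since_earlier`, so both windows come back from a single query.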
+ result = (LogEntry
+ .select(fn.Sum(SQL(case_query)), fn.Count(SQL('*')))
+ .where(LogEntry.repository == repository)
+ .where(clause)
+ .where(LogEntry.datetime >= since_earlier)
+ .tuples()
+ .get())
+
+ return (int(result[0]) if result[0] else 0, int(result[1]) if result[1] else 0)
-def get_repositories_action_sums(repository_ids):
- """ Returns a map from repository ID to total actions within that repository in the last week. """
- if not repository_ids:
- return {}
-
- # Filter the join to recent entries only.
- last_week = datetime.now() - timedelta(weeks=1)
- tuples = (RepositoryActionCount.select(RepositoryActionCount.repository,
- fn.Sum(RepositoryActionCount.count))
- .where(RepositoryActionCount.repository << repository_ids)
- .where(RepositoryActionCount.date >= last_week)
- .group_by(RepositoryActionCount.repository).tuples())
-
- action_count_map = {}
- for record in tuples:
- action_count_map[record[0]] = record[1]
-
- return action_count_map
+def get_repository_pushes(repository, time_delta, time_delta_earlier):
+ push_repo = LogEntryKind.get(name='push_repo')
+ clauses = (LogEntry.kind == push_repo)
+ return _get_repository_events(repository, time_delta, time_delta_earlier, clauses)
-def get_minimum_id_for_logs(start_time, repository_id=None, namespace_id=None, model=LogEntry3):
- """ Returns the minimum ID for logs matching the given repository or namespace in
- the logs table, starting at the given start time.
- """
- # First try bounded by a day. Most repositories will meet this criteria, and therefore
- # can make a much faster query.
- day_after = start_time + timedelta(days=1)
- result = _get_bounded_id(fn.Min, model.datetime >= start_time,
- repository_id, namespace_id, model.datetime < day_after, model=model)
- if result is not None:
- return result
-
- return _get_bounded_id(fn.Min, model.datetime >= start_time, repository_id, namespace_id,
- model=model)
+def get_repository_pulls(repository, time_delta, time_delta_earlier):
+ repo_pull = LogEntryKind.get(name='pull_repo')
+ repo_verb = LogEntryKind.get(name='repo_verb')
+ clauses = ((LogEntry.kind == repo_pull) | (LogEntry.kind == repo_verb))
+ return _get_repository_events(repository, time_delta, time_delta_earlier, clauses)
-def get_maximum_id_for_logs(end_time, repository_id=None, namespace_id=None, model=LogEntry3):
- """ Returns the maximum ID for logs matching the given repository or namespace in
- the logs table, ending at the given end time.
- """
- # First try bounded by a day. Most repositories will meet this criteria, and therefore
- # can make a much faster query.
- day_before = end_time - timedelta(days=1)
- result = _get_bounded_id(fn.Max, model.datetime <= end_time,
- repository_id, namespace_id, model.datetime > day_before, model=model)
- if result is not None:
- return result
-
- return _get_bounded_id(fn.Max, model.datetime <= end_time, repository_id, namespace_id,
- model=model)
-
-
-def _get_bounded_id(fn, filter_clause, repository_id, namespace_id, reduction_clause=None,
- model=LogEntry3):
- assert (namespace_id is not None) or (repository_id is not None)
- query = (model
- .select(fn(model.id))
- .where(filter_clause))
-
- if reduction_clause is not None:
- query = query.where(reduction_clause)
-
- if repository_id is not None:
- query = query.where(model.repository == repository_id)
- else:
- query = query.where(model.account == namespace_id)
-
- row = query.tuples()[0]
- if not row:
- return None
-
- return row[0]
+def get_repository_usage():
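+ """ Returns a usage count built from pull and repo_verb log entries over the last four
+ weeks, grouped by (ip, repository). """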
+ one_month_ago = date.today() - timedelta(weeks=4)
+ repo_pull = LogEntryKind.get(name='pull_repo')
+ repo_verb = LogEntryKind.get(name='repo_verb')
+ return (LogEntry
+ .select(LogEntry.ip, LogEntry.repository)
+ .where((LogEntry.kind == repo_pull) | (LogEntry.kind == repo_verb))
+ .where(~(LogEntry.repository >> None))
+ .where(LogEntry.datetime >= one_month_ago)
+ .group_by(LogEntry.ip, LogEntry.repository)
+ .count())
diff --git a/data/model/message.py b/data/model/message.py
deleted file mode 100644
index 24df4d0ba..000000000
--- a/data/model/message.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from data.database import Messages, MediaType
-
-
-def get_messages():
- """Query the data base for messages and returns a container of database message objects"""
- return Messages.select(Messages, MediaType).join(MediaType)
-
-def create(messages):
- """Insert messages into the database."""
- inserted = []
- for message in messages:
- severity = message['severity']
- media_type_name = message['media_type']
- media_type = MediaType.get(name=media_type_name)
-
- inserted.append(Messages.create(content=message['content'], media_type=media_type,
- severity=severity))
- return inserted
-
-def delete_message(uuids):
- """Delete message from the database"""
- if not uuids:
- return
- Messages.delete().where(Messages.uuid << uuids).execute()
diff --git a/data/model/modelutil.py b/data/model/modelutil.py
deleted file mode 100644
index 4048e4eff..000000000
--- a/data/model/modelutil.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import dateutil.parser
-
-from datetime import datetime
-
-from peewee import SQL
-
-
-def paginate(query, model, descending=False, page_token=None, limit=50, sort_field_alias=None,
- max_page=None, sort_field_name=None):
- """ Paginates the given query using an field range, starting at the optional page_token.
- Returns a *list* of matching results along with an unencrypted page_token for the
- next page, if any. If descending is set to True, orders by the field descending rather
- than ascending.
- """
- # Note: We use the sort_field_alias for the order_by, but not the where below. The alias is
- # necessary for certain queries that use unions in MySQL, as it gets confused on which field
- # to order by. The where clause, on the other hand, cannot use the alias because Postgres does
- # not allow aliases in where clauses.
- sort_field_name = sort_field_name or 'id'
- sort_field = getattr(model, sort_field_name)
-
- if sort_field_alias is not None:
- sort_field_name = sort_field_alias
- sort_field = SQL(sort_field_alias)
-
- if descending:
- query = query.order_by(sort_field.desc())
- else:
- query = query.order_by(sort_field)
-
- start_index = pagination_start(page_token)
- if start_index is not None:
- if descending:
- query = query.where(sort_field <= start_index)
- else:
- query = query.where(sort_field >= start_index)
-
- query = query.limit(limit + 1)
-
- page_number = (page_token.get('page_number') or None) if page_token else None
- if page_number is not None and max_page is not None and page_number > max_page:
- return [], None
-
- return paginate_query(query, limit=limit, sort_field_name=sort_field_name,
- page_number=page_number)
-
-
-def pagination_start(page_token=None):
- """ Returns the start index for pagination for the given page token. Will return None if None. """
- if page_token is not None:
- start_index = page_token.get('start_index')
- if page_token.get('is_datetime'):
- start_index = dateutil.parser.parse(start_index)
- return start_index
- return None
-
-
-def paginate_query(query, limit=50, sort_field_name=None, page_number=None):
- """ Executes the given query and returns a page's worth of results, as well as the page token
- for the next page (if any).
- """
- results = list(query)
- page_token = None
- if len(results) > limit:
- start_index = getattr(results[limit], sort_field_name or 'id')
- is_datetime = False
- if isinstance(start_index, datetime):
- start_index = start_index.isoformat() + "Z"
- is_datetime = True
-
- page_token = {
- 'start_index': start_index,
- 'page_number': page_number + 1 if page_number else 1,
- 'is_datetime': is_datetime,
- }
-
- return results[0:limit], page_token
diff --git a/data/model/notification.py b/data/model/notification.py
index 11a84fea7..b894a1cf7 100644
--- a/data/model/notification.py
+++ b/data/model/notification.py
@@ -1,24 +1,23 @@
import json
-from peewee import SQL
+from peewee import JOIN_LEFT_OUTER
+from data.model import InvalidNotificationException, db_transaction
from data.database import (Notification, NotificationKind, User, Team, TeamMember, TeamRole,
RepositoryNotification, ExternalNotificationEvent, Repository,
- ExternalNotificationMethod, Namespace, db_for_update)
-from data.model import InvalidNotificationException, db_transaction
+ ExternalNotificationMethod, Namespace)
-def create_notification(kind_name, target, metadata={}, lookup_path=None):
+def create_notification(kind_name, target, metadata={}):
kind_ref = NotificationKind.get(name=kind_name)
notification = Notification.create(kind=kind_ref, target=target,
- metadata_json=json.dumps(metadata),
- lookup_path=lookup_path)
+ metadata_json=json.dumps(metadata))
return notification
def create_unique_notification(kind_name, target, metadata={}):
with db_transaction():
- if list_notifications(target, kind_name).count() == 0:
+ if list_notifications(target, kind_name, limit=1).count() == 0:
create_notification(kind_name, target, metadata)
@@ -30,69 +29,47 @@ def lookup_notification(user, uuid):
return results[0]
-def lookup_notifications_by_path_prefix(prefix):
- return list((Notification
- .select()
- .where(Notification.lookup_path % prefix)))
-
-
def list_notifications(user, kind_name=None, id_filter=None, include_dismissed=False,
page=None, limit=None):
-
- base_query = (Notification
- .select(Notification.id,
- Notification.uuid,
- Notification.kind,
- Notification.metadata_json,
- Notification.dismissed,
- Notification.lookup_path,
- Notification.created,
- Notification.created.alias('cd'),
- Notification.target)
- .join(NotificationKind))
-
- if kind_name is not None:
- base_query = base_query.where(NotificationKind.name == kind_name)
-
- if id_filter is not None:
- base_query = base_query.where(Notification.uuid == id_filter)
-
- if not include_dismissed:
- base_query = base_query.where(Notification.dismissed == False)
-
- # Lookup directly for the user.
- user_direct = base_query.clone().where(Notification.target == user)
-
- # Lookup via organizations admined by the user.
Org = User.alias()
AdminTeam = Team.alias()
AdminTeamMember = TeamMember.alias()
AdminUser = User.alias()
- via_orgs = (base_query.clone()
- .join(Org, on=(Org.id == Notification.target))
- .join(AdminTeam, on=(Org.id == AdminTeam.organization))
- .join(TeamRole, on=(AdminTeam.role == TeamRole.id))
- .switch(AdminTeam)
- .join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
- .join(AdminUser, on=(AdminTeamMember.user == AdminUser.id))
- .where((AdminUser.id == user) & (TeamRole.name == 'admin')))
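+ # A single query covers notifications targeted directly at the user as well as those
+ # targeted at organizations where the user belongs to an 'admin'-role team.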
+ query = (Notification.select()
+ .join(User)
+ .switch(Notification)
+ .join(Org, JOIN_LEFT_OUTER, on=(Org.id == Notification.target))
+ .join(AdminTeam, JOIN_LEFT_OUTER, on=(Org.id == AdminTeam.organization))
+ .join(TeamRole, JOIN_LEFT_OUTER, on=(AdminTeam.role == TeamRole.id))
+ .switch(AdminTeam)
+ .join(AdminTeamMember, JOIN_LEFT_OUTER, on=(AdminTeam.id == AdminTeamMember.team))
+ .join(AdminUser, JOIN_LEFT_OUTER, on=(AdminTeamMember.user == AdminUser.id))
+ .where((Notification.target == user) |
+ ((AdminUser.id == user) & (TeamRole.name == 'admin')))
+ .order_by(Notification.created)
+ .desc())
- query = user_direct | via_orgs
+ if not include_dismissed:
+ query = query.switch(Notification).where(Notification.dismissed == False)
+
+ if kind_name:
+ query = (query
+ .switch(Notification)
+ .join(NotificationKind)
+ .where(NotificationKind.name == kind_name))
+
+ if id_filter:
+ query = (query
+ .switch(Notification)
+ .where(Notification.uuid == id_filter))
if page:
query = query.paginate(page, limit)
elif limit:
query = query.limit(limit)
- return query.order_by(SQL('cd desc'))
-
-
-def delete_all_notifications_by_path_prefix(prefix):
- (Notification
- .delete()
- .where(Notification.lookup_path ** (prefix + '%'))
- .execute())
+ return query
def delete_all_notifications_by_kind(kind_name):
@@ -113,10 +90,9 @@ def delete_matching_notifications(target, kind_name, **kwargs):
kind_ref = NotificationKind.get(name=kind_name)
# Load all notifications for the user with the given kind.
- notifications = (Notification
- .select()
- .where(Notification.target == target,
- Notification.kind == kind_ref))
+ notifications = Notification.select().where(
+ Notification.target == target,
+ Notification.kind == kind_ref)
# For each, match the metadata to the specified values.
for notification in notifications:
@@ -137,69 +113,31 @@ def delete_matching_notifications(target, kind_name, **kwargs):
notification.delete_instance()
-def increment_notification_failure_count(uuid):
- """ This increments the number of failures by one """
- (RepositoryNotification
- .update(number_of_failures=RepositoryNotification.number_of_failures + 1)
- .where(RepositoryNotification.uuid == uuid)
- .execute())
-
-
-def reset_notification_number_of_failures(namespace_name, repository_name, uuid):
- """ This resets the number of failures for a repo notification to 0 """
- try:
- notification = RepositoryNotification.select().where(RepositoryNotification.uuid == uuid).get()
- if (notification.repository.namespace_user.username != namespace_name or
- notification.repository.name != repository_name):
- raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
- reset_number_of_failures_to_zero(notification.id)
- return notification
- except RepositoryNotification.DoesNotExist:
- return None
-
-
-def reset_number_of_failures_to_zero(notification_id):
- """ This resets the number of failures for a repo notification to 0 """
- RepositoryNotification.update(number_of_failures=0).where(RepositoryNotification.id == notification_id).execute()
-
-
-def create_repo_notification(repo, event_name, method_name, method_config, event_config, title=None):
+def create_repo_notification(repo, event_name, method_name, config):
event = ExternalNotificationEvent.get(ExternalNotificationEvent.name == event_name)
method = ExternalNotificationMethod.get(ExternalNotificationMethod.name == method_name)
return RepositoryNotification.create(repository=repo, event=event, method=method,
- config_json=json.dumps(method_config), title=title,
- event_config_json=json.dumps(event_config))
-
-
-def _base_get_notification(uuid):
- """ This is a base query for get statements """
- return (RepositoryNotification
- .select(RepositoryNotification, Repository, Namespace)
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(RepositoryNotification.uuid == uuid))
-
-
-def get_enabled_notification(uuid):
- """ This returns a notification with less than 3 failures """
- try:
- return _base_get_notification(uuid).where(RepositoryNotification.number_of_failures < 3).get()
- except RepositoryNotification.DoesNotExist:
- raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+ config_json=json.dumps(config))
def get_repo_notification(uuid):
try:
- return _base_get_notification(uuid).get()
+ return (RepositoryNotification
+ .select(RepositoryNotification, Repository, Namespace)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryNotification.uuid == uuid)
+ .get())
except RepositoryNotification.DoesNotExist:
- raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+ raise InvalidNotificationException('No repository notification found with id: %s' % uuid)
def delete_repo_notification(namespace_name, repository_name, uuid):
found = get_repo_notification(uuid)
- if found.repository.namespace_user.username != namespace_name or found.repository.name != repository_name:
- raise InvalidNotificationException('No repository notifiation found with uuid: %s' % uuid)
+ if (found.repository.namespace_user.username != namespace_name or
+ found.repository.name != repository_name):
+ raise InvalidNotificationException('No repository notification found with id: %s' % uuid)
found.delete_instance()
return found
diff --git a/data/model/oauth.py b/data/model/oauth.py
index 182c08f32..8c3fb5624 100644
--- a/data/model/oauth.py
+++ b/data/model/oauth.py
@@ -6,21 +6,14 @@ from datetime import datetime, timedelta
from oauth2lib.provider import AuthorizationProvider
from oauth2lib import utils
-from active_migration import ActiveDataMigration, ERTMigrationFlags
from data.database import (OAuthApplication, OAuthAuthorizationCode, OAuthAccessToken, User,
- random_string_generator)
-from data.fields import DecryptedValue, Credential
-from data.model import user, config
+ AccessToken, random_string_generator)
+from data.model import user
from auth import scopes
-from util import get_app_url
logger = logging.getLogger(__name__)
-ACCESS_TOKEN_PREFIX_LENGTH = 20
-ACCESS_TOKEN_MINIMUM_CODE_LENGTH = 20
-AUTHORIZATION_CODE_PREFIX_LENGTH = 20
-
class DatabaseAuthorizationProvider(AuthorizationProvider):
def get_authorized_user(self):
@@ -46,23 +39,13 @@ class DatabaseAuthorizationProvider(AuthorizationProvider):
def validate_client_secret(self, client_id, client_secret):
try:
- application = OAuthApplication.get(client_id=client_id)
-
- # TODO(remove-unenc): Remove legacy check.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- if application.secure_client_secret is None:
- return application.client_secret == client_secret
-
- assert application.secure_client_secret is not None
- return application.secure_client_secret.matches(client_secret)
+ OAuthApplication.get(client_id=client_id, client_secret=client_secret)
+ return True
except OAuthApplication.DoesNotExist:
return False
def validate_redirect_uri(self, client_id, redirect_uri):
- internal_redirect_url = '%s%s' % (get_app_url(config.app_config),
- url_for('web.oauth_local_handler'))
-
- if redirect_uri == internal_redirect_url:
+ if redirect_uri == url_for('web.oauth_local_handler', _external=True):
return True
try:
@@ -100,88 +83,66 @@ class DatabaseAuthorizationProvider(AuthorizationProvider):
# Make sure the token contains the given scopes (at least).
return scopes.is_subset_string(long_scope_string, scope)
- def from_authorization_code(self, client_id, full_code, scope):
- code_name = full_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
- code_credential = full_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
-
+ def from_authorization_code(self, client_id, code, scope):
try:
found = (OAuthAuthorizationCode
.select()
.join(OAuthApplication)
- .where(OAuthApplication.client_id == client_id,
- OAuthAuthorizationCode.code_name == code_name,
+ .where(OAuthApplication.client_id == client_id, OAuthAuthorizationCode.code == code,
OAuthAuthorizationCode.scope == scope)
.get())
- if not found.code_credential.matches(code_credential):
- return None
-
logger.debug('Returning data: %s', found.data)
return found.data
except OAuthAuthorizationCode.DoesNotExist:
- # Fallback to the legacy lookup of the full code.
- # TODO(remove-unenc): Remove legacy fallback.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- try:
- found = (OAuthAuthorizationCode
- .select()
- .join(OAuthApplication)
- .where(OAuthApplication.client_id == client_id,
- OAuthAuthorizationCode.code == full_code,
- OAuthAuthorizationCode.scope == scope)
- .get())
- logger.debug('Returning data: %s', found.data)
- return found.data
- except OAuthAuthorizationCode.DoesNotExist:
- return None
- else:
- return None
+ return None
- def persist_authorization_code(self, client_id, full_code, scope):
+ def from_refresh_token(self, client_id, refresh_token, scope):
+ try:
+ found = (OAuthAccessToken
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id,
+ OAuthAccessToken.refresh_token == refresh_token,
+ OAuthAccessToken.scope == scope)
+ .get())
+ return found.data
+ except OAuthAccessToken.DoesNotExist:
+ return None
+
+ def persist_authorization_code(self, client_id, code, scope):
oauth_app = OAuthApplication.get(client_id=client_id)
data = self._generate_data_string()
+ OAuthAuthorizationCode.create(application=oauth_app, code=code, scope=scope, data=data)
- assert len(full_code) >= (AUTHORIZATION_CODE_PREFIX_LENGTH * 2)
- code_name = full_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
- code_credential = full_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
-
- # TODO(remove-unenc): Remove legacy fallback.
- full_code = None
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- full_code = code_name + code_credential
-
- OAuthAuthorizationCode.create(application=oauth_app,
- code=full_code,
- scope=scope,
- code_name=code_name,
- code_credential=Credential.from_string(code_credential),
- data=data)
-
- def persist_token_information(self, client_id, scope, access_token, token_type,
- expires_in, refresh_token, data):
- assert not refresh_token
+ def persist_token_information(self, client_id, scope, access_token, token_type, expires_in,
+ refresh_token, data):
found = user.get_user(json.loads(data)['username'])
if not found:
raise RuntimeError('Username must be in the data field')
- token_name = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
- token_code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
-
- assert token_name
- assert token_code
- assert len(token_name) == ACCESS_TOKEN_PREFIX_LENGTH
- assert len(token_code) >= ACCESS_TOKEN_MINIMUM_CODE_LENGTH
-
oauth_app = OAuthApplication.get(client_id=client_id)
expires_at = datetime.utcnow() + timedelta(seconds=expires_in)
- OAuthAccessToken.create(application=oauth_app,
- authorized_user=found,
- scope=scope,
- token_name=token_name,
- token_code=Credential.from_string(token_code),
- access_token='',
- token_type=token_type,
- expires_at=expires_at,
- data=data)
+ OAuthAccessToken.create(application=oauth_app, authorized_user=found, scope=scope,
+ access_token=access_token, token_type=token_type,
+ expires_at=expires_at, refresh_token=refresh_token, data=data)
+
+ def discard_authorization_code(self, client_id, code):
+ found = (OAuthAuthorizationCode
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id, OAuthAuthorizationCode.code == code)
+ .get())
+ found.delete_instance()
+
+ def discard_refresh_token(self, client_id, refresh_token):
+ found = (OAuthAccessToken
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id,
+ OAuthAccessToken.refresh_token == refresh_token)
+ .get())
+ found.delete_instance()
+
def get_auth_denied_response(self, response_type, client_id, redirect_uri, **params):
# Ensure proper response_type
@@ -196,29 +157,29 @@ class DatabaseAuthorizationProvider(AuthorizationProvider):
return self._make_redirect_error_response(redirect_uri, 'authorization_denied')
+
def get_token_response(self, response_type, client_id, redirect_uri, **params):
# Ensure proper response_type
if response_type != 'token':
err = 'unsupported_response_type'
return self._make_redirect_error_response(redirect_uri, err)
- # Check for a valid client ID.
- is_valid_client_id = self.validate_client_id(client_id)
- if not is_valid_client_id:
- err = 'unauthorized_client'
- return self._make_redirect_error_response(redirect_uri, err)
-
- # Check for a valid redirect URI.
+ # Check redirect URI
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
if not is_valid_redirect_uri:
return self._invalid_redirect_uri_response()
# Check conditions
+ is_valid_client_id = self.validate_client_id(client_id)
is_valid_access = self.validate_access()
scope = params.get('scope', '')
are_valid_scopes = self.validate_scope(client_id, scope)
# Return proper error responses on invalid conditions
+ if not is_valid_client_id:
+ err = 'unauthorized_client'
+ return self._make_redirect_error_response(redirect_uri, err)
+
if not is_valid_access:
err = 'access_denied'
return self._make_redirect_error_response(redirect_uri, err)
@@ -227,119 +188,37 @@ class DatabaseAuthorizationProvider(AuthorizationProvider):
err = 'invalid_scope'
return self._make_redirect_error_response(redirect_uri, err)
- # Make sure we have enough random data in the token to have a public
- # prefix and a private encrypted suffix.
- access_token = str(self.generate_access_token())
- assert len(access_token) - ACCESS_TOKEN_PREFIX_LENGTH >= 20
-
+ access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
+ refresh_token = None # No refresh token for this kind of flow
data = self._generate_data_string()
- self.persist_token_information(client_id=client_id,
- scope=scope,
- access_token=access_token,
- token_type=token_type,
- expires_in=expires_in,
- refresh_token=None,
- data=data)
+ self.persist_token_information(client_id=client_id, scope=scope, access_token=access_token,
+ token_type=token_type, expires_in=expires_in,
+ refresh_token=refresh_token, data=data)
url = utils.build_url(redirect_uri, params)
url += '#access_token=%s&token_type=%s&expires_in=%s' % (access_token, token_type, expires_in)
return self._make_response(headers={'Location': url}, status_code=302)
- def from_refresh_token(self, client_id, refresh_token, scope):
- raise NotImplementedError()
-
- def discard_authorization_code(self, client_id, full_code):
- code_name = full_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
- try:
- found = (OAuthAuthorizationCode
- .select()
- .join(OAuthApplication)
- .where(OAuthApplication.client_id == client_id,
- OAuthAuthorizationCode.code_name == code_name)
- .get())
- found.delete_instance()
- return
- except OAuthAuthorizationCode.DoesNotExist:
- pass
-
- # Legacy: full code.
- # TODO(remove-unenc): Remove legacy fallback.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- try:
- found = (OAuthAuthorizationCode
- .select()
- .join(OAuthApplication)
- .where(OAuthApplication.client_id == client_id,
- OAuthAuthorizationCode.code == full_code)
- .get())
- found.delete_instance()
- except OAuthAuthorizationCode.DoesNotExist:
- pass
-
- def discard_refresh_token(self, client_id, refresh_token):
- raise NotImplementedError()
-
def create_application(org, name, application_uri, redirect_uri, **kwargs):
- client_secret = kwargs.pop('client_secret', random_string_generator(length=40)())
-
- # TODO(remove-unenc): Remove legacy field.
- old_client_secret = None
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- old_client_secret = client_secret
-
- return OAuthApplication.create(organization=org,
- name=name,
- application_uri=application_uri,
- redirect_uri=redirect_uri,
- client_secret=old_client_secret,
- secure_client_secret=DecryptedValue(client_secret),
- **kwargs)
+ return OAuthApplication.create(organization=org, name=name, application_uri=application_uri,
+ redirect_uri=redirect_uri, **kwargs)
def validate_access_token(access_token):
- assert isinstance(access_token, basestring)
- token_name = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
- if not token_name:
- return None
-
- token_code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
- if not token_code:
- return None
-
try:
found = (OAuthAccessToken
.select(OAuthAccessToken, User)
.join(User)
- .where(OAuthAccessToken.token_name == token_name)
+ .where(OAuthAccessToken.access_token == access_token)
.get())
-
- if found.token_code is None or not found.token_code.matches(token_code):
- return None
-
return found
except OAuthAccessToken.DoesNotExist:
- pass
-
- # Legacy lookup.
- # TODO(remove-unenc): Remove this once migrated.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- try:
- assert access_token
- found = (OAuthAccessToken
- .select(OAuthAccessToken, User)
- .join(User)
- .where(OAuthAccessToken.access_token == access_token)
- .get())
- return found
- except OAuthAccessToken.DoesNotExist:
- return None
-
- return None
+ return None
def get_application_for_client_id(client_id):
@@ -350,13 +229,7 @@ def get_application_for_client_id(client_id):
def reset_client_secret(application):
- client_secret = random_string_generator(length=40)()
-
- # TODO(remove-unenc): Remove legacy field.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- application.client_secret = client_secret
-
- application.secure_client_secret = DecryptedValue(client_secret)
+ application.client_secret = random_string_generator(length=40)()
application.save()
return application
@@ -377,13 +250,6 @@ def delete_application(org, client_id):
return application
-def lookup_access_token_by_uuid(token_uuid):
- try:
- return OAuthAccessToken.get(OAuthAccessToken.uuid == token_uuid)
- except OAuthAccessToken.DoesNotExist:
- return None
-
-
def lookup_access_token_for_user(user_obj, token_uuid):
try:
return OAuthAccessToken.get(OAuthAccessToken.authorized_user == user_obj,
@@ -412,23 +278,9 @@ def list_applications_for_org(org):
return query
-def create_access_token_for_testing(user_obj, client_id, scope, access_token=None, expires_in=9000):
- access_token = access_token or random_string_generator(length=40)()
- token_name = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
- token_code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
-
- assert len(token_name) == ACCESS_TOKEN_PREFIX_LENGTH
- assert len(token_code) >= ACCESS_TOKEN_MINIMUM_CODE_LENGTH
-
- expires_at = datetime.utcnow() + timedelta(seconds=expires_in)
+def create_access_token_for_testing(user_obj, client_id, scope):
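+ """ Creates a placeholder OAuth access token (fixed 'test' token value) for use in tests. """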
+ expires_at = datetime.utcnow() + timedelta(seconds=10000)
application = get_application_for_client_id(client_id)
- created = OAuthAccessToken.create(application=application,
- authorized_user=user_obj,
- scope=scope,
- token_type='token',
- access_token='',
- token_code=Credential.from_string(token_code),
- token_name=token_name,
- expires_at=expires_at,
- data='')
- return created, access_token
+ OAuthAccessToken.create(application=application, authorized_user=user_obj, scope=scope,
+ token_type='token', access_token='test',
+ expires_at=expires_at, refresh_token='', data='')
diff --git a/data/model/oci/__init__.py b/data/model/oci/__init__.py
deleted file mode 100644
index 39bcef2eb..000000000
--- a/data/model/oci/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# There MUST NOT be any circular dependencies between these subsections. If there are fix it by
-# moving the minimal number of things to shared
-from data.model.oci import (
- blob,
- label,
- manifest,
- shared,
- tag,
-)
diff --git a/data/model/oci/blob.py b/data/model/oci/blob.py
deleted file mode 100644
index f7739c21b..000000000
--- a/data/model/oci/blob.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from data.database import ImageStorage, ManifestBlob
-from data.model import BlobDoesNotExist
-from data.model.storage import get_storage_by_uuid, InvalidImageException
-from data.model.blob import get_repository_blob_by_digest as legacy_get
-
-def get_repository_blob_by_digest(repository, blob_digest):
- """ Find the content-addressable blob linked to the specified repository and
- returns it or None if none.
- """
- try:
- storage = (ImageStorage
- .select(ImageStorage.uuid)
- .join(ManifestBlob)
- .where(ManifestBlob.repository == repository,
- ImageStorage.content_checksum == blob_digest,
- ImageStorage.uploading == False)
- .get())
-
- return get_storage_by_uuid(storage.uuid)
- except (ImageStorage.DoesNotExist, InvalidImageException):
- # TODO: Remove once we are no longer using the legacy tables.
- # Try the legacy call.
- try:
- return legacy_get(repository, blob_digest)
- except BlobDoesNotExist:
- return None
diff --git a/data/model/oci/label.py b/data/model/oci/label.py
deleted file mode 100644
index d019e6d2d..000000000
--- a/data/model/oci/label.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import logging
-
-
-from data.model import InvalidLabelKeyException, InvalidMediaTypeException, DataModelException
-from data.database import (Label, Manifest, TagManifestLabel, MediaType, LabelSourceType,
- db_transaction, ManifestLabel, TagManifestLabelMap,
- TagManifestToManifest, Repository, TagManifest)
-from data.text import prefix_search
-from util.validation import validate_label_key
-from util.validation import is_json
-
-logger = logging.getLogger(__name__)
-
-def list_manifest_labels(manifest_id, prefix_filter=None):
- """ Lists all labels found on the given manifest, with an optional filter by key prefix. """
- query = (Label
- .select(Label, MediaType)
- .join(MediaType)
- .switch(Label)
- .join(LabelSourceType)
- .switch(Label)
- .join(ManifestLabel)
- .where(ManifestLabel.manifest == manifest_id))
-
- if prefix_filter is not None:
- query = query.where(prefix_search(Label.key, prefix_filter))
-
- return query
-
-
-def get_manifest_label(label_uuid, manifest):
- """ Retrieves the manifest label on the manifest with the given UUID or None if none. """
- try:
- return (Label
- .select(Label, LabelSourceType)
- .join(LabelSourceType)
- .where(Label.uuid == label_uuid)
- .switch(Label)
- .join(ManifestLabel)
- .where(ManifestLabel.manifest == manifest)
- .get())
- except Label.DoesNotExist:
- return None
-
-
-def create_manifest_label(manifest_id, key, value, source_type_name, media_type_name=None,
- adjust_old_model=True):
- """ Creates a new manifest label on a specific tag manifest. """
- if not key:
- raise InvalidLabelKeyException()
-
- # Note that we don't prevent invalid label names coming from the manifest from being stored, as
- # Docker does not currently prevent them from being put into said manifests.
- if not validate_label_key(key) and source_type_name != 'manifest':
- raise InvalidLabelKeyException('Key `%s` is invalid' % key)
-
- # Find the matching media type. If none specified, we infer.
- if media_type_name is None:
- media_type_name = 'text/plain'
- if is_json(value):
- media_type_name = 'application/json'
-
- try:
- media_type_id = Label.media_type.get_id(media_type_name)
- except MediaType.DoesNotExist:
- raise InvalidMediaTypeException()
-
- source_type_id = Label.source_type.get_id(source_type_name)
-
- # Ensure the manifest exists.
- try:
- manifest = (Manifest
- .select(Manifest, Repository)
- .join(Repository)
- .where(Manifest.id == manifest_id)
- .get())
- except Manifest.DoesNotExist:
- return None
-
- repository = manifest.repository
-
- # TODO: Remove this code once the TagManifest table is gone.
- tag_manifest = None
- if adjust_old_model:
- try:
- mapping_row = (TagManifestToManifest
- .select(TagManifestToManifest, TagManifest)
- .join(TagManifest)
- .where(TagManifestToManifest.manifest == manifest)
- .get())
- tag_manifest = mapping_row.tag_manifest
- except TagManifestToManifest.DoesNotExist:
- tag_manifest = None
-
- with db_transaction():
- label = Label.create(key=key, value=value, source_type=source_type_id, media_type=media_type_id)
- manifest_label = ManifestLabel.create(manifest=manifest_id, label=label, repository=repository)
-
- # If there exists a mapping to a TagManifest, add the old-style label.
- # TODO: Remove this code once the TagManifest table is gone.
- if tag_manifest:
- tag_manifest_label = TagManifestLabel.create(annotated=tag_manifest, label=label,
- repository=repository)
- TagManifestLabelMap.create(manifest_label=manifest_label,
- tag_manifest_label=tag_manifest_label,
- label=label,
- manifest=manifest,
- tag_manifest=tag_manifest)
-
- return label
-
-
-def delete_manifest_label(label_uuid, manifest):
- """ Deletes the manifest label on the tag manifest with the given ID. Returns the label deleted
- or None if none.
- """
- # Find the label itself.
- label = get_manifest_label(label_uuid, manifest)
- if label is None:
- return None
-
- if not label.source_type.mutable:
- raise DataModelException('Cannot delete immutable label')
-
- # Delete the mapping records and label.
- # TODO: Remove this code once the TagManifest table is gone.
- with db_transaction():
- (TagManifestLabelMap
- .delete()
- .where(TagManifestLabelMap.label == label)
- .execute())
-
- deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()
- if deleted_count != 1:
- logger.warning('Unexpected number of TagManifestLabel rows deleted for label %s', label_uuid)
-
- deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()
- if deleted_count != 1:
- logger.warning('Unexpected number of ManifestLabel rows deleted for label %s', label_uuid)
-
- label.delete_instance(recursive=False)
- return label
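
The sketch below illustrates the media-type inference performed by create_manifest_label above: a JSON-shaped value is stored as application/json, anything else as text/plain, unless a media type is passed explicitly. The manifest id and label values are hypothetical.

# Hypothetical labels on an existing manifest row id; inference follows the code above.
from data.model.oci.label import create_manifest_label

def add_example_labels(manifest_id):
    # Stored with media type text/plain (value is not valid JSON).
    create_manifest_label(manifest_id, 'quay.expires-after', '2w', 'api')
    # Stored with media type application/json (value parses as JSON).
    create_manifest_label(manifest_id, 'build-info', '{"commit": "abc123"}', 'api')
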
diff --git a/data/model/oci/manifest.py b/data/model/oci/manifest.py
deleted file mode 100644
index 85b66efc5..000000000
--- a/data/model/oci/manifest.py
+++ /dev/null
@@ -1,321 +0,0 @@
-import logging
-
-from collections import namedtuple
-
-from peewee import IntegrityError
-
-from data.database import (Tag, Manifest, ManifestBlob, ManifestLegacyImage, ManifestChild,
- db_transaction)
-from data.model import BlobDoesNotExist
-from data.model.blob import get_or_create_shared_blob, get_shared_blob
-from data.model.oci.tag import filter_to_alive_tags, create_temporary_tag_if_necessary
-from data.model.oci.label import create_manifest_label
-from data.model.oci.retriever import RepositoryContentRetriever
-from data.model.storage import lookup_repo_storages_by_content_checksum
-from data.model.image import lookup_repository_images, get_image, synthesize_v1_image
-from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES
-from image.docker.schema1 import ManifestException
-from image.docker.schema2.list import MalformedSchema2ManifestList
-from util.validation import is_json
-
-
-TEMP_TAG_EXPIRATION_SEC = 300 # 5 minutes
-
-
-logger = logging.getLogger(__name__)
-
-CreatedManifest = namedtuple('CreatedManifest', ['manifest', 'newly_created', 'labels_to_apply'])
-
-
-class CreateManifestException(Exception):
- """ Exception raised when creating a manifest fails and explicit exception
- raising is requested. """
-
-
-def lookup_manifest(repository_id, manifest_digest, allow_dead=False, require_available=False,
- temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC):
- """ Returns the manifest with the specified digest under the specified repository
- or None if none. If allow_dead is True, then manifests referenced by only
- dead tags will also be returned. If require_available is True, the manifest
- will be marked with a temporary tag to ensure it remains available.
- """
- if not require_available:
- return _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
-
- with db_transaction():
- found = _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
- if found is None:
- return None
-
- create_temporary_tag_if_necessary(found, temp_tag_expiration_sec)
- return found
-
-
-def _lookup_manifest(repository_id, manifest_digest, allow_dead=False):
- query = (Manifest
- .select()
- .where(Manifest.repository == repository_id)
- .where(Manifest.digest == manifest_digest))
-
- if allow_dead:
- try:
- return query.get()
- except Manifest.DoesNotExist:
- return None
-
- # First, try filtering to those manifests referenced directly by an alive tag.
- try:
- return filter_to_alive_tags(query.join(Tag)).get()
- except Manifest.DoesNotExist:
- pass
-
- # Otherwise, try manifests referenced as the child of a manifest that has an alive tag.
- query = (query
- .join(ManifestChild, on=(ManifestChild.child_manifest == Manifest.id))
- .join(Tag, on=(Tag.manifest == ManifestChild.manifest)))
-
- query = filter_to_alive_tags(query)
-
- try:
- return query.get()
- except Manifest.DoesNotExist:
- return None
-
-
-def get_or_create_manifest(repository_id, manifest_interface_instance, storage,
- temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
- for_tagging=False, raise_on_error=False):
- """ Returns a CreatedManifest for the manifest in the specified repository with the matching
- digest (if it already exists) or, if not yet created, creates and returns the manifest.
-
- Returns None if there was an error creating the manifest, unless raise_on_error is specified,
- in which case a CreateManifestException will be raised instead to provide more
- context about the error.
-
- Note that *all* blobs referenced by the manifest must exist already in the repository or this
- method will fail and return None.
- """
- existing = lookup_manifest(repository_id, manifest_interface_instance.digest, allow_dead=True,
- require_available=True,
- temp_tag_expiration_sec=temp_tag_expiration_sec)
- if existing is not None:
- return CreatedManifest(manifest=existing, newly_created=False, labels_to_apply=None)
-
- return _create_manifest(repository_id, manifest_interface_instance, storage,
- temp_tag_expiration_sec, for_tagging=for_tagging,
- raise_on_error=raise_on_error)
-
-
-def _create_manifest(repository_id, manifest_interface_instance, storage,
- temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
- for_tagging=False, raise_on_error=False):
- # Validate the manifest.
- retriever = RepositoryContentRetriever.for_repository(repository_id, storage)
- try:
- manifest_interface_instance.validate(retriever)
- except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
- logger.exception('Could not validate manifest `%s`', manifest_interface_instance.digest)
- if raise_on_error:
- raise CreateManifestException(ex)
-
- return None
-
- # Load, parse and get/create the child manifests, if any.
- child_manifest_refs = manifest_interface_instance.child_manifests(retriever)
- child_manifest_rows = {}
- child_manifest_label_dicts = []
-
- if child_manifest_refs is not None:
- for child_manifest_ref in child_manifest_refs:
- # Load and parse the child manifest.
- try:
- child_manifest = child_manifest_ref.manifest_obj
- except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
- logger.exception('Could not load manifest list for manifest `%s`',
- manifest_interface_instance.digest)
- if raise_on_error:
- raise CreateManifestException(ex)
-
- return None
-
- # Retrieve its labels.
- labels = child_manifest.get_manifest_labels(retriever)
- if labels is None:
- logger.exception('Could not load manifest labels for child manifest')
- return None
-
- # Get/create the child manifest in the database.
- child_manifest_info = get_or_create_manifest(repository_id, child_manifest, storage,
- raise_on_error=raise_on_error)
- if child_manifest_info is None:
- logger.error('Could not get/create child manifest')
- return None
-
- child_manifest_rows[child_manifest_info.manifest.digest] = child_manifest_info.manifest
- child_manifest_label_dicts.append(labels)
-
- # Ensure all the blobs in the manifest exist.
- digests = set(manifest_interface_instance.local_blob_digests)
- blob_map = {}
-
- # If the special empty layer is required, simply load it directly. This is much faster
- # than trying to load it on a per-repository basis, and it is unnecessary anyway since
- # this layer is predefined.
- if EMPTY_LAYER_BLOB_DIGEST in digests:
- digests.remove(EMPTY_LAYER_BLOB_DIGEST)
- blob_map[EMPTY_LAYER_BLOB_DIGEST] = get_shared_blob(EMPTY_LAYER_BLOB_DIGEST)
- if not blob_map[EMPTY_LAYER_BLOB_DIGEST]:
- logger.warning('Could not find the special empty blob in storage')
- return None
-
- if digests:
- query = lookup_repo_storages_by_content_checksum(repository_id, digests)
- blob_map.update({s.content_checksum: s for s in query})
- for digest_str in digests:
- if digest_str not in blob_map:
- logger.warning('Unknown blob `%s` under manifest `%s` for repository `%s`', digest_str,
- manifest_interface_instance.digest, repository_id)
-
- if raise_on_error:
- raise CreateManifestException('Unknown blob `%s`' % digest_str)
-
- return None
-
- # Special check: If the empty layer blob is needed for this manifest, add it to the
- # blob map. This is necessary because Docker decided to elide sending of this special
- # empty layer in schema version 2, but we need to have it referenced for GC and schema version 1.
- if EMPTY_LAYER_BLOB_DIGEST not in blob_map:
- if manifest_interface_instance.get_requires_empty_layer_blob(retriever):
- shared_blob = get_or_create_shared_blob(EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage)
- assert not shared_blob.uploading
- assert shared_blob.content_checksum == EMPTY_LAYER_BLOB_DIGEST
- blob_map[EMPTY_LAYER_BLOB_DIGEST] = shared_blob
-
- # Determine and populate the legacy image if necessary. Manifest lists will not have a legacy
- # image.
- legacy_image = None
- if manifest_interface_instance.has_legacy_image:
- legacy_image_id = _populate_legacy_image(repository_id, manifest_interface_instance, blob_map,
- retriever)
- if legacy_image_id is None:
- return None
-
- legacy_image = get_image(repository_id, legacy_image_id)
- if legacy_image is None:
- return None
-
- # Create the manifest and its blobs.
- media_type = Manifest.media_type.get_id(manifest_interface_instance.media_type)
- storage_ids = {storage.id for storage in blob_map.values()}
-
- with db_transaction():
- # Check for the manifest. This is necessary because Postgres doesn't handle IntegrityErrors
- # well under transactions.
- try:
- manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
- return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
- except Manifest.DoesNotExist:
- pass
-
- # Create the manifest.
- try:
- manifest = Manifest.create(repository=repository_id,
- digest=manifest_interface_instance.digest,
- media_type=media_type,
- manifest_bytes=manifest_interface_instance.bytes.as_encoded_str())
- except IntegrityError:
- manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
- return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
-
- # Insert the blobs.
- blobs_to_insert = [dict(manifest=manifest, repository=repository_id,
- blob=storage_id) for storage_id in storage_ids]
- if blobs_to_insert:
- ManifestBlob.insert_many(blobs_to_insert).execute()
-
- # Set the legacy image (if applicable).
- if legacy_image is not None:
- ManifestLegacyImage.create(repository=repository_id, image=legacy_image, manifest=manifest)
-
- # Insert the manifest child rows (if applicable).
- if child_manifest_rows:
- children_to_insert = [dict(manifest=manifest, child_manifest=child_manifest,
- repository=repository_id)
- for child_manifest in child_manifest_rows.values()]
- ManifestChild.insert_many(children_to_insert).execute()
-
- # If this manifest is being created not for immediate tagging, add a temporary tag to the
- # manifest to ensure it isn't being GCed. If the manifest *is* for tagging, then since we're
- # creating a new one here, it cannot be GCed (since it isn't referenced by anything yet), so
- # it's safe to elide the temp tag operation. If we ever change the GC code to collect *all* manifests
- # in a repository for GC, then we will have to reevaluate this optimization at that time.
- if not for_tagging:
- create_temporary_tag_if_necessary(manifest, temp_tag_expiration_sec)
-
- # Define the labels for the manifest (if any).
- labels = manifest_interface_instance.get_manifest_labels(retriever)
- if labels:
- for key, value in labels.iteritems():
- media_type = 'application/json' if is_json(value) else 'text/plain'
- create_manifest_label(manifest, key, value, 'manifest', media_type)
-
- # Return the dictionary of labels to apply (i.e. those labels that cause an action to be taken
- # on the manifest or its resulting tags). We only return those labels either defined on
- # the manifest or shared amongst all the child manifests. We intersect amongst all child manifests
- # to ensure that any action performed is defined in all manifests.
- labels_to_apply = labels or {}
- if child_manifest_label_dicts:
- labels_to_apply = child_manifest_label_dicts[0].viewitems()
- for child_manifest_label_dict in child_manifest_label_dicts[1:]:
- # Intersect the key+values of the labels to ensure we get the exact same result
- # for all the child manifests.
- labels_to_apply = labels_to_apply & child_manifest_label_dict.viewitems()
-
- labels_to_apply = dict(labels_to_apply)
-
- return CreatedManifest(manifest=manifest, newly_created=True, labels_to_apply=labels_to_apply)
-
-
-def _populate_legacy_image(repository_id, manifest_interface_instance, blob_map, retriever):
- # Lookup all the images and their parent images (if any) inside the manifest.
- # This will let us know which v1 images we need to synthesize and which ones are invalid.
- docker_image_ids = list(manifest_interface_instance.get_legacy_image_ids(retriever))
- images_query = lookup_repository_images(repository_id, docker_image_ids)
- image_storage_map = {i.docker_image_id: i.storage for i in images_query}
-
- # Rewrite any v1 image IDs that do not match the checksum in the database.
- try:
- rewritten_images = manifest_interface_instance.generate_legacy_layers(image_storage_map,
- retriever)
- rewritten_images = list(rewritten_images)
- parent_image_map = {}
-
- for rewritten_image in rewritten_images:
- if rewritten_image.image_id not in image_storage_map:
- parent_image = None
- if rewritten_image.parent_image_id:
- parent_image = parent_image_map.get(rewritten_image.parent_image_id)
- if parent_image is None:
- parent_image = get_image(repository_id, rewritten_image.parent_image_id)
- if parent_image is None:
- return None
-
- storage_reference = blob_map[rewritten_image.content_checksum]
- synthesized = synthesize_v1_image(
- repository_id,
- storage_reference.id,
- storage_reference.image_size,
- rewritten_image.image_id,
- rewritten_image.created,
- rewritten_image.comment,
- rewritten_image.command,
- rewritten_image.compat_json,
- parent_image,
- )
-
- parent_image_map[rewritten_image.image_id] = synthesized
- except ManifestException:
- logger.exception("exception when rewriting v1 metadata")
- return None
-
- return rewritten_images[-1].image_id
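
A sketch of the typical call pattern for get_or_create_manifest above. The parsed_manifest argument is assumed to be an object implementing the manifest interface (for example, one built by DockerSchema2ManifestBuilder, as in the tests further below); repository_id and storage are supplied by the caller.

# Hypothetical ingestion path using the removed helper; error handling is abbreviated.
from data.model.oci.manifest import get_or_create_manifest, CreateManifestException

def ingest_manifest(repository_id, parsed_manifest, storage):
    try:
        created = get_or_create_manifest(repository_id, parsed_manifest, storage,
                                         raise_on_error=True)
    except CreateManifestException:
        return None

    # Some failure paths still return None even with raise_on_error=True.
    if created is None:
        return None

    if created.newly_created and created.labels_to_apply:
        # Action-bearing labels (e.g. expiration labels) would be applied here.
        pass

    return created.manifest
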
diff --git a/data/model/oci/retriever.py b/data/model/oci/retriever.py
deleted file mode 100644
index b6e9633e0..000000000
--- a/data/model/oci/retriever.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from image.docker.interfaces import ContentRetriever
-from data.database import Manifest
-from data.model.oci.blob import get_repository_blob_by_digest
-from data.model.storage import get_layer_path
-
-class RepositoryContentRetriever(ContentRetriever):
- """ Implementation of the ContentRetriever interface for manifests that retrieves
- config blobs and child manifests for the specified repository.
- """
- def __init__(self, repository_id, storage):
- self.repository_id = repository_id
- self.storage = storage
-
- @classmethod
- def for_repository(cls, repository_id, storage):
- return RepositoryContentRetriever(repository_id, storage)
-
- def get_manifest_bytes_with_digest(self, digest):
- """ Returns the bytes of the manifest with the given digest or None if none found. """
- query = (Manifest
- .select()
- .where(Manifest.repository == self.repository_id)
- .where(Manifest.digest == digest))
-
- try:
- return query.get().manifest_bytes
- except Manifest.DoesNotExist:
- return None
-
- def get_blob_bytes_with_digest(self, digest):
- """ Returns the bytes of the blob with the given digest or None if none found. """
- blob = get_repository_blob_by_digest(self.repository_id, digest)
- if blob is None:
- return None
-
- assert blob.locations is not None
- return self.storage.get_content(blob.locations, get_layer_path(blob))
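
A brief sketch of how manifest-validation code used the retriever above to fetch config blob bytes; repository_id, storage and the digest are assumed to come from the caller.

# Hypothetical use of RepositoryContentRetriever; returns None when the digest is unknown.
from data.model.oci.retriever import RepositoryContentRetriever

def load_config_blob(repository_id, storage, config_digest):
    retriever = RepositoryContentRetriever.for_repository(repository_id, storage)
    return retriever.get_blob_bytes_with_digest(config_digest)
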
diff --git a/data/model/oci/shared.py b/data/model/oci/shared.py
deleted file mode 100644
index 887eda383..000000000
--- a/data/model/oci/shared.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from data.database import Manifest, ManifestLegacyImage, Image
-
-def get_legacy_image_for_manifest(manifest_id):
- """ Returns the legacy image associated with the given manifest, if any, or None if none. """
- try:
- query = (ManifestLegacyImage
- .select(ManifestLegacyImage, Image)
- .join(Image)
- .where(ManifestLegacyImage.manifest == manifest_id))
- return query.get().image
- except ManifestLegacyImage.DoesNotExist:
- return None
-
-
-def get_manifest_for_legacy_image(image_id):
- """ Returns a manifest that is associated with the given image, if any, or None if none. """
- try:
- query = (ManifestLegacyImage
- .select(ManifestLegacyImage, Manifest)
- .join(Manifest)
- .where(ManifestLegacyImage.image == image_id))
- return query.get().manifest
- except ManifestLegacyImage.DoesNotExist:
- return None
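
A small sketch of the two-way bridge the helpers above provide between the new Manifest rows and the legacy Image rows; the row ids are hypothetical and would normally come from the surrounding data model code.

# Hypothetical round-trip between the OCI and legacy models; either call may return None.
from data.model.oci.shared import (get_legacy_image_for_manifest,
                                   get_manifest_for_legacy_image)

def describe_linkage(manifest_id, image_id):
    legacy_image = get_legacy_image_for_manifest(manifest_id)   # Image row or None
    manifest = get_manifest_for_legacy_image(image_id)          # Manifest row or None
    return legacy_image, manifest
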
diff --git a/data/model/oci/tag.py b/data/model/oci/tag.py
deleted file mode 100644
index 4ad1b8c18..000000000
--- a/data/model/oci/tag.py
+++ /dev/null
@@ -1,505 +0,0 @@
-import uuid
-import logging
-
-from calendar import timegm
-from peewee import fn
-
-from data.database import (Tag, Manifest, ManifestLegacyImage, Image, ImageStorage,
- MediaType, RepositoryTag, TagManifest, TagManifestToManifest,
- get_epoch_timestamp_ms, db_transaction, Repository,
- TagToRepositoryTag, Namespace, RepositoryNotification,
- ExternalNotificationEvent)
-from data.model.oci.shared import get_legacy_image_for_manifest
-from data.model import config
-from image.docker.schema1 import (DOCKER_SCHEMA1_CONTENT_TYPES, DockerSchema1Manifest,
- MalformedSchema1Manifest)
-from util.bytes import Bytes
-from util.timedeltastring import convert_to_timedelta
-
-logger = logging.getLogger(__name__)
-
-
-def get_tag_by_id(tag_id):
- """ Returns the tag with the given ID, joined with its manifest or None if none. """
- try:
- return Tag.select(Tag, Manifest).join(Manifest).where(Tag.id == tag_id).get()
- except Tag.DoesNotExist:
- return None
-
-
-def get_tag(repository_id, tag_name):
- """ Returns the alive, non-hidden tag with the given name under the specified repository or
- None if none. The tag is returned joined with its manifest.
- """
- query = (Tag
- .select(Tag, Manifest)
- .join(Manifest)
- .where(Tag.repository == repository_id)
- .where(Tag.name == tag_name))
-
- query = filter_to_alive_tags(query)
-
- try:
- found = query.get()
- assert not found.hidden
- return found
- except Tag.DoesNotExist:
- return None
-
-
-def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=None):
- """ Returns a list of the tags alive in the specified repository. Note that the tags returned
- *only* contain their ID and name. Also note that the Tags are returned ordered by ID.
- """
- query = (Tag
- .select(Tag.id, Tag.name)
- .where(Tag.repository == repository_id)
- .order_by(Tag.id))
-
- if start_pagination_id is not None:
- query = query.where(Tag.id >= start_pagination_id)
-
- if limit is not None:
- query = query.limit(limit)
-
- return filter_to_alive_tags(query)
-
-
-def list_alive_tags(repository_id):
- """ Returns a list of all the tags alive in the specified repository.
- Tags returned are joined with their manifests.
- """
- query = (Tag
- .select(Tag, Manifest)
- .join(Manifest)
- .where(Tag.repository == repository_id))
-
- return filter_to_alive_tags(query)
-
-
-def list_repository_tag_history(repository_id, page, page_size, specific_tag_name=None,
- active_tags_only=False, since_time_ms=None):
- """ Returns a tuple of the full set of tags found in the specified repository, including those
- that are no longer alive (unless active_tags_only is True), and whether additional tags exist.
- If specific_tag_name is given, the tags are further filtered by name. If since_time_ms is given,
- tags are further filtered to those newer than that timestamp.
-
- Note that the returned Manifest will not contain the manifest contents.
- """
- query = (Tag
- .select(Tag, Manifest.id, Manifest.digest, Manifest.media_type)
- .join(Manifest)
- .where(Tag.repository == repository_id)
- .order_by(Tag.lifetime_start_ms.desc(), Tag.name)
- .limit(page_size + 1)
- .offset(page_size * (page - 1)))
-
- if specific_tag_name is not None:
- query = query.where(Tag.name == specific_tag_name)
-
- if since_time_ms is not None:
- query = query.where((Tag.lifetime_start_ms > since_time_ms) | (Tag.lifetime_end_ms > since_time_ms))
-
- if active_tags_only:
- query = filter_to_alive_tags(query)
-
- query = filter_to_visible_tags(query)
- results = list(query)
-
- return results[0:page_size], len(results) > page_size
-
-
-def get_legacy_images_for_tags(tags):
- """ Returns a map from tag ID to the legacy image for the tag. """
- if not tags:
- return {}
-
- query = (ManifestLegacyImage
- .select(ManifestLegacyImage, Image, ImageStorage)
- .join(Image)
- .join(ImageStorage)
- .where(ManifestLegacyImage.manifest << [tag.manifest_id for tag in tags]))
-
- by_manifest = {mli.manifest_id: mli.image for mli in query}
- return {tag.id: by_manifest[tag.manifest_id] for tag in tags if tag.manifest_id in by_manifest}
-
-
-def find_matching_tag(repository_id, tag_names, tag_kinds=None):
- """ Finds an alive tag in the specified repository with one of the specified tag names and
- returns it or None if none. Tags returned are joined with their manifests.
- """
- assert repository_id
- assert tag_names
-
- query = (Tag
- .select(Tag, Manifest)
- .join(Manifest)
- .where(Tag.repository == repository_id)
- .where(Tag.name << tag_names))
-
- if tag_kinds:
- query = query.where(Tag.tag_kind << tag_kinds)
-
- try:
- found = filter_to_alive_tags(query).get()
- assert not found.hidden
- return found
- except Tag.DoesNotExist:
- return None
-
-
-def get_most_recent_tag_lifetime_start(repository_ids):
- """ Returns a map from repo ID to the timestamp of the most recently pushed alive tag
- for each specified repository. Repositories with no alive tags are omitted from the map.
- """
- assert len(repository_ids) > 0 and None not in repository_ids
-
- query = (Tag.select(Tag.repository, fn.Max(Tag.lifetime_start_ms))
- .where(Tag.repository << [repo_id for repo_id in repository_ids])
- .group_by(Tag.repository))
- tuples = filter_to_alive_tags(query).tuples()
-
- return {repo_id: timestamp for repo_id, timestamp in tuples}
-
-
-def get_most_recent_tag(repository_id):
- """ Returns the most recently pushed alive tag in the specified repository or None if none.
- The Tag returned is joined with its manifest.
- """
- assert repository_id
-
- query = (Tag
- .select(Tag, Manifest)
- .join(Manifest)
- .where(Tag.repository == repository_id)
- .order_by(Tag.lifetime_start_ms.desc()))
-
- try:
- found = filter_to_alive_tags(query).get()
- assert not found.hidden
- return found
- except Tag.DoesNotExist:
- return None
-
-
-def get_expired_tag(repository_id, tag_name):
- """ Returns a tag with the given name that is expired in the repository or None if none.
- """
- try:
- return (Tag
- .select()
- .where(Tag.name == tag_name, Tag.repository == repository_id)
- .where(~(Tag.lifetime_end_ms >> None))
- .where(Tag.lifetime_end_ms <= get_epoch_timestamp_ms())
- .get())
- except Tag.DoesNotExist:
- return None
-
-
-def create_temporary_tag_if_necessary(manifest, expiration_sec):
- """ Creates a temporary tag pointing to the given manifest, with the given expiration in seconds,
- unless there is an existing tag that will keep the manifest around.
- """
- tag_name = '$temp-%s' % str(uuid.uuid4())
- now_ms = get_epoch_timestamp_ms()
- end_ms = now_ms + (expiration_sec * 1000)
-
- # Check if there is an existing tag on the manifest that won't expire within the
- # timeframe. If so, no need for a temporary tag.
- with db_transaction():
- try:
- (Tag
- .select()
- .where(Tag.manifest == manifest,
- (Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms >= end_ms))
- .get())
- return None
- except Tag.DoesNotExist:
- pass
-
- return Tag.create(name=tag_name,
- repository=manifest.repository_id,
- lifetime_start_ms=now_ms,
- lifetime_end_ms=end_ms,
- reversion=False,
- hidden=True,
- manifest=manifest,
- tag_kind=Tag.tag_kind.get_id('tag'))
-
-
-def retarget_tag(tag_name, manifest_id, is_reversion=False, now_ms=None, adjust_old_model=True):
- """ Creates or updates a tag with the specified name to point to the given manifest under
- its repository. If this action is a reversion to a previous manifest, is_reversion
- should be set to True. Returns the newly created tag row or None on error.
- """
- try:
- manifest = (Manifest
- .select(Manifest, MediaType)
- .join(MediaType)
- .where(Manifest.id == manifest_id)
- .get())
- except Manifest.DoesNotExist:
- return None
-
- # CHECK: Make sure that we are not mistargeting a schema 1 manifest to a tag with a different
- # name.
- if manifest.media_type.name in DOCKER_SCHEMA1_CONTENT_TYPES:
- try:
- parsed = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest.manifest_bytes),
- validate=False)
- if parsed.tag != tag_name:
- logger.error('Tried to re-target schema1 manifest with tag `%s` to tag `%s`', parsed.tag,
- tag_name)
- return None
- except MalformedSchema1Manifest:
- logger.exception('Could not parse schema1 manifest')
- return None
-
- legacy_image = get_legacy_image_for_manifest(manifest)
- now_ms = now_ms or get_epoch_timestamp_ms()
- now_ts = int(now_ms / 1000)
-
- with db_transaction():
- # Lookup an existing tag in the repository with the same name and, if present, mark it
- # as expired.
- existing_tag = get_tag(manifest.repository_id, tag_name)
- if existing_tag is not None:
- _, okay = set_tag_end_ms(existing_tag, now_ms)
-
- # TODO: should we retry here and/or use a for-update?
- if not okay:
- return None
-
- # Create a new tag pointing to the manifest with a lifetime start of now.
- created = Tag.create(name=tag_name, repository=manifest.repository_id, lifetime_start_ms=now_ms,
- reversion=is_reversion, manifest=manifest,
- tag_kind=Tag.tag_kind.get_id('tag'))
-
- # TODO: Remove the linkage code once RepositoryTag is gone.
- # If this is a schema 1 manifest, then add a TagManifest linkage to it. Otherwise, it will only
- # be pullable via the new OCI model.
- if adjust_old_model:
- if manifest.media_type.name in DOCKER_SCHEMA1_CONTENT_TYPES and legacy_image is not None:
- old_style_tag = RepositoryTag.create(repository=manifest.repository_id, image=legacy_image,
- name=tag_name, lifetime_start_ts=now_ts,
- reversion=is_reversion)
- TagToRepositoryTag.create(tag=created, repository_tag=old_style_tag,
- repository=manifest.repository_id)
-
- tag_manifest = TagManifest.create(tag=old_style_tag, digest=manifest.digest,
- json_data=manifest.manifest_bytes)
- TagManifestToManifest.create(tag_manifest=tag_manifest, manifest=manifest,
- repository=manifest.repository_id)
-
- return created
-
-
-def delete_tag(repository_id, tag_name):
- """ Deletes the alive tag with the given name in the specified repository and returns the deleted
- tag. If the tag did not exist, returns None.
- """
- tag = get_tag(repository_id, tag_name)
- if tag is None:
- return None
-
- return _delete_tag(tag, get_epoch_timestamp_ms())
-
-
-def _delete_tag(tag, now_ms):
- """ Deletes the given tag by marking it as expired. """
- now_ts = int(now_ms / 1000)
-
- with db_transaction():
- updated = (Tag
- .update(lifetime_end_ms=now_ms)
- .where(Tag.id == tag.id, Tag.lifetime_end_ms == tag.lifetime_end_ms)
- .execute())
- if updated != 1:
- return None
-
- # TODO: Remove the linkage code once RepositoryTag is gone.
- try:
- old_style_tag = (TagToRepositoryTag
- .select(TagToRepositoryTag, RepositoryTag)
- .join(RepositoryTag)
- .where(TagToRepositoryTag.tag == tag)
- .get()).repository_tag
-
- old_style_tag.lifetime_end_ts = now_ts
- old_style_tag.save()
- except TagToRepositoryTag.DoesNotExist:
- pass
-
- return tag
-
-
-def delete_tags_for_manifest(manifest):
- """ Deletes all tags pointing to the given manifest. Returns the list of tags
- deleted.
- """
- query = Tag.select().where(Tag.manifest == manifest)
- query = filter_to_alive_tags(query)
- query = filter_to_visible_tags(query)
-
- tags = list(query)
- now_ms = get_epoch_timestamp_ms()
-
- with db_transaction():
- for tag in tags:
- _delete_tag(tag, now_ms)
-
- return tags
-
-
-def filter_to_visible_tags(query):
- """ Adjusts the specified Tag query to only return those tags that are visible.
- """
- return query.where(Tag.hidden == False)
-
-
-def filter_to_alive_tags(query, now_ms=None, model=Tag):
- """ Adjusts the specified Tag query to only return those tags alive. If now_ms is specified,
- the given timestamp (in MS) is used in place of the current timestamp for determining wherther
- a tag is alive.
- """
- if now_ms is None:
- now_ms = get_epoch_timestamp_ms()
-
- return (query.where((model.lifetime_end_ms >> None) | (model.lifetime_end_ms > now_ms))
- .where(model.hidden == False))
-
-
-def set_tag_expiration_sec_for_manifest(manifest_id, expiration_seconds):
- """ Sets the tag expiration for any tags that point to the given manifest ID. """
- query = Tag.select().where(Tag.manifest == manifest_id)
- query = filter_to_alive_tags(query)
- tags = list(query)
- for tag in tags:
- assert not tag.hidden
- set_tag_end_ms(tag, tag.lifetime_start_ms + (expiration_seconds * 1000))
-
- return tags
-
-
-def set_tag_expiration_for_manifest(manifest_id, expiration_datetime):
- """ Sets the tag expiration for any tags that point to the given manifest ID. """
- query = Tag.select().where(Tag.manifest == manifest_id)
- query = filter_to_alive_tags(query)
- tags = list(query)
- for tag in tags:
- assert not tag.hidden
- change_tag_expiration(tag, expiration_datetime)
-
- return tags
-
-
-def change_tag_expiration(tag_id, expiration_datetime):
- """ Changes the expiration of the specified tag to the given expiration datetime. If
- the expiration datetime is None, then the tag is marked as not expiring. Returns
- a tuple of the previous expiration timestamp in milliseconds (if any), and whether the
- operation succeeded.
- """
- try:
- tag = Tag.get(id=tag_id)
- except Tag.DoesNotExist:
- return (None, False)
-
- new_end_ms = None
- min_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MINIMUM', '1h'))
- max_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MAXIMUM', '104w'))
-
- if expiration_datetime is not None:
- lifetime_start_ts = int(tag.lifetime_start_ms / 1000)
-
- offset = timegm(expiration_datetime.utctimetuple()) - lifetime_start_ts
- offset = min(max(offset, min_expire_sec.total_seconds()), max_expire_sec.total_seconds())
- new_end_ms = tag.lifetime_start_ms + (offset * 1000)
-
- if new_end_ms == tag.lifetime_end_ms:
- return (None, True)
-
- return set_tag_end_ms(tag, new_end_ms)
-
-
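
A worked numeric example of the clamping performed in change_tag_expiration above, using the default '1h' minimum and '104w' maximum; the timestamps are hypothetical.

# Standalone arithmetic mirroring the clamp in change_tag_expiration (defaults assumed).
min_expire_sec = 1 * 60 * 60                 # LABELED_EXPIRATION_MINIMUM default of '1h'
max_expire_sec = 104 * 7 * 24 * 60 * 60      # LABELED_EXPIRATION_MAXIMUM default of '104w'

lifetime_start_ms = 1500000000000            # hypothetical tag start time
requested_offset_sec = 10 * 60               # caller asked for expiration 10 minutes out

offset = min(max(requested_offset_sec, min_expire_sec), max_expire_sec)
new_end_ms = lifetime_start_ms + (offset * 1000)
assert offset == min_expire_sec              # clamped up to the one-hour minimum
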
-def lookup_unrecoverable_tags(repo):
- """ Returns the tags in a repository that are expired and past their time machine recovery
- period. """
- expired_clause = get_epoch_timestamp_ms() - (Namespace.removed_tag_expiration_s * 1000)
- return (Tag
- .select()
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(Tag.repository == repo)
- .where(~(Tag.lifetime_end_ms >> None), Tag.lifetime_end_ms <= expired_clause))
-
-
-def set_tag_end_ms(tag, end_ms):
- """ Sets the end timestamp for a tag. Should only be called by change_tag_expiration
- or tests.
- """
-
- with db_transaction():
- updated = (Tag
- .update(lifetime_end_ms=end_ms)
- .where(Tag.id == tag)
- .where(Tag.lifetime_end_ms == tag.lifetime_end_ms)
- .execute())
- if updated != 1:
- return (None, False)
-
- # TODO: Remove the linkage code once RepositoryTag is gone.
- try:
- old_style_tag = (TagToRepositoryTag
- .select(TagToRepositoryTag, RepositoryTag)
- .join(RepositoryTag)
- .where(TagToRepositoryTag.tag == tag)
- .get()).repository_tag
-
- old_style_tag.lifetime_end_ts = end_ms / 1000 if end_ms is not None else None
- old_style_tag.save()
- except TagToRepositoryTag.DoesNotExist:
- pass
-
- return (tag.lifetime_end_ms, True)
-
-
-def tags_containing_legacy_image(image):
- """ Yields all alive Tags containing the given image as a legacy image, somewhere in its
- legacy image hierarchy.
- """
- ancestors_str = '%s%s/%%' % (image.ancestors, image.id)
- tags = (Tag
- .select()
- .join(Repository)
- .switch(Tag)
- .join(Manifest)
- .join(ManifestLegacyImage)
- .join(Image)
- .where(Tag.repository == image.repository_id)
- .where(Image.repository == image.repository_id)
- .where((Image.id == image.id) |
- (Image.ancestors ** ancestors_str)))
- return filter_to_alive_tags(tags)
-
-
-def lookup_notifiable_tags_for_legacy_image(docker_image_id, storage_uuid, event_name):
- """ Yields any alive Tags found in repositories with an event with the given name registered
- and whose legacy Image has the given docker image ID and storage UUID.
- """
- event = ExternalNotificationEvent.get(name=event_name)
- images = (Image
- .select()
- .join(ImageStorage)
- .where(Image.docker_image_id == docker_image_id,
- ImageStorage.uuid == storage_uuid))
-
- for image in list(images):
- # Ensure the image is under a repository that supports the event.
- try:
- RepositoryNotification.get(repository=image.repository_id, event=event)
- except RepositoryNotification.DoesNotExist:
- continue
-
- # If found in a repository with the valid event, yield the tag(s) that contains the image.
- for tag in tags_containing_legacy_image(image):
- yield tag
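
A final sketch tying the tag helpers above into the usual lifecycle: retarget a tag at a manifest, confirm it is alive, then expire it. The manifest id is assumed to come from get_or_create_manifest; the tag name is illustrative.

# Hypothetical tag lifecycle built from the removed helpers; signatures match the code above.
from data.model.oci.tag import retarget_tag, get_tag, delete_tag

def publish_then_remove(repository_id, manifest_id):
    created = retarget_tag('latest', manifest_id)        # create/update the 'latest' tag
    if created is None:
        return None

    assert get_tag(repository_id, 'latest') is not None  # tag is now alive and visible
    return delete_tag(repository_id, 'latest')           # marks the tag as expired
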
diff --git a/data/model/oci/test/__init__.py b/data/model/oci/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/data/model/oci/test/test_oci_label.py b/data/model/oci/test/test_oci_label.py
deleted file mode 100644
index 2ba04521b..000000000
--- a/data/model/oci/test/test_oci_label.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import pytest
-
-from playhouse.test_utils import assert_query_count
-
-from data.database import Manifest, ManifestLabel
-from data.model.oci.label import (create_manifest_label, list_manifest_labels, get_manifest_label,
- delete_manifest_label, DataModelException)
-
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('key, value, source_type, expected_error', [
- ('foo', 'bar', 'manifest', None),
-
- pytest.param('..foo', 'bar', 'manifest', None, id='invalid key on manifest'),
- pytest.param('..foo', 'bar', 'api', 'is invalid', id='invalid key on api'),
-])
-def test_create_manifest_label(key, value, source_type, expected_error, initialized_db):
- manifest = Manifest.get()
-
- if expected_error:
- with pytest.raises(DataModelException) as ex:
- create_manifest_label(manifest, key, value, source_type)
-
- assert ex.match(expected_error)
- return
-
- label = create_manifest_label(manifest, key, value, source_type)
- labels = [ml.label_id for ml in ManifestLabel.select().where(ManifestLabel.manifest == manifest)]
- assert label.id in labels
-
- with assert_query_count(1):
- assert label in list_manifest_labels(manifest)
-
- assert label not in list_manifest_labels(manifest, 'someprefix')
- assert label in list_manifest_labels(manifest, key[0:2])
-
- with assert_query_count(1):
- assert get_manifest_label(label.uuid, manifest) == label
-
-
-def test_list_manifest_labels(initialized_db):
- manifest = Manifest.get()
-
- label1 = create_manifest_label(manifest, 'foo', '1', 'manifest')
- label2 = create_manifest_label(manifest, 'bar', '2', 'api')
- label3 = create_manifest_label(manifest, 'baz', '3', 'internal')
-
- assert label1 in list_manifest_labels(manifest)
- assert label2 in list_manifest_labels(manifest)
- assert label3 in list_manifest_labels(manifest)
-
- other_manifest = Manifest.select().where(Manifest.id != manifest.id).get()
- assert label1 not in list_manifest_labels(other_manifest)
- assert label2 not in list_manifest_labels(other_manifest)
- assert label3 not in list_manifest_labels(other_manifest)
-
-
-def test_get_manifest_label(initialized_db):
- found = False
- for manifest_label in ManifestLabel.select():
- assert (get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) ==
- manifest_label.label)
- assert manifest_label.label in list_manifest_labels(manifest_label.manifest)
- found = True
-
- assert found
-
-
-def test_delete_manifest_label(initialized_db):
- found = False
- for manifest_label in list(ManifestLabel.select()):
- assert (get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) ==
- manifest_label.label)
- assert manifest_label.label in list_manifest_labels(manifest_label.manifest)
-
- if manifest_label.label.source_type.mutable:
- assert delete_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
- assert manifest_label.label not in list_manifest_labels(manifest_label.manifest)
- assert get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) is None
- else:
- with pytest.raises(DataModelException):
- delete_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
-
- found = True
-
- assert found
diff --git a/data/model/oci/test/test_oci_manifest.py b/data/model/oci/test/test_oci_manifest.py
deleted file mode 100644
index 4c5d6ed3b..000000000
--- a/data/model/oci/test/test_oci_manifest.py
+++ /dev/null
@@ -1,560 +0,0 @@
-import json
-
-from playhouse.test_utils import assert_query_count
-
-from app import docker_v2_signing_key, storage
-
-from digest.digest_tools import sha256_digest
-from data.database import (Tag, ManifestBlob, ImageStorageLocation, ManifestChild,
- ImageStorage, Image, RepositoryTag, get_epoch_timestamp_ms)
-from data.model.oci.manifest import lookup_manifest, get_or_create_manifest
-from data.model.oci.tag import filter_to_alive_tags, get_tag
-from data.model.oci.shared import get_legacy_image_for_manifest
-from data.model.oci.label import list_manifest_labels
-from data.model.oci.retriever import RepositoryContentRetriever
-from data.model.repository import get_repository, create_repository
-from data.model.image import find_create_or_link_image
-from data.model.blob import store_blob_record_and_temp_link
-from data.model.storage import get_layer_path
-from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
-from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
-from image.docker.schema2.list import DockerSchema2ManifestListBuilder
-from util.bytes import Bytes
-
-from test.fixtures import *
-
-def test_lookup_manifest(initialized_db):
- found = False
- for tag in filter_to_alive_tags(Tag.select()):
- found = True
- repo = tag.repository
- digest = tag.manifest.digest
- with assert_query_count(1):
- assert lookup_manifest(repo, digest) == tag.manifest
-
- assert found
-
- for tag in Tag.select():
- repo = tag.repository
- digest = tag.manifest.digest
- with assert_query_count(1):
- assert lookup_manifest(repo, digest, allow_dead=True) == tag.manifest
-
-
-def test_lookup_manifest_dead_tag(initialized_db):
- dead_tag = Tag.select().where(Tag.lifetime_end_ms <= get_epoch_timestamp_ms()).get()
- assert dead_tag.lifetime_end_ms <= get_epoch_timestamp_ms()
-
- assert lookup_manifest(dead_tag.repository, dead_tag.manifest.digest) is None
- assert (lookup_manifest(dead_tag.repository, dead_tag.manifest.digest, allow_dead=True) ==
- dead_tag.manifest)
-
-
-def create_manifest_for_testing(repository, differentiation_field='1'):
- # Populate a manifest.
- layer_json = json.dumps({
- 'config': {},
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [],
- })
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- remote_digest = sha256_digest('something')
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(config_digest, len(layer_json))
- builder.add_layer(remote_digest, 1234, urls=['http://hello/world' + differentiation_field])
- manifest = builder.build()
-
- created = get_or_create_manifest(repository, manifest, storage)
- assert created
- return created.manifest, manifest
-
-
-def test_lookup_manifest_child_tag(initialized_db):
- repository = create_repository('devtable', 'newrepo', None)
- manifest, manifest_impl = create_manifest_for_testing(repository)
-
- # Mark the hidden tag as dead.
- hidden_tag = Tag.get(manifest=manifest, hidden=True)
- hidden_tag.lifetime_end_ms = hidden_tag.lifetime_start_ms
- hidden_tag.save()
-
- # Ensure the manifest cannot currently be looked up, as it is not pointed to by an alive tag.
- assert lookup_manifest(repository, manifest.digest) is None
- assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None
-
- # Populate a manifest list.
- list_builder = DockerSchema2ManifestListBuilder()
- list_builder.add_manifest(manifest_impl, 'amd64', 'linux')
- manifest_list = list_builder.build()
-
- # Write the manifest list, which should also write the manifests themselves.
- created_tuple = get_or_create_manifest(repository, manifest_list, storage)
- assert created_tuple is not None
-
- # Since the manifests are not yet referenced by a tag, they cannot be found.
- assert lookup_manifest(repository, manifest.digest) is None
- assert lookup_manifest(repository, manifest_list.digest) is None
-
- # Unless we ask for "dead" manifests.
- assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None
- assert lookup_manifest(repository, manifest_list.digest, allow_dead=True) is not None
-
-
-def _populate_blob(content):
- digest = str(sha256_digest(content))
- location = ImageStorageLocation.get(name='local_us')
- blob = store_blob_record_and_temp_link('devtable', 'newrepo', digest, location,
- len(content), 120)
- storage.put_content(['local_us'], get_layer_path(blob), content)
- return blob, digest
-
-
-@pytest.mark.parametrize('schema_version', [
- 1,
- 2,
-])
-def test_get_or_create_manifest(schema_version, initialized_db):
- repository = create_repository('devtable', 'newrepo', None)
-
- expected_labels = {
- 'Foo': 'Bar',
- 'Baz': 'Meh',
- }
-
- layer_json = json.dumps({
- 'id': 'somelegacyid',
- 'config': {
- 'Labels': expected_labels,
- },
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- # Create a legacy image.
- find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- # Add a blob of random data.
- random_data = 'hello world'
- _, random_digest = _populate_blob(random_data)
-
- # Build the manifest.
- if schema_version == 1:
- builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
- builder.add_layer(random_digest, layer_json)
- sample_manifest_instance = builder.build(docker_v2_signing_key)
- elif schema_version == 2:
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(config_digest, len(layer_json))
- builder.add_layer(random_digest, len(random_data))
- sample_manifest_instance = builder.build()
-
- # Create a new manifest.
- created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
- created = created_manifest.manifest
- newly_created = created_manifest.newly_created
-
- assert newly_created
- assert created is not None
- assert created.media_type.name == sample_manifest_instance.media_type
- assert created.digest == sample_manifest_instance.digest
- assert created.manifest_bytes == sample_manifest_instance.bytes.as_encoded_str()
- assert created_manifest.labels_to_apply == expected_labels
-
- # Verify it has a temporary tag pointing to it.
- assert Tag.get(manifest=created, hidden=True).lifetime_end_ms
-
- # Verify the legacy image.
- legacy_image = get_legacy_image_for_manifest(created)
- assert legacy_image is not None
- assert legacy_image.storage.content_checksum == random_digest
-
- # Verify the linked blobs.
- blob_digests = [mb.blob.content_checksum for mb
- in ManifestBlob.select().where(ManifestBlob.manifest == created)]
-
- assert random_digest in blob_digests
- if schema_version == 2:
- assert config_digest in blob_digests
-
- # Retrieve it again and ensure it is the same manifest.
- created_manifest2 = get_or_create_manifest(repository, sample_manifest_instance, storage)
- created2 = created_manifest2.manifest
- newly_created2 = created_manifest2.newly_created
-
- assert not newly_created2
- assert created2 == created
-
- # Ensure it again has a temporary tag.
- assert Tag.get(manifest=created2, hidden=True).lifetime_end_ms
-
- # Ensure the labels were added.
- labels = list(list_manifest_labels(created))
- assert len(labels) == 2
-
- labels_dict = {label.key: label.value for label in labels}
- assert labels_dict == expected_labels
-
-
-def test_get_or_create_manifest_invalid_image(initialized_db):
- repository = get_repository('devtable', 'simple')
-
- latest_tag = get_tag(repository, 'latest')
- parsed = DockerSchema1Manifest(Bytes.for_string_or_unicode(latest_tag.manifest.manifest_bytes),
- validate=False)
-
- builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
- builder.add_layer(parsed.blob_digests[0], '{"id": "foo", "parent": "someinvalidimageid"}')
- sample_manifest_instance = builder.build(docker_v2_signing_key)
-
- created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
- assert created_manifest is None
-
-
-def test_get_or_create_manifest_list(initialized_db):
- repository = create_repository('devtable', 'newrepo', None)
-
- expected_labels = {
- 'Foo': 'Bar',
- 'Baz': 'Meh',
- }
-
- layer_json = json.dumps({
- 'id': 'somelegacyid',
- 'config': {
- 'Labels': expected_labels,
- },
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- # Create a legacy image.
- find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- # Add a blob of random data.
- random_data = 'hello world'
- _, random_digest = _populate_blob(random_data)
-
- # Build the manifests.
- v1_builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
- v1_builder.add_layer(random_digest, layer_json)
- v1_manifest = v1_builder.build(docker_v2_signing_key).unsigned()
-
- v2_builder = DockerSchema2ManifestBuilder()
- v2_builder.set_config_digest(config_digest, len(layer_json))
- v2_builder.add_layer(random_digest, len(random_data))
- v2_manifest = v2_builder.build()
-
- # Write the manifests.
- v1_created = get_or_create_manifest(repository, v1_manifest, storage)
- assert v1_created
- assert v1_created.manifest.digest == v1_manifest.digest
-
- v2_created = get_or_create_manifest(repository, v2_manifest, storage)
- assert v2_created
- assert v2_created.manifest.digest == v2_manifest.digest
-
- # Build the manifest list.
- list_builder = DockerSchema2ManifestListBuilder()
- list_builder.add_manifest(v1_manifest, 'amd64', 'linux')
- list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
- manifest_list = list_builder.build()
-
- # Write the manifest list, which should also write the manifests themselves.
- created_tuple = get_or_create_manifest(repository, manifest_list, storage)
- assert created_tuple is not None
-
- created_list = created_tuple.manifest
- assert created_list
- assert created_list.media_type.name == manifest_list.media_type
- assert created_list.digest == manifest_list.digest
-
- # Ensure the child manifest links exist.
- child_manifests = {cm.child_manifest.digest: cm.child_manifest
- for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
- assert len(child_manifests) == 2
- assert v1_manifest.digest in child_manifests
- assert v2_manifest.digest in child_manifests
-
- assert child_manifests[v1_manifest.digest].media_type.name == v1_manifest.media_type
- assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
-
-
-def test_get_or_create_manifest_list_duplicate_child_manifest(initialized_db):
- repository = create_repository('devtable', 'newrepo', None)
-
- expected_labels = {
- 'Foo': 'Bar',
- 'Baz': 'Meh',
- }
-
- layer_json = json.dumps({
- 'id': 'somelegacyid',
- 'config': {
- 'Labels': expected_labels,
- },
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- # Create a legacy image.
- find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- # Add a blob of random data.
- random_data = 'hello world'
- _, random_digest = _populate_blob(random_data)
-
- # Build the manifest.
- v2_builder = DockerSchema2ManifestBuilder()
- v2_builder.set_config_digest(config_digest, len(layer_json))
- v2_builder.add_layer(random_digest, len(random_data))
- v2_manifest = v2_builder.build()
-
- # Write the manifest.
- v2_created = get_or_create_manifest(repository, v2_manifest, storage)
- assert v2_created
- assert v2_created.manifest.digest == v2_manifest.digest
-
- # Build the manifest list, with the child manifest repeated.
- list_builder = DockerSchema2ManifestListBuilder()
- list_builder.add_manifest(v2_manifest, 'amd64', 'linux')
- list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
- manifest_list = list_builder.build()
-
- # Write the manifest list, which should also write the manifests themselves.
- created_tuple = get_or_create_manifest(repository, manifest_list, storage)
- assert created_tuple is not None
-
- created_list = created_tuple.manifest
- assert created_list
- assert created_list.media_type.name == manifest_list.media_type
- assert created_list.digest == manifest_list.digest
-
- # Ensure the child manifest links exist.
- child_manifests = {cm.child_manifest.digest: cm.child_manifest
- for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
- assert len(child_manifests) == 1
- assert v2_manifest.digest in child_manifests
- assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
-
- # Try to create again and ensure we get back the same manifest list.
- created2_tuple = get_or_create_manifest(repository, manifest_list, storage)
- assert created2_tuple is not None
- assert created2_tuple.manifest == created_list
-
-
-def test_get_or_create_manifest_with_remote_layers(initialized_db):
- repository = create_repository('devtable', 'newrepo', None)
-
- layer_json = json.dumps({
- 'config': {},
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- # Add a blob of random data.
- random_data = 'hello world'
- _, random_digest = _populate_blob(random_data)
-
- remote_digest = sha256_digest('something')
-
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(config_digest, len(layer_json))
- builder.add_layer(remote_digest, 1234, urls=['http://hello/world'])
- builder.add_layer(random_digest, len(random_data))
- manifest = builder.build()
-
- assert remote_digest in manifest.blob_digests
- assert remote_digest not in manifest.local_blob_digests
-
- assert manifest.has_remote_layer
- assert not manifest.has_legacy_image
- assert manifest.get_schema1_manifest('foo', 'bar', 'baz', None) is None
-
- # Write the manifest.
- created_tuple = get_or_create_manifest(repository, manifest, storage)
- assert created_tuple is not None
-
- created_manifest = created_tuple.manifest
- assert created_manifest
- assert created_manifest.media_type.name == manifest.media_type
- assert created_manifest.digest == manifest.digest
-
- # Verify the legacy image.
- legacy_image = get_legacy_image_for_manifest(created_manifest)
- assert legacy_image is None
-
- # Verify the linked blobs.
- blob_digests = {mb.blob.content_checksum for mb
- in ManifestBlob.select().where(ManifestBlob.manifest == created_manifest)}
-
- assert random_digest in blob_digests
- assert config_digest in blob_digests
- assert remote_digest not in blob_digests
-
-
-def create_manifest_for_testing(repository, differentiation_field='1', include_shared_blob=False):
- # Populate a manifest.
- layer_json = json.dumps({
- 'config': {},
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [],
- })
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- remote_digest = sha256_digest('something')
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(config_digest, len(layer_json))
- builder.add_layer(remote_digest, 1234, urls=['http://hello/world' + differentiation_field])
-
- if include_shared_blob:
- _, blob_digest = _populate_blob('some data here')
- builder.add_layer(blob_digest, 4567)
-
- manifest = builder.build()
-
- created = get_or_create_manifest(repository, manifest, storage)
- assert created
- return created.manifest, manifest
-
-
-def test_retriever(initialized_db):
- repository = create_repository('devtable', 'newrepo', None)
-
- layer_json = json.dumps({
- 'config': {},
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- # Add a blob containing the config.
- _, config_digest = _populate_blob(layer_json)
-
- # Add a blob of random data.
- random_data = 'hello world'
- _, random_digest = _populate_blob(random_data)
-
- # Add another blob of random data.
- other_random_data = 'hi place'
- _, other_random_digest = _populate_blob(other_random_data)
-
- remote_digest = sha256_digest('something')
-
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(config_digest, len(layer_json))
- builder.add_layer(other_random_digest, len(other_random_data))
- builder.add_layer(random_digest, len(random_data))
- manifest = builder.build()
-
- assert config_digest in manifest.blob_digests
- assert random_digest in manifest.blob_digests
- assert other_random_digest in manifest.blob_digests
-
- assert config_digest in manifest.local_blob_digests
- assert random_digest in manifest.local_blob_digests
- assert other_random_digest in manifest.local_blob_digests
-
- # Write the manifest.
- created_tuple = get_or_create_manifest(repository, manifest, storage)
- assert created_tuple is not None
-
- created_manifest = created_tuple.manifest
- assert created_manifest
- assert created_manifest.media_type.name == manifest.media_type
- assert created_manifest.digest == manifest.digest
-
- # Verify the linked blobs.
- blob_digests = {mb.blob.content_checksum for mb
- in ManifestBlob.select().where(ManifestBlob.manifest == created_manifest)}
-
- assert random_digest in blob_digests
- assert other_random_digest in blob_digests
- assert config_digest in blob_digests
-
- # Delete any Image rows linking to the blobs from temp tags.
- for blob_digest in blob_digests:
- storage_row = ImageStorage.get(content_checksum=blob_digest)
- for image in list(Image.select().where(Image.storage == storage_row)):
- all_temp = all([rt.hidden for rt
- in RepositoryTag.select().where(RepositoryTag.image == image)])
- if all_temp:
- RepositoryTag.delete().where(RepositoryTag.image == image).execute()
- image.delete_instance(recursive=True)
-
- # Verify the blobs in the retriever.
- retriever = RepositoryContentRetriever(repository, storage)
- assert (retriever.get_manifest_bytes_with_digest(created_manifest.digest) ==
- manifest.bytes.as_encoded_str())
-
- for blob_digest in blob_digests:
- assert retriever.get_blob_bytes_with_digest(blob_digest) is not None
diff --git a/data/model/oci/test/test_oci_tag.py b/data/model/oci/test/test_oci_tag.py
deleted file mode 100644
index d37828cf7..000000000
--- a/data/model/oci/test/test_oci_tag.py
+++ /dev/null
@@ -1,378 +0,0 @@
-from calendar import timegm
-from datetime import timedelta, datetime
-
-from playhouse.test_utils import assert_query_count
-
-from data.database import (Tag, ManifestLegacyImage, TagToRepositoryTag, TagManifestToManifest,
- TagManifest, Manifest, Repository)
-from data.model.oci.test.test_oci_manifest import create_manifest_for_testing
-from data.model.oci.tag import (find_matching_tag, get_most_recent_tag,
- get_most_recent_tag_lifetime_start, list_alive_tags,
- get_legacy_images_for_tags, filter_to_alive_tags,
- filter_to_visible_tags, list_repository_tag_history,
- get_expired_tag, get_tag, delete_tag,
- delete_tags_for_manifest, change_tag_expiration,
- set_tag_expiration_for_manifest, retarget_tag,
- create_temporary_tag_if_necessary,
- lookup_alive_tags_shallow,
- lookup_unrecoverable_tags,
- get_epoch_timestamp_ms)
-from data.model.repository import get_repository, create_repository
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('namespace_name, repo_name, tag_names, expected', [
- ('devtable', 'simple', ['latest'], 'latest'),
- ('devtable', 'simple', ['unknown', 'latest'], 'latest'),
- ('devtable', 'simple', ['unknown'], None),
-])
-def test_find_matching_tag(namespace_name, repo_name, tag_names, expected, initialized_db):
- repo = get_repository(namespace_name, repo_name)
- if expected is not None:
- with assert_query_count(1):
- found = find_matching_tag(repo, tag_names)
-
- assert found is not None
- assert found.name == expected
- assert not found.lifetime_end_ms
- else:
- with assert_query_count(1):
- assert find_matching_tag(repo, tag_names) is None
-
-
-def test_get_most_recent_tag_lifetime_start(initialized_db):
- repo = get_repository('devtable', 'simple')
- tag = get_most_recent_tag(repo)
-
- with assert_query_count(1):
- tags = get_most_recent_tag_lifetime_start([repo])
- assert tags[repo.id] == tag.lifetime_start_ms
-
-
-def test_get_most_recent_tag(initialized_db):
- repo = get_repository('outsideorg', 'coolrepo')
-
- with assert_query_count(1):
- assert get_most_recent_tag(repo).name == 'latest'
-
-
-def test_get_most_recent_tag_empty_repo(initialized_db):
- empty_repo = create_repository('devtable', 'empty', None)
-
- with assert_query_count(1):
- assert get_most_recent_tag(empty_repo) is None
-
-
-def test_list_alive_tags(initialized_db):
- found = False
- for tag in filter_to_visible_tags(filter_to_alive_tags(Tag.select())):
- tags = list_alive_tags(tag.repository)
- assert tag in tags
-
- with assert_query_count(1):
- legacy_images = get_legacy_images_for_tags(tags)
-
- for tag in tags:
- assert ManifestLegacyImage.get(manifest=tag.manifest).image == legacy_images[tag.id]
-
- found = True
-
- assert found
-
- # Ensure hidden tags cannot be listed.
- tag = Tag.get()
- tag.hidden = True
- tag.save()
-
- tags = list_alive_tags(tag.repository)
- assert tag not in tags
-
-
-def test_lookup_alive_tags_shallow(initialized_db):
- found = False
- for tag in filter_to_visible_tags(filter_to_alive_tags(Tag.select())):
- tags = lookup_alive_tags_shallow(tag.repository)
- found = True
- assert tag in tags
-
- assert found
-
- # Ensure hidden tags cannot be listed.
- tag = Tag.get()
- tag.hidden = True
- tag.save()
-
- tags = lookup_alive_tags_shallow(tag.repository)
- assert tag not in tags
-
-
-def test_get_tag(initialized_db):
- found = False
- for tag in filter_to_visible_tags(filter_to_alive_tags(Tag.select())):
- repo = tag.repository
-
- with assert_query_count(1):
- assert get_tag(repo, tag.name) == tag
- found = True
-
- assert found
-
-
-@pytest.mark.parametrize('namespace_name, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
-])
-def test_list_repository_tag_history(namespace_name, repo_name, initialized_db):
- repo = get_repository(namespace_name, repo_name)
-
- with assert_query_count(1):
- results, has_more = list_repository_tag_history(repo, 1, 100)
-
- assert results
- assert not has_more
-
-
-def test_list_repository_tag_history_with_history(initialized_db):
- repo = get_repository('devtable', 'history')
-
- with assert_query_count(1):
- results, _ = list_repository_tag_history(repo, 1, 100)
-
- assert len(results) == 2
- assert results[0].lifetime_end_ms is None
- assert results[1].lifetime_end_ms is not None
-
- with assert_query_count(1):
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
-
- assert len(results) == 2
- assert results[0].lifetime_end_ms is None
- assert results[1].lifetime_end_ms is not None
-
- with assert_query_count(1):
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='foobar')
-
- assert len(results) == 0
-
-
-def test_list_repository_tag_history_all_tags(initialized_db):
- for tag in Tag.select():
- repo = tag.repository
- with assert_query_count(1):
- results, _ = list_repository_tag_history(repo, 1, 1000)
-
- assert (tag in results) == (not tag.hidden)
-
-
-@pytest.mark.parametrize('namespace_name, repo_name, tag_name, expected', [
- ('devtable', 'simple', 'latest', False),
- ('devtable', 'simple', 'unknown', False),
- ('devtable', 'complex', 'latest', False),
-
- ('devtable', 'history', 'latest', True),
-])
-def test_get_expired_tag(namespace_name, repo_name, tag_name, expected, initialized_db):
- repo = get_repository(namespace_name, repo_name)
-
- with assert_query_count(1):
- assert bool(get_expired_tag(repo, tag_name)) == expected
-
-
-def test_delete_tag(initialized_db):
- found = False
- for tag in list(filter_to_visible_tags(filter_to_alive_tags(Tag.select()))):
- repo = tag.repository
-
- assert get_tag(repo, tag.name) == tag
- assert tag.lifetime_end_ms is None
-
- with assert_query_count(4):
- assert delete_tag(repo, tag.name) == tag
-
- assert get_tag(repo, tag.name) is None
- found = True
-
- assert found
-
-
-def test_delete_tags_for_manifest(initialized_db):
- for tag in list(filter_to_visible_tags(filter_to_alive_tags(Tag.select()))):
- repo = tag.repository
- assert get_tag(repo, tag.name) == tag
-
- with assert_query_count(5):
- assert delete_tags_for_manifest(tag.manifest) == [tag]
-
- assert get_tag(repo, tag.name) is None
-
-
-def test_delete_tags_for_manifest_same_manifest(initialized_db):
- new_repo = model.repository.create_repository('devtable', 'newrepo', None)
- manifest_1, _ = create_manifest_for_testing(new_repo, '1')
- manifest_2, _ = create_manifest_for_testing(new_repo, '2')
-
- assert manifest_1.digest != manifest_2.digest
-
- # Add some tag history, moving a tag back and forth between two manifests.
- retarget_tag('latest', manifest_1)
- retarget_tag('latest', manifest_2)
- retarget_tag('latest', manifest_1)
- retarget_tag('latest', manifest_2)
-
- retarget_tag('another1', manifest_1)
- retarget_tag('another2', manifest_2)
-
- # Delete all tags pointing to the first manifest.
- delete_tags_for_manifest(manifest_1)
-
- assert get_tag(new_repo, 'latest').manifest == manifest_2
- assert get_tag(new_repo, 'another1') is None
- assert get_tag(new_repo, 'another2').manifest == manifest_2
-
- # Delete all tags pointing to the second manifest, which should actually delete the `latest`
- # tag now.
- delete_tags_for_manifest(manifest_2)
- assert get_tag(new_repo, 'latest') is None
- assert get_tag(new_repo, 'another1') is None
- assert get_tag(new_repo, 'another2') is None
-
-
-@pytest.mark.parametrize('timedelta, expected_timedelta', [
- pytest.param(timedelta(seconds=1), timedelta(hours=1), id='less than minimum'),
- pytest.param(timedelta(weeks=300), timedelta(weeks=104), id='more than maximum'),
- pytest.param(timedelta(weeks=1), timedelta(weeks=1), id='within range'),
-])
-def test_change_tag_expiration(timedelta, expected_timedelta, initialized_db):
- now = datetime.utcnow()
- now_ms = timegm(now.utctimetuple()) * 1000
-
- tag = Tag.get()
- tag.lifetime_start_ms = now_ms
- tag.save()
-
- original_end_ms, okay = change_tag_expiration(tag, now + timedelta)
- assert okay
- assert original_end_ms == tag.lifetime_end_ms
-
- updated_tag = Tag.get(id=tag.id)
- offset = expected_timedelta.total_seconds() * 1000
- expected_ms = (updated_tag.lifetime_start_ms + offset)
- assert updated_tag.lifetime_end_ms == expected_ms
-
- original_end_ms, okay = change_tag_expiration(tag, None)
- assert okay
- assert original_end_ms == expected_ms
-
- updated_tag = Tag.get(id=tag.id)
- assert updated_tag.lifetime_end_ms is None
-
-
-def test_set_tag_expiration_for_manifest(initialized_db):
- tag = Tag.get()
- manifest = tag.manifest
- assert manifest is not None
-
- set_tag_expiration_for_manifest(manifest, datetime.utcnow() + timedelta(weeks=1))
-
- updated_tag = Tag.get(id=tag.id)
- assert updated_tag.lifetime_end_ms is not None
-
-
-def test_create_temporary_tag_if_necessary(initialized_db):
- tag = Tag.get()
- manifest = tag.manifest
- assert manifest is not None
-
- # Ensure no tag is created, since an existing one is present.
- created = create_temporary_tag_if_necessary(manifest, 60)
- assert created is None
-
- # Mark the tag as deleted.
- tag.lifetime_end_ms = 1
- tag.save()
-
- # Now create a temp tag.
- created = create_temporary_tag_if_necessary(manifest, 60)
- assert created is not None
- assert created.hidden
- assert created.name.startswith('$temp-')
- assert created.manifest == manifest
- assert created.lifetime_end_ms is not None
- assert created.lifetime_end_ms == (created.lifetime_start_ms + 60000)
-
- # Try again and ensure it is not created.
- created = create_temporary_tag_if_necessary(manifest, 30)
- assert created is None
-
-
-def test_retarget_tag(initialized_db):
- repo = get_repository('devtable', 'history')
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
-
- assert len(results) == 2
- assert results[0].lifetime_end_ms is None
- assert results[1].lifetime_end_ms is not None
-
- # Revert back to the original manifest.
- created = retarget_tag('latest', results[0].manifest, is_reversion=True,
- now_ms=results[1].lifetime_end_ms + 10000)
- assert created.lifetime_end_ms is None
- assert created.reversion
- assert created.name == 'latest'
- assert created.manifest == results[0].manifest
-
- # Verify in the history.
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
-
- assert len(results) == 3
- assert results[0].lifetime_end_ms is None
- assert results[1].lifetime_end_ms is not None
- assert results[2].lifetime_end_ms is not None
-
- assert results[0] == created
-
- # Verify old-style tables.
- repository_tag = TagToRepositoryTag.get(tag=created).repository_tag
- assert repository_tag.lifetime_start_ts == int(created.lifetime_start_ms / 1000)
-
- tag_manifest = TagManifest.get(tag=repository_tag)
- assert TagManifestToManifest.get(tag_manifest=tag_manifest).manifest == created.manifest
-
-
-def test_retarget_tag_wrong_name(initialized_db):
- repo = get_repository('devtable', 'history')
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
- assert len(results) == 2
-
- created = retarget_tag('someothername', results[1].manifest, is_reversion=True)
- assert created is None
-
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
- assert len(results) == 2
-
-
-def test_lookup_unrecoverable_tags(initialized_db):
- # Ensure no existing tags are found.
- for repo in Repository.select():
- assert not list(lookup_unrecoverable_tags(repo))
-
- # Mark a tag as outside the expiration window and ensure it is found.
- repo = get_repository('devtable', 'history')
- results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
- assert len(results) == 2
-
- results[1].lifetime_end_ms = 1
- results[1].save()
-
- # Ensure the tag is now found.
- found = list(lookup_unrecoverable_tags(repo))
- assert found
- assert len(found) == 1
- assert found[0] == results[1]
-
- # Mark the tag as expiring in the future and ensure it is no longer found.
- results[1].lifetime_end_ms = get_epoch_timestamp_ms() + 1000000
- results[1].save()
-
- found = list(lookup_unrecoverable_tags(repo))
- assert not found
diff --git a/data/model/organization.py b/data/model/organization.py
index b42f0d454..a855385e9 100644
--- a/data/model/organization.py
+++ b/data/model/organization.py
@@ -1,28 +1,29 @@
from data.database import (User, FederatedLogin, TeamMember, Team, TeamRole, RepositoryPermission,
- Repository, Namespace, DeletedNamespace)
+ Repository, Namespace)
from data.model import (user, team, DataModelException, InvalidOrganizationException,
InvalidUsernameException, db_transaction, _basequery)
-def create_organization(name, email, creating_user, email_required=True, is_possible_abuser=False):
- with db_transaction():
- try:
- # Create the org
- new_org = user.create_user_noverify(name, email, email_required=email_required,
- is_possible_abuser=is_possible_abuser)
- new_org.organization = True
- new_org.save()
+def create_organization(name, email, creating_user):
+ try:
+ # Create the org
+ new_org = user.create_user_noverify(name, email)
+ new_org.organization = True
+ new_org.save()
- # Create a team for the owners
- owners_team = team.create_team('owners', new_org, 'admin')
+ # Create a team for the owners
+ owners_team = team.create_team('owners', new_org, 'admin')
- # Add the user who created the org to the owners team
- team.add_user_to_team(creating_user, owners_team)
+ # Add the user who created the org to the owners team
+ team.add_user_to_team(creating_user, owners_team)
- return new_org
- except InvalidUsernameException as iue:
- raise InvalidOrganizationException(iue.message)
+ return new_org
+ except InvalidUsernameException:
+ msg = ('Invalid organization name: %s. Organization names must consist ' +
+ 'solely of lower case letters, numbers, and underscores. ' +
+ '[a-z0-9_]') % name
+ raise InvalidOrganizationException(msg)
def get_organization(name):
@@ -34,30 +35,23 @@ def get_organization(name):
def convert_user_to_organization(user_obj, admin_user):
- if user_obj.robot:
- raise DataModelException('Cannot convert a robot into an organization')
+ # Change the user to an organization.
+ user_obj.organization = True
- with db_transaction():
- # Change the user to an organization and disable this account for login.
- user_obj.organization = True
- user_obj.password_hash = None
- user_obj.save()
+ # disable this account for login.
+ user_obj.password_hash = None
+ user_obj.save()
- # Clear any federated auth pointing to this user.
- FederatedLogin.delete().where(FederatedLogin.user == user_obj).execute()
+ # Clear any federated auth pointing to this user
+ FederatedLogin.delete().where(FederatedLogin.user == user_obj).execute()
- # Delete any user-specific permissions on repositories.
- (RepositoryPermission.delete()
- .where(RepositoryPermission.user == user_obj)
- .execute())
+ # Create a team for the owners
+ owners_team = team.create_team('owners', user_obj, 'admin')
- # Create a team for the owners
- owners_team = team.create_team('owners', user_obj, 'admin')
+ # Add the user who will admin the org to the owners team
+ team.add_user_to_team(admin_user, owners_team)
- # Add the user who will admin the org to the owners team
- team.add_user_to_team(admin_user, owners_team)
-
- return user_obj
+ return user_obj
def get_user_organizations(username):
@@ -78,9 +72,6 @@ def __get_org_admin_users(org):
.where(Team.organization == org, TeamRole.name == 'admin', User.robot == False)
.distinct())
-def get_admin_users(org):
- """ Returns the owner users for the organization. """
- return __get_org_admin_users(org)
def remove_organization_member(org, user_obj):
org_admins = [u.username for u in __get_org_admin_users(org)]
@@ -108,28 +99,15 @@ def remove_organization_member(org, user_obj):
TeamMember.delete().where(TeamMember.id << members).execute()
-def get_organization_member_set(org, include_robots=False, users_filter=None):
- """ Returns the set of all member usernames under the given organization, with optional
- filtering by robots and/or by a specific set of User objects.
- """
+def get_organization_member_set(orgname):
Org = User.alias()
org_users = (User
.select(User.username)
.join(TeamMember)
.join(Team)
- .where(Team.organization == org)
+ .join(Org, on=(Org.id == Team.organization))
+ .where(Org.username == orgname)
.distinct())
-
- if not include_robots:
- org_users = org_users.where(User.robot == False)
-
- if users_filter is not None:
- ids_list = [u.id for u in users_filter if u is not None]
- if not ids_list:
- return set()
-
- org_users = org_users.where(User.id << ids_list)
-
return {user.username for user in org_users}
@@ -145,23 +123,6 @@ def get_all_repo_users_transitive_via_teams(namespace_name, repository_name):
.where(Namespace.username == namespace_name, Repository.name == repository_name))
-def get_organizations(deleted=False):
- query = User.select().where(User.organization == True, User.robot == False)
+def get_organizations():
+ return User.select().where(User.organization == True, User.robot == False)
- if not deleted:
- query = query.where(User.id.not_in(DeletedNamespace.select(DeletedNamespace.namespace)))
-
- return query
-
-
-def get_active_org_count():
- return get_organizations().count()
-
-
-def add_user_as_admin(user_obj, org_obj):
- try:
- admin_role = TeamRole.get(name='admin')
- admin_team = Team.select().where(Team.role == admin_role, Team.organization == org_obj).get()
- team.add_user_to_team(user_obj, admin_team)
- except team.UserAlreadyInTeam:
- pass
diff --git a/data/model/permission.py b/data/model/permission.py
index e38584561..52dcf40f1 100644
--- a/data/model/permission.py
+++ b/data/model/permission.py
@@ -1,19 +1,8 @@
-from peewee import JOIN
+from peewee import JOIN_LEFT_OUTER
from data.database import (RepositoryPermission, User, Repository, Visibility, Role, TeamMember,
PermissionPrototype, Team, TeamRole, Namespace)
from data.model import DataModelException, _basequery
-from util.names import parse_robot_username
-
-def list_team_permissions(team):
- return (RepositoryPermission
- .select(RepositoryPermission)
- .join(Repository)
- .join(Visibility)
- .switch(RepositoryPermission)
- .join(Role)
- .switch(RepositoryPermission)
- .where(RepositoryPermission.team == team))
def list_robot_permissions(robot_name):
@@ -28,23 +17,18 @@ def list_robot_permissions(robot_name):
.where(User.username == robot_name, User.robot == True))
-def list_organization_member_permissions(organization, limit_to_user=None):
+def list_organization_member_permissions(organization):
query = (RepositoryPermission
.select(RepositoryPermission, Repository, User)
.join(Repository)
.switch(RepositoryPermission)
.join(User)
- .where(Repository.namespace_user == organization))
-
- if limit_to_user is not None:
- query = query.where(RepositoryPermission.user == limit_to_user)
- else:
- query = query.where(User.robot == False)
-
+ .where(Repository.namespace_user == organization)
+ .where(User.robot == False))
return query
-def get_all_user_repository_permissions(user):
+def get_all_user_permissions(user):
return _get_user_repo_permissions(user)
@@ -52,12 +36,7 @@ def get_user_repo_permissions(user, repo):
return _get_user_repo_permissions(user, limit_to_repository_obj=repo)
-def get_user_repository_permissions(user, namespace, repo_name):
- return _get_user_repo_permissions(user, limit_namespace=namespace, limit_repo_name=repo_name)
-
-
-def _get_user_repo_permissions(user, limit_to_repository_obj=None, limit_namespace=None,
- limit_repo_name=None):
+def _get_user_repo_permissions(user, limit_to_repository_obj=None):
UserThroughTeam = User.alias()
base_query = (RepositoryPermission
@@ -70,9 +49,6 @@ def _get_user_repo_permissions(user, limit_to_repository_obj=None, limit_namespa
if limit_to_repository_obj is not None:
base_query = base_query.where(RepositoryPermission.repository == limit_to_repository_obj)
- elif limit_namespace and limit_repo_name:
- base_query = base_query.where(Repository.name == limit_repo_name,
- Namespace.username == limit_namespace)
direct = (base_query
.clone()
@@ -112,13 +88,13 @@ def get_prototype_permissions(org):
query = (PermissionPrototype
.select()
.where(PermissionPrototype.org == org)
- .join(ActivatingUser, JOIN.LEFT_OUTER,
+ .join(ActivatingUser, JOIN_LEFT_OUTER,
on=(ActivatingUser.id == PermissionPrototype.activating_user))
- .join(DelegateUser, JOIN.LEFT_OUTER,
+ .join(DelegateUser, JOIN_LEFT_OUTER,
on=(DelegateUser.id == PermissionPrototype.delegate_user))
- .join(Team, JOIN.LEFT_OUTER,
+ .join(Team, JOIN_LEFT_OUTER,
on=(Team.id == PermissionPrototype.delegate_team))
- .join(Role, JOIN.LEFT_OUTER, on=(Role.id == PermissionPrototype.role)))
+ .join(Role, JOIN_LEFT_OUTER, on=(Role.id == PermissionPrototype.role)))
return query
@@ -140,16 +116,12 @@ def add_prototype_permission(org, role_name, activating_user,
delegate_user=delegate_user, delegate_team=delegate_team)
-def get_org_wide_permissions(user, org_filter=None):
+def get_org_wide_permissions(user):
Org = User.alias()
team_with_role = Team.select(Team, Org, TeamRole).join(TeamRole)
with_org = team_with_role.switch(Team).join(Org, on=(Team.organization ==
Org.id))
with_user = with_org.switch(Team).join(TeamMember).join(User)
-
- if org_filter:
- with_user.where(Org.username == org_filter)
-
return with_user.where(User.id == user, Org.organization == True)
@@ -293,17 +265,6 @@ def set_user_repo_permission(username, namespace_name, repository_name, role_nam
user = User.get(User.username == username)
except User.DoesNotExist:
raise DataModelException('Invalid username: %s' % username)
-
- if user.robot:
- parts = parse_robot_username(user.username)
- if not parts:
- raise DataModelException('Invalid robot: %s' % username)
-
- robot_namespace, _ = parts
- if robot_namespace != namespace_name:
- raise DataModelException('Cannot add robot %s under namespace %s' %
- (username, namespace_name))
-
return __set_entity_repo_permission(user, 'user', namespace_name, repository_name, role_name)
diff --git a/data/model/release.py b/data/model/release.py
deleted file mode 100644
index f827eaeb0..000000000
--- a/data/model/release.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from data.database import QuayRelease, QuayRegion, QuayService
-
-
-def set_region_release(service_name, region_name, version):
- service, _ = QuayService.get_or_create(name=service_name)
- region, _ = QuayRegion.get_or_create(name=region_name)
-
- return QuayRelease.get_or_create(service=service, version=version, region=region)
-
-
-def get_recent_releases(service_name, region_name):
- return (QuayRelease
- .select(QuayRelease)
- .join(QuayService)
- .switch(QuayRelease)
- .join(QuayRegion)
- .where(QuayService.name == service_name,
- QuayRegion.name == region_name,
- QuayRelease.reverted == False,
- )
- .order_by(QuayRelease.created.desc()))
diff --git a/data/model/repo_mirror.py b/data/model/repo_mirror.py
deleted file mode 100644
index 4b9a03a87..000000000
--- a/data/model/repo_mirror.py
+++ /dev/null
@@ -1,529 +0,0 @@
-import re
-
-from datetime import datetime, timedelta
-
-from peewee import IntegrityError, fn
-from jsonschema import ValidationError
-
-from data.database import (RepoMirrorConfig, RepoMirrorRule, RepoMirrorRuleType, RepoMirrorStatus,
- RepositoryState, Repository, uuid_generator, db_transaction)
-from data.fields import DecryptedValue
-from data.model import DataModelException
-from util.names import parse_robot_username
-
-
-# TODO: Move these to the configuration
-MAX_SYNC_RETRIES = 3
-MAX_SYNC_DURATION = 60*60*2 # 2 Hours
-
-
-def get_eligible_mirrors():
- """
- Returns the RepoMirrorConfigs that are ready to run now. This includes those that are:
- 1. Not currently syncing but whose start time is in the past
- 2. Status of "sync now"
- 3. Currently marked as syncing but whose expiration time is in the past
- """
- now = datetime.utcnow()
- immediate_candidates_filter = ((RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNC_NOW) &
- (RepoMirrorConfig.sync_expiration_date >> None))
-
- ready_candidates_filter = ((RepoMirrorConfig.sync_start_date <= now) &
- (RepoMirrorConfig.sync_retries_remaining > 0) &
- (RepoMirrorConfig.sync_status != RepoMirrorStatus.SYNCING) &
- (RepoMirrorConfig.sync_expiration_date >> None) &
- (RepoMirrorConfig.is_enabled == True))
-
- expired_candidates_filter = ((RepoMirrorConfig.sync_start_date <= now) &
- (RepoMirrorConfig.sync_retries_remaining > 0) &
- (RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNCING) &
- (RepoMirrorConfig.sync_expiration_date <= now) &
- (RepoMirrorConfig.is_enabled == True))
-
- return (RepoMirrorConfig
- .select()
- .join(Repository)
- .where(Repository.state == RepositoryState.MIRROR)
- .where(immediate_candidates_filter | ready_candidates_filter | expired_candidates_filter)
- .order_by(RepoMirrorConfig.sync_start_date.asc()))
-
-
-def get_max_id_for_repo_mirror_config():
- """ Gets the maximum id for repository mirroring """
- return RepoMirrorConfig.select(fn.Max(RepoMirrorConfig.id)).scalar()
-
-
-def get_min_id_for_repo_mirror_config():
- """ Gets the minimum id for a repository mirroring """
- return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar()
-
-
-def claim_mirror(mirror):
- """
- Attempt to create an exclusive lock on the RepoMirrorConfig and return it.
- If unable to create the lock, `None` will be returned.
- """
-
- # Attempt to update the RepoMirrorConfig to mark it as "claimed"
- now = datetime.utcnow()
- expiration_date = now + timedelta(seconds=MAX_SYNC_DURATION)
- query = (RepoMirrorConfig
- .update(sync_status=RepoMirrorStatus.SYNCING,
- sync_expiration_date=expiration_date,
- sync_transaction_id=uuid_generator())
- .where(RepoMirrorConfig.id == mirror.id,
- RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id))
-
- # If the update was successful, then it was claimed. Return the updated instance.
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
-
- return None # Another process must have claimed the mirror faster.
-
-
-def release_mirror(mirror, sync_status):
- """
- Return a mirror to the queue and update its status.
-
- Upon success, move the next sync to the next interval in the future. Failures remain at the
- current date to ensure they are picked up for a repeat attempt. After MAX_SYNC_RETRIES,
- the next sync will be moved ahead as if it were a success. This is to allow a daily sync,
- for example, to retry the next day. Without this, users would need to manually run syncs
- to clear failure state.
- """
- if sync_status == RepoMirrorStatus.FAIL:
- retries = max(0, mirror.sync_retries_remaining - 1)
-
- if sync_status == RepoMirrorStatus.SUCCESS or retries < 1:
- now = datetime.utcnow()
- delta = now - mirror.sync_start_date
- delta_seconds = (delta.days * 24 * 60 * 60) + delta.seconds
- next_start_date = now + timedelta(seconds=mirror.sync_interval - (delta_seconds % mirror.sync_interval))
- retries = MAX_SYNC_RETRIES
- else:
- next_start_date = mirror.sync_start_date
-
- query = (RepoMirrorConfig
- .update(sync_transaction_id=uuid_generator(),
- sync_status=sync_status,
- sync_start_date=next_start_date,
- sync_expiration_date=None,
- sync_retries_remaining=retries)
- .where(RepoMirrorConfig.id == mirror.id,
- RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id))
-
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
-
- # Unable to release Mirror. Has it been claimed by another process?
- return None
-
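# --- Illustrative aside (not part of the original module) ---------------------
# A minimal sketch, under assumptions, of how get_eligible_mirrors(),
# claim_mirror() and release_mirror() are intended to compose in a sync worker.
# perform_sync() is a hypothetical stand-in for the real sync routine. As a
# worked example of the rescheduling in release_mirror(): with a daily interval
# (86400s) and a sync that started 90000s ago, the next start lands at
# now + (86400 - (90000 % 86400)) = now + 82800 seconds.
def _sync_eligible_mirrors():
  for candidate in get_eligible_mirrors():
    claimed = claim_mirror(candidate)
    if claimed is None:
      continue  # another worker claimed this mirror first
    try:
      perform_sync(claimed)  # hypothetical: copy the tags selected by the root rule
      release_mirror(claimed, RepoMirrorStatus.SUCCESS)
    except Exception:
      release_mirror(claimed, RepoMirrorStatus.FAIL)
# ------------------------------------------------------------------------------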
-
-def expire_mirror(mirror):
- """
- Set the mirror to synchronize ASAP and reset its failure count.
- """
-
- # Set the next-sync date to now
- # TODO: Verify the `where` conditions would not expire a currently syncing mirror.
- query = (RepoMirrorConfig
- .update(sync_transaction_id=uuid_generator(),
- sync_expiration_date=datetime.utcnow(),
- sync_retries_remaining=MAX_SYNC_RETRIES)
- .where(RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
- RepoMirrorConfig.id == mirror.id,
- RepoMirrorConfig.state != RepoMirrorStatus.SYNCING))
-
- # Fetch and return the latest updates
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
-
- # Unable to update expiration date. Perhaps another process has claimed it?
- return None # TODO: Raise some Exception?
-
-
-def create_mirroring_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV):
- """
- Create a RepoMirrorRule for a given Repository.
- """
-
- if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
- raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
-
- if not isinstance(rule_value, list) or len(rule_value) < 1:
- raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV must be a list with at least one rule')
-
- rule = RepoMirrorRule.create(repository=repository, rule_type=rule_type, rule_value=rule_value)
- return rule
-
-
-def enable_mirroring_for_repository(repository,
- root_rule,
- internal_robot,
- external_reference,
- sync_interval,
- external_registry_username=None,
- external_registry_password=None,
- external_registry_config=None,
- is_enabled=True,
- sync_start_date=None):
- """
- Create a RepoMirrorConfig and set the Repository to the MIRROR state.
- """
- assert internal_robot.robot
-
- namespace, _ = parse_robot_username(internal_robot.username)
- if namespace != repository.namespace_user.username:
- raise DataModelException('Cannot use robot for mirroring')
-
- with db_transaction():
- # Create the RepoMirrorConfig
- try:
- username = DecryptedValue(external_registry_username) if external_registry_username else None
- password = DecryptedValue(external_registry_password) if external_registry_password else None
- mirror = RepoMirrorConfig.create(repository=repository,
- root_rule=root_rule,
- is_enabled=is_enabled,
- internal_robot=internal_robot,
- external_reference=external_reference,
- external_registry_username=username,
- external_registry_password=password,
- external_registry_config=external_registry_config or {},
- sync_interval=sync_interval,
- sync_start_date=sync_start_date or datetime.utcnow())
- except IntegrityError:
- return RepoMirrorConfig.get(repository=repository)
-
- # Change Repository state to mirroring mode as needed
- if repository.state != RepositoryState.MIRROR:
- query = (Repository
- .update(state=RepositoryState.MIRROR)
- .where(Repository.id == repository.id))
- if not query.execute():
- raise DataModelException('Could not change the state of the repository')
-
- return mirror
-
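# Illustrative setup (not part of the original module; `repo` and `robot` objects
# are assumed): create a root rule and enable mirroring with a daily interval,
# matching the signatures defined above.
root_rule = create_mirroring_rule(repo, ['latest'])
mirror = enable_mirroring_for_repository(repo,
                                         root_rule=root_rule,
                                         internal_robot=robot,
                                         external_reference='registry.example.com/ns/image',
                                         sync_interval=60 * 60 * 24)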
-
-def update_sync_status(mirror, sync_status):
- """
- Update the sync status
- """
- query = (RepoMirrorConfig
- .update(sync_transaction_id=uuid_generator(),
- sync_status=sync_status)
- .where(RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
- RepoMirrorConfig.id == mirror.id))
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
-
- return None
-
-
-def update_sync_status_to_sync_now(mirror):
- """
- This will change the sync status to SYNC_NOW and set the retries remaining to one, if it is
- less than one. None will be returned in cases where this is not possible, such as if the
- mirror is in the SYNCING state.
- """
-
- if mirror.sync_status == RepoMirrorStatus.SYNCING:
- return None
-
- retries = max(mirror.sync_retries_remaining, 1)
-
- query = (RepoMirrorConfig
- .update(sync_transaction_id=uuid_generator(),
- sync_status=RepoMirrorStatus.SYNC_NOW,
- sync_expiration_date=None,
- sync_retries_remaining=retries)
- .where(RepoMirrorConfig.id == mirror.id,
- RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id))
-
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
-
- return None
-
-
-def update_sync_status_to_cancel(mirror):
- """
- If the mirror is SYNCING or SYNC_NOW, it will be force-claimed (ignoring the existing
- transaction id) and its state will be set to NEVER_RUN. None will be returned in cases where
- this is not possible, such as if the mirror is in neither of those states.
- """
-
- if mirror.sync_status != RepoMirrorStatus.SYNCING and mirror.sync_status != RepoMirrorStatus.SYNC_NOW:
- return None
-
- query = (RepoMirrorConfig
- .update(sync_transaction_id=uuid_generator(),
- sync_status=RepoMirrorStatus.NEVER_RUN,
- sync_expiration_date=None)
- .where(RepoMirrorConfig.id == mirror.id))
-
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
-
- return None
-
-
-def update_with_transaction(mirror, **kwargs):
- """
- Helper function which updates a Repository's RepoMirrorConfig while also rolling its
- sync_transaction_id for locking purposes.
- """
-
- # RepoMirrorConfig attributes which can be modified
- mutable_attributes = (
- 'is_enabled',
- 'mirror_type',
- 'external_reference',
- 'external_registry_username',
- 'external_registry_password',
- 'external_registry_config',
- 'sync_interval',
- 'sync_start_date',
- 'sync_expiration_date',
- 'sync_retries_remaining',
- 'sync_status',
- 'sync_transaction_id'
- )
-
- # Key-Value map of changes to make
- filtered_kwargs = {key:kwargs.pop(key) for key in mutable_attributes if key in kwargs}
-
- # Roll the sync_transaction_id to a new value
- filtered_kwargs['sync_transaction_id'] = uuid_generator()
-
- # Generate the query to perform the updates
- query = (RepoMirrorConfig
- .update(filtered_kwargs)
- .where(RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
- RepoMirrorConfig.id == mirror.id))
-
- # Apply the change(s) and return the object if successful
- if query.execute():
- return RepoMirrorConfig.get_by_id(mirror.id)
- else:
- return None
-
-
-def get_mirror(repository):
- """
- Return the RepoMirrorConfig associated with the given Repository, or None if it doesn't exist.
- """
- try:
- return RepoMirrorConfig.get(repository=repository)
- except RepoMirrorConfig.DoesNotExist:
- return None
-
-
-def enable_mirror(repository):
- """
- Enables a RepoMirrorConfig.
- """
- mirror = get_mirror(repository)
- return bool(update_with_transaction(mirror, is_enabled=True))
-
-
-def disable_mirror(repository):
- """
- Disables a RepoMirrorConfig.
- """
- mirror = get_mirror(repository)
- return bool(update_with_transaction(mirror, is_enabled=False))
-
-
-def delete_mirror(repository):
- """
- Delete a Repository Mirroring configuration.
- """
- raise NotImplementedError("TODO: Not Implemented")
-
-
-def change_remote(repository, remote_repository):
- """
- Update the external repository for Repository Mirroring.
- """
- mirror = get_mirror(repository)
- updates = {
- 'external_reference': remote_repository
- }
- return bool(update_with_transaction(mirror, **updates))
-
-
-def change_credentials(repository, username, password):
- """
- Update the credentials used to access the remote repository.
- """
- mirror = get_mirror(repository)
- updates = {
- 'external_registry_username': username,
- 'external_registry_password': password,
- }
- return bool(update_with_transaction(mirror, **updates))
-
-
-def change_username(repository, username):
- """
- Update the Username used to access the external repository.
- """
- mirror = get_mirror(repository)
- return bool(update_with_transaction(mirror, external_registry_username=username))
-
-
-def change_sync_interval(repository, interval):
- """
- Update the interval at which a repository will be synchronized.
- """
- mirror = get_mirror(repository)
- return bool(update_with_transaction(mirror, sync_interval=interval))
-
-
-def change_sync_start_date(repository, dt):
- """
- Specify when the repository should be synchronized next.
- """
- mirror = get_mirror(repository)
- return bool(update_with_transaction(mirror, sync_start_date=dt))
-
-
-def change_root_rule(repository, rule):
- """
- Specify which rule should be used for repository mirroring.
- """
- assert rule.repository == repository
- mirror = get_mirror(repository)
- return bool(update_with_transaction(mirror, root_rule=rule))
-
-
-def change_sync_status(repository, sync_status):
- """
- Change Repository's mirroring status.
- """
- mirror = get_mirror(repository)
- return update_with_transaction(mirror, sync_status=sync_status)
-
-
-def change_retries_remaining(repository, retries_remaining):
- """
- Change the number of retries remaining for mirroring a repository.
- """
- mirror = get_mirror(repository)
- return update_with_transaction(mirror, sync_retries_remaining=retries_remaining)
-
-
-def change_external_registry_config(repository, config_updates):
- """
- Update the 'external_registry_config' with the passed-in fields. The config has:
- verify_tls: True|False
- proxy: JSON fields 'http_proxy', 'https_proxy', and 'no_proxy'
- """
- mirror = get_mirror(repository)
- external_registry_config = mirror.external_registry_config
-
- if 'verify_tls' in config_updates:
- external_registry_config['verify_tls'] = config_updates['verify_tls']
-
- if 'proxy' in config_updates:
- proxy_updates = config_updates['proxy']
- for key in ('http_proxy', 'https_proxy', 'no_proxy'):
- if key in proxy_updates:
- # Ensure the nested dict exists, then apply the update for this key.
- external_registry_config.setdefault('proxy', {})[key] = proxy_updates[key]
-
- return update_with_transaction(mirror, external_registry_config=external_registry_config)
-
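# Illustrative usage (not part of the original module; `repo` is assumed): toggle
# TLS verification and update part of the proxy config in one call. Field names
# follow the docstring above; proxy keys not supplied are left untouched.
change_external_registry_config(repo, {
  'verify_tls': False,
  'proxy': {'http_proxy': 'http://proxy.internal:3128', 'no_proxy': 'localhost'},
})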
-
-def get_mirroring_robot(repository):
- """
- Return the robot used for mirroring. Returns None if the repository does not have an associated
- RepoMirrorConfig or the robot does not exist.
- """
- mirror = get_mirror(repository)
- if mirror:
- return mirror.internal_robot
-
- return None
-
-
-def set_mirroring_robot(repository, robot):
- """
- Sets the mirroring robot for the repository.
- """
- assert robot.robot
- namespace, _ = parse_robot_username(robot.username)
- if namespace != repository.namespace_user.username:
- raise DataModelException('Cannot use robot for mirroring')
-
- mirror = get_mirror(repository)
- mirror.internal_robot = robot
- mirror.save()
-
-
-# -------------------- Mirroring Rules --------------------------#
-
-def validate_rule(rule_type, rule_value):
- if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
- raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
-
- if not rule_value or not isinstance(rule_value, list) or len(rule_value) < 1:
- raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV must be a list with at least one rule')
-
-
-def create_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV, left_child=None, right_child=None):
- """
- Create a new Rule for mirroring a Repository
- """
-
- validate_rule(rule_type, rule_value)
-
- rule_kwargs = {
- 'repository': repository,
- 'rule_value': rule_value,
- 'rule_type': rule_type,
- 'left_child': left_child,
- 'right_child': right_child,
- }
- rule = RepoMirrorRule.create(**rule_kwargs)
- return rule
-
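# Illustrative usage (not part of the original module; `repo` is assumed): a
# TAG_GLOB_CSV rule_value is a non-empty list of tag globs, e.g. mirror only
# 'latest' and the 3.x series.
rule = create_rule(repo, ['latest', 'v3.*'])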
-
-def list_rules(repository):
- """
- Returns all RepoMirrorRules associated with a Repository.
- """
- rules = RepoMirrorRule.select().where(RepoMirrorRule.repository == repository).all()
- return rules
-
-
-def get_root_rule(repository):
- """
- Return the primary mirroring Rule
- """
- mirror = get_mirror(repository)
- try:
- rule = RepoMirrorRule.get(repository=repository)
- return rule
- except RepoMirrorRule.DoesNotExist:
- return None
-
-
-def change_rule(repository, rule_type, rule_value):
- """
- Update the value of an existing rule.
- """
-
- validate_rule(rule_type, rule_value)
-
- mirrorRule = get_root_rule(repository)
- if not mirrorRule:
- raise ValidationError('validation failed: rule not found')
-
- query = (RepoMirrorRule
- .update(rule_value=rule_value)
- .where(RepoMirrorRule.id == mirrorRule.id))
- return query.execute()
diff --git a/data/model/repository.py b/data/model/repository.py
index 3400bfde8..c518fc5f7 100644
--- a/data/model/repository.py
+++ b/data/model/repository.py
@@ -1,113 +1,89 @@
import logging
-import random
-from enum import Enum
+from peewee import JOIN_LEFT_OUTER, fn
from datetime import timedelta, datetime
-from peewee import Case, JOIN, fn, SQL, IntegrityError
-from cachetools.func import ttl_cache
-from data.model import (
- config, DataModelException, tag, db_transaction, storage, permission, _basequery)
-from data.database import (
- Repository, Namespace, RepositoryTag, Star, Image, ImageStorage, User, Visibility,
- RepositoryPermission, RepositoryActionCount, Role, RepositoryAuthorizedEmail,
- DerivedStorageForImage, Label, db_for_update, get_epoch_timestamp,
- db_random_func, db_concat_func, RepositorySearchScore, RepositoryKind, ApprTag,
- ManifestLegacyImage, Manifest, ManifestChild)
-from data.text import prefix_search
-from util.itertoolrecipes import take
+from data.model import (DataModelException, tag, db_transaction, storage, image, permission,
+ _basequery, config)
+from data.database import (Repository, Namespace, RepositoryTag, Star, Image, ImageStorage, User,
+ Visibility, RepositoryPermission, TupleSelector, RepositoryActionCount,
+ Role, RepositoryAuthorizedEmail, db_for_update, get_epoch_timestamp,
+ db_random_func)
+
logger = logging.getLogger(__name__)
-SEARCH_FIELDS = Enum("SearchFields", ["name", "description"])
-class RepoStateConfigException(Exception):
- """ Repository.state value requires further configuration to operate. """
- pass
-
-
-def get_repo_kind_name(repo):
- return Repository.kind.get_name(repo.kind_id)
-
-
-def get_repository_count():
- return Repository.select().count()
-
-
-def get_public_repo_visibility():
- return _basequery.get_public_repo_visibility()
-
-
-def create_repository(namespace, name, creating_user, visibility='private', repo_kind='image',
- description=None):
+def create_repository(namespace, name, creating_user, visibility='private'):
+ private = Visibility.get(name=visibility)
namespace_user = User.get(username=namespace)
- yesterday = datetime.now() - timedelta(days=1)
+ repo = Repository.create(name=name, visibility=private, namespace_user=namespace_user)
+ admin = Role.get(name='admin')
- with db_transaction():
- repo = Repository.create(name=name, visibility=Repository.visibility.get_id(visibility),
- namespace_user=namespace_user,
- kind=Repository.kind.get_id(repo_kind),
- description=description)
-
- RepositoryActionCount.create(repository=repo, count=0, date=yesterday)
- RepositorySearchScore.create(repository=repo, score=0)
-
- # Note: We put the admin create permission under the transaction to ensure it is created.
- if creating_user and not creating_user.organization:
- admin = Role.get(name='admin')
- RepositoryPermission.create(user=creating_user, repository=repo, role=admin)
-
- # Apply default permissions (only occurs for repositories under organizations)
- if creating_user and not creating_user.organization and creating_user.username != namespace:
- permission.apply_default_permissions(repo, creating_user)
+ if creating_user and not creating_user.organization:
+ RepositoryPermission.create(user=creating_user, repository=repo, role=admin)
+ if creating_user.username != namespace:
+ # Permission prototypes only work for orgs
+ permission.apply_default_permissions(repo, creating_user)
return repo
-def get_repository(namespace_name, repository_name, kind_filter=None):
+def get_repository(namespace_name, repository_name):
try:
- return _basequery.get_existing_repository(namespace_name, repository_name,
- kind_filter=kind_filter)
+ return _basequery.get_existing_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
return None
-def get_or_create_repository(namespace, name, creating_user, visibility='private',
- repo_kind='image'):
- repo = get_repository(namespace, name, repo_kind)
- if repo is None:
- repo = create_repository(namespace, name, creating_user, visibility, repo_kind)
- return repo
+def _purge_all_repository_tags(namespace_name, repository_name):
+ """ Immediately purge all repository tags without respecting the lifeline procedure """
+ try:
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ raise DataModelException('Invalid repository \'%s/%s\'' %
+ (namespace_name, repository_name))
+ RepositoryTag.delete().where(RepositoryTag.repository == repo.id).execute()
-@ttl_cache(maxsize=1, ttl=600)
-def _get_gc_expiration_policies():
- policy_tuples_query = (
- Namespace.select(Namespace.removed_tag_expiration_s).distinct()
- .limit(100) # This sucks but it's the only way to limit memory
- .tuples())
- return [policy[0] for policy in policy_tuples_query]
+def purge_repository(namespace_name, repository_name):
+ # Delete all tags to allow gc to reclaim storage
+ _purge_all_repository_tags(namespace_name, repository_name)
+
+ # Gc to remove the images and storage
+ garbage_collect_repository(namespace_name, repository_name)
+
+ # Delete the rest of the repository metadata
+ fetched = _basequery.get_existing_repository(namespace_name, repository_name)
+ fetched.delete_instance(recursive=True, delete_nullable=False)
-def get_random_gc_policy():
- """ Return a single random policy from the database to use when garbage collecting.
- """
- return random.choice(_get_gc_expiration_policies())
+def find_repository_with_garbage(filter_list=None):
+ # TODO(jschorr): Remove the filter once we have turned the experiment on for everyone.
+ if filter_list is not None and not filter_list:
+ return None
-
-def find_repository_with_garbage(limit_to_gc_policy_s):
- expiration_timestamp = get_epoch_timestamp() - limit_to_gc_policy_s
+ epoch_timestamp = get_epoch_timestamp()
try:
- candidates = (RepositoryTag.select(RepositoryTag.repository).join(Repository)
+ candidates = (RepositoryTag
+ .select(RepositoryTag.repository)
+ .join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(~(RepositoryTag.lifetime_end_ts >> None),
- (RepositoryTag.lifetime_end_ts <= expiration_timestamp),
- (Namespace.removed_tag_expiration_s == limit_to_gc_policy_s)).limit(500)
- .distinct().alias('candidates'))
+ (RepositoryTag.lifetime_end_ts <=
+ (epoch_timestamp - Namespace.removed_tag_expiration_s)))
+ .limit(500)
+ .alias('candidates'))
- found = (RepositoryTag.select(candidates.c.repository_id).from_(candidates)
- .order_by(db_random_func()).get())
+ if filter_list:
+ candidates = candidates.where(Namespace.username << filter_list)
+
+ found = (RepositoryTag
+ .select(candidates.c.repository_id)
+ .from_(candidates)
+ .order_by(db_random_func())
+ .get())
if found is None:
return
@@ -119,6 +95,61 @@ def find_repository_with_garbage(limit_to_gc_policy_s):
return None
+def garbage_collect_repository(namespace_name, repository_name):
+ # If the namespace is in the async experiment, don't perform garbage collection here.
+ # TODO(jschorr): Remove this check once we have turned the experiment on for everyone.
+ if namespace_name in config.app_config.get('EXP_ASYNC_GARBAGE_COLLECTION', []):
+ return
+
+ repo = get_repository(namespace_name, repository_name)
+ if repo is not None:
+ garbage_collect_repo(repo)
+
+
+def garbage_collect_repo(repo):
+ logger.debug('Garbage collecting repository %s', repo.id)
+
+ storage_id_whitelist = {}
+ tag.garbage_collect_tags(repo)
+
+ with db_transaction():
+ # Get a list of all images used by tags in the repository
+ tagged_images = (Image
+ .select(Image.id, Image.ancestors)
+ .join(RepositoryTag)
+ .where(Image.repository == repo))
+
+ def gen_referenced_ancestors():
+ for tagged_image in tagged_images:
+ # The ancestor list is in the format '/1/2/3/', extract just the ids
+ ancestor_id_strings = tagged_image.ancestors.split('/')[1:-1]
+ for img_id_str in ancestor_id_strings:
+ yield int(img_id_str)
+ yield tagged_image.id
+
+ referenced_ancestors = set(gen_referenced_ancestors())
+
+ # We need two pieces of information from the following query: all of the image ids
+ # associated with this repository, and the storages associated with those images.
+ # To fetch just those two fields and bypass the peewee model parsing code (overkill
+ # here), we use a tuple query and feed the result directly to the dict constructor,
+ # which accepts an iterable of (k, v) tuples.
+ all_repo_images = Image.select(Image.id, Image.storage).where(Image.repository == repo).tuples()
+ images_to_storages = dict(all_repo_images)
+ to_remove = set(images_to_storages.keys()).difference(referenced_ancestors)
+
+ if len(to_remove) > 0:
+ logger.info('Cleaning up unreferenced images: %s', to_remove)
+ storage_id_whitelist = {images_to_storages[to_remove_id] for to_remove_id in to_remove}
+ Image.delete().where(Image.id << list(to_remove)).execute()
+
+ if len(to_remove) > 0:
+ logger.info('Garbage collecting storage for images: %s', to_remove)
+ storage.garbage_collect_storage(storage_id_whitelist)
+
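# Worked example (editorial, illustrative): how ancestor ids are extracted from
# the '/1/2/3/'-formatted ancestors column used above.
assert '/1/2/3/'.split('/')[1:-1] == ['1', '2', '3']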
+
def star_repository(user, repository):
""" Stars a repository. """
star = Star.create(user=user.id, repository=repository.id)
@@ -128,31 +159,29 @@ def star_repository(user, repository):
def unstar_repository(user, repository):
""" Unstars a repository. """
try:
- (Star.delete().where(Star.repository == repository.id, Star.user == user.id).execute())
+ (Star
+ .delete()
+ .where(Star.repository == repository.id, Star.user == user.id)
+ .execute())
except Star.DoesNotExist:
raise DataModelException('Star not found.')
-def set_trust(repo, trust_enabled):
- repo.trust_enabled = trust_enabled
- repo.save()
-
-
-def set_description(repo, description):
- repo.description = description
- repo.save()
-
-
-def get_user_starred_repositories(user, kind_filter='image'):
+def get_user_starred_repositories(user, limit=None, page=None):
""" Retrieves all of the repositories a user has starred. """
- try:
- repo_kind = Repository.kind.get_id(kind_filter)
- except RepositoryKind.DoesNotExist:
- raise DataModelException('Unknown kind of repository')
+ query = (Repository
+ .select(Repository, User, Visibility)
+ .join(Star)
+ .switch(Repository)
+ .join(User)
+ .switch(Repository)
+ .join(Visibility)
+ .where(Star.user == user))
- query = (Repository.select(Repository, User, Visibility, Repository.id.alias('rid')).join(Star)
- .switch(Repository).join(User).switch(Repository).join(Visibility)
- .where(Star.user == user, Repository.kind == repo_kind))
+ if page and limit:
+ query = query.paginate(page, limit)
+ elif limit:
+ query = query.limit(limit)
return query
@@ -160,212 +189,177 @@ def get_user_starred_repositories(user, kind_filter='image'):
def repository_is_starred(user, repository):
""" Determines whether a user has starred a repository or not. """
try:
- (Star.select().where(Star.repository == repository.id, Star.user == user.id).get())
+ (Star
+ .select()
+ .where(Star.repository == repository.id, Star.user == user.id)
+ .get())
return True
except Star.DoesNotExist:
return False
-def get_stars(repository_ids):
- """ Returns a map from repository ID to the number of stars for each repository in the
- given repository IDs list.
- """
+def get_when_last_modified(repository_ids):
if not repository_ids:
return {}
- tuples = (Star.select(Star.repository, fn.Count(Star.id))
- .where(Star.repository << repository_ids).group_by(Star.repository).tuples())
+ tuples = (RepositoryTag
+ .select(RepositoryTag.repository, fn.Max(RepositoryTag.lifetime_start_ts))
+ .where(RepositoryTag.repository << repository_ids)
+ .group_by(RepositoryTag.repository)
+ .tuples())
- star_map = {}
+ last_modified_map = {}
for record in tuples:
- star_map[record[0]] = record[1]
+ last_modified_map[record[0]] = record[1]
- return star_map
+ return last_modified_map
-def get_visible_repositories(username, namespace=None, kind_filter='image', include_public=False,
- start_id=None, limit=None):
+def get_action_counts(repository_ids):
+ if not repository_ids:
+ return {}
+
+ # Filter the join to recent entries only.
+ last_week = datetime.now() - timedelta(weeks=1)
+ tuples = (RepositoryActionCount
+ .select(RepositoryActionCount.repository, fn.Sum(RepositoryActionCount.count))
+ .where(RepositoryActionCount.repository << repository_ids)
+ .where(RepositoryActionCount.date >= last_week)
+ .group_by(RepositoryActionCount.repository)
+ .tuples())
+
+ action_count_map = {}
+ for record in tuples:
+ action_count_map[record[0]] = record[1]
+
+ return action_count_map
+
+
+def get_visible_repositories(username, namespace=None, page=None, limit=None, include_public=False):
""" Returns the repositories visible to the given user (if any).
"""
if not include_public and not username:
- # Short circuit by returning a query that will find no repositories. We need to return a query
- # here, as it will be modified by other queries later on.
- return Repository.select(Repository.id.alias('rid')).where(Repository.id == -1)
+ return []
- query = (Repository.select(Repository.name,
- Repository.id.alias('rid'), Repository.description,
- Namespace.username, Repository.visibility, Repository.kind)
- .switch(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id)))
+ fields = [Repository.name, Repository.id, Repository.description, Visibility.name,
+ Namespace.username]
- user_id = None
- if username:
- # Note: We only need the permissions table if we will filter based on a user's permissions.
- query = query.switch(Repository).distinct().join(RepositoryPermission, JOIN.LEFT_OUTER)
- found_namespace = _get_namespace_user(username)
- if not found_namespace:
- return Repository.select(Repository.id.alias('rid')).where(Repository.id == -1)
+ query = _visible_repository_query(username=username, page=page,
+ limit=limit, namespace=namespace, include_public=include_public,
+ select_models=fields)
- user_id = found_namespace.id
+ if limit:
+ query = query.limit(limit)
- query = _basequery.filter_to_repos_for_user(query, user_id, namespace, kind_filter,
- include_public, start_id=start_id)
-
- if limit is not None:
- query = query.limit(limit).order_by(SQL('rid'))
+ if namespace:
+ query = query.where(Namespace.username == namespace)
return query
-def get_app_repository(namespace_name, repository_name):
- """ Find an application repository. """
- try:
- return _basequery.get_existing_repository(namespace_name, repository_name,
- kind_filter='application')
- except Repository.DoesNotExist:
- return None
+def _visible_repository_query(username=None, include_public=True, limit=None,
+ page=None, namespace=None, select_models=[]):
+ query = (Repository
+ .select(*select_models) # MySQL/RDS complains if there are selected models for counts.
+ .distinct()
+ .join(Visibility)
+ .switch(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Repository)
+ .join(RepositoryPermission, JOIN_LEFT_OUTER))
+
+ query = _basequery.filter_to_repos_for_user(query, username, namespace, include_public)
+ if page:
+ query = query.paginate(page, limit)
+ elif limit:
+ query = query.limit(limit)
+
+ return query
-def get_app_search(lookup, search_fields=None, username=None, limit=50):
- if search_fields is None:
- search_fields = set([SEARCH_FIELDS.name.name])
-
- return get_filtered_matching_repositories(lookup, filter_username=username,
- search_fields=search_fields, repo_kind='application',
- offset=0, limit=limit)
-
-
-def _get_namespace_user(username):
- try:
- return User.get(username=username)
- except User.DoesNotExist:
- return None
-
-
-def get_filtered_matching_repositories(lookup_value, filter_username=None, repo_kind='image',
- offset=0, limit=25, search_fields=None):
- """ Returns an iterator of all repositories matching the given lookup value, with optional
- filtering to a specific user. If the user is unspecified, only public repositories will
- be returned.
+def get_sorted_matching_repositories(prefix, only_public, checker, limit=10):
+ """ Returns repositories matching the given prefix string and passing the given checker
+ function.
"""
- if search_fields is None:
- search_fields = set([SEARCH_FIELDS.description.name, SEARCH_FIELDS.name.name])
+ last_week = datetime.now() - timedelta(weeks=1)
+ results = []
+ existing_ids = []
- # Build the unfiltered search query.
- unfiltered_query = _get_sorted_matching_repositories(lookup_value, repo_kind=repo_kind,
- search_fields=search_fields,
- include_private=filter_username is not None,
- ids_only=filter_username is not None)
+ def get_search_results(search_clause, with_count=False):
+ if len(results) >= limit:
+ return
- # Add a filter to the iterator, if necessary.
- if filter_username is not None:
- filter_user = _get_namespace_user(filter_username)
- if filter_user is None:
- return []
+ select_items = [Repository, Namespace]
+ if with_count:
+ select_items.append(fn.Sum(RepositoryActionCount.count).alias('count'))
- iterator = _filter_repositories_visible_to_user(unfiltered_query, filter_user.id, limit,
- repo_kind)
- if offset > 0:
- take(offset, iterator)
-
- # Return the results.
- return list(take(limit, iterator))
-
- return list(unfiltered_query.offset(offset).limit(limit))
-
-
-def _filter_repositories_visible_to_user(unfiltered_query, filter_user_id, limit, repo_kind):
- encountered = set()
- chunk_count = limit * 2
- unfiltered_page = 0
- iteration_count = 0
-
- while iteration_count < 10: # Just to be safe
- # Find the next chunk's worth of repository IDs, paginated by the chunk size.
- unfiltered_page = unfiltered_page + 1
- found_ids = [r.id for r in unfiltered_query.paginate(unfiltered_page, chunk_count)]
-
- # Make sure we haven't encountered these results before. This code is used to handle
- # the case where we've previously seen a result, as pagination is not necessarily
- # stable in SQL databases.
- unfiltered_repository_ids = set(found_ids)
- new_unfiltered_ids = unfiltered_repository_ids - encountered
- if not new_unfiltered_ids:
- break
-
- encountered.update(new_unfiltered_ids)
-
- # Filter the repositories found to only those visible to the current user.
query = (Repository
- .select(Repository, Namespace)
- .distinct()
- .join(Namespace, on=(Namespace.id == Repository.namespace_user)).switch(Repository)
- .join(RepositoryPermission).where(Repository.id << list(new_unfiltered_ids)))
-
- filtered = _basequery.filter_to_repos_for_user(query, filter_user_id, repo_kind=repo_kind)
-
- # Sort the filtered repositories by their initial order.
- all_filtered_repos = list(filtered)
- all_filtered_repos.sort(key=lambda repo: found_ids.index(repo.id))
-
- # Yield the repositories in sorted order.
- for filtered_repo in all_filtered_repos:
- yield filtered_repo
-
- # If the number of found IDs is less than the chunk count, then we're done.
- if len(found_ids) < chunk_count:
- break
-
- iteration_count = iteration_count + 1
-
-
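As a side note, the chunked paginate-then-filter approach above can be summarized with a small standalone generator (assumed helper callables, not the actual model functions):

def filter_visible(fetch_page, is_visible, chunk_size, max_pages=10):
    """fetch_page(page, size) returns candidate IDs in ranked order; yields only visible IDs."""
    encountered = set()
    for page in range(1, max_pages + 1):
        found = fetch_page(page, chunk_size)
        new_ids = [candidate for candidate in found if candidate not in encountered]
        if not new_ids:
            return
        encountered.update(new_ids)
        for candidate in new_ids:  # preserve the incoming ranking order
            if is_visible(candidate):
                yield candidate
        if len(found) < chunk_size:  # short page means the candidates are exhausted
            return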
-def _get_sorted_matching_repositories(lookup_value, repo_kind='image', include_private=False,
- search_fields=None, ids_only=False):
- """ Returns a query of repositories matching the given lookup string, with optional inclusion of
- private repositories. Note that this method does *not* filter results based on visibility
- to users.
- """
- select_fields = [Repository.id] if ids_only else [Repository, Namespace]
-
- if not lookup_value:
- # This is a generic listing of repositories. Simply return the sorted repositories based
- # on RepositorySearchScore.
- query = (Repository
- .select(*select_fields)
- .join(RepositorySearchScore)
- .order_by(RepositorySearchScore.score.desc()))
- else:
- if search_fields is None:
- search_fields = set([SEARCH_FIELDS.description.name, SEARCH_FIELDS.name.name])
-
- # Always search at least on name (init clause)
- clause = Repository.name.match(lookup_value)
- computed_score = RepositorySearchScore.score.alias('score')
-
- # If the description field is in the search fields, then we need to compute a synthetic score
- # to weight description matches less heavily than name matches.
- if SEARCH_FIELDS.description.name in search_fields:
- clause = Repository.description.match(lookup_value) | clause
- cases = [(Repository.name.match(lookup_value), 100 * RepositorySearchScore.score),]
- computed_score = Case(None, cases, RepositorySearchScore.score).alias('score')
-
- select_fields.append(computed_score)
- query = (Repository.select(*select_fields)
- .join(RepositorySearchScore)
- .where(clause)
- .order_by(SQL('score').desc()))
-
- if repo_kind is not None:
- query = query.where(Repository.kind == Repository.kind.get_id(repo_kind))
-
- if not include_private:
- query = query.where(Repository.visibility == _basequery.get_public_repo_visibility())
-
- if not ids_only:
- query = (query
+ .select(*select_items)
+ .join(Namespace, JOIN_LEFT_OUTER, on=(Namespace.id == Repository.namespace_user))
.switch(Repository)
- .join(Namespace, on=(Namespace.id == Repository.namespace_user)))
+ .where(search_clause)
+ .group_by(Repository, Namespace))
- return query
+ if only_public:
+ query = query.where(Repository.visibility == _basequery.get_public_repo_visibility())
+
+ if existing_ids:
+ query = query.where(~(Repository.id << existing_ids))
+
+ if with_count:
+ query = (query
+ .switch(Repository)
+ .join(RepositoryActionCount)
+ .where(RepositoryActionCount.date >= last_week)
+ .order_by(fn.Sum(RepositoryActionCount.count).desc()))
+
+ for result in query:
+ if len(results) >= limit:
+ return results
+
+ # Note: We compare IDs here, instead of objects, because calling .visibility on the
+ # Repository will kick off a new SQL query to retrieve that visibility enum value. We also
+ # don't join the visibility table in SQL, because it is ungodly slow in MySQL :-/
+ result.is_public = result.visibility_id == _basequery.get_public_repo_visibility().id
+ result.count = result.count if with_count else 0
+
+ if not checker(result):
+ continue
+
+ results.append(result)
+ existing_ids.append(result.id)
+
+ # For performance reasons, we conduct the repo name and repo namespace searches on their
+ # own. This also affords us the ability to give higher precedence to repository name matches
+ # over namespace matches, which is semantically correct.
+ get_search_results(Repository.name ** (prefix + '%'), with_count=True)
+ get_search_results(Repository.name ** (prefix + '%'), with_count=False)
+
+ get_search_results(Namespace.username ** (prefix + '%'), with_count=True)
+ get_search_results(Namespace.username ** (prefix + '%'), with_count=False)
+
+ return results
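A minimal, standalone sketch of the multi-pass idea above (hypothetical callables, not the actual peewee queries): higher-precedence passes run first, and later passes exclude IDs that were already collected.

def ranked_prefix_search(passes, limit):
    """passes: a list of callables, each returning candidate rows for one search clause."""
    results, seen_ids = [], set()
    for run_pass in passes:
        if len(results) >= limit:
            break
        for row in run_pass(exclude_ids=seen_ids):
            if len(results) >= limit:
                break
            if row.id in seen_ids:
                continue
            seen_ids.add(row.id)
            results.append(row)
    return results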
+
+
+def get_matching_repositories(repo_term, username=None, limit=10, include_public=True):
+ namespace_term = repo_term
+ name_term = repo_term
+
+ visible = _visible_repository_query(username, include_public=include_public)
+
+ search_clauses = (Repository.name ** ('%' + name_term + '%') |
+ Namespace.username ** ('%' + namespace_term + '%'))
+
+ # Handle the case where the user has already entered a namespace path.
+ if repo_term.find('/') > 0:
+ parts = repo_term.split('/', 1)
+ namespace_term = '/'.join(parts[:-1])
+ name_term = parts[-1]
+
+ search_clauses = (Repository.name ** ('%' + name_term + '%') &
+ Namespace.username ** ('%' + namespace_term + '%'))
+
+ return visible.where(search_clauses).limit(limit)
def lookup_repository(repo_id):
@@ -376,15 +370,19 @@ def lookup_repository(repo_id):
def is_repository_public(repository):
- return repository.visibility_id == _basequery.get_public_repo_visibility().id
+ return repository.visibility == _basequery.get_public_repo_visibility()
def repository_is_public(namespace_name, repository_name):
try:
- (Repository.select().join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .switch(Repository).join(Visibility).where(Namespace.username == namespace_name,
- Repository.name == repository_name,
- Visibility.name == 'public').get())
+ (Repository
+ .select()
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Repository)
+ .join(Visibility)
+ .where(Namespace.username == namespace_name, Repository.name == repository_name,
+ Visibility.name == 'public')
+ .get())
return True
except Repository.DoesNotExist:
return False
@@ -401,10 +399,13 @@ def set_repository_visibility(repo, visibility):
def get_email_authorized_for_repo(namespace, repository, email):
try:
- return (RepositoryAuthorizedEmail.select(RepositoryAuthorizedEmail, Repository, Namespace)
- .join(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ return (RepositoryAuthorizedEmail
+ .select(RepositoryAuthorizedEmail, Repository, Namespace)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Namespace.username == namespace, Repository.name == repository,
- RepositoryAuthorizedEmail.email == email).get())
+ RepositoryAuthorizedEmail.email == email)
+ .get())
except RepositoryAuthorizedEmail.DoesNotExist:
return None
@@ -413,16 +414,20 @@ def create_email_authorization_for_repo(namespace_name, repository_name, email):
try:
repo = _basequery.get_existing_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
- raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
+ raise DataModelException('Invalid repository %s/%s' %
+ (namespace_name, repository_name))
return RepositoryAuthorizedEmail.create(repository=repo, email=email, confirmed=False)
def confirm_email_authorization_for_repo(code):
try:
- found = (RepositoryAuthorizedEmail.select(RepositoryAuthorizedEmail, Repository, Namespace)
- .join(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(RepositoryAuthorizedEmail.code == code).get())
+ found = (RepositoryAuthorizedEmail
+ .select(RepositoryAuthorizedEmail, Repository, Namespace)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryAuthorizedEmail.code == code)
+ .get())
except RepositoryAuthorizedEmail.DoesNotExist:
raise DataModelException('Invalid confirmation code.')
@@ -432,26 +437,3 @@ def confirm_email_authorization_for_repo(code):
return found
-def is_empty(namespace_name, repository_name):
- """ Returns if the repository referenced by the given namespace and name is empty. If the repo
- doesn't exist, returns True.
- """
- try:
- tag.list_repository_tags(namespace_name, repository_name).limit(1).get()
- return False
- except RepositoryTag.DoesNotExist:
- return True
-
-
-def get_repository_state(namespace_name, repository_name):
- """ Return the Repository State if the Repository exists. Otherwise, returns None. """
- repo = get_repository(namespace_name, repository_name)
- if repo:
- return repo.state
-
- return None
-
-
-def set_repository_state(repo, state):
- repo.state = state
- repo.save()
diff --git a/data/model/repositoryactioncount.py b/data/model/repositoryactioncount.py
deleted file mode 100644
index 759edc093..000000000
--- a/data/model/repositoryactioncount.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import logging
-
-from collections import namedtuple
-from peewee import IntegrityError
-
-from datetime import date, timedelta, datetime
-from data.database import (Repository, LogEntry, LogEntry2, LogEntry3, RepositoryActionCount,
- RepositorySearchScore, db_random_func, fn)
-
-logger = logging.getLogger(__name__)
-
-search_bucket = namedtuple('SearchBucket', ['delta', 'days', 'weight'])
-
-# Defines the various buckets for search scoring. Each bucket is computed using the given time
-# delta from today *minus the previous bucket's time period*. Once all the actions over the
-# bucket's time period have been collected, they are multiplied by the given modifier. The modifier
-# for each bucket was determined via the integral of (2/((x/183)+1)^2)/183 over the period of days
-# in the bucket; this integral over 0..183 sums to 1, giving a well-normalized score.
-SEARCH_BUCKETS = [
- search_bucket(timedelta(days=1), 1, 0.010870),
- search_bucket(timedelta(days=7), 6, 0.062815),
- search_bucket(timedelta(days=31), 24, 0.21604),
- search_bucket(timedelta(days=183), 152, 0.71028),
-]
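The weights above follow directly from the stated integral; as a quick standalone check (plain Python, not part of the model code), the antiderivative of (2/((x/183)+1)^2)/183 is -2/((x/183)+1):

def bucket_weight(start_day, end_day):
    F = lambda x: -2.0 / (x / 183.0 + 1.0)  # antiderivative of the scoring density
    return F(end_day) - F(start_day)

assert abs(bucket_weight(0, 1) - 0.010870) < 1e-5
assert abs(bucket_weight(1, 7) - 0.062815) < 1e-5
assert abs(bucket_weight(7, 31) - 0.21604) < 1e-5
assert abs(bucket_weight(31, 183) - 0.71028) < 1e-5
assert abs(bucket_weight(0, 183) - 1.0) < 1e-9  # the full 183-day window integrates to 1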
-
-def find_uncounted_repository():
- """ Returns a repository that has not yet had an entry added into the RepositoryActionCount
- table for yesterday.
- """
- try:
- # Get a random repository to count.
- today = date.today()
- yesterday = today - timedelta(days=1)
- has_yesterday_actions = (RepositoryActionCount
- .select(RepositoryActionCount.repository)
- .where(RepositoryActionCount.date == yesterday))
-
- to_count = (Repository
- .select()
- .where(~(Repository.id << (has_yesterday_actions)))
- .order_by(db_random_func()).get())
- return to_count
- except Repository.DoesNotExist:
- return None
-
-
-def count_repository_actions(to_count, day):
- """ Aggregates repository actions from the LogEntry table for the specified day. Returns the
- count or None on error.
- """
- # TODO: Clean this up a bit.
- def lookup_action_count(model):
- return (model
- .select()
- .where(model.repository == to_count,
- model.datetime >= day,
- model.datetime < (day + timedelta(days=1)))
- .count())
-
- actions = (lookup_action_count(LogEntry3) + lookup_action_count(LogEntry2) +
- lookup_action_count(LogEntry))
-
- return actions
-
-
-def store_repository_action_count(repository, day, action_count):
- """ Stores the action count for a repository for a specific day. Returns False if the
- repository already has an entry for the specified day.
- """
- try:
- RepositoryActionCount.create(repository=repository, date=day, count=action_count)
- return True
- except IntegrityError:
- logger.debug('Count already written for repository %s', repository.id)
- return False
-
-
-def update_repository_score(repo):
- """ Updates the repository score entry for the given table by retrieving information from
- the RepositoryActionCount table. Note that count_repository_actions for the repo should
- be called first. Returns True if the row was updated and False otherwise.
- """
- today = date.today()
-
- # Retrieve the counts for each bucket and calculate the final score.
- final_score = 0.0
- last_end_timedelta = timedelta(days=0)
-
- for bucket in SEARCH_BUCKETS:
- start_date = today - bucket.delta
- end_date = today - last_end_timedelta
- last_end_timedelta = bucket.delta
-
- query = (RepositoryActionCount
- .select(fn.Sum(RepositoryActionCount.count), fn.Count(RepositoryActionCount.id))
- .where(RepositoryActionCount.date >= start_date,
- RepositoryActionCount.date < end_date,
- RepositoryActionCount.repository == repo))
-
- bucket_tuple = query.tuples()[0]
- logger.debug('Got bucket tuple %s for bucket %s for repository %s', bucket_tuple, bucket,
- repo.id)
-
- if bucket_tuple[0] is None:
- continue
-
- bucket_sum = float(bucket_tuple[0])
- bucket_count = int(bucket_tuple[1])
- if not bucket_count:
- continue
-
- bucket_score = bucket_sum / (bucket_count * 1.0)
- final_score += bucket_score * bucket.weight
-
- # Update the existing repo search score row or create a new one.
- normalized_score = int(final_score * 100.0)
- try:
- try:
- search_score_row = RepositorySearchScore.get(repository=repo)
- search_score_row.last_updated = datetime.now()
- search_score_row.score = normalized_score
- search_score_row.save()
- return True
- except RepositorySearchScore.DoesNotExist:
- RepositorySearchScore.create(repository=repo, score=normalized_score, last_updated=today)
- return True
- except IntegrityError:
- logger.debug('RepositorySearchScore row already existed; skipping')
- return False
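For clarity, the scoring step above reduces to a weighted average of per-bucket daily action counts; a compact sketch with hypothetical inputs (aligned with the SEARCH_BUCKETS weights) rather than the real RepositoryActionCount rows:

def compute_search_score(daily_counts_per_bucket, buckets):
    """daily_counts_per_bucket: one list of per-day action counts per bucket, in bucket order."""
    final_score = 0.0
    for counts, bucket in zip(daily_counts_per_bucket, buckets):
        if not counts:
            continue
        final_score += (sum(counts) / float(len(counts))) * bucket.weight
    return int(final_score * 100.0)  # same normalization as the stored score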
diff --git a/data/model/service_keys.py b/data/model/service_keys.py
deleted file mode 100644
index eb460299b..000000000
--- a/data/model/service_keys.py
+++ /dev/null
@@ -1,205 +0,0 @@
-import re
-
-from calendar import timegm
-from datetime import datetime, timedelta
-from peewee import JOIN
-
-from Crypto.PublicKey import RSA
-from jwkest.jwk import RSAKey
-
-from data.database import db_for_update, User, ServiceKey, ServiceKeyApproval
-from data.model import (ServiceKeyDoesNotExist, ServiceKeyAlreadyApproved, ServiceNameInvalid,
- db_transaction, config)
-from data.model.notification import create_notification, delete_all_notifications_by_path_prefix
-from util.security.fingerprint import canonical_kid
-
-
-_SERVICE_NAME_REGEX = re.compile(r'^[a-z0-9_]+$')
-
-def _expired_keys_clause(service):
- return ((ServiceKey.service == service) &
- (ServiceKey.expiration_date <= datetime.utcnow()))
-
-
-def _stale_expired_keys_service_clause(service):
- return ((ServiceKey.service == service) & _stale_expired_keys_clause())
-
-
-def _stale_expired_keys_clause():
- expired_ttl = timedelta(seconds=config.app_config['EXPIRED_SERVICE_KEY_TTL_SEC'])
- return (ServiceKey.expiration_date <= (datetime.utcnow() - expired_ttl))
-
-
-def _stale_unapproved_keys_clause(service):
- unapproved_ttl = timedelta(seconds=config.app_config['UNAPPROVED_SERVICE_KEY_TTL_SEC'])
- return ((ServiceKey.service == service) &
- (ServiceKey.approval >> None) &
- (ServiceKey.created_date <= (datetime.utcnow() - unapproved_ttl)))
-
-
-def _gc_expired(service):
- ServiceKey.delete().where(_stale_expired_keys_service_clause(service) |
- _stale_unapproved_keys_clause(service)).execute()
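An in-memory restatement of the staleness rules used by the clauses above (hypothetical key object with the same fields; the real code expresses this as peewee where-clauses):

def is_stale(key, now, expired_ttl, unapproved_ttl):
    # Keys that expired more than EXPIRED_SERVICE_KEY_TTL_SEC ago are eligible for GC.
    expired_long_ago = (key.expiration_date is not None and
                        key.expiration_date <= now - expired_ttl)
    # Keys never approved within UNAPPROVED_SERVICE_KEY_TTL_SEC are also eligible.
    never_approved = (key.approval is None and
                      key.created_date <= now - unapproved_ttl)
    return expired_long_ago or never_approved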
-
-
-def _verify_service_name(service_name):
- if not _SERVICE_NAME_REGEX.match(service_name):
- raise ServiceNameInvalid
-
-
-def _notify_superusers(key):
- notification_metadata = {
- 'name': key.name,
- 'kid': key.kid,
- 'service': key.service,
- 'jwk': key.jwk,
- 'metadata': key.metadata,
- 'created_date': timegm(key.created_date.utctimetuple()),
- }
-
- if key.expiration_date is not None:
- notification_metadata['expiration_date'] = timegm(key.expiration_date.utctimetuple())
-
- if len(config.app_config['SUPER_USERS']) > 0:
- superusers = User.select().where(User.username << config.app_config['SUPER_USERS'])
- for superuser in superusers:
- create_notification('service_key_submitted', superuser, metadata=notification_metadata,
- lookup_path='/service_key_approval/{0}/{1}'.format(key.kid, superuser.id))
-
-
-def create_service_key(name, kid, service, jwk, metadata, expiration_date, rotation_duration=None):
- _verify_service_name(service)
- _gc_expired(service)
-
- key = ServiceKey.create(name=name, kid=kid, service=service, jwk=jwk, metadata=metadata,
- expiration_date=expiration_date, rotation_duration=rotation_duration)
-
- _notify_superusers(key)
- return key
-
-
-def generate_service_key(service, expiration_date, kid=None, name='', metadata=None,
- rotation_duration=None):
- private_key = RSA.generate(2048)
- jwk = RSAKey(key=private_key.publickey()).serialize()
- if kid is None:
- kid = canonical_kid(jwk)
-
- key = create_service_key(name, kid, service, jwk, metadata or {}, expiration_date,
- rotation_duration=rotation_duration)
- return (private_key, key)
-
-
-def replace_service_key(old_kid, kid, jwk, metadata, expiration_date):
- try:
- with db_transaction():
- key = db_for_update(ServiceKey.select().where(ServiceKey.kid == old_kid)).get()
- key.metadata.update(metadata)
-
- ServiceKey.create(name=key.name, kid=kid, service=key.service, jwk=jwk,
- metadata=key.metadata, expiration_date=expiration_date,
- rotation_duration=key.rotation_duration, approval=key.approval)
- key.delete_instance()
- except ServiceKey.DoesNotExist:
- raise ServiceKeyDoesNotExist
-
- _notify_superusers(key)
- delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(old_kid))
- _gc_expired(key.service)
-
-
-def update_service_key(kid, name=None, metadata=None):
- try:
- with db_transaction():
- key = db_for_update(ServiceKey.select().where(ServiceKey.kid == kid)).get()
- if name is not None:
- key.name = name
-
- if metadata is not None:
- key.metadata.update(metadata)
-
- key.save()
- except ServiceKey.DoesNotExist:
- raise ServiceKeyDoesNotExist
-
-
-def delete_service_key(kid):
- try:
- key = ServiceKey.get(kid=kid)
- ServiceKey.delete().where(ServiceKey.kid == kid).execute()
- except ServiceKey.DoesNotExist:
- raise ServiceKeyDoesNotExist
-
- delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(kid))
- _gc_expired(key.service)
- return key
-
-
-def set_key_expiration(kid, expiration_date):
- try:
- service_key = get_service_key(kid, alive_only=False, approved_only=False)
- except ServiceKey.DoesNotExist:
- raise ServiceKeyDoesNotExist
-
- service_key.expiration_date = expiration_date
- service_key.save()
-
-
-def approve_service_key(kid, approval_type, approver=None, notes=''):
- try:
- with db_transaction():
- key = db_for_update(ServiceKey.select().where(ServiceKey.kid == kid)).get()
- if key.approval is not None:
- raise ServiceKeyAlreadyApproved
-
- approval = ServiceKeyApproval.create(approver=approver, approval_type=approval_type,
- notes=notes)
- key.approval = approval
- key.save()
- except ServiceKey.DoesNotExist:
- raise ServiceKeyDoesNotExist
-
- delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(kid))
- return key
-
-
-def _list_service_keys_query(kid=None, service=None, approved_only=True, alive_only=True,
- approval_type=None):
- query = ServiceKey.select().join(ServiceKeyApproval, JOIN.LEFT_OUTER)
-
- if approved_only:
- query = query.where(~(ServiceKey.approval >> None))
-
- if alive_only:
- query = query.where((ServiceKey.expiration_date > datetime.utcnow()) |
- (ServiceKey.expiration_date >> None))
-
- if approval_type is not None:
- query = query.where(ServiceKeyApproval.approval_type == approval_type)
-
- if service is not None:
- query = query.where(ServiceKey.service == service)
- query = query.where(~(_expired_keys_clause(service)) |
- ~(_stale_unapproved_keys_clause(service)))
-
- if kid is not None:
- query = query.where(ServiceKey.kid == kid)
-
- query = query.where(~(_stale_expired_keys_clause()) | (ServiceKey.expiration_date >> None))
- return query
-
-
-def list_all_keys():
- return list(_list_service_keys_query(approved_only=False, alive_only=False))
-
-
-def list_service_keys(service):
- return list(_list_service_keys_query(service=service))
-
-
-def get_service_key(kid, service=None, alive_only=True, approved_only=True):
- try:
- return _list_service_keys_query(kid=kid, service=service, approved_only=approved_only,
- alive_only=alive_only).get()
- except ServiceKey.DoesNotExist:
- raise ServiceKeyDoesNotExist
diff --git a/data/model/sqlalchemybridge.py b/data/model/sqlalchemybridge.py
index e469eff00..43248b55a 100644
--- a/data/model/sqlalchemybridge.py
+++ b/data/model/sqlalchemybridge.py
@@ -1,5 +1,5 @@
from sqlalchemy import (Table, MetaData, Column, ForeignKey, Integer, String, Boolean, Text,
- DateTime, Date, BigInteger, Index, text)
+ DateTime, Date, BigInteger, Index)
from peewee import (PrimaryKeyField, CharField, BooleanField, DateTimeField, TextField,
ForeignKeyField, BigIntegerField, IntegerField, DateField)
@@ -15,6 +15,7 @@ OPTION_TRANSLATIONS = {
'null': 'nullable',
}
+
def gen_sqlalchemy_metadata(peewee_model_list):
metadata = MetaData(naming_convention={
"ix": 'ix_%(column_0_label)s',
@@ -27,10 +28,9 @@ def gen_sqlalchemy_metadata(peewee_model_list):
meta = model._meta
all_indexes = set(meta.indexes)
- fulltext_indexes = []
columns = []
- for field in meta.sorted_fields:
+ for field in meta.get_fields():
alchemy_type = None
col_args = []
col_kwargs = {}
@@ -48,10 +48,10 @@ def gen_sqlalchemy_metadata(peewee_model_list):
alchemy_type = Text
elif isinstance(field, ForeignKeyField):
alchemy_type = Integer
+ target_name = '%s.%s' % (field.to_field.model_class._meta.db_table,
+ field.to_field.db_column)
+ col_args.append(ForeignKey(target_name))
all_indexes.add(((field.name, ), field.unique))
- if not field.deferred:
- target_name = '%s.%s' % (field.rel_model._meta.table_name, field.rel_field.column_name)
- col_args.append(ForeignKey(target_name))
elif isinstance(field, BigIntegerField):
alchemy_type = BigInteger
elif isinstance(field, IntegerField):
@@ -59,10 +59,6 @@ def gen_sqlalchemy_metadata(peewee_model_list):
else:
raise RuntimeError('Unknown column type: %s' % field)
- if hasattr(field, '__fulltext__'):
- # Add the fulltext index for the field, based on whether we are under MySQL or Postgres.
- fulltext_indexes.append(field.name)
-
for option_name in OPTIONS_TO_COPY:
alchemy_option_name = (OPTION_TRANSLATIONS[option_name]
if option_name in OPTION_TRANSLATIONS else option_name)
@@ -73,22 +69,15 @@ def gen_sqlalchemy_metadata(peewee_model_list):
if field.unique or field.index:
all_indexes.add(((field.name, ), field.unique))
- new_col = Column(field.column_name, alchemy_type, *col_args, **col_kwargs)
+ new_col = Column(field.db_column, alchemy_type, *col_args, **col_kwargs)
columns.append(new_col)
- new_table = Table(meta.table_name, metadata, *columns)
+ new_table = Table(meta.db_table, metadata, *columns)
for col_prop_names, unique in all_indexes:
- col_names = [meta.fields[prop_name].column_name for prop_name in col_prop_names]
- index_name = '%s_%s' % (meta.table_name, '_'.join(col_names))
+ col_names = [meta.fields[prop_name].db_column for prop_name in col_prop_names]
+ index_name = '%s_%s' % (meta.db_table, '_'.join(col_names))
col_refs = [getattr(new_table.c, col_name) for col_name in col_names]
Index(index_name, *col_refs, unique=unique)
- for col_field_name in fulltext_indexes:
- index_name = '%s_%s__fulltext' % (meta.table_name, col_field_name)
- col_ref = getattr(new_table.c, col_field_name)
- Index(index_name, col_ref, postgresql_ops={col_field_name: 'gin_trgm_ops'},
- postgresql_using='gin',
- mysql_prefix='FULLTEXT')
-
return metadata
diff --git a/data/model/storage.py b/data/model/storage.py
index adfa54cd9..d1ab07b85 100644
--- a/data/model/storage.py
+++ b/data/model/storage.py
@@ -1,156 +1,92 @@
import logging
-from peewee import SQL, IntegrityError
-from cachetools.func import lru_cache
-from collections import namedtuple
+from peewee import JOIN_LEFT_OUTER, fn
-from data.model import (config, db_transaction, InvalidImageException, TorrentInfoDoesNotExist,
- DataModelException, _basequery)
-from data.database import (ImageStorage, Image, ImageStoragePlacement, ImageStorageLocation,
- ImageStorageTransformation, ImageStorageSignature,
- ImageStorageSignatureKind, Repository, Namespace, TorrentInfo, ApprBlob,
- ensure_under_transaction, ManifestBlob)
+from data.model import config, db_transaction, InvalidImageException
+from data.database import (ImageStorage, Image, DerivedImageStorage, ImageStoragePlacement,
+ ImageStorageLocation, ImageStorageTransformation, ImageStorageSignature,
+ ImageStorageSignatureKind)
logger = logging.getLogger(__name__)
-_Location = namedtuple('location', ['id', 'name'])
-@lru_cache(maxsize=1)
-def get_image_locations():
- location_map = {}
- for location in ImageStorageLocation.select():
- location_tuple = _Location(location.id, location.name)
- location_map[location.id] = location_tuple
- location_map[location.name] = location_tuple
+def find_or_create_derived_storage(source, transformation_name, preferred_location):
+ existing = find_derived_storage(source, transformation_name)
+ if existing is not None:
+ return existing
- return location_map
-
-
-def get_image_location_for_name(location_name):
- locations = get_image_locations()
- return locations[location_name]
-
-
-def get_image_location_for_id(location_id):
- locations = get_image_locations()
- return locations[location_id]
-
-
-def add_storage_placement(storage, location_name):
- """ Adds a storage placement for the given storage at the given location. """
- location = get_image_location_for_name(location_name)
- try:
- ImageStoragePlacement.create(location=location.id, storage=storage)
- except IntegrityError:
- # Placement already exists. Nothing to do.
- pass
-
-
-def _orphaned_storage_query(candidate_ids):
- """ Returns the subset of the candidate ImageStorage IDs representing storages that are no
- longer referenced by images.
- """
- # Issue a union query to find all candidate storages that are still referenced. This
- # is much faster than the group_by and having call we used to use here.
- nonorphaned_queries = []
- for counter, candidate_id in enumerate(candidate_ids):
- query_alias = 'q{0}'.format(counter)
-
- # TODO: remove the join with Image once fully on the OCI data model.
- storage_subq = (ImageStorage
- .select(ImageStorage.id)
- .join(Image)
- .where(ImageStorage.id == candidate_id)
- .limit(1)
- .alias(query_alias))
-
- nonorphaned_queries.append(ImageStorage
- .select(SQL('*'))
- .from_(storage_subq))
-
- manifest_storage_subq = (ImageStorage
- .select(ImageStorage.id)
- .join(ManifestBlob)
- .where(ImageStorage.id == candidate_id)
- .limit(1)
- .alias(query_alias))
-
- nonorphaned_queries.append(ImageStorage
- .select(SQL('*'))
- .from_(manifest_storage_subq))
-
- # Build the set of storages that are missing. These storages are orphaned.
- nonorphaned_storage_ids = {storage.id for storage
- in _basequery.reduce_as_tree(nonorphaned_queries)}
- return list(candidate_ids - nonorphaned_storage_ids)
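The "union of tiny LIMIT 1 subqueries" pattern above relies on a helper that folds many queries into a balanced UNION tree; a hypothetical sketch of that idea (assuming peewee's `|` operator for compound selects, not the actual _basequery.reduce_as_tree):

def union_as_tree(queries):
    """Folds a non-empty list of select queries into a balanced UNION tree."""
    if len(queries) == 1:
        return queries[0]
    mid = len(queries) // 2
    return union_as_tree(queries[:mid]) | union_as_tree(queries[mid:])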
+ logger.debug('Creating storage derived from source: %s', source.uuid)
+ trans = ImageStorageTransformation.get(name=transformation_name)
+ new_storage = create_storage(preferred_location)
+ DerivedImageStorage.create(source=source, derivative=new_storage, transformation=trans)
+ return new_storage
def garbage_collect_storage(storage_id_whitelist):
- """ Performs GC on a possible subset of the storage's with the IDs found in the
- whitelist. The storages in the whitelist will be checked, and any orphaned will
- be removed, with those IDs being returned.
- """
if len(storage_id_whitelist) == 0:
- return []
+ return
- def placements_to_filtered_paths_set(placements_list):
- """ Returns the list of paths to remove from storage, filtered from the given placements
- query by removing any CAS paths that are still referenced by storage(s) in the database.
- """
- with ensure_under_transaction():
- if not placements_list:
- return set()
+ def placements_query_to_paths_set(placements_query):
+ return {(placement.location.name, config.store.image_path(placement.storage.uuid))
+ for placement in placements_query}
- # Find the content checksums not referenced by other storages. Any that are, we cannot
- # remove.
- content_checksums = set([placement.storage.content_checksum for placement in placements_list
- if placement.storage.cas_path])
+ def orphaned_storage_query(select_base_query, candidates, group_by):
+ return (select_base_query
+ .switch(ImageStorage)
+ .join(Image, JOIN_LEFT_OUTER)
+ .switch(ImageStorage)
+ .join(DerivedImageStorage, JOIN_LEFT_OUTER,
+ on=(ImageStorage.id == DerivedImageStorage.derivative))
+ .where(ImageStorage.id << list(candidates))
+ .group_by(*group_by)
+ .having((fn.Count(Image.id) == 0) & (fn.Count(DerivedImageStorage.id) == 0)))
- unreferenced_checksums = set()
- if content_checksums:
- # Check the current image storage.
- query = (ImageStorage
- .select(ImageStorage.content_checksum)
- .where(ImageStorage.content_checksum << list(content_checksums)))
- is_referenced_checksums = set([image_storage.content_checksum for image_storage in query])
- if is_referenced_checksums:
- logger.warning('GC attempted to remove CAS checksums %s, which are still IS referenced',
- is_referenced_checksums)
+ # Note: We remove the derived image storage in its own transaction as a way to reduce the
+ # time that the transaction holds locks on the database indices. This could result in a derived
+ # image storage being deleted for an image storage which is later reused during this time,
+ # but since these are caches anyway, it isn't terrible and is worth the tradeoff (for now).
+ logger.debug('Garbage collecting derived storage from candidates: %s', storage_id_whitelist)
+ with db_transaction():
+ # Find out which derived storages will be removed, and add them to the whitelist
+ # The comma after ImageStorage.id is VERY important: it makes it a tuple, which is a sequence
+ orphaned_from_candidates = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
+ storage_id_whitelist,
+ (ImageStorage.id,)))
- # Check the ApprBlob table as well.
- query = ApprBlob.select(ApprBlob.digest).where(ApprBlob.digest << list(content_checksums))
- appr_blob_referenced_checksums = set([blob.digest for blob in query])
- if appr_blob_referenced_checksums:
- logger.warning('GC attempted to remove CAS checksums %s, which are ApprBlob referenced',
- appr_blob_referenced_checksums)
+ if len(orphaned_from_candidates) > 0:
+ derived_to_remove = (ImageStorage
+ .select(ImageStorage.id)
+ .join(DerivedImageStorage,
+ on=(ImageStorage.id == DerivedImageStorage.derivative))
+ .where(DerivedImageStorage.source << orphaned_from_candidates))
+ storage_id_whitelist.update({derived.id for derived in derived_to_remove})
- unreferenced_checksums = (content_checksums - appr_blob_referenced_checksums -
- is_referenced_checksums)
-
- # Return all placements for all image storages found not at a CAS path or with a content
- # checksum that is referenced.
- return {(get_image_location_for_id(placement.location_id).name,
- get_layer_path(placement.storage))
- for placement in placements_list
- if not placement.storage.cas_path or
- placement.storage.content_checksum in unreferenced_checksums}
+ # Remove the derived image storages whose sources are orphaned storages
+ (DerivedImageStorage
+ .delete()
+ .where(DerivedImageStorage.source << orphaned_from_candidates)
+ .execute())
# Note: Both of these deletes must occur in the same transaction (unfortunately) because a
# storage without any placement is invalid, and a placement cannot exist without a storage.
- # TODO: We might want to allow for null storages on placements, which would allow us to
+ # TODO(jake): We might want to allow for null storages on placements, which would allow us to
# delete the storages, then delete the placements in a non-transaction.
logger.debug('Garbage collecting storages from candidates: %s', storage_id_whitelist)
with db_transaction():
- orphaned_storage_ids = _orphaned_storage_query(storage_id_whitelist)
- if len(orphaned_storage_ids) == 0:
- # Nothing to GC.
- return []
+ # Track all of the data that should be removed from blob storage
+ placements_to_remove = list(orphaned_storage_query(ImageStoragePlacement
+ .select(ImageStoragePlacement,
+ ImageStorage,
+ ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage),
+ storage_id_whitelist,
+ (ImageStorage, ImageStoragePlacement,
+ ImageStorageLocation)))
- placements_to_remove = list(ImageStoragePlacement
- .select(ImageStoragePlacement, ImageStorage)
- .join(ImageStorage)
- .where(ImageStorage.id << orphaned_storage_ids))
+ paths_to_remove = placements_query_to_paths_set(placements_to_remove)
# Remove the placements for orphaned storages
if len(placements_to_remove) > 0:
@@ -162,28 +98,16 @@ def garbage_collect_storage(storage_id_whitelist):
logger.debug('Removed %s image storage placements', placements_removed)
# Remove all orphaned storages
- torrents_removed = (TorrentInfo
- .delete()
- .where(TorrentInfo.storage << orphaned_storage_ids)
- .execute())
- logger.debug('Removed %s torrent info records', torrents_removed)
-
- signatures_removed = (ImageStorageSignature
+ # The comma after ImageStorage.id is VERY important: it makes it a tuple, which is a sequence
+ orphaned_storages = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
+ storage_id_whitelist,
+ (ImageStorage.id,)).alias('osq'))
+ if len(orphaned_storages) > 0:
+ storages_removed = (ImageStorage
.delete()
- .where(ImageStorageSignature.storage << orphaned_storage_ids)
+ .where(ImageStorage.id << orphaned_storages)
.execute())
- logger.debug('Removed %s image storage signatures', signatures_removed)
-
- storages_removed = (ImageStorage
- .delete()
- .where(ImageStorage.id << orphaned_storage_ids)
- .execute())
- logger.debug('Removed %s image storage records', storages_removed)
-
- # Determine the paths to remove. We cannot simply remove all paths matching storages, as CAS
- # can share the same path. We further filter these paths by checking for any storages still in
- # the database with the same content checksum.
- paths_to_remove = placements_to_filtered_paths_set(placements_to_remove)
+ logger.debug('Removed %s image storage records', storages_removed)
# We are going to make the conscious decision to not delete image storage blobs inside
# transactions.
@@ -192,182 +116,80 @@ def garbage_collect_storage(storage_id_whitelist):
logger.debug('Removing %s from %s', image_path, location_name)
config.store.remove({location_name}, image_path)
- return orphaned_storage_ids
-
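The shape of the function above is worth noting: all row-level bookkeeping happens inside the transaction, while the slow object-store deletions are deliberately kept outside it. A hedged sketch of that pattern (hypothetical callables):

def gc_storage(db_transaction, collect_orphaned_paths, store):
    with db_transaction():
        # Deletes placement/storage rows and returns the (location, path) pairs to remove.
        paths_to_remove = collect_orphaned_paths()
    # Blob deletions are slow and non-transactional, so they run after the commit.
    for location_name, image_path in paths_to_remove:
        store.remove({location_name}, image_path)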
-def create_v1_storage(location_name):
- storage = ImageStorage.create(cas_path=False, uploading=True)
- location = get_image_location_for_name(location_name)
- ImageStoragePlacement.create(location=location.id, storage=storage)
+def create_storage(location_name):
+ storage = ImageStorage.create()
+ location = ImageStorageLocation.get(name=location_name)
+ ImageStoragePlacement.create(location=location, storage=storage)
storage.locations = {location_name}
return storage
-def find_or_create_storage_signature(storage, signature_kind_name):
- found = lookup_storage_signature(storage, signature_kind_name)
+def find_or_create_storage_signature(storage, signature_kind):
+ found = lookup_storage_signature(storage, signature_kind)
if found is None:
- kind = ImageStorageSignatureKind.get(name=signature_kind_name)
+ kind = ImageStorageSignatureKind.get(name=signature_kind)
found = ImageStorageSignature.create(storage=storage, kind=kind)
return found
-def lookup_storage_signature(storage, signature_kind_name):
- kind = ImageStorageSignatureKind.get(name=signature_kind_name)
+def lookup_storage_signature(storage, signature_kind):
+ kind = ImageStorageSignatureKind.get(name=signature_kind)
try:
return (ImageStorageSignature
- .select()
- .where(ImageStorageSignature.storage == storage, ImageStorageSignature.kind == kind)
- .get())
+ .select()
+ .where(ImageStorageSignature.storage == storage,
+ ImageStorageSignature.kind == kind)
+ .get())
except ImageStorageSignature.DoesNotExist:
return None
-def _get_storage(query_modifier):
- query = (ImageStoragePlacement
- .select(ImageStoragePlacement, ImageStorage)
- .switch(ImageStoragePlacement)
- .join(ImageStorage))
+def find_derived_storage(source, transformation_name):
+ try:
+ found = (ImageStorage
+ .select(ImageStorage, DerivedImageStorage)
+ .join(DerivedImageStorage, on=(ImageStorage.id == DerivedImageStorage.derivative))
+ .join(ImageStorageTransformation)
+ .where(DerivedImageStorage.source == source,
+ ImageStorageTransformation.name == transformation_name)
+ .get())
- placements = list(query_modifier(query))
+ found.locations = {placement.location.name for placement in found.imagestorageplacement_set}
+ return found
+ except ImageStorage.DoesNotExist:
+ return None
- if not placements:
- raise InvalidImageException()
- found = placements[0].storage
- found.locations = {get_image_location_for_id(placement.location_id).name
- for placement in placements}
- return found
+def delete_derived_storage_by_uuid(storage_uuid):
+ try:
+ image_storage = get_storage_by_uuid(storage_uuid)
+ except InvalidImageException:
+ return
+
+ try:
+ DerivedImageStorage.get(derivative=image_storage)
+ except DerivedImageStorage.DoesNotExist:
+ return
+
+ image_storage.delete_instance(recursive=True)
def get_storage_by_uuid(storage_uuid):
- def filter_to_uuid(query):
- return query.where(ImageStorage.uuid == storage_uuid)
+ placements = list(ImageStoragePlacement
+ .select(ImageStoragePlacement, ImageStorage, ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage)
+ .where(ImageStorage.uuid == storage_uuid))
- try:
- return _get_storage(filter_to_uuid)
- except InvalidImageException:
+ if not placements:
raise InvalidImageException('No storage found with uuid: %s', storage_uuid)
+ found = placements[0].storage
+ found.locations = {placement.location.name for placement in placements}
-def get_layer_path(storage_record):
- """ Returns the path in the storage engine to the layer data referenced by the storage row. """
- assert storage_record.cas_path is not None
- return get_layer_path_for_storage(storage_record.uuid, storage_record.cas_path,
- storage_record.content_checksum)
+ return found
-def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum):
- """ Returns the path in the storage engine to the layer data referenced by the storage
- information. """
- store = config.store
- if not cas_path:
- logger.debug('Serving layer from legacy v1 path for storage %s', storage_uuid)
- return store.v1_image_layer_path(storage_uuid)
-
- return store.blob_path(content_checksum)
-
-
-def lookup_repo_storages_by_content_checksum(repo, checksums, by_manifest=False):
- """ Looks up repository storages (without placements) matching the given repository
- and checksum. """
- if not checksums:
- return []
-
- # There may be many duplicates of the checksums, so for performance reasons we are going
- # to use a union to select just one storage with each checksum
- queries = []
-
- for counter, checksum in enumerate(set(checksums)):
- query_alias = 'q{0}'.format(counter)
-
- # TODO: Remove once we have a new-style model for tracking temp uploaded blobs and
- # all legacy tables have been removed.
- if by_manifest:
- candidate_subq = (ImageStorage
- .select(ImageStorage.id, ImageStorage.content_checksum,
- ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
- ImageStorage.uncompressed_size, ImageStorage.uploading)
- .join(ManifestBlob)
- .where(ManifestBlob.repository == repo,
- ImageStorage.content_checksum == checksum)
- .limit(1)
- .alias(query_alias))
- else:
- candidate_subq = (ImageStorage
- .select(ImageStorage.id, ImageStorage.content_checksum,
- ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
- ImageStorage.uncompressed_size, ImageStorage.uploading)
- .join(Image)
- .where(Image.repository == repo, ImageStorage.content_checksum == checksum)
- .limit(1)
- .alias(query_alias))
-
- queries.append(ImageStorage
- .select(SQL('*'))
- .from_(candidate_subq))
-
- return _basequery.reduce_as_tree(queries)
-
-
-def set_image_storage_metadata(docker_image_id, namespace_name, repository_name, image_size,
- uncompressed_size):
- """ Sets metadata that is specific to the binary storage of the data, irrespective of how it
- is used in the layer tree.
- """
- if image_size is None:
- raise DataModelException('Empty image size field')
-
- try:
- image = (Image
- .select(Image, ImageStorage)
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .switch(Image)
- .join(ImageStorage)
- .where(Repository.name == repository_name, Namespace.username == namespace_name,
- Image.docker_image_id == docker_image_id)
- .get())
- except ImageStorage.DoesNotExist:
- raise InvalidImageException('No image with specified id and repository')
-
- # We MUST do this here; it can't be done in the corresponding image call because the storage
- # has not yet been pushed
- image.aggregate_size = _basequery.calculate_image_aggregate_size(image.ancestors, image_size,
- image.parent)
- image.save()
-
- image.storage.image_size = image_size
- image.storage.uncompressed_size = uncompressed_size
- image.storage.save()
- return image.storage
-
-
-def get_storage_locations(uuid):
- query = (ImageStoragePlacement
- .select()
- .join(ImageStorage)
- .where(ImageStorage.uuid == uuid))
-
- return [get_image_location_for_id(placement.location_id).name for placement in query]
-
-
-def save_torrent_info(storage_object, piece_length, pieces):
- try:
- return TorrentInfo.get(storage=storage_object, piece_length=piece_length)
- except TorrentInfo.DoesNotExist:
- try:
- return TorrentInfo.create(storage=storage_object, piece_length=piece_length, pieces=pieces)
- except IntegrityError:
- # TorrentInfo already exists for this storage.
- return TorrentInfo.get(storage=storage_object, piece_length=piece_length)
-
-
-def get_torrent_info(blob):
- try:
- return (TorrentInfo
- .select()
- .where(TorrentInfo.storage == blob)
- .get())
- except TorrentInfo.DoesNotExist:
- raise TorrentInfoDoesNotExist
diff --git a/data/model/tag.py b/data/model/tag.py
index 437a9765b..199d99704 100644
--- a/data/model/tag.py
+++ b/data/model/tag.py
@@ -1,55 +1,8 @@
-import logging
-
-from calendar import timegm
-from datetime import datetime
from uuid import uuid4
-from peewee import IntegrityError, JOIN, fn
-from data.model import (image, storage, db_transaction, DataModelException, _basequery,
- InvalidManifestException, TagAlreadyCreatedException, StaleTagException,
- config)
-from data.database import (RepositoryTag, Repository, Image, ImageStorage, Namespace, TagManifest,
- RepositoryNotification, Label, TagManifestLabel, get_epoch_timestamp,
- db_for_update, Manifest, ManifestLabel, ManifestBlob,
- ManifestLegacyImage, TagManifestToManifest,
- TagManifestLabelMap, TagToRepositoryTag, Tag, get_epoch_timestamp_ms)
-from util.timedeltastring import convert_to_timedelta
-
-
-logger = logging.getLogger(__name__)
-
-
-def get_max_id_for_sec_scan():
- """ Gets the maximum id for security scanning """
- return RepositoryTag.select(fn.Max(RepositoryTag.id)).scalar()
-
-
-def get_min_id_for_sec_scan(version):
- """ Gets the minimum id for a security scanning """
- return _tag_alive(RepositoryTag
- .select(fn.Min(RepositoryTag.id))
- .join(Image)
- .where(Image.security_indexed_engine < version)).scalar()
-
-
-def get_tag_pk_field():
- """ Returns the primary key for Image DB model """
- return RepositoryTag.id
-
-
-def get_tags_images_eligible_for_scan(clair_version):
- Parent = Image.alias()
- ParentImageStorage = ImageStorage.alias()
-
- return _tag_alive(RepositoryTag
- .select(Image, ImageStorage, Parent, ParentImageStorage, RepositoryTag)
- .join(Image, on=(RepositoryTag.image == Image.id))
- .join(ImageStorage, on=(Image.storage == ImageStorage.id))
- .switch(Image)
- .join(Parent, JOIN.LEFT_OUTER, on=(Image.parent == Parent.id))
- .join(ParentImageStorage, JOIN.LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage))
- .where(RepositoryTag.hidden == False)
- .where(Image.security_indexed_engine < clair_version))
+from data.model import image, db_transaction, DataModelException, _basequery
+from data.database import (RepositoryTag, Repository, Image, ImageStorage, Namespace,
+ get_epoch_timestamp, db_for_update)
def _tag_alive(query, now_ts=None):
@@ -59,179 +12,6 @@ def _tag_alive(query, now_ts=None):
(RepositoryTag.lifetime_end_ts > now_ts))
-def filter_has_repository_event(query, event):
- """ Filters the query by ensuring the repositories returned have the given event. """
- return (query
- .join(Repository)
- .join(RepositoryNotification)
- .where(RepositoryNotification.event == event))
-
-
-def filter_tags_have_repository_event(query, event):
- """ Filters the query by ensuring the repository tags live in a repository that has the given
- event. Also returns the image storage for the tag's image and orders the results by
- lifetime_start_ts.
- """
- query = filter_has_repository_event(query, event)
- query = query.switch(RepositoryTag).join(Image).join(ImageStorage)
- query = query.switch(RepositoryTag).order_by(RepositoryTag.lifetime_start_ts.desc())
- return query
-
-
-_MAX_SUB_QUERIES = 100
-_MAX_IMAGE_LOOKUP_COUNT = 500
-
-def get_matching_tags_for_images(image_pairs, filter_images=None, filter_tags=None,
- selections=None):
- """ Returns all tags that contain the images with the given docker_image_id and storage_uuid,
- as specified as an iterable of pairs. """
- if not image_pairs:
- return []
-
- image_pairs_set = set(image_pairs)
-
- # Find all possible matching image+storages.
- images = []
-
- while image_pairs:
- image_pairs_slice = image_pairs[:_MAX_IMAGE_LOOKUP_COUNT]
-
- ids = [pair[0] for pair in image_pairs_slice]
- uuids = [pair[1] for pair in image_pairs_slice]
-
- images_query = (Image
- .select(Image.id, Image.docker_image_id, Image.ancestors, ImageStorage.uuid)
- .join(ImageStorage)
- .where(Image.docker_image_id << ids, ImageStorage.uuid << uuids)
- .switch(Image))
-
- if filter_images is not None:
- images_query = filter_images(images_query)
-
- images.extend(list(images_query))
- image_pairs = image_pairs[_MAX_IMAGE_LOOKUP_COUNT:]
-
- # Filter down to those images actually in the pairs set and build the set of queries to run.
- individual_image_queries = []
-
- for img in images:
- # Make sure the image found is in the set of those requested, and that we haven't already
- # processed it. We need this check because the query above checks for images with matching
- # IDs OR storage UUIDs, rather than the expected ID+UUID pair. We do this for efficiency
- # reasons, and it is highly unlikely we'll find an image with a mismatch, but we need this
- # check to be absolutely sure.
- pair = (img.docker_image_id, img.storage.uuid)
- if pair not in image_pairs_set:
- continue
-
- # Remove the pair so we don't try it again.
- image_pairs_set.remove(pair)
-
- ancestors_str = '%s%s/%%' % (img.ancestors, img.id)
- query = (Image
- .select(Image.id)
- .where((Image.id == img.id) | (Image.ancestors ** ancestors_str)))
-
- individual_image_queries.append(query)
-
- if not individual_image_queries:
- return []
-
- # Shard based on the max subquery count. This is used to prevent going over the DB's max query
- # size, as well as to prevent the DB from locking up on a massive query.
- sharded_queries = []
- while individual_image_queries:
- shard = individual_image_queries[:_MAX_SUB_QUERIES]
- sharded_queries.append(_basequery.reduce_as_tree(shard))
- individual_image_queries = individual_image_queries[_MAX_SUB_QUERIES:]
-
- # Collect IDs of the tags found for each query.
- tags = {}
- for query in sharded_queries:
- ImageAlias = Image.alias()
- tag_query = (_tag_alive(RepositoryTag
- .select(*(selections or []))
- .distinct()
- .join(ImageAlias)
- .where(RepositoryTag.hidden == False)
- .where(ImageAlias.id << query)
- .switch(RepositoryTag)))
-
- if filter_tags is not None:
- tag_query = filter_tags(tag_query)
-
- for tag in tag_query:
- tags[tag.id] = tag
-
- return tags.values()
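Both the _MAX_IMAGE_LOOKUP_COUNT batching and the _MAX_SUB_QUERIES sharding above are instances of a simple chunking helper; a generic sketch (not the actual Quay helper):

def chunked(items, size):
    """Yields successive slices of `items` no longer than `size`."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

# e.g. each shard of subqueries could then be folded into a single tree of unions:
#   for shard in chunked(individual_image_queries, _MAX_SUB_QUERIES):
#       sharded_queries.append(_basequery.reduce_as_tree(shard))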
-
-
-def get_matching_tags(docker_image_id, storage_uuid, *args):
- """ Returns a query pointing to all tags that contain the image with the
- given docker_image_id and storage_uuid. """
- image_row = image.get_image_with_storage(docker_image_id, storage_uuid)
- if image_row is None:
- return RepositoryTag.select().where(RepositoryTag.id < 0) # Empty query.
-
- ancestors_str = '%s%s/%%' % (image_row.ancestors, image_row.id)
- return _tag_alive(RepositoryTag
- .select(*args)
- .distinct()
- .join(Image)
- .join(ImageStorage)
- .where(RepositoryTag.hidden == False)
- .where((Image.id == image_row.id) |
- (Image.ancestors ** ancestors_str)))
-
-
-def get_tags_for_image(image_id, *args):
- return _tag_alive(RepositoryTag
- .select(*args)
- .distinct()
- .where(RepositoryTag.image == image_id,
- RepositoryTag.hidden == False))
-
-
-def get_tag_manifest_digests(tags):
- """ Returns a map from tag ID to its associated manifest digest, if any. """
- if not tags:
- return dict()
-
- manifests = (TagManifest
- .select(TagManifest.tag, TagManifest.digest)
- .where(TagManifest.tag << [t.id for t in tags]))
-
- return {manifest.tag_id: manifest.digest for manifest in manifests}
-
-
-def list_active_repo_tags(repo, start_id=None, limit=None, include_images=True):
- """ Returns all of the active, non-hidden tags in a repository, joined to they images
- and (if present), their manifest.
- """
- if include_images:
- query = _tag_alive(RepositoryTag
- .select(RepositoryTag, Image, ImageStorage, TagManifest.digest)
- .join(Image)
- .join(ImageStorage)
- .where(RepositoryTag.repository == repo, RepositoryTag.hidden == False)
- .switch(RepositoryTag)
- .join(TagManifest, JOIN.LEFT_OUTER)
- .order_by(RepositoryTag.id))
- else:
- query = _tag_alive(RepositoryTag
- .select(RepositoryTag)
- .where(RepositoryTag.repository == repo, RepositoryTag.hidden == False)
- .order_by(RepositoryTag.id))
-
- if start_id is not None:
- query = query.where(RepositoryTag.id >= start_id)
-
- if limit is not None:
- query = query.limit(limit)
-
- return query
-
-
def list_repository_tags(namespace_name, repository_name, include_hidden=False,
include_storage=False):
to_select = (RepositoryTag, Image)
@@ -256,66 +36,33 @@ def list_repository_tags(namespace_name, repository_name, include_hidden=False,
return query
-def create_or_update_tag(namespace_name, repository_name, tag_name, tag_docker_image_id,
- reversion=False, now_ms=None):
+def create_or_update_tag(namespace_name, repository_name, tag_name,
+ tag_docker_image_id, reversion=False):
try:
repo = _basequery.get_existing_repository(namespace_name, repository_name)
except Repository.DoesNotExist:
raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
- return create_or_update_tag_for_repo(repo.id, tag_name, tag_docker_image_id, reversion=reversion,
- now_ms=now_ms)
-
-def create_or_update_tag_for_repo(repository_id, tag_name, tag_docker_image_id, reversion=False,
- oci_manifest=None, now_ms=None):
- now_ms = now_ms or get_epoch_timestamp_ms()
- now_ts = int(now_ms / 1000)
+ now_ts = get_epoch_timestamp()
with db_transaction():
try:
tag = db_for_update(_tag_alive(RepositoryTag
.select()
- .where(RepositoryTag.repository == repository_id,
+ .where(RepositoryTag.repository == repo,
RepositoryTag.name == tag_name), now_ts)).get()
tag.lifetime_end_ts = now_ts
tag.save()
-
- # Check for an OCI tag.
- try:
- oci_tag = db_for_update(Tag
- .select()
- .join(TagToRepositoryTag)
- .where(TagToRepositoryTag.repository_tag == tag)).get()
- oci_tag.lifetime_end_ms = now_ms
- oci_tag.save()
- except Tag.DoesNotExist:
- pass
except RepositoryTag.DoesNotExist:
pass
- except IntegrityError:
- msg = 'Tag with name %s was stale when we tried to update it; Please retry the push'
- raise StaleTagException(msg % tag_name)
try:
- image_obj = Image.get(Image.docker_image_id == tag_docker_image_id,
- Image.repository == repository_id)
+ image_obj = Image.get(Image.docker_image_id == tag_docker_image_id, Image.repository == repo)
except Image.DoesNotExist:
raise DataModelException('Invalid image with id: %s' % tag_docker_image_id)
- try:
- created = RepositoryTag.create(repository=repository_id, image=image_obj, name=tag_name,
- lifetime_start_ts=now_ts, reversion=reversion)
- if oci_manifest:
- # Create the OCI tag as well.
- oci_tag = Tag.create(repository=repository_id, manifest=oci_manifest, name=tag_name,
- lifetime_start_ms=now_ms, reversion=reversion,
- tag_kind=Tag.tag_kind.get_id('tag'))
- TagToRepositoryTag.create(tag=oci_tag, repository_tag=created, repository=repository_id)
-
- return created
- except IntegrityError:
- msg = 'Tag with name %s and lifetime start %s already exists'
- raise TagAlreadyCreatedException(msg % (tag_name, now_ts))
+ return RepositoryTag.create(repository=repo, image=image_obj, name=tag_name,
+ lifetime_start_ts=now_ts, reversion=reversion)
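The update path above follows a "close the old row, open a new row" pattern so that tag history is preserved; a minimal sketch with hypothetical callables (not the actual model API):

def retarget_tag(find_alive_tag, close_tag, open_tag, now_ts):
    alive = find_alive_tag()
    if alive is not None:
        close_tag(alive, end_ts=now_ts)   # old row stays around as history (time machine)
    return open_tag(start_ts=now_ts)      # the new row becomes the live tag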
def create_temporary_hidden_tag(repo, image_obj, expiration_s):
@@ -329,23 +76,8 @@ def create_temporary_hidden_tag(repo, image_obj, expiration_s):
return tag_name
-def lookup_unrecoverable_tags(repo):
- """ Returns the tags in a repository that are expired and past their time machine recovery
- period. """
- expired_clause = get_epoch_timestamp() - Namespace.removed_tag_expiration_s
- return (RepositoryTag
- .select()
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(RepositoryTag.repository == repo)
- .where(~(RepositoryTag.lifetime_end_ts >> None),
- RepositoryTag.lifetime_end_ts <= expired_clause))
-
-
-def delete_tag(namespace_name, repository_name, tag_name, now_ms=None):
- now_ms = now_ms or get_epoch_timestamp_ms()
- now_ts = int(now_ms / 1000)
-
+def delete_tag(namespace_name, repository_name, tag_name):
+ now_ts = get_epoch_timestamp()
with db_transaction():
try:
query = _tag_alive(RepositoryTag
@@ -364,453 +96,67 @@ def delete_tag(namespace_name, repository_name, tag_name, now_ms=None):
found.lifetime_end_ts = now_ts
found.save()
- try:
- oci_tag_query = TagToRepositoryTag.select().where(TagToRepositoryTag.repository_tag == found)
- oci_tag = db_for_update(oci_tag_query).get().tag
- oci_tag.lifetime_end_ms = now_ms
- oci_tag.save()
- except TagToRepositoryTag.DoesNotExist:
- pass
- return found
+def garbage_collect_tags(repo):
+ expired_time = get_epoch_timestamp() - repo.namespace_user.removed_tag_expiration_s
+
+ tags_to_delete = list(RepositoryTag
+ .select(RepositoryTag.id)
+ .where(RepositoryTag.repository == repo,
+ ~(RepositoryTag.lifetime_end_ts >> None),
+ (RepositoryTag.lifetime_end_ts <= expired_time))
+ .order_by(RepositoryTag.id))
+ if len(tags_to_delete) > 0:
+ (RepositoryTag
+ .delete()
+ .where(RepositoryTag.id << tags_to_delete)
+ .execute())
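
The queries in this hunk all hinge on the same liveness rule: a tag is expired once its lifetime_end_ts is set and falls at or before the cutoff, and alive otherwise. A minimal sketch of that rule applied to a single row (the real _tag_alive helper defined elsewhere in this module operates on Peewee queries rather than plain values):

def is_tag_alive(lifetime_end_ts, now_ts):
  # Alive means: never ended, or the end timestamp is still in the future
  # relative to the supplied cutoff (seconds since the epoch).
  return lifetime_end_ts is None or lifetime_end_ts > now_ts
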
-def _get_repo_tag_image(tag_name, include_storage, modifier):
- query = Image.select().join(RepositoryTag)
+def get_tag_image(namespace_name, repository_name, tag_name):
+ def limit_to_tag(query):
+ return _tag_alive(query
+ .switch(Image)
+ .join(RepositoryTag)
+ .where(RepositoryTag.name == tag_name))
- if include_storage:
- query = (Image
- .select(Image, ImageStorage)
- .join(ImageStorage)
- .switch(Image)
- .join(RepositoryTag))
-
- images = _tag_alive(modifier(query.where(RepositoryTag.name == tag_name)))
+ images = image.get_repository_images_base(namespace_name, repository_name, limit_to_tag)
if not images:
raise DataModelException('Unable to find image for tag.')
else:
return images[0]
-def get_repo_tag_image(repo, tag_name, include_storage=False):
- def modifier(query):
- return query.where(RepositoryTag.repository == repo)
-
- return _get_repo_tag_image(tag_name, include_storage, modifier)
-
-
-def get_tag_image(namespace_name, repository_name, tag_name, include_storage=False):
- def modifier(query):
- return (query
- .switch(RepositoryTag)
- .join(Repository)
- .join(Namespace)
- .where(Namespace.username == namespace_name, Repository.name == repository_name))
-
- return _get_repo_tag_image(tag_name, include_storage, modifier)
-
-
-def list_repository_tag_history(repo_obj, page=1, size=100, specific_tag=None, active_tags_only=False, since_time=None):
- # Only available on OCI model
- if since_time is not None:
- raise NotImplementedError
-
+def list_repository_tag_history(repo_obj, page=1, size=100, specific_tag=None):
query = (RepositoryTag
- .select(RepositoryTag, Image, ImageStorage)
+ .select(RepositoryTag, Image)
.join(Image)
- .join(ImageStorage)
- .switch(RepositoryTag)
.where(RepositoryTag.repository == repo_obj)
.where(RepositoryTag.hidden == False)
- .order_by(RepositoryTag.lifetime_start_ts.desc(), RepositoryTag.name)
- .limit(size + 1)
- .offset(size * (page - 1)))
-
- if active_tags_only:
- query = _tag_alive(query)
+ .order_by(RepositoryTag.lifetime_start_ts.desc())
+ .paginate(page, size))
if specific_tag:
query = query.where(RepositoryTag.name == specific_tag)
- tags = list(query)
- if not tags:
- return [], {}, False
-
- manifest_map = get_tag_manifest_digests(tags)
- return tags[0:size], manifest_map, len(tags) > size
-
-
-def restore_tag_to_manifest(repo_obj, tag_name, manifest_digest):
- """ Restores a tag to a specific manifest digest. """
- with db_transaction():
- # Verify that the manifest digest already existed under this repository under the
- # tag.
- try:
- tag_manifest = (TagManifest
- .select(TagManifest, RepositoryTag, Image)
- .join(RepositoryTag)
- .join(Image)
- .where(RepositoryTag.repository == repo_obj)
- .where(RepositoryTag.name == tag_name)
- .where(TagManifest.digest == manifest_digest)
- .get())
- except TagManifest.DoesNotExist:
- raise DataModelException('Cannot restore to unknown or invalid digest')
-
- # Lookup the existing image, if any.
- try:
- existing_image = get_repo_tag_image(repo_obj, tag_name)
- except DataModelException:
- existing_image = None
-
- docker_image_id = tag_manifest.tag.image.docker_image_id
- oci_manifest = None
- try:
- oci_manifest = Manifest.get(repository=repo_obj, digest=manifest_digest)
- except Manifest.DoesNotExist:
- pass
-
- # Change the tag and tag manifest to point to the updated image.
- updated_tag = create_or_update_tag_for_repo(repo_obj, tag_name, docker_image_id,
- reversion=True, oci_manifest=oci_manifest)
- tag_manifest.tag = updated_tag
- tag_manifest.save()
- return existing_image
-
-
-def restore_tag_to_image(repo_obj, tag_name, docker_image_id):
- """ Restores a tag to a specific image ID. """
- with db_transaction():
- # Verify that the image ID already existed under this repository under the
- # tag.
- try:
- (RepositoryTag
- .select()
- .join(Image)
- .where(RepositoryTag.repository == repo_obj)
- .where(RepositoryTag.name == tag_name)
- .where(Image.docker_image_id == docker_image_id)
- .get())
- except RepositoryTag.DoesNotExist:
- raise DataModelException('Cannot restore to unknown or invalid image')
-
- # Lookup the existing image, if any.
- try:
- existing_image = get_repo_tag_image(repo_obj, tag_name)
- except DataModelException:
- existing_image = None
-
- create_or_update_tag_for_repo(repo_obj, tag_name, docker_image_id, reversion=True)
- return existing_image
-
-
-def store_tag_manifest_for_testing(namespace_name, repository_name, tag_name, manifest,
- leaf_layer_id, storage_id_map):
- """ Stores a tag manifest for a specific tag name in the database. Returns the TagManifest
- object, as well as a boolean indicating whether the TagManifest was created.
- """
- try:
- repo = _basequery.get_existing_repository(namespace_name, repository_name)
- except Repository.DoesNotExist:
- raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
-
- return store_tag_manifest_for_repo(repo.id, tag_name, manifest, leaf_layer_id, storage_id_map)
-
-
-def store_tag_manifest_for_repo(repository_id, tag_name, manifest, leaf_layer_id, storage_id_map,
- reversion=False):
- """ Stores a tag manifest for a specific tag name in the database. Returns the TagManifest
- object, as well as a boolean indicating whether the TagManifest was created.
- """
- # Create the new-style OCI manifest and its blobs.
- oci_manifest = _populate_manifest_and_blobs(repository_id, manifest, storage_id_map,
- leaf_layer_id=leaf_layer_id)
-
- # Create the tag for the tag manifest.
- tag = create_or_update_tag_for_repo(repository_id, tag_name, leaf_layer_id,
- reversion=reversion, oci_manifest=oci_manifest)
-
- # Add a tag manifest pointing to that tag.
- try:
- manifest = TagManifest.get(digest=manifest.digest)
- manifest.tag = tag
- manifest.save()
- return manifest, False
- except TagManifest.DoesNotExist:
- created = _associate_manifest(tag, oci_manifest)
- return created, True
-
-
-def get_active_tag(namespace, repo_name, tag_name):
- return _tag_alive(RepositoryTag
- .select()
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(RepositoryTag.name == tag_name, Repository.name == repo_name,
- Namespace.username == namespace)).get()
-
-def get_active_tag_for_repo(repo, tag_name):
- try:
- return _tag_alive(RepositoryTag
- .select(RepositoryTag, Image, ImageStorage)
- .join(Image)
- .join(ImageStorage)
- .where(RepositoryTag.name == tag_name,
- RepositoryTag.repository == repo,
- RepositoryTag.hidden == False)).get()
- except RepositoryTag.DoesNotExist:
- return None
-
-def get_expired_tag_in_repo(repo, tag_name):
- return (RepositoryTag
- .select()
- .where(RepositoryTag.name == tag_name, RepositoryTag.repository == repo)
- .where(~(RepositoryTag.lifetime_end_ts >> None))
- .where(RepositoryTag.lifetime_end_ts <= get_epoch_timestamp())
- .get())
-
-
-def get_possibly_expired_tag(namespace, repo_name, tag_name):
- return (RepositoryTag
- .select()
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(RepositoryTag.name == tag_name, Repository.name == repo_name,
- Namespace.username == namespace)).get()
-
-def associate_generated_tag_manifest_with_tag(tag, manifest, storage_id_map):
- oci_manifest = _populate_manifest_and_blobs(tag.repository, manifest, storage_id_map)
-
- with db_transaction():
- try:
- (Tag
- .select()
- .join(TagToRepositoryTag)
- .where(TagToRepositoryTag.repository_tag == tag)).get()
- except Tag.DoesNotExist:
- oci_tag = Tag.create(repository=tag.repository, manifest=oci_manifest, name=tag.name,
- reversion=tag.reversion,
- lifetime_start_ms=tag.lifetime_start_ts * 1000,
- lifetime_end_ms=(tag.lifetime_end_ts * 1000
- if tag.lifetime_end_ts else None),
- tag_kind=Tag.tag_kind.get_id('tag'))
- TagToRepositoryTag.create(tag=oci_tag, repository_tag=tag, repository=tag.repository)
-
- return _associate_manifest(tag, oci_manifest)
-
-
-def _associate_manifest(tag, oci_manifest):
- with db_transaction():
- tag_manifest = TagManifest.create(tag=tag, digest=oci_manifest.digest,
- json_data=oci_manifest.manifest_bytes)
- TagManifestToManifest.create(tag_manifest=tag_manifest, manifest=oci_manifest)
- return tag_manifest
-
-
-def _populate_manifest_and_blobs(repository, manifest, storage_id_map, leaf_layer_id=None):
- leaf_layer_id = leaf_layer_id or manifest.leaf_layer_v1_image_id
- try:
- legacy_image = Image.get(Image.docker_image_id == leaf_layer_id,
- Image.repository == repository)
- except Image.DoesNotExist:
- raise DataModelException('Invalid image with id: %s' % leaf_layer_id)
-
- storage_ids = set()
- for blob_digest in manifest.local_blob_digests:
- image_storage_id = storage_id_map.get(blob_digest)
- if image_storage_id is None:
- logger.error('Missing blob for manifest `%s` in: %s', blob_digest, storage_id_map)
- raise DataModelException('Missing blob for manifest `%s`' % blob_digest)
-
- if image_storage_id in storage_ids:
- continue
-
- storage_ids.add(image_storage_id)
-
- return populate_manifest(repository, manifest, legacy_image, storage_ids)
-
-
-def populate_manifest(repository, manifest, legacy_image, storage_ids):
- """ Populates the rows for the manifest, including its blobs and legacy image. """
- media_type = Manifest.media_type.get_id(manifest.media_type)
-
- # Check for an existing manifest. If present, return it.
- try:
- return Manifest.get(repository=repository, digest=manifest.digest)
- except Manifest.DoesNotExist:
- pass
-
- with db_transaction():
- try:
- manifest_row = Manifest.create(digest=manifest.digest, repository=repository,
- manifest_bytes=manifest.bytes.as_encoded_str(),
- media_type=media_type)
- except IntegrityError as ie:
- logger.debug('Got integrity error when trying to write manifest: %s', ie)
- return Manifest.get(repository=repository, digest=manifest.digest)
-
- ManifestLegacyImage.create(manifest=manifest_row, repository=repository, image=legacy_image)
-
- blobs_to_insert = [dict(manifest=manifest_row, repository=repository,
- blob=storage_id) for storage_id in storage_ids]
- if blobs_to_insert:
- ManifestBlob.insert_many(blobs_to_insert).execute()
-
- return manifest_row
-
-
-def get_tag_manifest(tag):
- try:
- return TagManifest.get(tag=tag)
- except TagManifest.DoesNotExist:
- return None
-
-
-def load_tag_manifest(namespace, repo_name, tag_name):
- try:
- return (_load_repo_manifests(namespace, repo_name)
- .where(RepositoryTag.name == tag_name)
- .get())
- except TagManifest.DoesNotExist:
- msg = 'Manifest not found for tag {0} in repo {1}/{2}'.format(tag_name, namespace, repo_name)
- raise InvalidManifestException(msg)
-
-
-def delete_manifest_by_digest(namespace, repo_name, digest):
- tag_manifests = list(_load_repo_manifests(namespace, repo_name)
- .where(TagManifest.digest == digest))
-
- now_ms = get_epoch_timestamp_ms()
- for tag_manifest in tag_manifests:
- try:
- tag = _tag_alive(RepositoryTag.select().where(RepositoryTag.id == tag_manifest.tag_id)).get()
- delete_tag(namespace, repo_name, tag_manifest.tag.name, now_ms)
- except RepositoryTag.DoesNotExist:
- pass
-
- return [tag_manifest.tag for tag_manifest in tag_manifests]
-
-
-def load_manifest_by_digest(namespace, repo_name, digest, allow_dead=False):
- try:
- return (_load_repo_manifests(namespace, repo_name, allow_dead=allow_dead)
- .where(TagManifest.digest == digest)
- .get())
- except TagManifest.DoesNotExist:
- msg = 'Manifest not found with digest {0} in repo {1}/{2}'.format(digest, namespace, repo_name)
- raise InvalidManifestException(msg)
-
-
-def _load_repo_manifests(namespace, repo_name, allow_dead=False):
- query = (TagManifest
- .select(TagManifest, RepositoryTag)
- .join(RepositoryTag)
- .join(Image)
- .join(Repository)
- .join(Namespace, on=(Namespace.id == Repository.namespace_user))
- .where(Repository.name == repo_name, Namespace.username == namespace))
-
- if not allow_dead:
- query = _tag_alive(query)
-
return query
-def change_repository_tag_expiration(namespace_name, repo_name, tag_name, expiration_date):
- """ Changes the expiration of the tag with the given name to the given expiration datetime. If
- the expiration datetime is None, then the tag is marked as not expiring.
- """
+
+def revert_tag(repo_obj, tag_name, docker_image_id):
+ """ Reverts a tag to a specific image ID. """
+ # Verify that the image ID already existed under this repository under the
+ # tag.
try:
- tag = get_active_tag(namespace_name, repo_name, tag_name)
- return change_tag_expiration(tag, expiration_date)
+ (RepositoryTag
+ .select()
+ .join(Image)
+ .where(RepositoryTag.repository == repo_obj)
+ .where(RepositoryTag.name == tag_name)
+ .where(Image.docker_image_id == docker_image_id)
+ .get())
except RepositoryTag.DoesNotExist:
- return (None, False)
+ raise DataModelException('Cannot revert to unknown or invalid image')
+ return create_or_update_tag(repo_obj.namespace_user.username, repo_obj.name, tag_name,
+ docker_image_id, reversion=True)
-def set_tag_expiration_for_manifest(tag_manifest, expiration_sec):
- """
- Changes the expiration of the tag that points to the given manifest to be its lifetime start +
- the expiration seconds.
- """
- expiration_time_ts = tag_manifest.tag.lifetime_start_ts + expiration_sec
- expiration_date = datetime.utcfromtimestamp(expiration_time_ts)
- return change_tag_expiration(tag_manifest.tag, expiration_date)
-
-
-def change_tag_expiration(tag, expiration_date):
- """ Changes the expiration of the given tag to the given expiration datetime. If
- the expiration datetime is None, then the tag is marked as not expiring.
- """
- end_ts = None
- min_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MINIMUM', '1h'))
- max_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MAXIMUM', '104w'))
-
- if expiration_date is not None:
- offset = timegm(expiration_date.utctimetuple()) - tag.lifetime_start_ts
- offset = min(max(offset, min_expire_sec.total_seconds()), max_expire_sec.total_seconds())
- end_ts = tag.lifetime_start_ts + offset
-
- if end_ts == tag.lifetime_end_ts:
- return (None, True)
-
- return set_tag_end_ts(tag, end_ts)
-
-
-def set_tag_end_ts(tag, end_ts):
- """ Sets the end timestamp for a tag. Should only be called by change_tag_expiration
- or tests.
- """
- end_ms = end_ts * 1000 if end_ts is not None else None
-
- with db_transaction():
- # Note: We check not just the ID of the tag but also its lifetime_end_ts, to ensure that it has
-    # not changed while we were updating its expiration.
- result = (RepositoryTag
- .update(lifetime_end_ts=end_ts)
- .where(RepositoryTag.id == tag.id,
- RepositoryTag.lifetime_end_ts == tag.lifetime_end_ts)
- .execute())
-
- # Check for a mapping to an OCI tag.
- try:
- oci_tag = (Tag
- .select()
- .join(TagToRepositoryTag)
- .where(TagToRepositoryTag.repository_tag == tag)
- .get())
-
- (Tag
- .update(lifetime_end_ms=end_ms)
- .where(Tag.id == oci_tag.id,
- Tag.lifetime_end_ms == oci_tag.lifetime_end_ms)
- .execute())
- except Tag.DoesNotExist:
- pass
-
- return (tag.lifetime_end_ts, result > 0)
-
-
-def find_matching_tag(repo_id, tag_names):
- """ Finds the most recently pushed alive tag in the repository with one of the given names,
- if any.
- """
- try:
- return (_tag_alive(RepositoryTag
- .select()
- .where(RepositoryTag.repository == repo_id,
- RepositoryTag.name << list(tag_names))
- .order_by(RepositoryTag.lifetime_start_ts.desc()))
- .get())
- except RepositoryTag.DoesNotExist:
- return None
-
-
-def get_most_recent_tag(repo_id):
- """ Returns the most recently pushed alive tag in the repository, or None if none. """
- try:
- return (_tag_alive(RepositoryTag
- .select()
- .where(RepositoryTag.repository == repo_id, RepositoryTag.hidden == False)
- .order_by(RepositoryTag.lifetime_start_ts.desc()))
- .get())
- except RepositoryTag.DoesNotExist:
- return None
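
For reference, the change_tag_expiration logic removed above clamps the requested expiration to a configurable window before persisting it. A standalone sketch of just that clamping, assuming the default LABELED_EXPIRATION_MINIMUM of 1h and LABELED_EXPIRATION_MAXIMUM of 104w expressed in seconds:

from calendar import timegm

def clamp_end_ts(lifetime_start_ts, expiration_date,
                 min_expire_s=3600, max_expire_s=104 * 7 * 24 * 3600):
  # None means the tag never expires.
  if expiration_date is None:
    return None
  # Clamp the offset between the requested expiration and the tag's start
  # time to the configured [minimum, maximum] window.
  offset = timegm(expiration_date.utctimetuple()) - lifetime_start_ts
  offset = min(max(offset, min_expire_s), max_expire_s)
  return lifetime_start_ts + offset

set_tag_end_ts then writes the resulting value guarded by the tag's current lifetime_end_ts, so a concurrent update is detected rather than silently overwritten.
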
diff --git a/data/model/team.py b/data/model/team.py
index 4988d74ac..532d55d3a 100644
--- a/data/model/team.py
+++ b/data/model/team.py
@@ -1,41 +1,13 @@
-import json
-import re
-import uuid
-
-from datetime import datetime
-from peewee import fn
-
-from data.database import (Team, TeamMember, TeamRole, User, TeamMemberInvite, RepositoryPermission,
- TeamSync, LoginService, FederatedLogin, db_random_func, db_transaction)
+from data.database import Team, TeamMember, TeamRole, User, TeamMemberInvite, Repository
from data.model import (DataModelException, InvalidTeamException, UserAlreadyInTeam,
- InvalidTeamMemberException, _basequery)
-from data.text import prefix_search
+ InvalidTeamMemberException, user, _basequery)
from util.validation import validate_username
-from util.morecollections import AttrDict
-
-
-MIN_TEAMNAME_LENGTH = 2
-MAX_TEAMNAME_LENGTH = 255
-
-VALID_TEAMNAME_REGEX = r'^([a-z0-9]+(?:[._-][a-z0-9]+)*)$'
-
-
-def validate_team_name(teamname):
- if not re.match(VALID_TEAMNAME_REGEX, teamname):
- return (False, 'Namespace must match expression ' + VALID_TEAMNAME_REGEX)
-
- length_match = (len(teamname) >= MIN_TEAMNAME_LENGTH and len(teamname) <= MAX_TEAMNAME_LENGTH)
- if not length_match:
- return (False, 'Team must be between %s and %s characters in length' %
- (MIN_TEAMNAME_LENGTH, MAX_TEAMNAME_LENGTH))
-
- return (True, '')
def create_team(name, org_obj, team_role_name, description=''):
- (teamname_valid, teamname_issue) = validate_team_name(name)
- if not teamname_valid:
- raise InvalidTeamException('Invalid team name %s: %s' % (name, teamname_issue))
+ (username_valid, username_issue) = validate_username(name)
+ if not username_valid:
+ raise InvalidTeamException('Invalid team name %s: %s' % (name, username_issue))
if not org_obj.organization:
raise InvalidTeamException('Specified organization %s was not an organization' %
@@ -79,6 +51,10 @@ def remove_user_from_team(org_name, team_name, username, removed_by_username):
user_in_team.delete_instance()
+def get_team_org_role(team):
+ return TeamRole.get(TeamRole.id == team.role.id)
+
+
def set_team_org_permission(team, team_role_name, set_by_username):
if team.role.name == 'admin' and team_role_name != 'admin':
# We need to make sure we're not removing the users only admin role
@@ -121,10 +97,12 @@ def remove_team(org_name, team_name, removed_by_username):
team = found[0]
if team.role.name == 'admin':
admin_teams = list(__get_user_admin_teams(org_name, removed_by_username))
+
if len(admin_teams) <= 1:
- # The team we are trying to remove is the only admin team containing this user.
- msg = "Deleting team '%s' would remove admin ability for user '%s' in organization '%s'"
- raise DataModelException(msg % (team_name, removed_by_username, org_name))
+ # The team we are trying to remove is the only admin team for this user
+ msg = ('Deleting team \'%s\' would remove all admin from user \'%s\'' %
+ (team_name, removed_by_username))
+ raise DataModelException(msg)
team.delete_instance(recursive=True, delete_nullable=True)
@@ -143,13 +121,11 @@ def add_or_invite_to_team(inviter, team, user_obj=None, email=None, requires_inv
raise InvalidTeamMemberException('Cannot add the specified robot to this team, ' +
'as it is not a member of the organization')
else:
- query = (TeamMember
- .select()
- .where(TeamMember.user == user_obj)
- .join(Team)
- .join(User)
- .where(User.username == orgname, User.organization == True))
- requires_invite = not any(query)
+ Org = User.alias()
+ found = User.select(User.username)
+ found = found.where(User.username == user_obj.username).join(TeamMember).join(Team)
+ found = found.join(Org, on=(Org.username == orgname)).limit(1)
+ requires_invite = not any(found)
# If we have a valid user and no invite is required, simply add the user to the team.
if user_obj and not requires_invite:
@@ -161,13 +137,13 @@ def add_or_invite_to_team(inviter, team, user_obj=None, email=None, requires_inv
def get_matching_user_teams(team_prefix, user_obj, limit=10):
- team_prefix_search = prefix_search(Team.name, team_prefix)
query = (Team
- .select(Team.id.distinct(), Team)
+ .select()
.join(User)
.switch(Team)
.join(TeamMember)
- .where(TeamMember.user == user_obj, team_prefix_search)
+ .where(TeamMember.user == user_obj, Team.name ** (team_prefix + '%'))
+ .distinct(Team.id)
.limit(limit))
return query
@@ -186,82 +162,31 @@ def get_organization_team(orgname, teamname):
def get_matching_admined_teams(team_prefix, user_obj, limit=10):
- team_prefix_search = prefix_search(Team.name, team_prefix)
admined_orgs = (_basequery.get_user_organizations(user_obj.username)
.switch(Team)
.join(TeamRole)
.where(TeamRole.name == 'admin'))
query = (Team
- .select(Team.id.distinct(), Team)
+ .select()
.join(User)
.switch(Team)
.join(TeamMember)
- .where(team_prefix_search, Team.organization << (admined_orgs))
+ .where(Team.name ** (team_prefix + '%'), Team.organization << (admined_orgs))
+ .distinct(Team.id)
.limit(limit))
return query
def get_matching_teams(team_prefix, organization):
- team_prefix_search = prefix_search(Team.name, team_prefix)
- query = Team.select().where(team_prefix_search, Team.organization == organization)
+ query = Team.select().where(Team.name ** (team_prefix + '%'),
+ Team.organization == organization)
return query.limit(10)
-def get_teams_within_org(organization, has_external_auth=False):
- """ Returns a AttrDict of team info (id, name, description), its role under the org,
- the number of repositories on which it has permission, and the number of members.
- """
- query = (Team.select()
- .where(Team.organization == organization)
- .join(TeamRole))
-
- def _team_view(team):
- return {
- 'id': team.id,
- 'name': team.name,
- 'description': team.description,
- 'role_name': Team.role.get_name(team.role_id),
-
- 'repo_count': 0,
- 'member_count': 0,
-
- 'is_synced': False,
- }
-
- teams = {team.id: _team_view(team) for team in query}
- if not teams:
- # Just in case. Should ideally never happen.
- return []
-
- # Add repository permissions count.
- permission_tuples = (RepositoryPermission.select(RepositoryPermission.team,
- fn.Count(RepositoryPermission.id))
- .where(RepositoryPermission.team << teams.keys())
- .group_by(RepositoryPermission.team)
- .tuples())
-
- for perm_tuple in permission_tuples:
- teams[perm_tuple[0]]['repo_count'] = perm_tuple[1]
-
- # Add the member count.
- members_tuples = (TeamMember.select(TeamMember.team,
- fn.Count(TeamMember.id))
- .where(TeamMember.team << teams.keys())
- .group_by(TeamMember.team)
- .tuples())
-
- for member_tuple in members_tuples:
- teams[member_tuple[0]]['member_count'] = member_tuple[1]
-
- # Add syncing information.
- if has_external_auth:
- sync_query = TeamSync.select(TeamSync.team).where(TeamSync.team << teams.keys())
- for team_sync in sync_query:
- teams[team_sync.team_id]['is_synced'] = True
-
- return [AttrDict(team_info) for team_info in teams.values()]
+def get_teams_within_org(organization):
+ return Team.select().where(Team.organization == organization)
def get_user_teams_within_org(username, organization):
@@ -273,9 +198,8 @@ def get_user_teams_within_org(username, organization):
def list_organization_members_by_teams(organization):
query = (TeamMember
.select(Team, User)
- .join(Team)
- .switch(TeamMember)
- .join(User)
+ .annotate(Team)
+ .annotate(User)
.where(Team.organization == organization))
return query
@@ -287,13 +211,8 @@ def get_organization_team_member_invites(teamid):
def delete_team_email_invite(team, email):
- try:
- found = TeamMemberInvite.get(TeamMemberInvite.email == email, TeamMemberInvite.team == team)
- except TeamMemberInvite.DoesNotExist:
- return False
-
+ found = TeamMemberInvite.get(TeamMemberInvite.email == email, TeamMemberInvite.team == team)
found.delete_instance()
- return True
def delete_team_user_invite(team, user_obj):
@@ -306,10 +225,6 @@ def delete_team_user_invite(team, user_obj):
return True
-def lookup_team_invites_by_email(email):
- return TeamMemberInvite.select().where(TeamMemberInvite.email == email)
-
-
def lookup_team_invites(user_obj):
return TeamMemberInvite.select().where(TeamMemberInvite.user == user_obj)
@@ -352,168 +267,27 @@ def find_matching_team_invite(code, user_obj):
return found
-def find_organization_invites(organization, user_obj):
- """ Finds all organization team invites for the given user under the given organization. """
- invite_check = (TeamMemberInvite.user == user_obj)
- if user_obj.verified:
- invite_check = invite_check | (TeamMemberInvite.email == user_obj.email)
-
- query = (TeamMemberInvite
- .select()
- .join(Team)
- .where(invite_check, Team.organization == organization))
- return query
-
-
def confirm_team_invite(code, user_obj):
""" Confirms the given team invite code for the given user by adding the user to the team
and deleting the code. Raises a DataModelException if the code was not found or does
- not apply to the given user. If the user is invited to two or more teams under the
- same organization, they are automatically confirmed for all of them. """
+ not apply to the given user. """
found = find_matching_team_invite(code, user_obj)
- # Find all matching invitations for the user under the organization.
- code_found = False
- for invite in find_organization_invites(found.team.organization, user_obj):
- # Add the user to the team.
- try:
- code_found = True
- add_user_to_team(user_obj, invite.team)
- except UserAlreadyInTeam:
- # Ignore.
- pass
+ # If the invite is for a specific user, we have to confirm that here.
+ if found.user is not None and found.user != user_obj:
+ message = """This invite is intended for user "%s".
+ Please login to that account and try again.""" % found.user.username
+ raise DataModelException(message)
- # Delete the invite and return the team.
- invite.delete_instance()
-
- if not code_found:
- if found.user:
- message = """This invite is intended for user "%s".
- Please login to that account and try again.""" % found.user.username
- raise DataModelException(message)
- else:
- message = """This invite is intended for email "%s".
- Please login to that account and try again.""" % found.email
- raise DataModelException(message)
+ # Add the user to the team.
+ try:
+ add_user_to_team(user_obj, found.team)
+ except UserAlreadyInTeam:
+ # Ignore.
+ pass
+ # Delete the invite and return the team.
team = found.team
inviter = found.inviter
+ found.delete_instance()
return (team, inviter)
-
-
-def get_federated_team_member_mapping(team, login_service_name):
- """ Returns a dict of all federated IDs for all team members in the team whose users are
- bound to the login service within the given name. The dictionary is from federated service
- identifier (username) to their Quay User table ID.
- """
- login_service = LoginService.get(name=login_service_name)
-
- query = (FederatedLogin
- .select(FederatedLogin.service_ident, User.id)
- .join(User)
- .join(TeamMember)
- .join(Team)
- .where(Team.id == team, User.robot == False, FederatedLogin.service == login_service))
- return dict(query.tuples())
-
-
-def list_team_users(team):
- """ Returns an iterator of all the *users* found in a team. Does not include robots. """
- return (User
- .select()
- .join(TeamMember)
- .join(Team)
- .where(Team.id == team, User.robot == False))
-
-
-def list_team_robots(team):
- """ Returns an iterator of all the *robots* found in a team. Does not include users. """
- return (User
- .select()
- .join(TeamMember)
- .join(Team)
- .where(Team.id == team, User.robot == True))
-
-
-def set_team_syncing(team, login_service_name, config):
- """ Sets the given team to sync to the given service using the given config. """
- login_service = LoginService.get(name=login_service_name)
- return TeamSync.create(team=team, transaction_id='', service=login_service,
- config=json.dumps(config))
-
-
-def remove_team_syncing(orgname, teamname):
- """ Removes syncing on the team matching the given organization name and team name. """
- existing = get_team_sync_information(orgname, teamname)
- if existing:
- existing.delete_instance()
-
-
-def get_stale_team(stale_timespan):
- """ Returns a team that is setup to sync to an external group, and who has not been synced in
- now - stale_timespan. Returns None if none found.
- """
- stale_at = datetime.now() - stale_timespan
-
- try:
- candidates = (TeamSync
- .select(TeamSync.id)
- .where((TeamSync.last_updated <= stale_at) | (TeamSync.last_updated >> None))
- .limit(500)
- .alias('candidates'))
-
- found = (TeamSync
- .select(candidates.c.id)
- .from_(candidates)
- .order_by(db_random_func())
- .get())
-
- if found is None:
- return
-
- return TeamSync.select(TeamSync, Team).join(Team).where(TeamSync.id == found.id).get()
- except TeamSync.DoesNotExist:
- return None
-
-
-def get_team_sync_information(orgname, teamname):
- """ Returns the team syncing information for the team with the given name under the organization
- with the given name or None if none.
- """
- query = (TeamSync
- .select(TeamSync, LoginService)
- .join(Team)
- .join(User)
- .switch(TeamSync)
- .join(LoginService)
- .where(Team.name == teamname, User.organization == True, User.username == orgname))
-
- try:
- return query.get()
- except TeamSync.DoesNotExist:
- return None
-
-
-def update_sync_status(team_sync_info):
- """ Attempts to update the transaction ID and last updated time on a TeamSync object. If the
- transaction ID on the entry in the DB does not match that found on the object, this method
- returns False, which indicates another caller updated it first.
- """
- new_transaction_id = str(uuid.uuid4())
- query = (TeamSync
- .update(transaction_id=new_transaction_id, last_updated=datetime.now())
- .where(TeamSync.id == team_sync_info.id,
- TeamSync.transaction_id == team_sync_info.transaction_id))
- return query.execute() == 1
-
-
-def delete_members_not_present(team, member_id_set):
- """ Deletes all members of the given team that are not found in the member ID set. """
- with db_transaction():
- user_ids = set([u.id for u in list_team_users(team)])
- to_delete = list(user_ids - member_id_set)
- if to_delete:
- query = TeamMember.delete().where(TeamMember.team == team, TeamMember.user << to_delete)
- return query.execute()
-
- return 0
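
The removed validate_team_name above is self-contained enough to restate; a minimal sketch using the same regex and length bounds:

import re

MIN_TEAMNAME_LENGTH = 2
MAX_TEAMNAME_LENGTH = 255
VALID_TEAMNAME_REGEX = r'^([a-z0-9]+(?:[._-][a-z0-9]+)*)$'

def is_valid_team_name(teamname):
  # Pattern check first (lowercase alphanumerics separated by '.', '_' or '-'),
  # then the length bounds.
  if not re.match(VALID_TEAMNAME_REGEX, teamname):
    return False
  return MIN_TEAMNAME_LENGTH <= len(teamname) <= MAX_TEAMNAME_LENGTH

# is_valid_team_name('build-team') -> True; is_valid_team_name('A Team') -> False
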
diff --git a/data/model/test/__init__.py b/data/model/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/data/model/test/test_appspecifictoken.py b/data/model/test/test_appspecifictoken.py
deleted file mode 100644
index 96a7491f5..000000000
--- a/data/model/test/test_appspecifictoken.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from datetime import datetime, timedelta
-from mock import patch
-
-import pytest
-
-from data.model import config as _config
-from data import model
-from data.model.appspecifictoken import create_token, revoke_token, access_valid_token
-from data.model.appspecifictoken import gc_expired_tokens, get_expiring_tokens
-from data.model.appspecifictoken import get_full_token_string
-from util.timedeltastring import convert_to_timedelta
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('expiration', [
- (None),
- ('-1m'),
- ('-1d'),
- ('-1w'),
- ('10m'),
- ('10d'),
- ('10w'),
-])
-def test_gc(expiration, initialized_db):
- user = model.user.get_user('devtable')
-
- expiration_date = None
- is_expired = False
- if expiration:
- if expiration[0] == '-':
- is_expired = True
- expiration_date = datetime.now() - convert_to_timedelta(expiration[1:])
- else:
- expiration_date = datetime.now() + convert_to_timedelta(expiration)
-
- # Create a token.
- token = create_token(user, 'Some token', expiration=expiration_date)
-
- # GC tokens.
- gc_expired_tokens(timedelta(seconds=0))
-
- # Ensure the token was GCed if expired and not if it wasn't.
- assert (access_valid_token(get_full_token_string(token)) is None) == is_expired
-
-
-def test_access_token(initialized_db):
- user = model.user.get_user('devtable')
-
- # Create a token.
- token = create_token(user, 'Some token')
- assert token.last_accessed is None
-
- # Lookup the token.
- token = access_valid_token(get_full_token_string(token))
- assert token.last_accessed is not None
-
- # Revoke the token.
- revoke_token(token)
-
- # Ensure it cannot be accessed
- assert access_valid_token(get_full_token_string(token)) is None
-
-
-def test_expiring_soon(initialized_db):
- user = model.user.get_user('devtable')
-
- # Create some tokens.
- create_token(user, 'Some token')
- exp_token = create_token(user, 'Some expiring token', datetime.now() + convert_to_timedelta('1d'))
- create_token(user, 'Some other token', expiration=datetime.now() + convert_to_timedelta('2d'))
-
- # Get the token expiring soon.
- expiring_soon = get_expiring_tokens(user, convert_to_timedelta('25h'))
- assert expiring_soon
- assert len(expiring_soon) == 1
- assert expiring_soon[0].id == exp_token.id
-
- expiring_soon = get_expiring_tokens(user, convert_to_timedelta('49h'))
- assert expiring_soon
- assert len(expiring_soon) == 2
-
-
-@pytest.fixture(scope='function')
-def app_config():
- with patch.dict(_config.app_config, {}, clear=True):
- yield _config.app_config
-
-@pytest.mark.parametrize('expiration', [
- (None),
- ('10m'),
- ('10d'),
- ('10w'),
-])
-@pytest.mark.parametrize('default_expiration', [
- (None),
- ('10m'),
- ('10d'),
- ('10w'),
-])
-def test_create_access_token(expiration, default_expiration, initialized_db, app_config):
- user = model.user.get_user('devtable')
- expiration_date = datetime.now() + convert_to_timedelta(expiration) if expiration else None
- with patch.dict(_config.app_config, {}, clear=True):
- app_config['APP_SPECIFIC_TOKEN_EXPIRATION'] = default_expiration
- if expiration:
- exp_token = create_token(user, 'Some token', expiration=expiration_date)
- assert exp_token.expiration == expiration_date
- else:
- exp_token = create_token(user, 'Some token')
- assert (exp_token.expiration is None) == (default_expiration is None)
-
-
-@pytest.mark.parametrize('invalid_token', [
- '',
- 'foo',
- 'a' * 40,
- 'b' * 40,
- '%s%s' % ('b' * 40, 'a' * 40),
- '%s%s' % ('a' * 39, 'b' * 40),
- '%s%s' % ('a' * 40, 'b' * 39),
- '%s%s' % ('a' * 40, 'b' * 41),
-])
-def test_invalid_access_token(invalid_token, initialized_db):
- user = model.user.get_user('devtable')
- token = access_valid_token(invalid_token)
- assert token is None
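
The deleted test_gc above encodes its parametrized expirations as timedelta strings, with a leading '-' marking a token that should already be expired. A small restatement of that convention (convert_to_timedelta from util.timedeltastring is passed in as to_timedelta rather than imported, to keep the sketch self-contained):

from datetime import datetime

def expiration_from_param(expiration, to_timedelta):
  # Mirrors the fixture logic: None means no expiration, '-<span>' means the
  # token expired <span> ago, and '<span>' means it expires <span> from now.
  # Returns (expiration_date, is_expired).
  if expiration is None:
    return None, False
  if expiration.startswith('-'):
    return datetime.now() - to_timedelta(expiration[1:]), True
  return datetime.now() + to_timedelta(expiration), False
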
diff --git a/data/model/test/test_basequery.py b/data/model/test/test_basequery.py
deleted file mode 100644
index 84e248327..000000000
--- a/data/model/test/test_basequery.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import pytest
-
-from peewee import JOIN
-from playhouse.test_utils import assert_query_count
-
-from data.database import Repository, RepositoryPermission, TeamMember, Namespace
-from data.model._basequery import filter_to_repos_for_user
-from data.model.organization import get_admin_users
-from data.model.user import get_namespace_user
-from util.names import parse_robot_username
-
-from test.fixtures import *
-
-def _is_team_member(team, user):
- return user.id in [member.user_id for member in
- TeamMember.select().where(TeamMember.team == team)]
-
-def _get_visible_repositories_for_user(user, repo_kind='image', include_public=False,
- namespace=None):
- """ Returns all repositories directly visible to the given user, by either repo permission,
- or the user being the admin of a namespace.
- """
- for repo in Repository.select():
- if repo_kind is not None and repo.kind.name != repo_kind:
- continue
-
- if namespace is not None and repo.namespace_user.username != namespace:
- continue
-
- if include_public and repo.visibility.name == 'public':
- yield repo
- continue
-
- # Direct repo permission.
- try:
- RepositoryPermission.get(repository=repo, user=user).get()
- yield repo
- continue
- except RepositoryPermission.DoesNotExist:
- pass
-
- # Team permission.
- found_in_team = False
- for perm in RepositoryPermission.select().where(RepositoryPermission.repository == repo):
- if perm.team and _is_team_member(perm.team, user):
- found_in_team = True
- break
-
- if found_in_team:
- yield repo
- continue
-
- # Org namespace admin permission.
- if user in get_admin_users(repo.namespace_user):
- yield repo
- continue
-
-
-@pytest.mark.parametrize('username', [
- 'devtable',
- 'devtable+dtrobot',
- 'public',
- 'reader',
-])
-@pytest.mark.parametrize('include_public', [
- True,
- False
-])
-@pytest.mark.parametrize('filter_to_namespace', [
- True,
- False
-])
-@pytest.mark.parametrize('repo_kind', [
- None,
- 'image',
- 'application',
-])
-def test_filter_repositories(username, include_public, filter_to_namespace, repo_kind,
- initialized_db):
- namespace = username if filter_to_namespace else None
- if '+' in username and filter_to_namespace:
- namespace, _ = parse_robot_username(username)
-
- user = get_namespace_user(username)
- query = (Repository
- .select()
- .distinct()
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .switch(Repository)
- .join(RepositoryPermission, JOIN.LEFT_OUTER))
-
- # Prime the cache.
- Repository.kind.get_id('image')
-
- with assert_query_count(1):
- found = list(filter_to_repos_for_user(query, user.id,
- namespace=namespace,
- include_public=include_public,
- repo_kind=repo_kind))
-
- expected = list(_get_visible_repositories_for_user(user,
- repo_kind=repo_kind,
- namespace=namespace,
- include_public=include_public))
-
- assert len(found) == len(expected)
- assert {r.id for r in found} == {r.id for r in expected}
diff --git a/data/model/test/test_build.py b/data/model/test/test_build.py
deleted file mode 100644
index c43d6e683..000000000
--- a/data/model/test/test_build.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import pytest
-
-from mock import patch
-
-from data.database import BUILD_PHASE, RepositoryBuildTrigger, RepositoryBuild
-from data.model.build import (update_trigger_disable_status, create_repository_build,
- get_repository_build, update_phase_then_close)
-from test.fixtures import *
-
-TEST_FAIL_THRESHOLD = 5
-TEST_INTERNAL_ERROR_THRESHOLD = 2
-
-@pytest.mark.parametrize('starting_failure_count, starting_error_count, status, expected_reason', [
- (0, 0, BUILD_PHASE.COMPLETE, None),
- (10, 10, BUILD_PHASE.COMPLETE, None),
-
- (TEST_FAIL_THRESHOLD - 1, TEST_INTERNAL_ERROR_THRESHOLD - 1, BUILD_PHASE.COMPLETE, None),
- (TEST_FAIL_THRESHOLD - 1, 0, BUILD_PHASE.ERROR, 'successive_build_failures'),
- (0, TEST_INTERNAL_ERROR_THRESHOLD - 1, BUILD_PHASE.INTERNAL_ERROR,
- 'successive_build_internal_errors'),
-])
-def test_update_trigger_disable_status(starting_failure_count, starting_error_count, status,
- expected_reason, initialized_db):
- test_config = {
- 'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': TEST_FAIL_THRESHOLD,
- 'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': TEST_INTERNAL_ERROR_THRESHOLD,
- }
-
- trigger = model.build.list_build_triggers('devtable', 'building')[0]
- trigger.successive_failure_count = starting_failure_count
- trigger.successive_internal_error_count = starting_error_count
- trigger.enabled = True
- trigger.save()
-
- with patch('data.model.config.app_config', test_config):
- update_trigger_disable_status(trigger, status)
- updated_trigger = RepositoryBuildTrigger.get(uuid=trigger.uuid)
-
- assert updated_trigger.enabled == (expected_reason is None)
-
- if expected_reason is not None:
- assert updated_trigger.disabled_reason.name == expected_reason
- else:
- assert updated_trigger.disabled_reason is None
- assert updated_trigger.successive_failure_count == 0
- assert updated_trigger.successive_internal_error_count == 0
-
-
-def test_archivable_build_logs(initialized_db):
- # Make sure there are no archivable logs.
- result = model.build.get_archivable_build()
- assert result is None
-
- # Add a build that cannot (yet) be archived.
- repo = model.repository.get_repository('devtable', 'simple')
- token = model.token.create_access_token(repo, 'write')
- created = RepositoryBuild.create(repository=repo, access_token=token,
- phase=model.build.BUILD_PHASE.WAITING,
- logs_archived=False, job_config='{}',
- display_name='')
-
- # Make sure there are no archivable logs.
- result = model.build.get_archivable_build()
- assert result is None
-
- # Change the build to being complete.
- created.phase = model.build.BUILD_PHASE.COMPLETE
- created.save()
-
- # Make sure we now find an archivable build.
- result = model.build.get_archivable_build()
- assert result.id == created.id
-
-
-def test_update_build_phase(initialized_db):
- build = create_build(model.repository.get_repository("devtable", "building"))
-
- repo_build = get_repository_build(build.uuid)
-
- assert repo_build.phase == BUILD_PHASE.WAITING
- assert update_phase_then_close(build.uuid, BUILD_PHASE.COMPLETE)
-
- repo_build = get_repository_build(build.uuid)
- assert repo_build.phase == BUILD_PHASE.COMPLETE
-
- repo_build.delete_instance()
- assert not update_phase_then_close(repo_build.uuid, BUILD_PHASE.PULLING)
-
-
-def create_build(repository):
- new_token = model.token.create_access_token(repository, 'write', 'build-worker')
- repo = 'ci.devtable.com:5000/%s/%s' % (repository.namespace_user.username, repository.name)
- job_config = {
- 'repository': repo,
- 'docker_tags': ['latest'],
- 'build_subdir': '',
- 'trigger_metadata': {
- 'commit': '3482adc5822c498e8f7db2e361e8d57b3d77ddd9',
- 'ref': 'refs/heads/master',
- 'default_branch': 'master'
- }
- }
- build = create_repository_build(repository, new_token, job_config,
- '68daeebd-a5b9-457f-80a0-4363b882f8ea',
- "build_name")
- build.save()
- return build
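
The deleted test_update_trigger_disable_status above pins down the disable behaviour only through its expectations, so the following is an inferred sketch rather than the real implementation: a successful build resets both counters, while failed or internally-errored builds count toward their respective configurable thresholds and disable the trigger with a matching reason once a threshold is reached. The phase strings here are stand-ins for the BUILD_PHASE constants.

def apply_build_outcome(trigger, phase, fail_threshold=5, internal_error_threshold=2):
  # trigger is assumed to be an object with successive_failure_count,
  # successive_internal_error_count, enabled and disabled_reason attributes.
  if phase == 'complete':
    trigger.successive_failure_count = 0
    trigger.successive_internal_error_count = 0
    return
  if phase == 'error':
    trigger.successive_failure_count += 1
    if trigger.successive_failure_count >= fail_threshold:
      trigger.enabled = False
      trigger.disabled_reason = 'successive_build_failures'
  elif phase == 'internal_error':
    trigger.successive_internal_error_count += 1
    if trigger.successive_internal_error_count >= internal_error_threshold:
      trigger.enabled = False
      trigger.disabled_reason = 'successive_build_internal_errors'
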
diff --git a/data/model/test/test_gc.py b/data/model/test/test_gc.py
deleted file mode 100644
index 79d13779e..000000000
--- a/data/model/test/test_gc.py
+++ /dev/null
@@ -1,725 +0,0 @@
-import hashlib
-import pytest
-
-from datetime import datetime, timedelta
-
-from mock import patch
-
-from app import storage, docker_v2_signing_key
-
-from contextlib import contextmanager
-from playhouse.test_utils import assert_query_count
-
-from freezegun import freeze_time
-
-from data import model, database
-from data.database import (Image, ImageStorage, DerivedStorageForImage, Label, TagManifestLabel,
- ApprBlob, Manifest, TagManifestToManifest, ManifestBlob, Tag,
- TagToRepositoryTag)
-from data.model.oci.test.test_oci_manifest import create_manifest_for_testing
-from image.docker.schema1 import DockerSchema1ManifestBuilder
-from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
-from image.docker.schemas import parse_manifest_from_bytes
-from util.bytes import Bytes
-
-from test.fixtures import *
-
-
-ADMIN_ACCESS_USER = 'devtable'
-PUBLIC_USER = 'public'
-
-REPO = 'somerepo'
-
-def _set_tag_expiration_policy(namespace, expiration_s):
- namespace_user = model.user.get_user(namespace)
- model.user.change_user_tag_expiration(namespace_user, expiration_s)
-
-
-@pytest.fixture()
-def default_tag_policy(initialized_db):
- _set_tag_expiration_policy(ADMIN_ACCESS_USER, 0)
- _set_tag_expiration_policy(PUBLIC_USER, 0)
-
-
-def create_image(docker_image_id, repository_obj, username):
- preferred = storage.preferred_locations[0]
- image = model.image.find_create_or_link_image(docker_image_id, repository_obj, username, {},
- preferred)
- image.storage.uploading = False
- image.storage.save()
-
- # Create derived images as well.
- model.image.find_or_create_derived_storage(image, 'squash', preferred)
- model.image.find_or_create_derived_storage(image, 'aci', preferred)
-
- # Add some torrent info.
- try:
- database.TorrentInfo.get(storage=image.storage)
- except database.TorrentInfo.DoesNotExist:
- model.storage.save_torrent_info(image.storage, 1, 'helloworld')
-
- # Add some additional placements to the image.
- for location_name in ['local_eu']:
- location = database.ImageStorageLocation.get(name=location_name)
-
- try:
- database.ImageStoragePlacement.get(location=location, storage=image.storage)
- except:
- continue
-
- database.ImageStoragePlacement.create(location=location, storage=image.storage)
-
- return image.storage
-
-
-def store_tag_manifest(namespace, repo_name, tag_name, image_id):
- builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
- storage_id_map = {}
- try:
- image_storage = ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).get()
- builder.add_layer(image_storage.content_checksum, '{"id": "foo"}')
- storage_id_map[image_storage.content_checksum] = image_storage.id
- except ImageStorage.DoesNotExist:
- pass
-
- manifest = builder.build(docker_v2_signing_key)
- manifest_row, _ = model.tag.store_tag_manifest_for_testing(namespace, repo_name, tag_name,
- manifest, image_id, storage_id_map)
- return manifest_row
-
-
-def create_repository(namespace=ADMIN_ACCESS_USER, name=REPO, **kwargs):
- user = model.user.get_user(namespace)
- repo = model.repository.create_repository(namespace, name, user)
-
- # Populate the repository with the tags.
- image_map = {}
- for tag_name in kwargs:
- image_ids = kwargs[tag_name]
- parent = None
-
- for image_id in image_ids:
- if not image_id in image_map:
- image_map[image_id] = create_image(image_id, repo, namespace)
-
- v1_metadata = {
- 'id': image_id,
- }
- if parent is not None:
- v1_metadata['parent'] = parent.docker_image_id
-
- # Set the ancestors for the image.
- parent = model.image.set_image_metadata(image_id, namespace, name, '', '', '', v1_metadata,
- parent=parent)
-
- # Set the tag for the image.
- tag_manifest = store_tag_manifest(namespace, name, tag_name, image_ids[-1])
-
- # Add some labels to the tag.
- model.label.create_manifest_label(tag_manifest, 'foo', 'bar', 'manifest')
- model.label.create_manifest_label(tag_manifest, 'meh', 'grah', 'manifest')
-
- return repo
-
-
-def gc_now(repository):
- assert model.gc.garbage_collect_repo(repository)
-
-
-def delete_tag(repository, tag, perform_gc=True, expect_gc=True):
- model.tag.delete_tag(repository.namespace_user.username, repository.name, tag)
- if perform_gc:
- assert model.gc.garbage_collect_repo(repository) == expect_gc
-
-
-def move_tag(repository, tag, docker_image_id, expect_gc=True):
- model.tag.create_or_update_tag(repository.namespace_user.username, repository.name, tag,
- docker_image_id)
- assert model.gc.garbage_collect_repo(repository) == expect_gc
-
-
-def assert_not_deleted(repository, *args):
- for docker_image_id in args:
- assert model.image.get_image_by_id(repository.namespace_user.username, repository.name,
- docker_image_id)
-
-
-def assert_deleted(repository, *args):
- for docker_image_id in args:
- try:
- # Verify the image is missing when accessed by the repository.
- model.image.get_image_by_id(repository.namespace_user.username, repository.name,
- docker_image_id)
- except model.DataModelException:
- return
-
- assert False, 'Expected image %s to be deleted' % docker_image_id
-
-
-def _get_dangling_storage_count():
- storage_ids = set([current.id for current in ImageStorage.select()])
- referenced_by_image = set([image.storage_id for image in Image.select()])
- referenced_by_manifest = set([blob.blob_id for blob in ManifestBlob.select()])
- referenced_by_derived = set([derived.derivative_id
- for derived in DerivedStorageForImage.select()])
- return len(storage_ids - referenced_by_image - referenced_by_derived - referenced_by_manifest)
-
-
-def _get_dangling_label_count():
- return len(_get_dangling_labels())
-
-
-def _get_dangling_labels():
- label_ids = set([current.id for current in Label.select()])
- referenced_by_manifest = set([mlabel.label_id for mlabel in TagManifestLabel.select()])
- return label_ids - referenced_by_manifest
-
-
-def _get_dangling_manifest_count():
- manifest_ids = set([current.id for current in Manifest.select()])
- referenced_by_tag_manifest = set([tmt.manifest_id for tmt in TagManifestToManifest.select()])
- return len(manifest_ids - referenced_by_tag_manifest)
-
-
-
-@contextmanager
-def assert_gc_integrity(expect_storage_removed=True, check_oci_tags=True):
- """ Specialized assertion for ensuring that GC cleans up all dangling storages
- and labels, invokes the callback for images removed and doesn't invoke the
- callback for images *not* removed.
- """
- # Add a callback for when images are removed.
- removed_image_storages = []
- model.config.register_image_cleanup_callback(removed_image_storages.extend)
-
- # Store the number of dangling storages and labels.
- existing_storage_count = _get_dangling_storage_count()
- existing_label_count = _get_dangling_label_count()
- existing_manifest_count = _get_dangling_manifest_count()
- yield
-
- # Ensure the number of dangling storages, manifests and labels has not changed.
- updated_storage_count = _get_dangling_storage_count()
- assert updated_storage_count == existing_storage_count
-
- updated_label_count = _get_dangling_label_count()
- assert updated_label_count == existing_label_count, _get_dangling_labels()
-
- updated_manifest_count = _get_dangling_manifest_count()
- assert updated_manifest_count == existing_manifest_count
-
- # Ensure that for each call to the image+storage cleanup callback, the image and its
- # storage is not found *anywhere* in the database.
- for removed_image_and_storage in removed_image_storages:
- with pytest.raises(Image.DoesNotExist):
- Image.get(id=removed_image_and_storage.id)
-
- # Ensure that image storages are only removed if not shared.
- shared = Image.select().where(Image.storage == removed_image_and_storage.storage_id).count()
- if shared == 0:
- shared = (ManifestBlob
- .select()
- .where(ManifestBlob.blob == removed_image_and_storage.storage_id)
- .count())
-
- if shared == 0:
- with pytest.raises(ImageStorage.DoesNotExist):
- ImageStorage.get(id=removed_image_and_storage.storage_id)
-
- with pytest.raises(ImageStorage.DoesNotExist):
- ImageStorage.get(uuid=removed_image_and_storage.storage.uuid)
-
- # Ensure all CAS storage is in the storage engine.
- preferred = storage.preferred_locations[0]
- for storage_row in ImageStorage.select():
- if storage_row.cas_path:
- storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum))
-
- for blob_row in ApprBlob.select():
- storage.get_content({preferred}, storage.blob_path(blob_row.digest))
-
-  # Ensure there are no dangling OCI tags.
- if check_oci_tags:
- oci_tags = {t.id for t in Tag.select()}
- referenced_oci_tags = {t.tag_id for t in TagToRepositoryTag.select()}
- assert not oci_tags - referenced_oci_tags
-
- # Ensure all tags have valid manifests.
- for manifest in {t.manifest for t in Tag.select()}:
- # Ensure that the manifest's blobs all exist.
- found_blobs = {b.blob.content_checksum
- for b in ManifestBlob.select().where(ManifestBlob.manifest == manifest)}
-
- parsed = parse_manifest_from_bytes(Bytes.for_string_or_unicode(manifest.manifest_bytes),
- manifest.media_type.name)
- assert set(parsed.local_blob_digests) == found_blobs
-
-
-def test_has_garbage(default_tag_policy, initialized_db):
- """ Remove all existing repositories, then add one without garbage, check, then add one with
- garbage, and check again.
- """
- # Delete all existing repos.
- for repo in database.Repository.select().order_by(database.Repository.id):
- assert model.gc.purge_repository(repo.namespace_user.username, repo.name)
-
- # Change the time machine expiration on the namespace.
- (database.User
- .update(removed_tag_expiration_s=1000000000)
- .where(database.User.username == ADMIN_ACCESS_USER)
- .execute())
-
- # Create a repository without any garbage.
- repository = create_repository(latest=['i1', 'i2', 'i3'])
-
- # Ensure that no repositories are returned by the has garbage check.
- assert model.repository.find_repository_with_garbage(1000000000) is None
-
- # Delete a tag.
- delete_tag(repository, 'latest', perform_gc=False)
-
- # There should still not be any repositories with garbage, due to time machine.
- assert model.repository.find_repository_with_garbage(1000000000) is None
-
- # Change the time machine expiration on the namespace.
- (database.User
- .update(removed_tag_expiration_s=0)
- .where(database.User.username == ADMIN_ACCESS_USER)
- .execute())
-
- # Now we should find the repository for GC.
- repository = model.repository.find_repository_with_garbage(0)
- assert repository is not None
- assert repository.name == REPO
-
- # GC the repository.
- assert model.gc.garbage_collect_repo(repository)
-
- # There should now be no repositories with garbage.
- assert model.repository.find_repository_with_garbage(0) is None
-
-
-def test_find_garbage_policy_functions(default_tag_policy, initialized_db):
- with assert_query_count(1):
- one_policy = model.repository.get_random_gc_policy()
- all_policies = model.repository._get_gc_expiration_policies()
- assert one_policy in all_policies
-
-
-def test_one_tag(default_tag_policy, initialized_db):
- """ Create a repository with a single tag, then remove that tag and verify that the repository
- is now empty. """
- with assert_gc_integrity():
- repository = create_repository(latest=['i1', 'i2', 'i3'])
- delete_tag(repository, 'latest')
- assert_deleted(repository, 'i1', 'i2', 'i3')
-
-
-def test_two_tags_unshared_images(default_tag_policy, initialized_db):
- """ Repository has two tags with no shared images between them. """
- with assert_gc_integrity():
- repository = create_repository(latest=['i1', 'i2', 'i3'], other=['f1', 'f2'])
- delete_tag(repository, 'latest')
- assert_deleted(repository, 'i1', 'i2', 'i3')
- assert_not_deleted(repository, 'f1', 'f2')
-
-
-def test_two_tags_shared_images(default_tag_policy, initialized_db):
- """ Repository has two tags with shared images. Deleting the tag should only remove the
- unshared images.
- """
- with assert_gc_integrity():
- repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
- delete_tag(repository, 'latest')
- assert_deleted(repository, 'i2', 'i3')
- assert_not_deleted(repository, 'i1', 'f1')
-
-
-def test_unrelated_repositories(default_tag_policy, initialized_db):
- """ Two repositories with different images. Removing the tag from one leaves the other's
- images intact.
- """
- with assert_gc_integrity():
- repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1')
- repository2 = create_repository(latest=['j1', 'j2', 'j3'], name='repo2')
-
- delete_tag(repository1, 'latest')
-
- assert_deleted(repository1, 'i1', 'i2', 'i3')
- assert_not_deleted(repository2, 'j1', 'j2', 'j3')
-
-
-def test_related_repositories(default_tag_policy, initialized_db):
- """ Two repositories with shared images. Removing the tag from one leaves the other's
- images intact.
- """
- with assert_gc_integrity():
- repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1')
- repository2 = create_repository(latest=['i1', 'i2', 'j1'], name='repo2')
-
- delete_tag(repository1, 'latest')
-
- assert_deleted(repository1, 'i3')
- assert_not_deleted(repository2, 'i1', 'i2', 'j1')
-
-
-def test_inaccessible_repositories(default_tag_policy, initialized_db):
- """ Two repositories under different namespaces should result in the images being deleted
- but not completely removed from the database.
- """
- with assert_gc_integrity():
- repository1 = create_repository(namespace=ADMIN_ACCESS_USER, latest=['i1', 'i2', 'i3'])
- repository2 = create_repository(namespace=PUBLIC_USER, latest=['i1', 'i2', 'i3'])
-
- delete_tag(repository1, 'latest')
- assert_deleted(repository1, 'i1', 'i2', 'i3')
- assert_not_deleted(repository2, 'i1', 'i2', 'i3')
-
-
-def test_many_multiple_shared_images(default_tag_policy, initialized_db):
- """ Repository has multiple tags with shared images. Delete all but one tag.
- """
- with assert_gc_integrity():
- repository = create_repository(latest=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j0'],
- master=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1'])
-
- # Delete tag latest. Should only delete j0, since it is not shared.
- delete_tag(repository, 'latest')
-
- assert_deleted(repository, 'j0')
- assert_not_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1')
-
- # Delete tag master. Should delete the rest of the images.
- delete_tag(repository, 'master')
-
- assert_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1')
-
-
-def test_multiple_shared_images(default_tag_policy, initialized_db):
- """ Repository has multiple tags with shared images. Selectively deleting the tags, and
- verifying at each step.
- """
- with assert_gc_integrity():
- repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'],
- third=['t1', 't2', 't3'], fourth=['i1', 'f1'])
-
- # Current state:
- # latest -> i3->i2->i1
- # other -> f2->f1->i1
- # third -> t3->t2->t1
- # fourth -> f1->i1
-
- # Delete tag other. Should delete f2, since it is not shared.
- delete_tag(repository, 'other')
- assert_deleted(repository, 'f2')
- assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1')
-
- # Current state:
- # latest -> i3->i2->i1
- # third -> t3->t2->t1
- # fourth -> f1->i1
-
- # Move tag fourth to i3. This should remove f1 since it is no longer referenced.
- move_tag(repository, 'fourth', 'i3')
- assert_deleted(repository, 'f1')
- assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3')
-
- # Current state:
- # latest -> i3->i2->i1
- # third -> t3->t2->t1
- # fourth -> i3->i2->i1
-
- # Delete tag 'latest'. This should do nothing since fourth is on the same branch.
- delete_tag(repository, 'latest')
- assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3')
-
- # Current state:
- # third -> t3->t2->t1
- # fourth -> i3->i2->i1
-
- # Delete tag 'third'. This should remove t1->t3.
- delete_tag(repository, 'third')
- assert_deleted(repository, 't1', 't2', 't3')
- assert_not_deleted(repository, 'i1', 'i2', 'i3')
-
- # Current state:
- # fourth -> i3->i2->i1
-
- # Add tag to i1.
- move_tag(repository, 'newtag', 'i1', expect_gc=False)
- assert_not_deleted(repository, 'i1', 'i2', 'i3')
-
- # Current state:
- # fourth -> i3->i2->i1
- # newtag -> i1
-
- # Delete tag 'fourth'. This should remove i2 and i3.
- delete_tag(repository, 'fourth')
- assert_deleted(repository, 'i2', 'i3')
- assert_not_deleted(repository, 'i1')
-
- # Current state:
- # newtag -> i1
-
- # Delete tag 'newtag'. This should remove the remaining image.
- delete_tag(repository, 'newtag')
- assert_deleted(repository, 'i1')
-
- # Current state:
- # (Empty)
-
-
-def test_empty_gc(default_tag_policy, initialized_db):
- with assert_gc_integrity(expect_storage_removed=False):
- repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'],
- third=['t1', 't2', 't3'], fourth=['i1', 'f1'])
-
- assert not model.gc.garbage_collect_repo(repository)
- assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1', 'f2')
-
-
-def test_time_machine_no_gc(default_tag_policy, initialized_db):
- """ Repository has two tags with shared images. Deleting the tag should not remove any images
- """
- with assert_gc_integrity(expect_storage_removed=False):
- repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
- _set_tag_expiration_policy(repository.namespace_user.username, 60*60*24)
-
- delete_tag(repository, 'latest', expect_gc=False)
- assert_not_deleted(repository, 'i2', 'i3')
- assert_not_deleted(repository, 'i1', 'f1')
-
-
-def test_time_machine_gc(default_tag_policy, initialized_db):
- """ Repository has two tags with shared images. Deleting the second tag should cause the images
- for the first deleted tag to gc.
- """
- now = datetime.utcnow()
-
- with assert_gc_integrity():
- with freeze_time(now):
- repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
-
- _set_tag_expiration_policy(repository.namespace_user.username, 1)
-
- delete_tag(repository, 'latest', expect_gc=False)
- assert_not_deleted(repository, 'i2', 'i3')
- assert_not_deleted(repository, 'i1', 'f1')
-
- with freeze_time(now + timedelta(seconds=2)):
- # This will cause the images associated with latest to gc
- delete_tag(repository, 'other')
- assert_deleted(repository, 'i2', 'i3')
- assert_not_deleted(repository, 'i1', 'f1')
-
-
-def test_images_shared_storage(default_tag_policy, initialized_db):
- """ Repository with two tags, both with the same shared storage. Deleting the first
- tag should delete the first image, but *not* its storage.
- """
- with assert_gc_integrity(expect_storage_removed=False):
- repository = create_repository()
-
- # Add two tags, each with their own image, but with the same storage.
- image_storage = model.storage.create_v1_storage(storage.preferred_locations[0])
-
- first_image = Image.create(docker_image_id='i1',
- repository=repository, storage=image_storage,
- ancestors='/')
-
- second_image = Image.create(docker_image_id='i2',
- repository=repository, storage=image_storage,
- ancestors='/')
-
- store_tag_manifest(repository.namespace_user.username, repository.name,
- 'first', first_image.docker_image_id)
-
- store_tag_manifest(repository.namespace_user.username, repository.name,
- 'second', second_image.docker_image_id)
-
- # Delete the first tag.
- delete_tag(repository, 'first')
- assert_deleted(repository, 'i1')
- assert_not_deleted(repository, 'i2')
-
-
-def test_image_with_cas(default_tag_policy, initialized_db):
- """ A repository with a tag pointing to an image backed by CAS. Deleting and GCing the tag
- should result in the storage and its CAS data being removed.
- """
- with assert_gc_integrity(expect_storage_removed=True):
- repository = create_repository()
-
- # Create an image storage record under CAS.
- content = 'hello world'
- digest = 'sha256:' + hashlib.sha256(content).hexdigest()
- preferred = storage.preferred_locations[0]
- storage.put_content({preferred}, storage.blob_path(digest), content)
-
- image_storage = database.ImageStorage.create(content_checksum=digest, uploading=False)
- location = database.ImageStorageLocation.get(name=preferred)
- database.ImageStoragePlacement.create(location=location, storage=image_storage)
-
- # Ensure the CAS path exists.
- assert storage.exists({preferred}, storage.blob_path(digest))
-
- # Create the image and the tag.
- first_image = Image.create(docker_image_id='i1',
- repository=repository, storage=image_storage,
- ancestors='/')
-
- store_tag_manifest(repository.namespace_user.username, repository.name,
- 'first', first_image.docker_image_id)
-
- assert_not_deleted(repository, 'i1')
-
- # Delete the tag.
- delete_tag(repository, 'first')
- assert_deleted(repository, 'i1')
-
- # Ensure the CAS path is gone.
- assert not storage.exists({preferred}, storage.blob_path(digest))
-
-
-def test_images_shared_cas(default_tag_policy, initialized_db):
- """ A repository, each two tags, pointing to the same image, which has image storage
- with the same *CAS path*, but *distinct records*. Deleting the first tag should delete the
- first image, and its storage, but not the file in storage, as it shares its CAS path.
- """
- with assert_gc_integrity(expect_storage_removed=True):
- repository = create_repository()
-
- # Create two image storage records with the same content checksum.
- content = 'hello world'
- digest = 'sha256:' + hashlib.sha256(content).hexdigest()
- preferred = storage.preferred_locations[0]
- storage.put_content({preferred}, storage.blob_path(digest), content)
-
- is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
- is2 = database.ImageStorage.create(content_checksum=digest, uploading=False)
-
- location = database.ImageStorageLocation.get(name=preferred)
-
- database.ImageStoragePlacement.create(location=location, storage=is1)
- database.ImageStoragePlacement.create(location=location, storage=is2)
-
- # Ensure the CAS path exists.
- assert storage.exists({preferred}, storage.blob_path(digest))
-
- # Create two images in the repository, and two tags, each pointing to one of the storages.
- first_image = Image.create(docker_image_id='i1',
- repository=repository, storage=is1,
- ancestors='/')
-
- second_image = Image.create(docker_image_id='i2',
- repository=repository, storage=is2,
- ancestors='/')
-
- store_tag_manifest(repository.namespace_user.username, repository.name,
- 'first', first_image.docker_image_id)
-
- store_tag_manifest(repository.namespace_user.username, repository.name,
- 'second', second_image.docker_image_id)
-
- assert_not_deleted(repository, 'i1', 'i2')
-
- # Delete the first tag.
- delete_tag(repository, 'first')
- assert_deleted(repository, 'i1')
- assert_not_deleted(repository, 'i2')
-
- # Ensure the CAS path still exists.
- assert storage.exists({preferred}, storage.blob_path(digest))
-
-
-def test_images_shared_cas_with_new_blob_table(default_tag_policy, initialized_db):
- """ A repository with a tag and image that shares its CAS path with a record in the new Blob
- table. Deleting the first tag should delete the first image, and its storage, but not the
- file in storage, as it shares its CAS path with the blob row.
- """
- with assert_gc_integrity(expect_storage_removed=True):
- repository = create_repository()
-
- # Create two image storage records with the same content checksum.
- content = 'hello world'
- digest = 'sha256:' + hashlib.sha256(content).hexdigest()
- preferred = storage.preferred_locations[0]
- storage.put_content({preferred}, storage.blob_path(digest), content)
-
- media_type = database.MediaType.get(name='text/plain')
-
- is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
- database.ApprBlob.create(digest=digest, size=0, media_type=media_type)
-
- location = database.ImageStorageLocation.get(name=preferred)
- database.ImageStoragePlacement.create(location=location, storage=is1)
-
- # Ensure the CAS path exists.
- assert storage.exists({preferred}, storage.blob_path(digest))
-
- # Create the image in the repository, and the tag.
- first_image = Image.create(docker_image_id='i1',
- repository=repository, storage=is1,
- ancestors='/')
-
- store_tag_manifest(repository.namespace_user.username, repository.name,
- 'first', first_image.docker_image_id)
-
- assert_not_deleted(repository, 'i1')
-
- # Delete the tag.
- delete_tag(repository, 'first')
- assert_deleted(repository, 'i1')
-
- # Ensure the CAS path still exists, as it is referenced by the Blob table
- assert storage.exists({preferred}, storage.blob_path(digest))
-
-
-def test_purge_repo(app):
- """ Test that app registers delete_metadata function on repository deletions """
- with assert_gc_integrity():
- with patch('app.tuf_metadata_api') as mock_tuf:
- model.gc.purge_repository("ns", "repo")
- mock_tuf.delete_metadata.assert_called_with("ns", "repo")
-
-
-def test_super_long_image_chain_gc(app, default_tag_policy):
- """ Test that a super long chain of images all gets properly GCed. """
- with assert_gc_integrity():
- images = ['i%s' % i for i in range(0, 100)]
- repository = create_repository(latest=images)
- delete_tag(repository, 'latest')
-
- # Ensure the repository is now empty.
- assert_deleted(repository, *images)
-
-
-def test_manifest_v2_shared_config_and_blobs(app, default_tag_policy):
- """ Test that GCing a tag that refers to a V2 manifest with the same config and some shared
- blobs as another manifest ensures that the config blob and shared blob are NOT GCed.
- """
- repo = model.repository.create_repository('devtable', 'newrepo', None)
- manifest1, built1 = create_manifest_for_testing(repo, differentiation_field='1',
- include_shared_blob=True)
- manifest2, built2 = create_manifest_for_testing(repo, differentiation_field='2',
- include_shared_blob=True)
-
- assert set(built1.local_blob_digests).intersection(built2.local_blob_digests)
- assert built1.config.digest == built2.config.digest
-
- # Create tags pointing to the manifests.
- model.oci.tag.retarget_tag('tag1', manifest1)
- model.oci.tag.retarget_tag('tag2', manifest2)
-
- with assert_gc_integrity(expect_storage_removed=True, check_oci_tags=False):
- # Delete tag2.
- model.oci.tag.delete_tag(repo, 'tag2')
- assert model.gc.garbage_collect_repo(repo)
-
- # Ensure the blobs for manifest1 still all exist.
- preferred = storage.preferred_locations[0]
- for blob_digest in built1.local_blob_digests:
- storage_row = ImageStorage.get(content_checksum=blob_digest)
-
- assert storage_row.cas_path
- storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum))
diff --git a/data/model/test/test_image.py b/data/model/test/test_image.py
deleted file mode 100644
index 9442a23eb..000000000
--- a/data/model/test/test_image.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import pytest
-
-from collections import defaultdict
-from data.model import image, repository
-from playhouse.test_utils import assert_query_count
-
-from test.fixtures import *
-
-@pytest.fixture()
-def images(initialized_db):
- images = image.get_repository_images('devtable', 'simple')
- assert len(images)
- return images
-
-
-def test_get_image_with_storage(images, initialized_db):
- for current in images:
- storage_uuid = current.storage.uuid
-
- with assert_query_count(1):
- retrieved = image.get_image_with_storage(current.docker_image_id, storage_uuid)
- assert retrieved.id == current.id
- assert retrieved.storage.uuid == storage_uuid
-
-
-def test_get_parent_images(images, initialized_db):
- for current in images:
- if not len(current.ancestor_id_list()):
- continue
-
- with assert_query_count(1):
- parent_images = list(image.get_parent_images('devtable', 'simple', current))
-
- assert len(parent_images) == len(current.ancestor_id_list())
- assert set(current.ancestor_id_list()) == {i.id for i in parent_images}
-
- for parent in parent_images:
- with assert_query_count(0):
- assert parent.storage.id
-
-
-def test_get_image(images, initialized_db):
- for current in images:
- repo = current.repository
-
- with assert_query_count(1):
- found = image.get_image(repo, current.docker_image_id)
-
- assert found.id == current.id
-
-
-def test_placements(images, initialized_db):
- with assert_query_count(1):
- placements_map = image.get_placements_for_images(images)
-
- for current in images:
- assert current.storage.id in placements_map
-
- with assert_query_count(2):
- expected_image, expected_placements = image.get_image_and_placements('devtable', 'simple',
- current.docker_image_id)
-
- assert expected_image.id == current.id
- assert len(expected_placements) == len(placements_map.get(current.storage.id))
- assert ({p.id for p in expected_placements} ==
- {p.id for p in placements_map.get(current.storage.id)})
-
-
-def test_get_repo_image(images, initialized_db):
- for current in images:
- with assert_query_count(1):
- found = image.get_repo_image('devtable', 'simple', current.docker_image_id)
-
- assert found.id == current.id
- with assert_query_count(1):
- assert found.storage.id
-
-
-def test_get_repo_image_and_storage(images, initialized_db):
- for current in images:
- with assert_query_count(1):
- found = image.get_repo_image_and_storage('devtable', 'simple', current.docker_image_id)
-
- assert found.id == current.id
- with assert_query_count(0):
- assert found.storage.id
-
-
-def test_get_repository_images_without_placements(images, initialized_db):
- ancestors_map = defaultdict(list)
- for img in images:
- current = img.parent
- while current is not None:
- ancestors_map[current.id].append(img.id)
- current = current.parent
-
- for current in images:
- repo = current.repository
-
- with assert_query_count(1):
- found = list(image.get_repository_images_without_placements(repo, with_ancestor=current))
-
- assert len(found) == len(ancestors_map[current.id]) + 1
- assert {i.id for i in found} == set(ancestors_map[current.id] + [current.id])
diff --git a/data/model/test/test_image_sharing.py b/data/model/test/test_image_sharing.py
deleted file mode 100644
index 239500b10..000000000
--- a/data/model/test/test_image_sharing.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import pytest
-
-from data import model
-
-from storage.distributedstorage import DistributedStorage
-from storage.fakestorage import FakeStorage
-from test.fixtures import *
-
-NO_ACCESS_USER = 'freshuser'
-READ_ACCESS_USER = 'reader'
-ADMIN_ACCESS_USER = 'devtable'
-PUBLIC_USER = 'public'
-RANDOM_USER = 'randomuser'
-OUTSIDE_ORG_USER = 'outsideorg'
-
-ADMIN_ROBOT_USER = 'devtable+dtrobot'
-
-ORGANIZATION = 'buynlarge'
-
-REPO = 'devtable/simple'
-PUBLIC_REPO = 'public/publicrepo'
-RANDOM_REPO = 'randomuser/randomrepo'
-
-OUTSIDE_ORG_REPO = 'outsideorg/coolrepo'
-
-ORG_REPO = 'buynlarge/orgrepo'
-ANOTHER_ORG_REPO = 'buynlarge/anotherorgrepo'
-
-# Note: The shared repo has devtable as admin, public as a writer and reader as a reader.
-SHARED_REPO = 'devtable/shared'
-
-
-@pytest.fixture()
-def storage(app):
- return DistributedStorage({'local_us': FakeStorage(None)}, preferred_locations=['local_us'])
-
-
-def createStorage(storage, docker_image_id, repository=REPO, username=ADMIN_ACCESS_USER):
- repository_obj = model.repository.get_repository(repository.split('/')[0],
- repository.split('/')[1])
- preferred = storage.preferred_locations[0]
- image = model.image.find_create_or_link_image(docker_image_id, repository_obj, username, {},
- preferred)
- image.storage.uploading = False
- image.storage.save()
- return image.storage
-
-
-def assertSameStorage(storage, docker_image_id, existing_storage, repository=REPO,
- username=ADMIN_ACCESS_USER):
- new_storage = createStorage(storage, docker_image_id, repository, username)
- assert existing_storage.id == new_storage.id
-
-
-def assertDifferentStorage(storage, docker_image_id, existing_storage, repository=REPO,
- username=ADMIN_ACCESS_USER):
- new_storage = createStorage(storage, docker_image_id, repository, username)
- assert existing_storage.id != new_storage.id
-
-
-def test_same_user(storage, initialized_db):
- """ The same user creates two images, each which should be shared in the same repo. This is a
- sanity check. """
-
- # Create a reference to a new docker ID => new image.
- first_storage_id = createStorage(storage, 'first-image')
-
- # Create a reference to the same docker ID => same image.
- assertSameStorage(storage, 'first-image', first_storage_id)
-
- # Create a reference to another new docker ID => new image.
- second_storage_id = createStorage(storage, 'second-image')
-
- # Create a reference to that same docker ID => same image.
- assertSameStorage(storage, 'second-image', second_storage_id)
-
- # Make sure the images are different.
- assert first_storage_id != second_storage_id
-
-
-def test_no_user_private_repo(storage, initialized_db):
- """ If no user is specified (token case usually), then no sharing can occur on a private repo. """
- # Create a reference to a new docker ID => new image.
- first_storage = createStorage(storage, 'the-image', username=None, repository=SHARED_REPO)
-
- # Create a reference to the same docker ID, but since there is no username => new image.
- assertDifferentStorage(storage, 'the-image', first_storage, username=None, repository=RANDOM_REPO)
-
-
-def test_no_user_public_repo(storage, initialized_db):
- """ If no user is specified (token case usually), then no sharing can occur on a private repo except when the image is first public. """
- # Create a reference to a new docker ID => new image.
- first_storage = createStorage(storage, 'the-image', username=None, repository=PUBLIC_REPO)
-
- # Create a reference to the same docker ID. With no username we'd expect a different image, but the first image is public, so => shared image.
- assertSameStorage(storage, 'the-image', first_storage, username=None, repository=RANDOM_REPO)
-
-
-def test_different_user_same_repo(storage, initialized_db):
- """ Two different users create the same image in the same repo. """
-
- # Create a reference to a new docker ID under the first user => new image.
- first_storage = createStorage(storage, 'the-image', username=PUBLIC_USER, repository=SHARED_REPO)
-
- # Create a reference to the *same* docker ID under the second user => same image.
- assertSameStorage(storage, 'the-image', first_storage, username=ADMIN_ACCESS_USER, repository=SHARED_REPO)
-
-
-def test_different_repo_no_shared_access(storage, initialized_db):
- """ Neither user has access to the other user's repository. """
-
- # Create a reference to a new docker ID under the first user => new image.
- first_storage_id = createStorage(storage, 'the-image', username=RANDOM_USER, repository=RANDOM_REPO)
-
- # Create a reference to the *same* docker ID under the second user => new image.
- second_storage_id = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=REPO)
-
- # Verify that the users do not share storage.
- assert first_storage_id != second_storage_id
-
-
-def test_public_than_private(storage, initialized_db):
- """ An image is created publicly then used privately, so it should be shared. """
-
- # Create a reference to a new docker ID under the first user => new image.
- first_storage = createStorage(storage, 'the-image', username=PUBLIC_USER, repository=PUBLIC_REPO)
-
- # Create a reference to the *same* docker ID under the second user => same image, since the first was public.
- assertSameStorage(storage, 'the-image', first_storage, username=ADMIN_ACCESS_USER, repository=REPO)
-
-
-def test_private_than_public(storage, initialized_db):
- """ An image is created privately then used publicly, so it should *not* be shared. """
-
- # Create a reference to a new docker ID under the first user => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=REPO)
-
- # Create a reference to the *same* docker ID under the second user => new image, since the first was private.
- assertDifferentStorage(storage, 'the-image', first_storage, username=PUBLIC_USER, repository=PUBLIC_REPO)
-
-
-def test_different_repo_with_access(storage, initialized_db):
- """ An image is created in one repo (SHARED_REPO) which the user (PUBLIC_USER) has access to. Later, the
- image is created in another repo (PUBLIC_REPO) that the user also has access to. The image should
- be shared since the user has access.
- """
- # Create the image in the shared repo => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=SHARED_REPO)
-
- # Create the image in the other user's repo, but since the user (PUBLIC) still has access to the shared
- # repository, they should reuse the storage.
- assertSameStorage(storage, 'the-image', first_storage, username=PUBLIC_USER, repository=PUBLIC_REPO)
-
-
-def test_org_access(storage, initialized_db):
- """ An image is accessible by being a member of the organization. """
-
- # Create the new image under the org's repo => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
-
- # Create an image under the user's repo, but since the user has access to the organization => shared image.
- assertSameStorage(storage, 'the-image', first_storage, username=ADMIN_ACCESS_USER, repository=REPO)
-
- # Ensure that the user's robot does not have access, since it is not on the permissions list for the repo.
- assertDifferentStorage(storage, 'the-image', first_storage, username=ADMIN_ROBOT_USER, repository=SHARED_REPO)
-
-
-def test_org_access_different_user(storage, initialized_db):
- """ An image is accessible by being a member of the organization. """
-
- # Create the new image under the org's repo => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
-
- # Create an image under a user's repo, but since the user has access to the organization => shared image.
- assertSameStorage(storage, 'the-image', first_storage, username=PUBLIC_USER, repository=PUBLIC_REPO)
-
- # Also verify for reader.
- assertSameStorage(storage, 'the-image', first_storage, username=READ_ACCESS_USER, repository=PUBLIC_REPO)
-
-
-def test_org_no_access(storage, initialized_db):
- """ An image is not accessible if not a member of the organization. """
-
- # Create the new image under the org's repo => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
-
- # Create an image under a user's repo. Since the user is not a member of the organization => new image.
- assertDifferentStorage(storage, 'the-image', first_storage, username=RANDOM_USER, repository=RANDOM_REPO)
-
-
-def test_org_not_team_member_with_access(storage, initialized_db):
- """ An image is accessible to a user specifically listed as having permission on the org repo. """
-
- # Create the new image under the org's repo => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
-
- # Create an image under a user's repo. Since the user has read access on that repo, they can see the image => shared image.
- assertSameStorage(storage, 'the-image', first_storage, username=OUTSIDE_ORG_USER, repository=OUTSIDE_ORG_REPO)
-
-
-def test_org_not_team_member_with_no_access(storage, initialized_db):
- """ A user that has access to one org repo but not another and is not a team member. """
-
- # Create the new image under the org's repo => new image.
- first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ANOTHER_ORG_REPO)
-
- # Create an image under a user's repo. The user doesn't have access to the repo (ANOTHER_ORG_REPO) so => new image.
- assertDifferentStorage(storage, 'the-image', first_storage, username=OUTSIDE_ORG_USER, repository=OUTSIDE_ORG_REPO)
-
-def test_no_link_to_uploading(storage, initialized_db):
- still_uploading = createStorage(storage, 'an-image', repository=PUBLIC_REPO)
- still_uploading.uploading = True
- still_uploading.save()
-
- assertDifferentStorage(storage, 'an-image', still_uploading)
diff --git a/data/model/test/test_log.py b/data/model/test/test_log.py
deleted file mode 100644
index 7ced0bb91..000000000
--- a/data/model/test/test_log.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import pytest
-
-from data.database import LogEntry3, User
-from data.model import config as _config
-from data.model.log import log_action
-
-from mock import patch, Mock, DEFAULT, sentinel
-from peewee import PeeweeException
-
-
-@pytest.fixture(scope='function')
-def app_config():
- with patch.dict(_config.app_config, {}, clear=True):
- yield _config.app_config
-
-@pytest.fixture()
-def logentry_kind():
- kinds = {'pull_repo': 'pull_repo_kind', 'push_repo': 'push_repo_kind'}
- with patch('data.model.log.get_log_entry_kinds', return_value=kinds, spec=True):
- yield kinds
-
-@pytest.fixture()
-def logentry(logentry_kind):
- with patch('data.database.LogEntry3.create', spec=True):
- yield LogEntry3
-
-@pytest.fixture()
-def user():
- with patch.multiple('data.database.User', username=DEFAULT, get=DEFAULT, select=DEFAULT) as user:
- user['get'].return_value = Mock(id='mock_user_id')
- user['select'].return_value.tuples.return_value.get.return_value = ['default_user_id']
- yield User
-
-@pytest.mark.parametrize('action_kind', [('pull'), ('oops')])
-def test_log_action_unknown_action(action_kind):
- ''' test unknown action types throw an exception when logged '''
- with pytest.raises(Exception):
- log_action(action_kind, None)
-
-
-@pytest.mark.parametrize('user_or_org_name,account_id,account', [
- ('my_test_org', 'N/A', 'mock_user_id' ),
- (None, 'test_account_id', 'test_account_id'),
- (None, None, 'default_user_id')
-])
-@pytest.mark.parametrize('unlogged_pulls_ok,action_kind,db_exception,throws', [
- (False, 'pull_repo', None, False),
- (False, 'push_repo', None, False),
- (False, 'pull_repo', PeeweeException, True ),
- (False, 'push_repo', PeeweeException, True ),
-
- (True, 'pull_repo', PeeweeException, False),
- (True, 'push_repo', PeeweeException, True ),
- (True, 'pull_repo', Exception, True ),
- (True, 'push_repo', Exception, True )
-])
-def test_log_action(user_or_org_name, account_id, account, unlogged_pulls_ok, action_kind,
- db_exception, throws, app_config, logentry, user):
- log_args = {
- 'performer' : Mock(id='TEST_PERFORMER_ID'),
- 'repository' : Mock(id='TEST_REPO'),
- 'ip' : 'TEST_IP',
- 'metadata' : { 'test_key' : 'test_value' },
- 'timestamp' : 'TEST_TIMESTAMP'
- }
- app_config['SERVICE_LOG_ACCOUNT_ID'] = account_id
- app_config['ALLOW_PULLS_WITHOUT_STRICT_LOGGING'] = unlogged_pulls_ok
-
- logentry.create.side_effect = db_exception
-
- if throws:
- with pytest.raises(db_exception):
- log_action(action_kind, user_or_org_name, **log_args)
- else:
- log_action(action_kind, user_or_org_name, **log_args)
-
- logentry.create.assert_called_once_with(kind=action_kind+'_kind', account=account,
- performer='TEST_PERFORMER_ID', repository='TEST_REPO',
- ip='TEST_IP', metadata_json='{"test_key": "test_value"}',
- datetime='TEST_TIMESTAMP')
diff --git a/data/model/test/test_model_blob.py b/data/model/test/test_model_blob.py
deleted file mode 100644
index b6053b353..000000000
--- a/data/model/test/test_model_blob.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from app import storage
-from data import model, database
-
-from test.fixtures import *
-
-ADMIN_ACCESS_USER = 'devtable'
-REPO = 'simple'
-
-def test_store_blob(initialized_db):
- location = database.ImageStorageLocation.select().get()
-
- # Create a new blob at a unique digest.
- digest = 'somecooldigest'
- blob_storage = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, digest,
- location, 1024, 0, 5000)
- assert blob_storage.content_checksum == digest
- assert blob_storage.image_size == 1024
- assert blob_storage.uncompressed_size == 5000
-
- # Link to the same digest.
- blob_storage2 = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, digest,
- location, 2048, 0, 6000)
- assert blob_storage2.id == blob_storage.id
-
- # The sizes should be unchanged.
- assert blob_storage2.image_size == 1024
- assert blob_storage2.uncompressed_size == 5000
-
- # Add a new digest, ensure it has a new record.
- otherdigest = 'anotherdigest'
- blob_storage3 = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, otherdigest,
- location, 1234, 0, 5678)
- assert blob_storage3.id != blob_storage.id
- assert blob_storage3.image_size == 1234
- assert blob_storage3.uncompressed_size == 5678
-
-
-def test_get_or_create_shared_blob(initialized_db):
- shared = model.blob.get_or_create_shared_blob('sha256:abcdef', 'somecontent', storage)
- assert shared.content_checksum == 'sha256:abcdef'
-
- again = model.blob.get_or_create_shared_blob('sha256:abcdef', 'somecontent', storage)
- assert shared == again
-
-
-def test_lookup_repo_storages_by_content_checksum(initialized_db):
- for image in database.Image.select():
- found = model.storage.lookup_repo_storages_by_content_checksum(image.repository,
- [image.storage.content_checksum])
- assert len(found) == 1
- assert found[0].content_checksum == image.storage.content_checksum
diff --git a/data/model/test/test_modelutil.py b/data/model/test/test_modelutil.py
deleted file mode 100644
index 5da72be4a..000000000
--- a/data/model/test/test_modelutil.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import pytest
-
-from data.database import Role
-from data.model.modelutil import paginate
-from test.fixtures import *
-
-@pytest.mark.parametrize('page_size', [
- 10,
- 20,
- 50,
- 100,
- 200,
- 500,
- 1000,
-])
-@pytest.mark.parametrize('descending', [
- False,
- True,
-])
-def test_paginate(page_size, descending, initialized_db):
- # Add a bunch of rows into a test table (`Role`).
- for i in range(0, 522):
- Role.create(name='testrole%s' % i)
-
- query = Role.select().where(Role.name ** 'testrole%')
- all_matching_roles = list(query)
- assert len(all_matching_roles) == 522
-
- # Paginate a query to lookup roles.
- collected = []
- page_token = None
- while True:
- results, page_token = paginate(query, Role, limit=page_size, descending=descending,
- page_token=page_token)
- assert len(results) <= page_size
- collected.extend(results)
-
- if page_token is None:
- break
-
- assert len(results) == page_size
-
- for index, result in enumerate(results[1:]):
- if descending:
- assert result.id < results[index].id
- else:
- assert result.id > results[index].id
-
- assert len(collected) == len(all_matching_roles)
- assert {c.id for c in collected} == {a.id for a in all_matching_roles}
diff --git a/data/model/test/test_organization.py b/data/model/test/test_organization.py
deleted file mode 100644
index 153814765..000000000
--- a/data/model/test/test_organization.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from data.model.organization import get_organization, get_organizations
-from data.model.user import mark_namespace_for_deletion
-from data.queue import WorkQueue
-from test.fixtures import *
-
-@pytest.mark.parametrize('deleted', [
- (True),
- (False),
-])
-def test_get_organizations(deleted, initialized_db):
- # Delete an org.
- deleted_org = get_organization('sellnsmall')
- queue = WorkQueue('testgcnamespace', lambda db: db.transaction())
- mark_namespace_for_deletion(deleted_org, [], queue)
-
- orgs = get_organizations(deleted=deleted)
- assert orgs
-
- deleted_found = [org for org in orgs if org.id == deleted_org.id]
- assert bool(deleted_found) == deleted
diff --git a/data/model/test/test_repo_mirroring.py b/data/model/test/test_repo_mirroring.py
deleted file mode 100644
index 6a3f808e3..000000000
--- a/data/model/test/test_repo_mirroring.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import
-from jsonschema import ValidationError
-
-from data.database import RepoMirrorConfig, RepoMirrorStatus, User
-from data import model
-from data.model.repo_mirror import (create_mirroring_rule, get_eligible_mirrors, update_sync_status_to_cancel,
- MAX_SYNC_RETRIES, release_mirror)
-
-from test.fixtures import *
-
-
-def create_mirror_repo_robot(rules, repo_name="repo"):
- try:
- user = User.get(User.username == "mirror")
- except User.DoesNotExist:
- user = create_user_noverify("mirror", "mirror@example.com", email_required=False)
-
- try:
- robot = lookup_robot("mirror+robot")
- except model.InvalidRobotException:
- robot, _ = create_robot("robot", user)
-
- repo = create_repository("mirror", repo_name, None, repo_kind="image", visibility="public")
- repo.save()
-
- rule = model.repo_mirror.create_mirroring_rule(repo, rules)
-
- mirror_kwargs = {
- "repository": repo,
- "root_rule": rule,
- "internal_robot": robot,
- "external_reference": "registry.example.com/namespace/repository",
- "sync_interval": timedelta(days=1).total_seconds()
- }
- mirror = enable_mirroring_for_repository(**mirror_kwargs)
- mirror.sync_status = RepoMirrorStatus.NEVER_RUN
- mirror.sync_start_date = datetime.utcnow() - timedelta(days=1)
- mirror.sync_retries_remaining = 3
- mirror.save()
-
- return (mirror, repo)
-
-
-def disable_existing_mirrors():
- mirrors = RepoMirrorConfig.select().execute()
- for mirror in mirrors:
- mirror.is_enabled = False
- mirror.save()
-
-
-def test_eligible_oldest_first(initialized_db):
- """
- Eligible mirror candidates should be returned with the oldest (earliest created) first.
- """
-
- disable_existing_mirrors()
- mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first")
- mirror_second, repo_second = create_mirror_repo_robot(["updated", "created"], repo_name="second")
- mirror_third, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="third")
-
- candidates = get_eligible_mirrors()
-
- assert len(candidates) == 3
- assert candidates[0] == mirror_first
- assert candidates[1] == mirror_second
- assert candidates[2] == mirror_third
-
-
-def test_eligible_includes_expired_syncing(initialized_db):
- """
- Mirrors that have an end time in the past are eligible even if their state indicates still syncing.
- """
-
- disable_existing_mirrors()
- mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first")
- mirror_second, repo_second = create_mirror_repo_robot(["updated", "created"], repo_name="second")
- mirror_third, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="third")
- mirror_fourth, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="fourth")
-
- mirror_second.sync_expiration_date = datetime.utcnow() - timedelta(hours=1)
- mirror_second.sync_status = RepoMirrorStatus.SYNCING
- mirror_second.save()
-
- mirror_fourth.sync_expiration_date = datetime.utcnow() + timedelta(hours=1)
- mirror_fourth.sync_status = RepoMirrorStatus.SYNCING
- mirror_fourth.save()
-
- candidates = get_eligible_mirrors()
-
- assert len(candidates) == 3
- assert candidates[0] == mirror_first
- assert candidates[1] == mirror_second
- assert candidates[2] == mirror_third
-
-
-def test_eligible_includes_immediate(initialized_db):
- """
- Mirrors in the SYNC_NOW state are eligible regardless of their scheduled start time.
- """
-
- disable_existing_mirrors()
- mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first")
- mirror_second, repo_second = create_mirror_repo_robot(["updated", "created"], repo_name="second")
- mirror_third, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="third")
- mirror_fourth, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="fourth")
- mirror_future, _ = create_mirror_repo_robot(["updated", "created"], repo_name="future")
- mirror_past, _ = create_mirror_repo_robot(["updated", "created"], repo_name="past")
-
- mirror_future.sync_start_date = datetime.utcnow() + timedelta(hours=6)
- mirror_future.sync_status = RepoMirrorStatus.SYNC_NOW
- mirror_future.save()
-
- mirror_past.sync_start_date = datetime.utcnow() - timedelta(hours=6)
- mirror_past.sync_status = RepoMirrorStatus.SYNC_NOW
- mirror_past.save()
-
- mirror_fourth.sync_expiration_date = datetime.utcnow() + timedelta(hours=1)
- mirror_fourth.sync_status = RepoMirrorStatus.SYNCING
- mirror_fourth.save()
-
- candidates = get_eligible_mirrors()
-
- assert len(candidates) == 5
- assert candidates[0] == mirror_first
- assert candidates[1] == mirror_second
- assert candidates[2] == mirror_third
- assert candidates[3] == mirror_past
- assert candidates[4] == mirror_future
-
-
-def test_create_rule_validations(initialized_db):
- mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="first")
-
- with pytest.raises(ValidationError):
- create_mirroring_rule(repo, None)
-
- with pytest.raises(ValidationError):
- create_mirroring_rule(repo, "['tag1', 'tag2']")
-
- with pytest.raises(ValidationError):
- create_mirroring_rule(repo, ['tag1', 'tag2'], rule_type=None)
-
-
-def test_long_registry_passwords(initialized_db):
- """
- Verify that long credentials, such as the Base64-encoded JWTs used by Red Hat's registry, work as expected.
- """
- MAX_PASSWORD_LENGTH = 1024
-
- username = ''.join('a' for _ in range(MAX_PASSWORD_LENGTH))
- password = ''.join('b' for _ in range(MAX_PASSWORD_LENGTH))
- assert len(username) == MAX_PASSWORD_LENGTH
- assert len(password) == MAX_PASSWORD_LENGTH
-
- repo = model.repository.get_repository('devtable', 'mirrored')
- assert repo
-
- existing_mirror_conf = model.repo_mirror.get_mirror(repo)
- assert existing_mirror_conf
-
- assert model.repo_mirror.change_credentials(repo, username, password)
-
- updated_mirror_conf = model.repo_mirror.get_mirror(repo)
- assert updated_mirror_conf
-
- assert updated_mirror_conf.external_registry_username.decrypt() == username
- assert updated_mirror_conf.external_registry_password.decrypt() == password
-
-
-def test_sync_status_to_cancel(initialized_db):
- """
- SYNCING and SYNC_NOW mirrors may be canceled, ending in NEVER_RUN; mirrors in other states may not.
- """
-
- disable_existing_mirrors()
- mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="cancel")
-
- mirror.sync_status = RepoMirrorStatus.SYNCING
- mirror.save()
- updated = update_sync_status_to_cancel(mirror)
- assert updated is not None
- assert updated.sync_status == RepoMirrorStatus.NEVER_RUN
-
- mirror.sync_status = RepoMirrorStatus.SYNC_NOW
- mirror.save()
- updated = update_sync_status_to_cancel(mirror)
- assert updated is not None
- assert updated.sync_status == RepoMirrorStatus.NEVER_RUN
-
- mirror.sync_status = RepoMirrorStatus.FAIL
- mirror.save()
- updated = update_sync_status_to_cancel(mirror)
- assert updated is None
-
- mirror.sync_status = RepoMirrorStatus.NEVER_RUN
- mirror.save()
- updated = update_sync_status_to_cancel(mirror)
- assert updated is None
-
- mirror.sync_status = RepoMirrorStatus.SUCCESS
- mirror.save()
- updated = update_sync_status_to_cancel(mirror)
- assert updated is None
-
-
-def test_release_mirror(initialized_db):
- """
- Releasing a failed mirror decrements its remaining retries; once exhausted, the retries reset and the next sync is rescheduled.
- """
-
- disable_existing_mirrors()
- mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="first")
-
- # mysql rounds the milliseconds on update so force that to happen now
- query = (RepoMirrorConfig
- .update(sync_start_date=mirror.sync_start_date)
- .where(RepoMirrorConfig.id == mirror.id))
- query.execute()
- mirror = RepoMirrorConfig.get_by_id(mirror.id)
- original_sync_start_date = mirror.sync_start_date
-
- assert mirror.sync_retries_remaining == 3
-
- mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
- assert mirror.sync_retries_remaining == 2
- assert mirror.sync_start_date == original_sync_start_date
-
- mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
- assert mirror.sync_retries_remaining == 1
- assert mirror.sync_start_date == original_sync_start_date
-
- mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
- assert mirror.sync_retries_remaining == 3
- assert mirror.sync_start_date > original_sync_start_date
diff --git a/data/model/test/test_repository.py b/data/model/test/test_repository.py
deleted file mode 100644
index 25e8b7cf2..000000000
--- a/data/model/test/test_repository.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from datetime import timedelta
-
-import pytest
-
-from peewee import IntegrityError
-
-from data.model.gc import purge_repository
-from data.model.repository import create_repository, is_empty
-from data.model.repository import get_filtered_matching_repositories
-from test.fixtures import *
-
-
-def test_duplicate_repository_different_kinds(initialized_db):
- # Create an image repo.
- create_repository('devtable', 'somenewrepo', None, repo_kind='image')
-
- # Try to create an app repo with the same name, which should fail.
- with pytest.raises(IntegrityError):
- create_repository('devtable', 'somenewrepo', None, repo_kind='application')
-
-
-def test_is_empty(initialized_db):
- create_repository('devtable', 'somenewrepo', None, repo_kind='image')
-
- assert is_empty('devtable', 'somenewrepo')
- assert not is_empty('devtable', 'simple')
-
-@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
- reason='MySQL requires specialized indexing of newly created repos')
-@pytest.mark.parametrize('query', [
- (''),
- ('e'),
-])
-@pytest.mark.parametrize('authed_username', [
- (None),
- ('devtable'),
-])
-def test_search_pagination(query, authed_username, initialized_db):
- # Create some public repos.
- repo1 = create_repository('devtable', 'somenewrepo', None, repo_kind='image', visibility='public')
- repo2 = create_repository('devtable', 'somenewrepo2', None, repo_kind='image', visibility='public')
- repo3 = create_repository('devtable', 'somenewrepo3', None, repo_kind='image', visibility='public')
-
- repositories = get_filtered_matching_repositories(query, filter_username=authed_username)
- assert len(repositories) > 3
-
- next_repos = get_filtered_matching_repositories(query, filter_username=authed_username, offset=1)
- assert repositories[0].id != next_repos[0].id
- assert repositories[1].id == next_repos[0].id
diff --git a/data/model/test/test_repositoryactioncount.py b/data/model/test/test_repositoryactioncount.py
deleted file mode 100644
index bdad4e315..000000000
--- a/data/model/test/test_repositoryactioncount.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from datetime import date, timedelta
-
-import pytest
-
-from data.database import RepositoryActionCount, RepositorySearchScore
-from data.model.repository import create_repository
-from data.model.repositoryactioncount import update_repository_score, SEARCH_BUCKETS
-from test.fixtures import *
-
-@pytest.mark.parametrize('bucket_sums,expected_score', [
- ((0, 0, 0, 0), 0),
-
- ((1, 6, 24, 152), 100),
- ((2, 6, 24, 152), 101),
- ((1, 6, 24, 304), 171),
-
- ((100, 480, 24, 152), 703),
- ((1, 6, 24, 15200), 7131),
-
- ((300, 500, 1000, 0), 1733),
- ((5000, 0, 0, 0), 5434),
-])
-def test_update_repository_score(bucket_sums, expected_score, initialized_db):
- # Create a new repository.
- repo = create_repository('devtable', 'somenewrepo', None, repo_kind='image')
-
- # Delete the RAC created in create_repository.
- RepositoryActionCount.delete().where(RepositoryActionCount.repository == repo).execute()
-
- # Add RAC rows for each of the buckets.
- for index, bucket in enumerate(SEARCH_BUCKETS):
- for day in range(0, bucket.days):
- RepositoryActionCount.create(repository=repo,
- count=(bucket_sums[index] / bucket.days * 1.0),
- date=date.today() - bucket.delta + timedelta(days=day))
-
- assert update_repository_score(repo)
- assert RepositorySearchScore.get(repository=repo).score == expected_score
diff --git a/data/model/test/test_tag.py b/data/model/test/test_tag.py
deleted file mode 100644
index 2f5adf773..000000000
--- a/data/model/test/test_tag.py
+++ /dev/null
@@ -1,356 +0,0 @@
-import json
-
-from datetime import datetime
-from time import time
-
-import pytest
-
-from mock import patch
-
-from app import docker_v2_signing_key
-from data.database import (Image, RepositoryTag, ImageStorage, Repository, Manifest, ManifestBlob,
- ManifestLegacyImage, TagManifestToManifest, Tag, TagToRepositoryTag)
-from data.model.repository import create_repository
-from data.model.tag import (list_active_repo_tags, create_or_update_tag, delete_tag,
- get_matching_tags, _tag_alive, get_matching_tags_for_images,
- change_tag_expiration, get_active_tag, store_tag_manifest_for_testing,
- get_most_recent_tag, get_active_tag_for_repo,
- create_or_update_tag_for_repo, set_tag_end_ts)
-from data.model.image import find_create_or_link_image
-from image.docker.schema1 import DockerSchema1ManifestBuilder
-from util.timedeltastring import convert_to_timedelta
-
-from test.fixtures import *
-
-
-def _get_expected_tags(image):
- expected_query = (RepositoryTag
- .select()
- .join(Image)
- .where(RepositoryTag.hidden == False)
- .where((Image.id == image.id) | (Image.ancestors ** ('%%/%s/%%' % image.id))))
- return set([tag.id for tag in _tag_alive(expected_query)])
-
-
-@pytest.mark.parametrize('max_subqueries,max_image_lookup_count', [
- (1, 1),
- (10, 10),
- (100, 500),
-])
-def test_get_matching_tags(max_subqueries, max_image_lookup_count, initialized_db):
- with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
- with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
- # Test for every image in the test database.
- for image in Image.select(Image, ImageStorage).join(ImageStorage):
- matching_query = get_matching_tags(image.docker_image_id, image.storage.uuid)
- matching_tags = set([tag.id for tag in matching_query])
- expected_tags = _get_expected_tags(image)
- assert matching_tags == expected_tags, "mismatch for image %s" % image.id
-
- oci_tags = list(Tag
- .select()
- .join(TagToRepositoryTag)
- .where(TagToRepositoryTag.repository_tag << expected_tags))
- assert len(oci_tags) == len(expected_tags)
-
-
-@pytest.mark.parametrize('max_subqueries,max_image_lookup_count', [
- (1, 1),
- (10, 10),
- (100, 500),
-])
-def test_get_matching_tag_ids_for_images(max_subqueries, max_image_lookup_count, initialized_db):
- with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
- with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
- # Try for various sets of the first N images.
- for count in [5, 10, 15]:
- pairs = []
- expected_tags_ids = set()
- for image in Image.select(Image, ImageStorage).join(ImageStorage):
- if len(pairs) >= count:
- break
-
- pairs.append((image.docker_image_id, image.storage.uuid))
- expected_tags_ids.update(_get_expected_tags(image))
-
- matching_tags_ids = set([tag.id for tag in get_matching_tags_for_images(pairs)])
- assert matching_tags_ids == expected_tags_ids
-
-
-@pytest.mark.parametrize('max_subqueries,max_image_lookup_count', [
- (1, 1),
- (10, 10),
- (100, 500),
-])
-def test_get_matching_tag_ids_for_all_images(max_subqueries, max_image_lookup_count, initialized_db):
- with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
- with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
- pairs = []
- for image in Image.select(Image, ImageStorage).join(ImageStorage):
- pairs.append((image.docker_image_id, image.storage.uuid))
-
- expected_tags_ids = set([tag.id for tag in _tag_alive(RepositoryTag.select())])
- matching_tags_ids = set([tag.id for tag in get_matching_tags_for_images(pairs)])
-
- # Ensure every alive tag was found.
- assert matching_tags_ids == expected_tags_ids
-
-
-def test_get_matching_tag_ids_images_filtered(initialized_db):
- def filter_query(query):
- return query.join(Repository).where(Repository.name == 'simple')
-
- filtered_images = filter_query(Image
- .select(Image, ImageStorage)
- .join(RepositoryTag)
- .switch(Image)
- .join(ImageStorage)
- .switch(Image))
-
- expected_tags_query = _tag_alive(filter_query(RepositoryTag
- .select()))
-
- pairs = []
- for image in filtered_images:
- pairs.append((image.docker_image_id, image.storage.uuid))
-
- matching_tags = get_matching_tags_for_images(pairs, filter_images=filter_query,
- filter_tags=filter_query)
-
- expected_tag_ids = set([tag.id for tag in expected_tags_query])
- matching_tags_ids = set([tag.id for tag in matching_tags])
-
- # Ensure every alive tag was found.
- assert matching_tags_ids == expected_tag_ids
-
-
-def _get_oci_tag(tag):
- return (Tag
- .select()
- .join(TagToRepositoryTag)
- .where(TagToRepositoryTag.repository_tag == tag)).get()
-
-
-def assert_tags(repository, *args):
- tags = list(list_active_repo_tags(repository))
- assert len(tags) == len(args)
-
- tags_dict = {}
- for tag in tags:
- assert not tag.name in tags_dict
- assert not tag.hidden
- assert not tag.lifetime_end_ts or tag.lifetime_end_ts > time()
-
- tags_dict[tag.name] = tag
-
- oci_tag = _get_oci_tag(tag)
- assert oci_tag.name == tag.name
- assert not oci_tag.hidden
- assert oci_tag.reversion == tag.reversion
-
- if tag.lifetime_end_ts:
- assert oci_tag.lifetime_end_ms == (tag.lifetime_end_ts * 1000)
- else:
- assert oci_tag.lifetime_end_ms is None
-
- for expected in args:
- assert expected in tags_dict
-
-
-def test_create_reversion_tag(initialized_db):
- repository = create_repository('devtable', 'somenewrepo', None)
- manifest = Manifest.get()
- image1 = find_create_or_link_image('foobarimage1', repository, None, {}, 'local_us')
-
- footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
- oci_manifest=manifest, reversion=True)
- assert footag.reversion
-
- oci_tag = _get_oci_tag(footag)
- assert oci_tag.name == footag.name
- assert not oci_tag.hidden
- assert oci_tag.reversion == footag.reversion
-
-
-def test_list_active_tags(initialized_db):
- # Create a new repository.
- repository = create_repository('devtable', 'somenewrepo', None)
- manifest = Manifest.get()
-
- # Create some images.
- image1 = find_create_or_link_image('foobarimage1', repository, None, {}, 'local_us')
- image2 = find_create_or_link_image('foobarimage2', repository, None, {}, 'local_us')
-
- # Make sure its tags list is empty.
- assert_tags(repository)
-
- # Add some new tags.
- footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
- oci_manifest=manifest)
- bartag = create_or_update_tag_for_repo(repository, 'bar', image1.docker_image_id,
- oci_manifest=manifest)
-
- # Since timestamps are stored on a second-granularity, we need to make the tags "start"
- # before "now", so when we recreate them below, they don't conflict.
- footag.lifetime_start_ts -= 5
- footag.save()
-
- bartag.lifetime_start_ts -= 5
- bartag.save()
-
- footag_oci = _get_oci_tag(footag)
- footag_oci.lifetime_start_ms -= 5000
- footag_oci.save()
-
- bartag_oci = _get_oci_tag(bartag)
- bartag_oci.lifetime_start_ms -= 5000
- bartag_oci.save()
-
- # Make sure they are returned.
- assert_tags(repository, 'foo', 'bar')
-
- # Set the expirations to be explicitly empty.
- set_tag_end_ts(footag, None)
- set_tag_end_ts(bartag, None)
-
- # Make sure they are returned.
- assert_tags(repository, 'foo', 'bar')
-
- # Mark as a tag as expiring in the far future, and make sure it is still returned.
- set_tag_end_ts(footag, footag.lifetime_start_ts + 10000000)
-
- # Make sure they are returned.
- assert_tags(repository, 'foo', 'bar')
-
- # Delete a tag and make sure it isn't returned.
- footag = delete_tag('devtable', 'somenewrepo', 'foo')
- set_tag_end_ts(footag, footag.lifetime_end_ts - 4)
-
- assert_tags(repository, 'bar')
-
- # Add a new foo again.
- footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
- oci_manifest=manifest)
- footag.lifetime_start_ts -= 3
- footag.save()
-
- footag_oci = _get_oci_tag(footag)
- footag_oci.lifetime_start_ms -= 3000
- footag_oci.save()
-
- assert_tags(repository, 'foo', 'bar')
-
- # Mark as a tag as expiring in the far future, and make sure it is still returned.
- set_tag_end_ts(footag, footag.lifetime_start_ts + 10000000)
-
- # Make sure they are returned.
- assert_tags(repository, 'foo', 'bar')
-
- # "Move" foo by updating it and make sure we don't get duplicates.
- create_or_update_tag_for_repo(repository, 'foo', image2.docker_image_id, oci_manifest=manifest)
- assert_tags(repository, 'foo', 'bar')
-
-
-@pytest.mark.parametrize('expiration_offset, expected_offset', [
- (None, None),
- ('0s', '1h'),
- ('30m', '1h'),
- ('2h', '2h'),
- ('2w', '2w'),
- ('200w', '104w'),
-])
-def test_change_tag_expiration(expiration_offset, expected_offset, initialized_db):
- repository = create_repository('devtable', 'somenewrepo', None)
- image1 = find_create_or_link_image('foobarimage1', repository, None, {}, 'local_us')
-
- manifest = Manifest.get()
- footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
- oci_manifest=manifest)
-
- expiration_date = None
- if expiration_offset is not None:
- expiration_date = datetime.utcnow() + convert_to_timedelta(expiration_offset)
-
- assert change_tag_expiration(footag, expiration_date)
-
- # Lookup the tag again.
- footag_updated = get_active_tag('devtable', 'somenewrepo', 'foo')
- oci_tag = _get_oci_tag(footag_updated)
-
- if expected_offset is None:
- assert footag_updated.lifetime_end_ts is None
- assert oci_tag.lifetime_end_ms is None
- else:
- start_date = datetime.utcfromtimestamp(footag_updated.lifetime_start_ts)
- end_date = datetime.utcfromtimestamp(footag_updated.lifetime_end_ts)
- expected_end_date = start_date + convert_to_timedelta(expected_offset)
- assert (expected_end_date - end_date).total_seconds() < 5 # variance in test
-
- assert oci_tag.lifetime_end_ms == (footag_updated.lifetime_end_ts * 1000)
-
-
-def random_storages():
- return list(ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).limit(10))
-
-
-def repeated_storages():
- storages = list(ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).limit(5))
- return storages + storages
-
-
-@pytest.mark.parametrize('get_storages', [
- random_storages,
- repeated_storages,
-])
-def test_store_tag_manifest(get_storages, initialized_db):
- # Create a manifest with some layers.
- builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'sometag')
-
- storages = get_storages()
- assert storages
-
- repo = model.repository.get_repository('devtable', 'simple')
- storage_id_map = {}
- for index, storage in enumerate(storages):
- image_id = 'someimage%s' % index
- builder.add_layer(storage.content_checksum, json.dumps({'id': image_id}))
- find_create_or_link_image(image_id, repo, 'devtable', {}, 'local_us')
- storage_id_map[storage.content_checksum] = storage.id
-
- manifest = builder.build(docker_v2_signing_key)
- tag_manifest, _ = store_tag_manifest_for_testing('devtable', 'simple', 'sometag', manifest,
- manifest.leaf_layer_v1_image_id, storage_id_map)
-
- # Ensure we have the new-model expected rows.
- mapping_row = TagManifestToManifest.get(tag_manifest=tag_manifest)
-
- assert mapping_row.manifest is not None
- assert mapping_row.manifest.manifest_bytes == manifest.bytes.as_encoded_str()
- assert mapping_row.manifest.digest == str(manifest.digest)
-
- blob_rows = {m.blob_id for m in
- ManifestBlob.select().where(ManifestBlob.manifest == mapping_row.manifest)}
- assert blob_rows == {s.id for s in storages}
-
- assert ManifestLegacyImage.get(manifest=mapping_row.manifest).image == tag_manifest.tag.image
-
-
-def test_get_most_recent_tag(initialized_db):
- # Create a hidden tag that is the most recent.
- repo = model.repository.get_repository('devtable', 'simple')
- image = model.tag.get_tag_image('devtable', 'simple', 'latest')
- model.tag.create_temporary_hidden_tag(repo, image, 10000000)
-
- # Ensure we find a non-hidden tag.
- found = model.tag.get_most_recent_tag(repo)
- assert not found.hidden
-
-
-def test_get_active_tag_for_repo(initialized_db):
- repo = model.repository.get_repository('devtable', 'simple')
- image = model.tag.get_tag_image('devtable', 'simple', 'latest')
- hidden_tag = model.tag.create_temporary_hidden_tag(repo, image, 10000000)
-
- # Ensure get active tag for repo cannot find it.
- assert model.tag.get_active_tag_for_repo(repo, hidden_tag) is None
- assert model.tag.get_active_tag_for_repo(repo, 'latest') is not None
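
The parametrize table for test_change_tag_expiration above encodes a clamping rule: requested offsets below one hour are raised to '1h', offsets above 104 weeks are lowered to '104w', and None clears the expiration entirely. A minimal sketch of that rule, assuming illustrative constant names (MIN_OFFSET, MAX_OFFSET) rather than Quay's actual configuration keys:

from datetime import timedelta

MIN_OFFSET = timedelta(hours=1)    # '0s' and '30m' are raised to '1h' in the table above
MAX_OFFSET = timedelta(weeks=104)  # '200w' is lowered to '104w'

def clamp_tag_expiration(start, requested_end):
    """Return the effective end date for a tag, or None to clear its expiration."""
    if requested_end is None:
        return None
    offset = min(max(requested_end - start, MIN_OFFSET), MAX_OFFSET)
    return start + offset
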
diff --git a/data/model/test/test_team.py b/data/model/test/test_team.py
deleted file mode 100644
index 88b08855c..000000000
--- a/data/model/test/test_team.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import pytest
-
-from data.model.team import (add_or_invite_to_team, create_team, confirm_team_invite,
- list_team_users, validate_team_name)
-from data.model.organization import create_organization
-from data.model.user import get_user, create_user_noverify
-
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('name, is_valid', [
- ('', False),
- ('f', False),
- ('fo', True),
- ('f' * 255, True),
- ('f' * 256, False),
- (' ', False),
- ('helloworld', True),
- ('hello_world', True),
- ('hello-world', True),
- ('hello world', False),
- ('HelloWorld', False),
-])
-def test_validate_team_name(name, is_valid):
- result, _ = validate_team_name(name)
- assert result == is_valid
-
-
-def is_in_team(team, user):
- return user.username in {u.username for u in list_team_users(team)}
-
-
-def test_invite_to_team(initialized_db):
- first_user = get_user('devtable')
- second_user = create_user_noverify('newuser', 'foo@example.com')
-
- def run_invite_flow(orgname):
- # Create an org owned by `devtable`.
- org = create_organization(orgname, orgname + '@example.com', first_user)
-
- # Create another team and add `devtable` to it. Since `devtable` is already
- # in the org, it should be done directly.
- other_team = create_team('otherteam', org, 'admin')
- invite = add_or_invite_to_team(first_user, other_team, user_obj=first_user)
- assert invite is None
- assert is_in_team(other_team, first_user)
-
- # Try to add `newuser` to the team, which should require an invite.
- invite = add_or_invite_to_team(first_user, other_team, user_obj=second_user)
- assert invite is not None
- assert not is_in_team(other_team, second_user)
-
- # Accept the invite.
- confirm_team_invite(invite.invite_token, second_user)
- assert is_in_team(other_team, second_user)
-
- # Run for a new org.
- run_invite_flow('firstorg')
-
- # Create another org and repeat, ensuring the same operations perform the same way.
- run_invite_flow('secondorg')
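
The parametrize table in test_validate_team_name above pins down the naming rules: 2 to 255 characters, lowercase only, underscores and hyphens allowed, no spaces. A hedged re-implementation of those rules; the leading-letter requirement and the exact character class are assumptions, since only the listed cases are guaranteed by the table:

import re

# Illustrative pattern derived from the test table, not Quay's actual validator.
_TEAM_NAME_RE = re.compile(r'^[a-z][a-z0-9_-]{1,254}$')

def validate_team_name_sketch(name):
    if _TEAM_NAME_RE.match(name):
        return (True, '')
    return (False, 'Team names must be 2-255 lowercase characters, digits, "_" or "-"')
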
diff --git a/data/model/test/test_user.py b/data/model/test/test_user.py
deleted file mode 100644
index 4f124b7f3..000000000
--- a/data/model/test/test_user.py
+++ /dev/null
@@ -1,205 +0,0 @@
-from datetime import datetime
-
-import pytest
-
-from mock import patch
-
-from data.database import EmailConfirmation, User, DeletedNamespace
-from data.model.organization import get_organization
-from data.model.notification import create_notification
-from data.model.team import create_team, add_user_to_team
-from data.model.user import create_user_noverify, validate_reset_code, get_active_users
-from data.model.user import mark_namespace_for_deletion, delete_namespace_via_marker
-from data.model.user import create_robot, lookup_robot, list_namespace_robots
-from data.model.user import get_pull_credentials, retrieve_robot_token, verify_robot
-from data.model.user import InvalidRobotException, delete_robot, get_matching_users
-from data.model.repository import create_repository
-from data.fields import Credential
-from data.queue import WorkQueue
-from util.timedeltastring import convert_to_timedelta
-from util.security.token import encode_public_private_token
-from test.fixtures import *
-
-def test_create_user_with_expiration(initialized_db):
- with patch('data.model.config.app_config', {'DEFAULT_TAG_EXPIRATION': '1h'}):
- user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
- assert user.removed_tag_expiration_s == 60 * 60
-
-@pytest.mark.parametrize('token_lifetime, time_since', [
- ('1m', '2m'),
- ('2m', '1m'),
- ('1h', '1m'),
-])
-def test_validation_code(token_lifetime, time_since, initialized_db):
- user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
- created = datetime.now() - convert_to_timedelta(time_since)
- verification_code, unhashed = Credential.generate()
- confirmation = EmailConfirmation.create(user=user, pw_reset=True,
- created=created, verification_code=verification_code)
- encoded = encode_public_private_token(confirmation.code, unhashed)
-
- with patch('data.model.config.app_config', {'USER_RECOVERY_TOKEN_LIFETIME': token_lifetime}):
- result = validate_reset_code(encoded)
- expect_success = convert_to_timedelta(token_lifetime) >= convert_to_timedelta(time_since)
- assert expect_success == (result is not None)
-
-
-@pytest.mark.parametrize('disabled', [
- (True),
- (False),
-])
-@pytest.mark.parametrize('deleted', [
- (True),
- (False),
-])
-def test_get_active_users(disabled, deleted, initialized_db):
- # Delete a user.
- deleted_user = model.user.get_user('public')
- queue = WorkQueue('testgcnamespace', lambda db: db.transaction())
- mark_namespace_for_deletion(deleted_user, [], queue)
-
- users = get_active_users(disabled=disabled, deleted=deleted)
- deleted_found = [user for user in users if user.id == deleted_user.id]
- assert bool(deleted_found) == (deleted and disabled)
-
- for user in users:
- if not disabled:
- assert user.enabled
-
-
-def test_mark_namespace_for_deletion(initialized_db):
- def create_transaction(db):
- return db.transaction()
-
- # Create a user and then mark it for deletion.
- user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
-
- # Add some robots.
- create_robot('foo', user)
- create_robot('bar', user)
-
- assert lookup_robot('foobar+foo') is not None
- assert lookup_robot('foobar+bar') is not None
- assert len(list(list_namespace_robots('foobar'))) == 2
-
- # Mark the user for deletion.
- queue = WorkQueue('testgcnamespace', create_transaction)
- mark_namespace_for_deletion(user, [], queue)
-
- # Ensure the older user is still in the DB.
- older_user = User.get(id=user.id)
- assert older_user.username != 'foobar'
-
- # Ensure the robots are deleted.
- with pytest.raises(InvalidRobotException):
- assert lookup_robot('foobar+foo')
-
- with pytest.raises(InvalidRobotException):
- assert lookup_robot('foobar+bar')
-
- assert len(list(list_namespace_robots(older_user.username))) == 0
-
- # Ensure we can create a user with the same namespace again.
- new_user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
- assert new_user.id != user.id
-
- # Ensure the older user is still in the DB.
- assert User.get(id=user.id).username != 'foobar'
-
-
-def test_delete_namespace_via_marker(initialized_db):
- def create_transaction(db):
- return db.transaction()
-
- # Create a user and then mark it for deletion.
- user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
-
- # Add some repositories.
- create_repository('foobar', 'somerepo', user)
- create_repository('foobar', 'anotherrepo', user)
-
- # Mark the user for deletion.
- queue = WorkQueue('testgcnamespace', create_transaction)
- marker_id = mark_namespace_for_deletion(user, [], queue)
-
- # Delete the user.
- delete_namespace_via_marker(marker_id, [])
-
- # Ensure the user was actually deleted.
- with pytest.raises(User.DoesNotExist):
- User.get(id=user.id)
-
- with pytest.raises(DeletedNamespace.DoesNotExist):
- DeletedNamespace.get(id=marker_id)
-
-
-def test_delete_robot(initialized_db):
- # Create a robot account.
- user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
- robot, _ = create_robot('foo', user)
-
- # Add some notifications and other rows pointing to the robot.
- create_notification('repo_push', robot)
-
- team = create_team('someteam', get_organization('buynlarge'), 'member')
- add_user_to_team(robot, team)
-
- # Ensure the robot exists.
- assert lookup_robot(robot.username).id == robot.id
-
- # Delete the robot.
- delete_robot(robot.username)
-
- # Ensure it is gone.
- with pytest.raises(InvalidRobotException):
- lookup_robot(robot.username)
-
-
-def test_get_matching_users(initialized_db):
- # Exact match.
- for user in User.select().where(User.organization == False, User.robot == False):
- assert list(get_matching_users(user.username))[0].username == user.username
-
- # Prefix matching.
- for user in User.select().where(User.organization == False, User.robot == False):
- assert user.username in [r.username for r in get_matching_users(user.username[:2])]
-
-
-def test_get_matching_users_with_same_prefix(initialized_db):
- # Create a bunch of users with the same prefix.
- for index in range(0, 20):
- create_user_noverify('foo%s' % index, 'foo%s@example.com' % index, email_required=False)
-
- # For each user, ensure that lookup of the exact name is found first.
- for index in range(0, 20):
- username = 'foo%s' % index
- assert list(get_matching_users(username))[0].username == username
-
- # Prefix matching.
- found = list(get_matching_users('foo', limit=50))
- assert len(found) == 20
-
-
-def test_robot(initialized_db):
- # Create a robot account.
- user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
- robot, token = create_robot('foo', user)
- assert retrieve_robot_token(robot) == token
-
- # Ensure we can retrieve its information.
- found = lookup_robot('foobar+foo')
- assert found == robot
-
- creds = get_pull_credentials('foobar+foo')
- assert creds is not None
- assert creds['username'] == 'foobar+foo'
- assert creds['password'] == token
-
- assert verify_robot('foobar+foo', token) == robot
-
- with pytest.raises(InvalidRobotException):
- assert verify_robot('foobar+foo', 'someothertoken')
-
- with pytest.raises(InvalidRobotException):
- assert verify_robot('foobar+unknownbot', token)
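
test_validation_code above asserts a single freshness rule: a password-reset code is honored only while the time since its creation is at most USER_RECOVERY_TOKEN_LIFETIME. A minimal sketch of that comparison, reusing the repo's own convert_to_timedelta helper (already imported by the deleted test); the function name is illustrative:

from datetime import datetime

from util.timedeltastring import convert_to_timedelta

def reset_code_is_fresh(created, token_lifetime, now=None):
    """True iff a code created at `created` is still valid under `token_lifetime` (e.g. '30m')."""
    now = now or datetime.now()
    return (now - created) <= convert_to_timedelta(token_lifetime)
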
diff --git a/data/model/test/test_visible_repos.py b/data/model/test/test_visible_repos.py
deleted file mode 100644
index 9e5e7cbf5..000000000
--- a/data/model/test/test_visible_repos.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from data import model
-
-from test.fixtures import *
-
-
-NO_ACCESS_USER = 'freshuser'
-READ_ACCESS_USER = 'reader'
-ADMIN_ACCESS_USER = 'devtable'
-PUBLIC_USER = 'public'
-RANDOM_USER = 'randomuser'
-OUTSIDE_ORG_USER = 'outsideorg'
-
-ADMIN_ROBOT_USER = 'devtable+dtrobot'
-
-ORGANIZATION = 'buynlarge'
-
-SIMPLE_REPO = 'simple'
-PUBLIC_REPO = 'publicrepo'
-RANDOM_REPO = 'randomrepo'
-
-OUTSIDE_ORG_REPO = 'coolrepo'
-
-ORG_REPO = 'orgrepo'
-ANOTHER_ORG_REPO = 'anotherorgrepo'
-
-# Note: The shared repo has devtable as admin, public as a writer and reader as a reader.
-SHARED_REPO = 'shared'
-
-
-def assertDoesNotHaveRepo(username, name):
- repos = list(model.repository.get_visible_repositories(username))
- names = [repo.name for repo in repos]
- assert not name in names
-
-
-def assertHasRepo(username, name):
- repos = list(model.repository.get_visible_repositories(username))
- names = [repo.name for repo in repos]
- assert name in names
-
-
-def test_noaccess(initialized_db):
- repos = list(model.repository.get_visible_repositories(NO_ACCESS_USER))
- names = [repo.name for repo in repos]
- assert not names
-
- # Try retrieving public repos now.
- repos = list(model.repository.get_visible_repositories(NO_ACCESS_USER, include_public=True))
- names = [repo.name for repo in repos]
- assert PUBLIC_REPO in names
-
-
-def test_public(initialized_db):
- assertHasRepo(PUBLIC_USER, PUBLIC_REPO)
- assertHasRepo(PUBLIC_USER, SHARED_REPO)
-
- assertDoesNotHaveRepo(PUBLIC_USER, SIMPLE_REPO)
- assertDoesNotHaveRepo(PUBLIC_USER, RANDOM_REPO)
- assertDoesNotHaveRepo(PUBLIC_USER, OUTSIDE_ORG_REPO)
-
-
-def test_reader(initialized_db):
- assertHasRepo(READ_ACCESS_USER, SHARED_REPO)
- assertHasRepo(READ_ACCESS_USER, ORG_REPO)
-
- assertDoesNotHaveRepo(READ_ACCESS_USER, SIMPLE_REPO)
- assertDoesNotHaveRepo(READ_ACCESS_USER, RANDOM_REPO)
- assertDoesNotHaveRepo(READ_ACCESS_USER, OUTSIDE_ORG_REPO)
- assertDoesNotHaveRepo(READ_ACCESS_USER, PUBLIC_REPO)
-
-
-def test_random(initialized_db):
- assertHasRepo(RANDOM_USER, RANDOM_REPO)
-
- assertDoesNotHaveRepo(RANDOM_USER, SIMPLE_REPO)
- assertDoesNotHaveRepo(RANDOM_USER, SHARED_REPO)
- assertDoesNotHaveRepo(RANDOM_USER, ORG_REPO)
- assertDoesNotHaveRepo(RANDOM_USER, ANOTHER_ORG_REPO)
- assertDoesNotHaveRepo(RANDOM_USER, PUBLIC_REPO)
-
-
-def test_admin(initialized_db):
- assertHasRepo(ADMIN_ACCESS_USER, SIMPLE_REPO)
- assertHasRepo(ADMIN_ACCESS_USER, SHARED_REPO)
-
- assertHasRepo(ADMIN_ACCESS_USER, ORG_REPO)
- assertHasRepo(ADMIN_ACCESS_USER, ANOTHER_ORG_REPO)
-
- assertDoesNotHaveRepo(ADMIN_ACCESS_USER, OUTSIDE_ORG_REPO)
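
Every assertion above reduces to one membership check on model.repository.get_visible_repositories. A small usage sketch of that pattern; the helper name is illustrative:

from data import model

def visible_repo_names(username, include_public=False):
    """Collect the repository names visible to `username`, optionally including public repos."""
    repos = model.repository.get_visible_repositories(username, include_public=include_public)
    return {repo.name for repo in repos}

# e.g. PUBLIC_REPO only shows up for NO_ACCESS_USER when include_public=True,
# mirroring test_noaccess above.
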
diff --git a/data/model/token.py b/data/model/token.py
index 82661cdef..ad5bb359b 100644
--- a/data/model/token.py
+++ b/data/model/token.py
@@ -1,20 +1,15 @@
import logging
-from peewee import JOIN
+from peewee import JOIN_LEFT_OUTER
-from active_migration import ActiveDataMigration, ERTMigrationFlags
from data.database import (AccessToken, AccessTokenKind, Repository, Namespace, Role,
- RepositoryBuildTrigger)
+ RepositoryBuildTrigger, LogEntryKind)
from data.model import DataModelException, _basequery, InvalidTokenException
logger = logging.getLogger(__name__)
-ACCESS_TOKEN_NAME_PREFIX_LENGTH = 32
-ACCESS_TOKEN_CODE_MINIMUM_LENGTH = 32
-
-
def create_access_token(repo, role, kind=None, friendly_name=None):
role = Role.get(Role.name == role)
kind_ref = None
@@ -23,11 +18,6 @@ def create_access_token(repo, role, kind=None, friendly_name=None):
new_token = AccessToken.create(repository=repo, temporary=True, role=role, kind=kind_ref,
friendly_name=friendly_name)
-
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- new_token.code = new_token.token_name + new_token.token_code.decrypt()
- new_token.save()
-
return new_token
@@ -37,69 +27,61 @@ def create_delegate_token(namespace_name, repository_name, friendly_name,
repo = _basequery.get_existing_repository(namespace_name, repository_name)
new_token = AccessToken.create(repository=repo, role=read_only,
friendly_name=friendly_name, temporary=False)
-
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- new_token.code = new_token.token_name + new_token.token_code.decrypt()
- new_token.save()
-
return new_token
+def get_repository_delegate_tokens(namespace_name, repository_name):
+ return (AccessToken
+ .select(AccessToken, Role)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(AccessToken)
+ .join(Role)
+ .switch(AccessToken)
+ .join(RepositoryBuildTrigger, JOIN_LEFT_OUTER)
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ AccessToken.temporary == False, RepositoryBuildTrigger.uuid >> None))
+
+
+def get_repo_delegate_token(namespace_name, repository_name, code):
+ repo_query = get_repository_delegate_tokens(namespace_name, repository_name)
+
+ try:
+ return repo_query.where(AccessToken.code == code).get()
+ except AccessToken.DoesNotExist:
+ raise InvalidTokenException('Unable to find token with code: %s' % code)
+
+
+def set_repo_delegate_token_role(namespace_name, repository_name, code, role):
+ token = get_repo_delegate_token(namespace_name, repository_name, code)
+
+ if role != 'read' and role != 'write':
+ raise DataModelException('Invalid role for delegate token: %s' % role)
+
+ new_role = Role.get(Role.name == role)
+ token.role = new_role
+ token.save()
+
+ return token
+
+
+def delete_delegate_token(namespace_name, repository_name, code):
+ token = get_repo_delegate_token(namespace_name, repository_name, code)
+ token.delete_instance(recursive=True)
+ return token
+
+
def load_token_data(code):
""" Load the permissions for any token by code. """
- token_name = code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
- token_code = code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]
-
- if not token_name or not token_code:
- raise InvalidTokenException('Invalid delegate token code: %s' % code)
-
- # Try loading by name and then comparing the code.
- assert token_name
try:
- found = (AccessToken
- .select(AccessToken, Repository, Namespace, Role)
- .join(Role)
- .switch(AccessToken)
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(AccessToken.token_name == token_name)
- .get())
+ return (AccessToken
+ .select(AccessToken, Repository, Namespace, Role)
+ .join(Role)
+ .switch(AccessToken)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(AccessToken.code == code)
+ .get())
- assert token_code
- if found.token_code is None or not found.token_code.matches(token_code):
- raise InvalidTokenException('Invalid delegate token code: %s' % code)
-
- assert len(token_code) >= ACCESS_TOKEN_CODE_MINIMUM_LENGTH
- return found
except AccessToken.DoesNotExist:
- pass
-
- # Legacy: Try loading the full code directly.
- # TODO(remove-unenc): Remove this once migrated.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- try:
- return (AccessToken
- .select(AccessToken, Repository, Namespace, Role)
- .join(Role)
- .switch(AccessToken)
- .join(Repository)
- .join(Namespace, on=(Repository.namespace_user == Namespace.id))
- .where(AccessToken.code == code)
- .get())
- except AccessToken.DoesNotExist:
- raise InvalidTokenException('Invalid delegate token code: %s' % code)
-
- raise InvalidTokenException('Invalid delegate token code: %s' % code)
-
-
-def get_full_token_string(token):
- """ Returns the full string to use for this token to login. """
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- if token.token_name is None:
- return token.code
-
- assert token.token_name
- token_code = token.token_code.decrypt()
- assert len(token.token_name) == ACCESS_TOKEN_NAME_PREFIX_LENGTH
- assert len(token_code) >= ACCESS_TOKEN_CODE_MINIMUM_LENGTH
- return '%s%s' % (token.token_name, token_code)
+ raise InvalidTokenException('Invalid delegate token code: %s' % code)
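
The removed branch of load_token_data() above implements a name/code split for delegate tokens: the first ACCESS_TOKEN_NAME_PREFIX_LENGTH (32) characters of the full token are a plaintext lookup key, and the remainder is a secret that is stored only in hashed form and checked with token_code.matches(). A minimal sketch of that scheme under assumed primitives; the SHA-256/HMAC comparison stands in for the Credential field and is not Quay's implementation:

import hashlib
import hmac

ACCESS_TOKEN_NAME_PREFIX_LENGTH = 32
ACCESS_TOKEN_CODE_MINIMUM_LENGTH = 32

def split_full_token(code):
    """Split a full token string into its public lookup name and private code."""
    token_name = code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
    token_code = code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]
    if not token_name or len(token_code) < ACCESS_TOKEN_CODE_MINIMUM_LENGTH:
        raise ValueError('Invalid delegate token code')
    return token_name, token_code

def code_matches(stored_digest, presented_code):
    """Stand-in for Credential.matches(): constant-time compare of hashed secrets."""
    presented_digest = hashlib.sha256(presented_code.encode('utf-8')).hexdigest()
    return hmac.compare_digest(stored_digest, presented_digest)
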
diff --git a/data/model/user.py b/data/model/user.py
index 7e9ed81b1..e5b34a099 100644
--- a/data/model/user.py
+++ b/data/model/user.py
@@ -1,36 +1,22 @@
import bcrypt
import logging
import json
-import uuid
-from flask_login import UserMixin
-from peewee import JOIN, IntegrityError, fn
+from peewee import JOIN_LEFT_OUTER, IntegrityError, fn
from uuid import uuid4
from datetime import datetime, timedelta
-from active_migration import ActiveDataMigration, ERTMigrationFlags
from data.database import (User, LoginService, FederatedLogin, RepositoryPermission, TeamMember,
Team, Repository, TupleSelector, TeamRole, Namespace, Visibility,
- EmailConfirmation, Role, db_for_update, random_string_generator,
- UserRegion, ImageStorageLocation,
- ServiceKeyApproval, OAuthApplication, RepositoryBuildTrigger,
- UserPromptKind, UserPrompt, UserPromptTypes, DeletedNamespace,
- RobotAccountMetadata, NamespaceGeoRestriction, RepoMirrorConfig,
- RobotAccountToken)
-from data.readreplica import ReadOnlyModeException
+ EmailConfirmation, Role, db_for_update, random_string_generator)
from data.model import (DataModelException, InvalidPasswordException, InvalidRobotException,
InvalidUsernameException, InvalidEmailAddressException,
- TooManyLoginAttemptsException, db_transaction,
- notification, config, repository, _basequery, gc)
-from data.fields import Credential
-from data.text import prefix_search
+ TooManyUsersException, TooManyLoginAttemptsException, db_transaction,
+ notification, config, repository, _basequery)
from util.names import format_robot_username, parse_robot_username
from util.validation import (validate_username, validate_email, validate_password,
INVALID_PASSWORD_MESSAGE)
from util.backoff import exponential_backoff
-from util.timedeltastring import convert_to_timedelta
-from util.unicode import remove_unicode
-from util.security.token import decode_public_private_token, encode_public_private_token
logger = logging.getLogger(__name__)
@@ -38,18 +24,25 @@ logger = logging.getLogger(__name__)
EXPONENTIAL_BACKOFF_SCALE = timedelta(seconds=1)
+
def hash_password(password, salt=None):
salt = salt or bcrypt.gensalt()
return bcrypt.hashpw(password.encode('utf-8'), salt)
-def create_user(username, password, email, auto_verify=False, email_required=True, prompts=tuple(),
- is_possible_abuser=False):
+
+def is_create_user_allowed():
+ return True
+
+
+def create_user(username, password, email, auto_verify=False):
""" Creates a regular user, if allowed. """
if not validate_password(password):
raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE)
- created = create_user_noverify(username, email, email_required=email_required, prompts=prompts,
- is_possible_abuser=is_possible_abuser)
+ if not is_create_user_allowed():
+ raise TooManyUsersException()
+
+ created = create_user_noverify(username, email)
created.password_hash = hash_password(password)
created.verified = auto_verify
created.save()
@@ -57,65 +50,35 @@ def create_user(username, password, email, auto_verify=False, email_required=Tru
return created
-def create_user_noverify(username, email, email_required=True, prompts=tuple(),
- is_possible_abuser=False):
- if email_required:
- if not validate_email(email):
- raise InvalidEmailAddressException('Invalid email address: %s' % email)
- else:
- # If email addresses are not required and none was specified, then we just use a unique
- # ID to ensure that the database consistency check remains intact.
- email = email or str(uuid.uuid4())
+def create_user_noverify(username, email):
+ if not validate_email(email):
+ raise InvalidEmailAddressException('Invalid email address: %s' % email)
(username_valid, username_issue) = validate_username(username)
if not username_valid:
- raise InvalidUsernameException('Invalid namespace %s: %s' % (username, username_issue))
+ raise InvalidUsernameException('Invalid username %s: %s' % (username, username_issue))
try:
existing = User.get((User.username == username) | (User.email == email))
+
logger.info('Existing user with same username or email.')
# A user already exists with either the same username or email
if existing.username == username:
- assert not existing.robot
+ raise InvalidUsernameException('Username has already been taken: %s' %
+ username)
+ raise InvalidEmailAddressException('Email has already been used: %s' %
+ email)
- msg = 'Username has already been taken by an organization and cannot be reused: %s' % username
- if not existing.organization:
- msg = 'Username has already been taken by a user and cannot be reused: %s' % username
-
- raise InvalidUsernameException(msg)
-
- raise InvalidEmailAddressException('Email has already been used: %s' % email)
except User.DoesNotExist:
# This is actually the happy path
logger.debug('Email and username are unique!')
- # Create the user.
try:
- default_expr_s = _convert_to_s(config.app_config['DEFAULT_TAG_EXPIRATION'])
- default_max_builds = config.app_config.get('DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT')
- threat_max_builds = config.app_config.get('THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT')
-
- if is_possible_abuser and threat_max_builds is not None:
- default_max_builds = threat_max_builds
-
- new_user = User.create(username=username, email=email, removed_tag_expiration_s=default_expr_s,
- maximum_queued_builds_count=default_max_builds)
- for prompt in prompts:
- create_user_prompt(new_user, prompt)
-
- return new_user
+ return User.create(username=username, email=email)
except Exception as ex:
raise DataModelException(ex.message)
-def increase_maximum_build_count(user, maximum_queued_builds_count):
- """ Increases the maximum number of allowed builds on the namespace, if greater than that
- already present.
- """
- if (user.maximum_queued_builds_count is not None and
- maximum_queued_builds_count > user.maximum_queued_builds_count):
- user.maximum_queued_builds_count = maximum_queued_builds_count
- user.save()
def is_username_unique(test_username):
try:
@@ -132,54 +95,13 @@ def change_password(user, new_password):
pw_hash = hash_password(new_password)
user.invalid_login_attempts = 0
user.password_hash = pw_hash
- invalidate_all_sessions(user)
+ user.uuid = str(uuid4())
+ user.save()
# Remove any password required notifications for the user.
notification.delete_notifications_by_kind(user, 'password_required')
-def get_default_user_prompts(features):
- prompts = set()
- if features.USER_METADATA:
- prompts.add(UserPromptTypes.ENTER_NAME)
- prompts.add(UserPromptTypes.ENTER_COMPANY)
-
- return prompts
-
-
-def has_user_prompts(user):
- try:
- UserPrompt.select().where(UserPrompt.user == user).get()
- return True
- except UserPrompt.DoesNotExist:
- return False
-
-
-def has_user_prompt(user, prompt_name):
- prompt_kind = UserPromptKind.get(name=prompt_name)
-
- try:
- UserPrompt.get(user=user, kind=prompt_kind)
- return True
- except UserPrompt.DoesNotExist:
- return False
-
-
-def create_user_prompt(user, prompt_name):
- prompt_kind = UserPromptKind.get(name=prompt_name)
- return UserPrompt.create(user=user, kind=prompt_kind)
-
-
-def remove_user_prompt(user, prompt_name):
- prompt_kind = UserPromptKind.get(name=prompt_name)
- UserPrompt.delete().where(UserPrompt.user == user, UserPrompt.kind == prompt_kind).execute()
-
-
-def get_user_prompts(user):
- query = UserPrompt.select().where(UserPrompt.user == user).join(UserPromptKind)
- return [prompt.kind.name for prompt in query]
-
-
def change_username(user_id, new_username):
(username_valid, username_issue) = validate_username(new_username)
if not username_valid:
@@ -190,8 +112,7 @@ def change_username(user_id, new_username):
user = db_for_update(User.select().where(User.id == user_id)).get()
# Rename the robots
- for robot in db_for_update(_list_entity_robots(user.username, include_metadata=False,
- include_token=False)):
+ for robot in db_for_update(_list_entity_robots(user.username)):
_, robot_shortname = parse_robot_username(robot.username)
new_robot_name = format_robot_username(new_username, robot_shortname)
robot.username = new_robot_name
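
The removed change_password() above calls invalidate_all_sessions(user), whose definition appears later in this diff and simply rotates the user's uuid; the replacement inlines the same trick as `user.uuid = str(uuid4())`. Presumably session cookies reference that uuid, so rotating it stales every outstanding session without needing a session store. A minimal sketch of the pattern:

from uuid import uuid4

def invalidate_all_sessions_sketch(user):
    """Rotate the user's uuid so previously issued sessions no longer validate."""
    if not user:
        return
    user.uuid = str(uuid4())
    user.save()
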
@@ -200,38 +121,15 @@ def change_username(user_id, new_username):
# Rename the user
user.username = new_username
user.save()
-
- # Remove any prompts for username.
- remove_user_prompt(user, 'confirm_username')
-
return user
-def change_invoice_email_address(user, invoice_email_address):
- # Note: We null out the address if it is an empty string.
- user.invoice_email_address = invoice_email_address or None
- user.save()
-
-
-def change_send_invoice_email(user, invoice_email):
+def change_invoice_email(user, invoice_email):
user.invoice_email = invoice_email
user.save()
-def _convert_to_s(timespan_string):
- """ Returns the given timespan string (e.g. `2w` or `45s`) into seconds. """
- return convert_to_timedelta(timespan_string).total_seconds()
-
-
def change_user_tag_expiration(user, tag_expiration_s):
- """ Changes the tag expiration on the given user/org. Note that the specified expiration must
- be within the configured TAG_EXPIRATION_OPTIONS or this method will raise a
- DataModelException.
- """
- allowed_options = [_convert_to_s(o) for o in config.app_config['TAG_EXPIRATION_OPTIONS']]
- if tag_expiration_s not in allowed_options:
- raise DataModelException('Invalid tag expiration option')
-
user.removed_tag_expiration_s = tag_expiration_s
user.save()
@@ -245,12 +143,7 @@ def update_email(user, new_email, auto_verify=False):
raise DataModelException('E-mail address already used')
-def update_enabled(user, set_enabled):
- user.enabled = set_enabled
- user.save()
-
-
-def create_robot(robot_shortname, parent, description='', unstructured_metadata=None):
+def create_robot(robot_shortname, parent):
(username_valid, username_issue) = validate_username(robot_shortname)
if not username_valid:
raise InvalidRobotException('The name for the robot \'%s\' is invalid: %s' %
@@ -264,72 +157,42 @@ def create_robot(robot_shortname, parent, description='', unstructured_metadata=
msg = 'Existing robot with name: %s' % username
logger.info(msg)
raise InvalidRobotException(msg)
+
except User.DoesNotExist:
pass
- service = LoginService.get(name='quayrobot')
try:
- with db_transaction():
- created = User.create(username=username, email=str(uuid.uuid4()), robot=True)
- token = random_string_generator(length=64)()
- RobotAccountToken.create(robot_account=created, token=token)
- FederatedLogin.create(user=created, service=service, service_ident='robot:%s' % created.id)
- RobotAccountMetadata.create(robot_account=created, description=description[0:255],
- unstructured_json=unstructured_metadata or {})
- return created, token
+ created = User.create(username=username, robot=True)
+
+ service = LoginService.get(name='quayrobot')
+ password = created.email
+ FederatedLogin.create(user=created, service=service,
+ service_ident=password)
+
+ return created, password
except Exception as ex:
raise DataModelException(ex.message)
-def get_or_create_robot_metadata(robot):
- defaults = dict(description='', unstructured_json={})
- metadata, _ = RobotAccountMetadata.get_or_create(robot_account=robot, defaults=defaults)
- return metadata
-
-
-def update_robot_metadata(robot, description='', unstructured_json=None):
- """ Updates the description and user-specified unstructured metadata associated
- with a robot account to that specified. """
- metadata = get_or_create_robot_metadata(robot)
- metadata.description = description
- metadata.unstructured_json = unstructured_json or metadata.unstructured_json or {}
- metadata.save()
-
-
-def retrieve_robot_token(robot):
- """ Returns the decrypted token for the given robot. """
- try:
- token = RobotAccountToken.get(robot_account=robot).token.decrypt()
- except RobotAccountToken.DoesNotExist:
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- # For legacy only.
- token = robot.email
- else:
- raise
-
- return token
-
-
-def get_robot_and_metadata(robot_shortname, parent):
- """ Returns a tuple of the robot matching the given shortname, its token, and its metadata. """
+def get_robot(robot_shortname, parent):
robot_username = format_robot_username(parent.username, robot_shortname)
- robot, metadata = lookup_robot_and_metadata(robot_username)
- token = retrieve_robot_token(robot)
- return robot, token, metadata
+ robot = lookup_robot(robot_username)
+ return robot, robot.email
def lookup_robot(robot_username):
try:
- return User.get(username=robot_username, robot=True)
+ return (User
+ .select()
+ .join(FederatedLogin)
+ .join(LoginService)
+ .where(LoginService.name == 'quayrobot', User.username == robot_username,
+ User.robot == True)
+ .get())
except User.DoesNotExist:
raise InvalidRobotException('Could not find robot with username: %s' % robot_username)
-def lookup_robot_and_metadata(robot_username):
- robot = lookup_robot(robot_username)
- return robot, get_or_create_robot_metadata(robot)
-
-
def get_matching_robots(name_prefix, username, limit=10):
admined_orgs = (_basequery.get_user_organizations(username)
.switch(Team)
@@ -339,59 +202,32 @@ def get_matching_robots(name_prefix, username, limit=10):
prefix_checks = False
for org in admined_orgs:
- org_search = prefix_search(User.username, org.username + '+' + name_prefix)
- prefix_checks = prefix_checks | org_search
+ prefix_checks = prefix_checks | (User.username ** (org.username + '+' + name_prefix + '%'))
- user_search = prefix_search(User.username, username + '+' + name_prefix)
- prefix_checks = prefix_checks | user_search
+ prefix_checks = prefix_checks | (User.username ** (username + '+' + name_prefix + '%'))
return User.select().where(prefix_checks).limit(limit)
def verify_robot(robot_username, password):
- try:
- password = remove_unicode(password)
- except UnicodeEncodeError:
- msg = ('Could not find robot with username: %s and supplied password.' %
- robot_username)
- raise InvalidRobotException(msg)
-
result = parse_robot_username(robot_username)
if result is None:
raise InvalidRobotException('%s is an invalid robot name' % robot_username)
- robot = lookup_robot(robot_username)
- assert robot.robot
+ # Find the matching robot.
+ query = (User
+ .select()
+ .join(FederatedLogin)
+ .join(LoginService)
+ .where(FederatedLogin.service_ident == password, LoginService.name == 'quayrobot',
+ User.username == robot_username))
- # Lookup the token for the robot.
try:
- token_data = RobotAccountToken.get(robot_account=robot)
- if not token_data.token.matches(password):
- msg = ('Could not find robot with username: %s and supplied password.' %
- robot_username)
- raise InvalidRobotException(msg)
- except RobotAccountToken.DoesNotExist:
- # TODO(remove-unenc): Remove once migrated.
- if not ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- raise InvalidRobotException(msg)
-
- if password.find('robot:') >= 0:
- # Just to be sure.
- raise InvalidRobotException(msg)
-
- query = (User
- .select()
- .join(FederatedLogin)
- .join(LoginService)
- .where(FederatedLogin.service_ident == password, LoginService.name == 'quayrobot',
- User.username == robot_username))
-
- try:
- robot = query.get()
- except User.DoesNotExist:
- msg = ('Could not find robot with username: %s and supplied password.' %
- robot_username)
- raise InvalidRobotException(msg)
+ robot = query.get()
+ except User.DoesNotExist:
+ msg = ('Could not find robot with username: %s and supplied password.' %
+ robot_username)
+ raise InvalidRobotException(msg)
# Find the owner user and ensure it is not disabled.
try:
@@ -402,37 +238,23 @@ def verify_robot(robot_username, password):
if not owner.enabled:
raise InvalidRobotException('This user has been disabled. Please contact your administrator.')
- # Mark that the robot was accessed.
- _basequery.update_last_accessed(robot)
-
return robot
def regenerate_robot_token(robot_shortname, parent):
robot_username = format_robot_username(parent.username, robot_shortname)
- robot, metadata = lookup_robot_and_metadata(robot_username)
+ robot = lookup_robot(robot_username)
password = random_string_generator(length=64)()
- robot.email = str(uuid4())
- robot.uuid = str(uuid4())
+ robot.email = password
service = LoginService.get(name='quayrobot')
login = FederatedLogin.get(FederatedLogin.user == robot, FederatedLogin.service == service)
- login.service_ident = 'robot:%s' % (robot.id)
+ login.service_ident = password
- try:
- token_data = RobotAccountToken.get(robot_account=robot)
- except RobotAccountToken.DoesNotExist:
- token_data = RobotAccountToken.create(robot_account=robot)
-
- token_data.token = password
-
- with db_transaction():
- token_data.save()
- login.save()
- robot.save()
-
- return robot, password, metadata
+ login.save()
+ robot.save()
+ return robot, password
def delete_robot(robot_username):
try:
@@ -444,103 +266,56 @@ def delete_robot(robot_username):
robot_username)
-def list_namespace_robots(namespace):
- """ Returns all the robots found under the given namespace. """
- return _list_entity_robots(namespace)
-
-
-def _list_entity_robots(entity_name, include_metadata=True, include_token=True):
+def _list_entity_robots(entity_name):
""" Return the list of robots for the specified entity. This MUST return a query, not a
materialized list so that callers can use db_for_update.
- """
- # TODO(remove-unenc): Remove FederatedLogin and LEFT_OUTER on RobotAccountToken once migration
- # is complete.
- if include_metadata or include_token:
- query = (User
- .select(User, RobotAccountToken, FederatedLogin, RobotAccountMetadata)
- .join(FederatedLogin)
- .switch(User)
- .join(RobotAccountMetadata, JOIN.LEFT_OUTER)
- .switch(User)
- .join(RobotAccountToken, JOIN.LEFT_OUTER)
- .where(User.robot == True, User.username ** (entity_name + '+%')))
- else:
- query = (User
- .select(User)
- .where(User.robot == True, User.username ** (entity_name + '+%')))
-
- return query
+ """
+ return (User
+ .select()
+ .join(FederatedLogin)
+ .where(User.robot == True, User.username ** (entity_name + '+%')))
-def list_entity_robot_permission_teams(entity_name, limit=None, include_permissions=False):
+def list_entity_robot_permission_teams(entity_name, include_permissions=False):
query = (_list_entity_robots(entity_name))
- # TODO(remove-unenc): Remove FederatedLogin once migration is complete.
- fields = [User.username, User.creation_date, User.last_accessed, RobotAccountToken.token,
- FederatedLogin.service_ident, RobotAccountMetadata.description,
- RobotAccountMetadata.unstructured_json]
+ fields = [User.username, FederatedLogin.service_ident]
if include_permissions:
query = (query
- .join(RepositoryPermission, JOIN.LEFT_OUTER,
+ .join(RepositoryPermission, JOIN_LEFT_OUTER,
on=(RepositoryPermission.user == FederatedLogin.user))
- .join(Repository, JOIN.LEFT_OUTER)
+ .join(Repository, JOIN_LEFT_OUTER)
.switch(User)
- .join(TeamMember, JOIN.LEFT_OUTER)
- .join(Team, JOIN.LEFT_OUTER))
+ .join(TeamMember, JOIN_LEFT_OUTER)
+ .join(Team, JOIN_LEFT_OUTER))
fields.append(Repository.name)
fields.append(Team.name)
- query = query.limit(limit).order_by(User.last_accessed.desc())
return TupleSelector(query, fields)
-def update_user_metadata(user, metadata=None):
- """ Updates the metadata associated with the user, including his/her name and company. """
- metadata = metadata if metadata is not None else {}
-
+def confirm_attached_federated_login(user, service_name):
+ """ Verifies that the given user has a federated service identity for the specified service.
+ If none found, a row is added for that service and user.
+ """
with db_transaction():
- if 'given_name' in metadata:
- user.given_name = metadata['given_name']
-
- if 'family_name' in metadata:
- user.family_name = metadata['family_name']
-
- if 'company' in metadata:
- user.company = metadata['company']
-
- if 'location' in metadata:
- user.location = metadata['location']
-
- user.save()
-
- # Remove any prompts associated with the user's metadata being needed.
- remove_user_prompt(user, UserPromptTypes.ENTER_NAME)
- remove_user_prompt(user, UserPromptTypes.ENTER_COMPANY)
+ if not lookup_federated_login(user, service_name):
+ attach_federated_login(user, service_name, user.username)
-def _get_login_service(service_id):
- try:
- return LoginService.get(LoginService.name == service_id)
- except LoginService.DoesNotExist:
- return LoginService.create(name=service_id)
+def create_federated_user(username, email, service_name, service_id,
+ set_password_notification, metadata={}):
+ if not is_create_user_allowed():
+ raise TooManyUsersException()
-
-def create_federated_user(username, email, service_id, service_ident,
- set_password_notification, metadata={},
- email_required=True, confirm_username=True,
- prompts=tuple()):
- prompts = set(prompts)
-
- if confirm_username:
- prompts.add(UserPromptTypes.CONFIRM_USERNAME)
-
- new_user = create_user_noverify(username, email, email_required=email_required, prompts=prompts)
+ new_user = create_user_noverify(username, email)
new_user.verified = True
new_user.save()
- FederatedLogin.create(user=new_user, service=_get_login_service(service_id),
- service_ident=service_ident,
+ service = LoginService.get(LoginService.name == service_name)
+ FederatedLogin.create(user=new_user, service=service,
+ service_ident=service_id,
metadata_json=json.dumps(metadata))
if set_password_notification:
@@ -549,25 +324,21 @@ def create_federated_user(username, email, service_id, service_ident,
return new_user
-def attach_federated_login(user, service_id, service_ident, metadata=None):
- service = _get_login_service(service_id)
- FederatedLogin.create(user=user, service=service, service_ident=service_ident,
- metadata_json=json.dumps(metadata or {}))
+def attach_federated_login(user, service_name, service_id, metadata={}):
+ service = LoginService.get(LoginService.name == service_name)
+ FederatedLogin.create(user=user, service=service, service_ident=service_id,
+ metadata_json=json.dumps(metadata))
return user
-def verify_federated_login(service_id, service_ident):
+def verify_federated_login(service_name, service_id):
try:
found = (FederatedLogin
.select(FederatedLogin, User)
.join(LoginService)
.switch(FederatedLogin).join(User)
- .where(FederatedLogin.service_ident == service_ident, LoginService.name == service_id)
+ .where(FederatedLogin.service_ident == service_id, LoginService.name == service_name)
.get())
-
- # Mark that the user was accessed.
- _basequery.update_last_accessed(found.user)
-
return found.user
except FederatedLogin.DoesNotExist:
return None
@@ -587,36 +358,23 @@ def lookup_federated_login(user, service_name):
except FederatedLogin.DoesNotExist:
return None
-
def create_confirm_email_code(user, new_email=None):
if new_email:
if not validate_email(new_email):
raise InvalidEmailAddressException('Invalid email address: %s' %
new_email)
- verification_code, unhashed = Credential.generate()
- code = EmailConfirmation.create(user=user,
- email_confirm=True,
- new_email=new_email,
- verification_code=verification_code)
- return encode_public_private_token(code.code, unhashed)
+ code = EmailConfirmation.create(user=user, email_confirm=True,
+ new_email=new_email)
+ return code
-def confirm_user_email(token):
- # TODO(remove-unenc): Remove allow_public_only once migrated.
- allow_public_only = ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS)
- result = decode_public_private_token(token, allow_public_only=allow_public_only)
- if not result:
- raise DataModelException('Invalid email confirmation code')
-
+def confirm_user_email(code):
try:
- code = EmailConfirmation.get(EmailConfirmation.code == result.public_code,
+ code = EmailConfirmation.get(EmailConfirmation.code == code,
EmailConfirmation.email_confirm == True)
except EmailConfirmation.DoesNotExist:
- raise DataModelException('Invalid email confirmation code')
-
- if result.private_token and not code.verification_code.matches(result.private_token):
- raise DataModelException('Invalid email confirmation code')
+ raise DataModelException('Invalid email confirmation code.')
user = code.user
user.verified = True
@@ -625,14 +383,14 @@ def confirm_user_email(token):
new_email = code.new_email
if new_email and new_email != old_email:
if find_user_by_email(new_email):
- raise DataModelException('E-mail address already used')
+ raise DataModelException('E-mail address already used.')
old_email = user.email
user.email = new_email
- with db_transaction():
- user.save()
- code.delete_instance()
+ user.save()
+
+ code.delete_instance()
return user, new_email, old_email
@@ -641,48 +399,24 @@ def create_reset_password_email_code(email):
try:
user = User.get(User.email == email)
except User.DoesNotExist:
- raise InvalidEmailAddressException('Email address was not found')
+ raise InvalidEmailAddressException('Email address was not found.')
if user.organization:
- raise InvalidEmailAddressException('Organizations can not have passwords')
+ raise InvalidEmailAddressException('Organizations can not have passwords.')
- verification_code, unhashed = Credential.generate()
- code = EmailConfirmation.create(user=user, pw_reset=True, verification_code=verification_code)
- return encode_public_private_token(code.code, unhashed)
+ code = EmailConfirmation.create(user=user, pw_reset=True)
+ return code
-def validate_reset_code(token):
- # TODO(remove-unenc): Remove allow_public_only once migrated.
- allow_public_only = ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS)
- result = decode_public_private_token(token, allow_public_only=allow_public_only)
- if not result:
- return None
-
- # Find the reset code.
+def validate_reset_code(code):
try:
- code = EmailConfirmation.get(EmailConfirmation.code == result.public_code,
+ code = EmailConfirmation.get(EmailConfirmation.code == code,
EmailConfirmation.pw_reset == True)
except EmailConfirmation.DoesNotExist:
return None
- if result.private_token and not code.verification_code.matches(result.private_token):
- return None
-
- # Make sure the code is not expired.
- max_lifetime_duration = convert_to_timedelta(config.app_config['USER_RECOVERY_TOKEN_LIFETIME'])
- if code.created + max_lifetime_duration < datetime.now():
- code.delete_instance()
- return None
-
- # Verify the user and return the code.
user = code.user
-
- with db_transaction():
- if not user.verified:
- user.verified = True
- user.save()
-
- code.delete_instance()
+ code.delete_instance()
return user
@@ -729,21 +463,6 @@ def get_user_by_id(user_db_id):
return None
-def get_user_map_by_ids(namespace_ids):
- id_user = {namespace_id: None for namespace_id in namespace_ids}
- users = User.select().where(User.id << namespace_ids, User.organization == False)
- for user in users:
- id_user[user.id] = user
-
- return id_user
-
-def get_namespace_user_by_user_id(namespace_user_db_id):
- try:
- return User.get(User.id == namespace_user_db_id, User.robot == False)
- except User.DoesNotExist:
- raise InvalidUsernameException('User with id does not exist: %s' % namespace_user_db_id)
-
-
def get_namespace_by_user_id(namespace_user_db_id):
try:
return User.get(User.id == namespace_user_db_id, User.robot == False).username
@@ -764,112 +483,70 @@ def get_user_or_org_by_customer_id(customer_id):
except User.DoesNotExist:
return None
-def invalidate_all_sessions(user):
- """ Invalidates all existing user sessions by rotating the user's UUID. """
- if not user:
- return
-
- user.uuid = str(uuid4())
- user.save()
def get_matching_user_namespaces(namespace_prefix, username, limit=10):
- namespace_user = get_namespace_user(username)
- namespace_user_id = namespace_user.id if namespace_user is not None else None
-
- namespace_search = prefix_search(Namespace.username, namespace_prefix)
base_query = (Namespace
.select()
.distinct()
+ .limit(limit)
.join(Repository, on=(Repository.namespace_user == Namespace.id))
- .join(RepositoryPermission, JOIN.LEFT_OUTER)
- .where(namespace_search))
+ .join(RepositoryPermission, JOIN_LEFT_OUTER)
+ .where(Namespace.username ** (namespace_prefix + '%')))
- return _basequery.filter_to_repos_for_user(base_query, namespace_user_id).limit(limit)
+ return _basequery.filter_to_repos_for_user(base_query, username)
-def get_matching_users(username_prefix, robot_namespace=None, organization=None, limit=20,
- exact_matches_only=False):
- # Lookup the exact match first. This ensures that the exact match is not cut off by the list
- # limit.
- updated_limit = limit
- exact_match = list(_get_matching_users(username_prefix, robot_namespace, organization, limit=1,
- exact_matches_only=True))
- if exact_match:
- updated_limit -= 1
- yield exact_match[0]
-
- # Perform the remainder of the lookup.
- if updated_limit:
- for result in _get_matching_users(username_prefix, robot_namespace, organization, updated_limit,
- exact_matches_only):
- if exact_match and result.username == exact_match[0].username:
- continue
-
- yield result
-
-def _get_matching_users(username_prefix, robot_namespace=None, organization=None, limit=20,
- exact_matches_only=False):
- user_search = prefix_search(User.username, username_prefix)
- if exact_matches_only:
- user_search = (User.username == username_prefix)
-
- direct_user_query = (user_search & (User.organization == False) & (User.robot == False))
+def get_matching_users(username_prefix, robot_namespace=None,
+ organization=None):
+ direct_user_query = (User.username ** (username_prefix + '%') &
+ (User.organization == False) & (User.robot == False))
if robot_namespace:
robot_prefix = format_robot_username(robot_namespace, username_prefix)
- robot_search = prefix_search(User.username, robot_prefix)
- direct_user_query = ((robot_search & (User.robot == True)) | direct_user_query)
+ direct_user_query = (direct_user_query |
+ (User.username ** (robot_prefix + '%') &
+ (User.robot == True)))
query = (User
- .select(User.id, User.username, User.email, User.robot)
- .group_by(User.id, User.username, User.email, User.robot)
+ .select(User.username, User.email, User.robot)
+ .group_by(User.username, User.email, User.robot)
.where(direct_user_query))
if organization:
query = (query
- .select(User.id, User.username, User.email, User.robot, fn.Sum(Team.id))
- .join(TeamMember, JOIN.LEFT_OUTER)
- .join(Team, JOIN.LEFT_OUTER, on=((Team.id == TeamMember.team) &
- (Team.organization == organization)))
- .order_by(User.robot.desc()))
+ .select(User.username, User.email, User.robot, fn.Sum(Team.id))
+ .join(TeamMember, JOIN_LEFT_OUTER)
+ .join(Team, JOIN_LEFT_OUTER, on=((Team.id == TeamMember.team) &
+ (Team.organization == organization))))
class MatchingUserResult(object):
def __init__(self, *args):
- self.id = args[0]
- self.username = args[1]
- self.email = args[2]
- self.robot = args[3]
+ self.username = args[0]
+ self.email = args[1]
+ self.robot = args[2]
if organization:
self.is_org_member = (args[3] != None)
else:
self.is_org_member = None
- return (MatchingUserResult(*args) for args in query.tuples().limit(limit))
+ return (MatchingUserResult(*args) for args in query.tuples().limit(10))
def verify_user(username_or_email, password):
- """ Verifies that the given username/email + password pair is valid. If the username or e-mail
- address is invalid, returns None. If the password specified does not match for the given user,
- either returns None or raises TooManyLoginAttemptsException if there have been too many
- invalid login attempts. Returns the user object if the login was valid.
- """
-
# Make sure we didn't get any unicode for the username.
try:
str(username_or_email)
except ValueError:
return None
- # Fetch the user with the matching username or e-mail address.
try:
- fetched = User.get((User.username == username_or_email) | (User.email == username_or_email))
+ fetched = User.get((User.username == username_or_email) |
+ (User.email == username_or_email))
except User.DoesNotExist:
return None
- # If the user has any invalid login attempts, check to see if we are within the exponential
- # backoff window for the user. If so, we raise an exception indicating that the user cannot
- # login.
now = datetime.utcnow()
+
if fetched.invalid_login_attempts > 0:
can_retry_at = exponential_backoff(fetched.invalid_login_attempts, EXPONENTIAL_BACKOFF_SCALE,
fetched.last_invalid_login)
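
The removed get_matching_users() above is a small generator pattern worth noting: look up the exact username first with a limit of one, yield it, shrink the remaining limit, then stream prefix matches while skipping the row already yielded, so the exact hit is never pushed out by the limit. A generic sketch of the pattern; `lookup(prefix, limit, exact_matches_only)` is an assumed callable standing in for _get_matching_users:

def match_exact_first(lookup, prefix, limit=20):
    """Yield the exact match for `prefix` first, then up to `limit - 1` prefix matches."""
    exact = list(lookup(prefix, 1, True))
    if exact:
        limit -= 1
        yield exact[0]

    if limit:
        for result in lookup(prefix, limit, False):
            if exact and result == exact[0]:
                continue
            yield result
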
@@ -878,34 +555,17 @@ def verify_user(username_or_email, password):
retry_after = can_retry_at - now
raise TooManyLoginAttemptsException('Too many login attempts.', retry_after.total_seconds())
- # Hash the given password and compare it to the specified password.
if (fetched.password_hash and
hash_password(password, fetched.password_hash) == fetched.password_hash):
-
- # If the user previously had any invalid login attempts, clear them out now.
if fetched.invalid_login_attempts > 0:
- try:
- (User
- .update(invalid_login_attempts=0)
- .where(User.id == fetched.id)
- .execute())
+ fetched.invalid_login_attempts = 0
+ fetched.save()
- # Mark that the user was accessed.
- _basequery.update_last_accessed(fetched)
- except ReadOnlyModeException:
- pass
-
- # Return the valid user.
return fetched
- # Otherwise, update the user's invalid login attempts.
- try:
- (User
- .update(invalid_login_attempts=User.invalid_login_attempts+1, last_invalid_login=now)
- .where(User.id == fetched.id)
- .execute())
- except ReadOnlyModeException:
- pass
+ fetched.invalid_login_attempts += 1
+ fetched.last_invalid_login = now
+ fetched.save()
# We weren't able to authorize the user
return None
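
Two details of verify_user() above are worth calling out. First, `hash_password(password, fetched.password_hash) == fetched.password_hash` works because bcrypt embeds the salt in its output, so re-hashing the candidate with the stored hash as the salt reproduces the stored hash only on a match. Second, each failed attempt feeds exponential_backoff(), pushing the next allowed retry further out. A sketch of the bcrypt check (stored_hash is the bytes hash; bcrypt.checkpw is the modern equivalent):

import bcrypt

def check_password(candidate, stored_hash):
    """Mirror of the comparison in verify_user: the stored bcrypt hash doubles as the salt."""
    return bcrypt.hashpw(candidate.encode('utf-8'), stored_hash) == stored_hash

# bcrypt.checkpw(candidate.encode('utf-8'), stored_hash) performs the same check
# with a constant-time comparison.
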
@@ -913,7 +573,7 @@ def verify_user(username_or_email, password):
def get_all_repo_users(namespace_name, repository_name):
return (RepositoryPermission
- .select(User, Role, RepositoryPermission)
+ .select(User.username, User.email, User.robot, Role.name, RepositoryPermission)
.join(User)
.switch(RepositoryPermission)
.join(Role)
@@ -963,28 +623,14 @@ def get_private_repo_count(username):
.count())
-def get_active_users(disabled=True, deleted=False):
- query = (User
- .select()
- .where(User.organization == False, User.robot == False))
-
- if not disabled:
- query = query.where(User.enabled == True)
-
- if not deleted:
- query = query.where(User.id.not_in(DeletedNamespace.select(DeletedNamespace.namespace)))
-
- return query
+def get_active_users():
+ return User.select().where(User.organization == False, User.robot == False)
def get_active_user_count():
return get_active_users().count()
-def get_robot_count():
- return User.select().where(User.robot == True).count()
-
-
def detach_external_login(user, service_name):
try:
service = LoginService.get(name=service_name)
@@ -995,223 +641,26 @@ def detach_external_login(user, service_name):
FederatedLogin.service == service).execute()
-def get_solely_admined_organizations(user_obj):
- """ Returns the organizations admined solely by the given user. """
- orgs = (User.select()
- .where(User.organization == True)
- .join(Team)
- .join(TeamRole)
- .where(TeamRole.name == 'admin')
- .switch(Team)
- .join(TeamMember)
- .where(TeamMember.user == user_obj)
- .distinct())
-
- # Filter to organizations where the user is the sole admin.
- solely_admined = []
- for org in orgs:
- admin_user_count = (TeamMember.select()
- .join(Team)
- .join(TeamRole)
- .where(Team.organization == org, TeamRole.name == 'admin')
- .switch(TeamMember)
- .join(User)
- .where(User.robot == False)
- .distinct()
- .count())
-
- if admin_user_count == 1:
- solely_admined.append(org)
-
- return solely_admined
-
-
-def mark_namespace_for_deletion(user, queues, namespace_gc_queue, force=False):
- """ Marks a namespace (as referenced by the given user) for deletion. A queue item will be added
- to delete the namespace's repositories and storage, while the namespace itself will be
- renamed, disabled, and delinked from other tables.
- """
- if not user.enabled:
- return None
-
- if not force and not user.organization:
- # Ensure that the user is not the sole admin for any organizations. If so, then the user
- # cannot be deleted before those organizations are deleted or reassigned.
- organizations = get_solely_admined_organizations(user)
- if len(organizations) > 0:
- message = 'Cannot delete %s as you are the only admin for organizations: ' % user.username
- for index, org in enumerate(organizations):
- if index > 0:
- message = message + ', '
-
- message = message + org.username
-
- raise DataModelException(message)
-
- # Delete all queue items for the user.
- for queue in queues:
- queue.delete_namespaced_items(user.username)
-
- # Delete non-repository related items. This operation is very quick, so we can do so here.
- _delete_user_linked_data(user)
-
- with db_transaction():
- original_username = user.username
- user = db_for_update(User.select().where(User.id == user.id)).get()
-
- # Mark the namespace as deleted and ready for GC.
- try:
- marker = DeletedNamespace.create(namespace=user,
- original_username=original_username,
- original_email=user.email)
- except IntegrityError:
- return
-
- # Disable the namespace itself, and replace its various unique fields with UUIDs.
- user.enabled = False
- user.username = str(uuid4())
- user.email = str(uuid4())
- user.save()
-
- # Add a queueitem to delete the namespace.
- marker.queue_id = namespace_gc_queue.put([str(user.id)], json.dumps({
- 'marker_id': marker.id,
- 'original_username': original_username,
- }))
- marker.save()
- return marker.id
-
-
-def delete_namespace_via_marker(marker_id, queues):
- """ Deletes a namespace referenced by the given DeletedNamespace marker ID. """
- try:
- marker = DeletedNamespace.get(id=marker_id)
- except DeletedNamespace.DoesNotExist:
- return
-
- delete_user(marker.namespace, queues)
-
-
-def delete_user(user, queues):
- """ Deletes a user/organization/robot. Should *not* be called by any user-facing API. Instead,
- mark_namespace_for_deletion should be used, and the queue should call this method.
- """
- # Delete all queue items for the user.
- for queue in queues:
- queue.delete_namespaced_items(user.username)
-
- # Delete any repositories under the user's namespace.
- for repo in list(Repository.select().where(Repository.namespace_user == user)):
- gc.purge_repository(user.username, repo.name)
-
- # Delete non-repository related items.
- _delete_user_linked_data(user)
-
- # Delete the user itself.
+def delete_user(user):
user.delete_instance(recursive=True, delete_nullable=True)
-
-def _delete_user_linked_data(user):
- if user.organization:
- # Delete the organization's teams.
- with db_transaction():
- for team in Team.select().where(Team.organization == user):
- team.delete_instance(recursive=True)
-
- # Delete any OAuth approvals and tokens associated with the user.
- with db_transaction():
- for app in OAuthApplication.select().where(OAuthApplication.organization == user):
- app.delete_instance(recursive=True)
- else:
- # Remove the user from any teams in which they are a member.
- TeamMember.delete().where(TeamMember.user == user).execute()
-
- # Delete any repository buildtriggers where the user is the connected user.
- with db_transaction():
- triggers = RepositoryBuildTrigger.select().where(RepositoryBuildTrigger.connected_user == user)
- for trigger in triggers:
- trigger.delete_instance(recursive=True, delete_nullable=False)
-
- # Delete any mirrors with robots owned by this user.
- with db_transaction():
- robots = list(list_namespace_robots(user.username))
- RepoMirrorConfig.delete().where(RepoMirrorConfig.internal_robot << robots).execute()
-
- # Delete any robots owned by this user.
- with db_transaction():
- robots = list(list_namespace_robots(user.username))
- for robot in robots:
- robot.delete_instance(recursive=True, delete_nullable=True)
-
- # Null out any service key approvals. We technically lose information here, but it's better than
- # failing and only occurs if a superuser is being deleted.
- ServiceKeyApproval.update(approver=None).where(ServiceKeyApproval.approver == user).execute()
+ # TODO: also delete any repository data associated
def get_pull_credentials(robotname):
- """ Returns the pull credentials for a robot with the given name. """
try:
robot = lookup_robot(robotname)
except InvalidRobotException:
return None
- token = retrieve_robot_token(robot)
+ try:
+ login_info = FederatedLogin.get(user=robot)
+ except FederatedLogin.DoesNotExist:
+ return None
return {
- 'username': robot.username,
- 'password': token,
- 'registry': '%s://%s/v1/' % (config.app_config['PREFERRED_URL_SCHEME'],
- config.app_config['SERVER_HOSTNAME']),
+ 'username': robot.username,
+ 'password': login_info.service_ident,
+ 'registry': '%s://%s/v1/' % (config.app_config['PREFERRED_URL_SCHEME'],
+ config.app_config['SERVER_HOSTNAME']),
}
-
-def get_region_locations(user):
- """ Returns the locations defined as preferred storage for the given user. """
- query = UserRegion.select().join(ImageStorageLocation).where(UserRegion.user == user)
- return set([region.location.name for region in query])
-
-def get_federated_logins(user_ids, service_name):
- """ Returns all federated logins for the given user ids under the given external service. """
- if not user_ids:
- return []
-
- return (FederatedLogin
- .select()
- .join(User)
- .switch(FederatedLogin)
- .join(LoginService)
- .where(FederatedLogin.user << user_ids,
- LoginService.name == service_name))
-
-
-def list_namespace_geo_restrictions(namespace_name):
- """ Returns all of the defined geographic restrictions for the given namespace. """
- return (NamespaceGeoRestriction
- .select()
- .join(User)
- .where(User.username == namespace_name))
-
-
-def get_minimum_user_id():
- return User.select(fn.Min(User.id)).tuples().get()[0]
-
-
-class LoginWrappedDBUser(UserMixin):
- def __init__(self, user_uuid, db_user=None):
- self._uuid = user_uuid
- self._db_user = db_user
-
- def db_user(self):
- if not self._db_user:
- self._db_user = get_user_by_uuid(self._uuid)
- return self._db_user
-
- @property
- def is_authenticated(self):
- return self.db_user() is not None
-
- @property
- def is_active(self):
- return self.db_user() and self.db_user().verified
-
- def get_id(self):
- return unicode(self._uuid)
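The removed mark_namespace_for_deletion above soft-deletes a namespace: the row is kept, its unique fields are swapped for UUIDs so the name can be reused, and the expensive cleanup is pushed onto a queue. Below is a minimal, self-contained sketch of only that pattern; FakeUser and the plain list used as a queue are hypothetical stand-ins, not Quay's models or work queue.

import json
import uuid

class FakeUser(object):
    """ Hypothetical stand-in for the namespace/user row. """
    def __init__(self, username, email):
        self.username = username
        self.email = email
        self.enabled = True

def mark_for_deletion(user, gc_queue):
    original_username = user.username
    # Disable the namespace and anonymize its unique fields so the name frees up.
    user.enabled = False
    user.username = str(uuid.uuid4())
    user.email = str(uuid.uuid4())
    # Enqueue the expensive cleanup (repositories, storage) instead of doing it inline.
    gc_queue.append(json.dumps({'original_username': original_username}))
    return original_username

queue = []
print(mark_for_deletion(FakeUser('alice', 'alice@example.com'), queue))  # alice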
diff --git a/data/queue.py b/data/queue.py
index 289f4ad64..60632f5b1 100644
--- a/data/queue.py
+++ b/data/queue.py
@@ -1,41 +1,25 @@
-import uuid
-
from datetime import datetime, timedelta
-from contextlib import contextmanager
-from data.database import QueueItem, db, db_for_update, db_random_func
+from data.database import QueueItem, db, db_for_update
from util.morecollections import AttrDict
MINIMUM_EXTENSION = timedelta(seconds=20)
-DEFAULT_BATCH_SIZE = 1000
+class NoopWith:
+ def __enter__(self):
+ pass
-class BuildMetricQueueReporter(object):
- """ Metric queue reporter for the build system. """
- def __init__(self, metric_queue):
- self._metric_queue = metric_queue
-
- def __call__(self, currently_processing, running_count, total_count):
- need_capacity_count = total_count - running_count
- self._metric_queue.put_deprecated('BuildCapacityShortage', need_capacity_count, unit='Count')
- self._metric_queue.build_capacity_shortage.Set(need_capacity_count)
-
- building_percent = 100 if currently_processing else 0
- self._metric_queue.percent_building.Set(building_percent)
-
+ def __exit__(self, type, value, traceback):
+ pass
class WorkQueue(object):
- """ Work queue defines methods for interacting with a queue backed by the database. """
def __init__(self, queue_name, transaction_factory,
- canonical_name_match_list=None, reporter=None, metric_queue=None,
- has_namespace=False):
+ canonical_name_match_list=None, reporter=None):
self._queue_name = queue_name
self._reporter = reporter
- self._metric_queue = metric_queue
self._transaction_factory = transaction_factory
self._currently_processing = False
- self._has_namespaced_items = has_namespace
if canonical_name_match_list is None:
self._canonical_name_match_list = []
@@ -46,104 +30,57 @@ class WorkQueue(object):
def _canonical_name(name_list):
return '/'.join(name_list) + '/'
- @classmethod
- def _running_jobs(cls, now, name_match_query):
- return (cls
- ._running_jobs_where(QueueItem.select(QueueItem.queue_name), now)
- .where(QueueItem.queue_name ** name_match_query))
-
- @classmethod
- def _available_jobs(cls, now, name_match_query):
- return (cls
- ._available_jobs_where(QueueItem.select(), now)
- .where(QueueItem.queue_name ** name_match_query))
-
- @staticmethod
- def _running_jobs_where(query, now):
- return query.where(QueueItem.available == False, QueueItem.processing_expires > now)
-
- @staticmethod
- def _available_jobs_where(query, now):
- return query.where(QueueItem.available_after <= now,
- ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
- QueueItem.retries_remaining > 0)
-
- @classmethod
- def _available_jobs_not_running(cls, now, name_match_query, running_query):
- return (cls
- ._available_jobs(now, name_match_query)
- .where(~(QueueItem.queue_name << running_query)))
-
- def num_alive_jobs(self, canonical_name_list):
- """
- Returns the number of alive queue items with a given prefix.
- """
- def strip_slash(name):
- return name.lstrip('/')
- canonical_name_list = map(strip_slash, canonical_name_list)
- canonical_name_query = '/'.join([self._queue_name] + canonical_name_list) + '%'
+ def _running_jobs(self, now, name_match_query):
+ return (QueueItem
+ .select(QueueItem.queue_name)
+ .where(QueueItem.available == False,
+ QueueItem.processing_expires > now,
+ QueueItem.queue_name ** name_match_query))
+ def _available_jobs(self, now, name_match_query):
return (QueueItem
.select()
- .where(QueueItem.queue_name ** canonical_name_query)
- .where(QueueItem.retries_remaining > 0)
- .count())
+ .where(QueueItem.queue_name ** name_match_query, QueueItem.available_after <= now,
+ ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
+ QueueItem.retries_remaining > 0))
- def num_available_jobs_between(self, available_min_time, available_max_time, canonical_name_list):
- """
- Returns the number of available queue items with a given prefix, between the two provided times.
- """
- def strip_slash(name):
- return name.lstrip('/')
- canonical_name_list = map(strip_slash, canonical_name_list)
-
- available = self._available_jobs(available_max_time,
- '/'.join([self._queue_name] + canonical_name_list) + '%')
-
- return available.where(QueueItem.available_after >= available_min_time).count()
+ def _available_jobs_not_running(self, now, name_match_query, running_query):
+ return (self
+ ._available_jobs(now, name_match_query)
+ .where(~(QueueItem.queue_name << running_query)))
def _name_match_query(self):
return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)
- @staticmethod
- def _item_by_id_for_update(queue_id):
+ def _item_by_id_for_update(self, queue_id):
return db_for_update(QueueItem.select().where(QueueItem.id == queue_id)).get()
- def get_metrics(self):
- now = datetime.utcnow()
- name_match_query = self._name_match_query()
+ def get_metrics(self, require_transaction=True):
+ guard = self._transaction_factory(db) if require_transaction else NoopWith()
+ with guard:
+ now = datetime.utcnow()
+ name_match_query = self._name_match_query()
- running_query = self._running_jobs(now, name_match_query)
- running_count = running_query.distinct().count()
+ running_query = self._running_jobs(now, name_match_query)
+ running_count = running_query.distinct().count()
- available_query = self._available_jobs(now, name_match_query)
- available_count = available_query.select(QueueItem.queue_name).distinct().count()
+ available_query = self._available_jobs(now, name_match_query)
+ available_count = available_query.select(QueueItem.queue_name).distinct().count()
- available_not_running_query = self._available_jobs_not_running(now, name_match_query,
- running_query)
- available_not_running_count = (available_not_running_query
- .select(QueueItem.queue_name)
- .distinct()
- .count())
+ available_not_running_query = self._available_jobs_not_running(now, name_match_query,
+ running_query)
+ available_not_running_count = (available_not_running_query.select(QueueItem.queue_name)
+ .distinct().count())
return (running_count, available_not_running_count, available_count)
def update_metrics(self):
- if self._reporter is None and self._metric_queue is None:
+ if self._reporter is None:
return
(running_count, available_not_running_count, available_count) = self.get_metrics()
-
- if self._metric_queue:
- self._metric_queue.work_queue_running.Set(running_count, labelvalues=[self._queue_name])
- self._metric_queue.work_queue_available.Set(available_count, labelvalues=[self._queue_name])
- self._metric_queue.work_queue_available_not_running.Set(available_not_running_count,
- labelvalues=[self._queue_name])
-
-
- if self._reporter:
- self._reporter(self._currently_processing, running_count,
- running_count + available_not_running_count)
+ self._reporter(self._currently_processing, running_count,
+ running_count + available_not_running_count)
def has_retries_remaining(self, item_id):
""" Returns whether the queue item with the given id has any retries remaining. If the
@@ -154,130 +91,25 @@ class WorkQueue(object):
except QueueItem.DoesNotExist:
return False
- def delete_namespaced_items(self, namespace, subpath=None):
- """ Deletes all items in this queue that exist under the given namespace. """
- if not self._has_namespaced_items:
- return False
-
- subpath_query = '%s/' % subpath if subpath else ''
- queue_prefix = '%s/%s/%s%%' % (self._queue_name, namespace, subpath_query)
- return QueueItem.delete().where(QueueItem.queue_name ** queue_prefix).execute()
-
- def alive(self, canonical_name_list):
- """
- Returns True if a job matching the canonical name list is currently processing
- or available.
- """
- canonical_name = self._canonical_name([self._queue_name] + canonical_name_list)
- try:
- select_query = QueueItem.select().where(QueueItem.queue_name == canonical_name)
- now = datetime.utcnow()
-
- overall_query = (self._available_jobs_where(select_query.clone(), now) |
- self._running_jobs_where(select_query.clone(), now))
- overall_query.get()
- return True
- except QueueItem.DoesNotExist:
- return False
-
- def _queue_dict(self, canonical_name_list, message, available_after, retries_remaining):
- return dict(
- queue_name=self._canonical_name([self._queue_name] + canonical_name_list),
- body=message,
- retries_remaining=retries_remaining,
- available_after=datetime.utcnow() + timedelta(seconds=available_after or 0),
- )
-
- @contextmanager
- def batch_insert(self, batch_size=DEFAULT_BATCH_SIZE):
- items_to_insert = []
- def batch_put(canonical_name_list, message, available_after=0, retries_remaining=5):
- """
- Put an item, if it shouldn't be processed for some number of seconds,
- specify that amount as available_after. Returns the ID of the queue item added.
- """
- items_to_insert.append(self._queue_dict(canonical_name_list, message, available_after,
- retries_remaining))
-
- yield batch_put
-
- # Chunk the inserted items into batch_size chunks and insert_many
- remaining = list(items_to_insert)
- while remaining:
- QueueItem.insert_many(remaining[0:batch_size]).execute()
- remaining = remaining[batch_size:]
-
def put(self, canonical_name_list, message, available_after=0, retries_remaining=5):
"""
Put an item, if it shouldn't be processed for some number of seconds,
specify that amount as available_after. Returns the ID of the queue item added.
"""
- item = QueueItem.create(**self._queue_dict(canonical_name_list, message, available_after,
- retries_remaining))
- return str(item.id)
- def _select_available_item(self, ordering_required, now):
- """ Selects an available queue item from the queue table and returns it, if any. If none,
- return None.
- """
- name_match_query = self._name_match_query()
+ params = {
+ 'queue_name': self._canonical_name([self._queue_name] + canonical_name_list),
+ 'body': message,
+ 'retries_remaining': retries_remaining,
+ }
- try:
- if ordering_required:
- # The previous solution to this used a select for update in a
- # transaction to prevent multiple instances from processing the
- # same queue item. This suffered performance problems. This solution
- # instead has instances attempt to update the potential queue item to be
- # unavailable. However, since their update clause is restricted to items
- # that are available=False, only one instance's update will succeed, and
- # it will have a changed row count of 1. Instances that have 0 changed
- # rows know that another instance is already handling that item.
- running = self._running_jobs(now, name_match_query)
- avail = self._available_jobs_not_running(now, name_match_query, running)
- return avail.order_by(QueueItem.id).get()
- else:
- # If we don't require ordering, we grab a random item from any of the first 50 available.
- subquery = self._available_jobs(now, name_match_query).limit(50).alias('j1')
- return (QueueItem
- .select()
- .join(subquery, on=QueueItem.id == subquery.c.id)
- .order_by(db_random_func())
- .get())
+ available_date = datetime.utcnow() + timedelta(seconds=available_after or 0)
+ params['available_after'] = available_date
- except QueueItem.DoesNotExist:
- # No available queue item was found.
- return None
+ with self._transaction_factory(db):
+ return str(QueueItem.create(**params).id)
- def _attempt_to_claim_item(self, db_item, now, processing_time):
- """ Attempts to claim the specified queue item for this instance. Returns True on success and
- False on failure.
-
- Note that the underlying QueueItem row in the database will be changed on success, but
- the db_item object given as a parameter will *not* have its fields updated.
- """
-
- # Try to claim the item. We do so by updating the item's information only if its current
- # state ID matches that returned in the previous query. Since all updates to the QueueItem
- # must change the state ID, this is guaranteed to only succeed if the item has not yet been
- # claimed by another caller.
- #
- # Note that we use this method because InnoDB takes locks on *every* clause in the WHERE when
- # performing the update. Previously, we would check all these columns, resulting in a bunch
- # of lock contention. This change mitigates the problem significantly by only checking two
- # columns (id and state_id), both of which should be absolutely unique at all times.
- set_unavailable_query = (QueueItem
- .update(available=False,
- processing_expires=now + timedelta(seconds=processing_time),
- retries_remaining=QueueItem.retries_remaining - 1,
- state_id=str(uuid.uuid4()))
- .where(QueueItem.id == db_item.id,
- QueueItem.state_id == db_item.state_id))
-
- changed = set_unavailable_query.execute()
- return changed == 1
-
-
- def get(self, processing_time=300, ordering_required=False):
+ def get(self, processing_time=300):
"""
Get an available item and mark it as unavailable for the default of five
minutes. The result of this method must always be composed of simple
@@ -285,94 +117,87 @@ class WorkQueue(object):
"""
now = datetime.utcnow()
- # Select an available queue item.
- db_item = self._select_available_item(ordering_required, now)
- if db_item is None:
- self._currently_processing = False
- return None
+ name_match_query = self._name_match_query()
- # Attempt to claim the item for this instance.
- was_claimed = self._attempt_to_claim_item(db_item, now, processing_time)
- if not was_claimed:
- self._currently_processing = False
- return None
+ running = self._running_jobs(now, name_match_query)
+ avail = self._available_jobs_not_running(now, name_match_query, running)
- self._currently_processing = True
+ item = None
+ try:
+ db_item_candidate = avail.order_by(QueueItem.id).get()
+
+ with self._transaction_factory(db):
+ still_available_query = (db_for_update(self
+ ._available_jobs(now, name_match_query)
+ .where(QueueItem.id == db_item_candidate.id)))
+
+ db_item = still_available_query.get()
+ db_item.available = False
+ db_item.processing_expires = now + timedelta(seconds=processing_time)
+ db_item.retries_remaining -= 1
+ db_item.save()
+
+ item = AttrDict({
+ 'id': db_item.id,
+ 'body': db_item.body,
+ 'retries_remaining': db_item.retries_remaining
+ })
+
+ self._currently_processing = True
+ except QueueItem.DoesNotExist:
+ self._currently_processing = False
# Return a view of the queue item rather than an active db object
- return AttrDict({
- 'id': db_item.id,
- 'body': db_item.body,
- 'retries_remaining': db_item.retries_remaining - 1,
- })
+ return item
def cancel(self, item_id):
""" Attempts to cancel the queue item with the given ID from the queue. Returns true on success
- and false if the queue item could not be canceled.
+ and false if the queue item could not be canceled. A queue item can only be canceled if
+ it is available and has retries remaining.
"""
- count_removed = QueueItem.delete().where(QueueItem.id == item_id).execute()
- return count_removed > 0
+
+ with self._transaction_factory(db):
+ # Load the build queue item for update.
+ try:
+ queue_item = db_for_update(QueueItem.select()
+ .where(QueueItem.id == item_id)).get()
+ except QueueItem.DoesNotExist:
+ return False
+
+ # Check the queue item.
+ if not queue_item.available or queue_item.retries_remaining == 0:
+ return False
+
+ # Delete the queue item.
+ queue_item.delete_instance(recursive=True)
+ return True
def complete(self, completed_item):
- self._currently_processing = not self.cancel(completed_item.id)
+ with self._transaction_factory(db):
+ completed_item_obj = self._item_by_id_for_update(completed_item.id)
+ completed_item_obj.delete_instance(recursive=True)
+ self._currently_processing = False
def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
with self._transaction_factory(db):
retry_date = datetime.utcnow() + timedelta(seconds=retry_after)
+ incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
+ incomplete_item_obj.available_after = retry_date
+ incomplete_item_obj.available = True
- try:
- incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
- incomplete_item_obj.available_after = retry_date
- incomplete_item_obj.available = True
+ if restore_retry:
+ incomplete_item_obj.retries_remaining += 1
- if restore_retry:
- incomplete_item_obj.retries_remaining += 1
+ incomplete_item_obj.save()
+ self._currently_processing = False
+ return incomplete_item_obj.retries_remaining > 0
- incomplete_item_obj.save()
- self._currently_processing = False
- return incomplete_item_obj.retries_remaining > 0
- except QueueItem.DoesNotExist:
- return False
-
- def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION,
- updated_data=None):
+ def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION):
with self._transaction_factory(db):
- try:
- queue_item = self._item_by_id_for_update(item.id)
- new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
- has_change = False
+ queue_item = self._item_by_id_for_update(item.id)
+ new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
- # Only actually write the new expiration to the db if it moves the expiration some minimum
- if new_expiration - queue_item.processing_expires > minimum_extension:
- queue_item.processing_expires = new_expiration
- has_change = True
-
- if updated_data is not None and queue_item.body != updated_data:
- queue_item.body = updated_data
- has_change = True
-
- if has_change:
- queue_item.save()
-
- return has_change
- except QueueItem.DoesNotExist:
- return False
-
-
-def delete_expired(expiration_threshold, deletion_threshold, batch_size):
- """
- Deletes all queue items that are older than the provided expiration threshold in batches of the
- provided size. If there are less items than the deletion threshold, this method does nothing.
-
- Returns the number of items deleted.
- """
- to_delete = list(QueueItem
- .select()
- .where(QueueItem.processing_expires <= expiration_threshold)
- .limit(batch_size))
-
- if len(to_delete) < deletion_threshold:
- return 0
-
- QueueItem.delete().where(QueueItem.id << to_delete).execute()
- return len(to_delete)
+ # Only actually write the new expiration to the db if it moves the expiration some minimum
+ if new_expiration - queue_item.processing_expires > minimum_extension:
+ queue_item.processing_expires = new_expiration
+ queue_item.save()
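The comment deleted from _attempt_to_claim_item above describes an optimistic claim: competing workers race to UPDATE the row guarded by the state_id they last read, and only the worker whose UPDATE reports one changed row owns the item. The following is a small sketch of that technique, using sqlite3 purely for illustration; Quay's actual code goes through peewee.

import sqlite3
import uuid

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE queueitem (id INTEGER PRIMARY KEY, state_id TEXT, available INTEGER)')
conn.execute('INSERT INTO queueitem (id, state_id, available) VALUES (1, ?, 1)',
             (str(uuid.uuid4()),))
conn.commit()

def try_claim(connection, item_id, seen_state_id):
    # The WHERE clause only matches if nobody changed state_id since we read it,
    # so exactly one competing worker sees rowcount == 1.
    cursor = connection.execute(
        'UPDATE queueitem SET available = 0, state_id = ? WHERE id = ? AND state_id = ?',
        (str(uuid.uuid4()), item_id, seen_state_id))
    connection.commit()
    return cursor.rowcount == 1

item_id, state_id = conn.execute('SELECT id, state_id FROM queueitem WHERE available = 1').fetchone()
print(try_claim(conn, item_id, state_id))  # True: the first claim wins
print(try_claim(conn, item_id, state_id))  # False: stale state_id, another claim already won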
diff --git a/data/read_slave.py b/data/read_slave.py
new file mode 100644
index 000000000..ec73b9da6
--- /dev/null
+++ b/data/read_slave.py
@@ -0,0 +1,56 @@
+"""
+Adapted from:
+https://github.com/coleifer/peewee/blob/master/playhouse/read_slave.py
+
+Support for using a dedicated read-slave. The read database is specified as a
+Model.Meta option, and will be used for SELECT statements:
+
+
+master = PostgresqlDatabase('master')
+read_slave = PostgresqlDatabase('read_slave')
+
+class BaseModel(ReadSlaveModel):
+ class Meta:
+ database = master
+ read_slaves = [read_slave] # This database will be used for SELECTs.
+
+
+# Now define your models as you would normally.
+class User(BaseModel):
+ username = CharField()
+
+# To force a SELECT on the master database, you can instantiate the SelectQuery
+# by hand:
+master_select = SelectQuery(User).where(...)
+"""
+from peewee import *
+
+
+class ReadSlaveModel(Model):
+ @classmethod
+ def _get_read_database(cls):
+ if (not getattr(cls._meta, 'read_slaves', None) or
+ cls._meta.database.transaction_depth() > 0):
+ return cls._meta.database
+ current_idx = getattr(cls, '_read_slave_idx', -1)
+ cls._read_slave_idx = (current_idx + 1) % len(cls._meta.read_slaves)
+ selected_read_slave = cls._meta.read_slaves[cls._read_slave_idx]
+
+ if isinstance(selected_read_slave, Proxy) and selected_read_slave.obj is None:
+ # It's possible the read slave was disabled by not initializing it
+ return cls._meta.database
+
+ return selected_read_slave
+
+ @classmethod
+ def select(cls, *args, **kwargs):
+ query = super(ReadSlaveModel, cls).select(*args, **kwargs)
+ query.database = cls._get_read_database()
+ return query
+
+ @classmethod
+ def raw(cls, *args, **kwargs):
+ query = super(ReadSlaveModel, cls).raw(*args, **kwargs)
+ if query._sql.lower().startswith('select'):
+ query.database = cls._get_read_database()
+ return query
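The ReadSlaveModel added above rotates SELECT queries across the configured read slaves and stays on the master whenever a transaction is open. Below is a stripped-down sketch of just that routing decision, with stand-in classes rather than peewee databases.

class FakeDatabase(object):
    """ Stand-in exposing only the transaction_depth() hook the router needs. """
    def __init__(self, name, depth=0):
        self.name = name
        self._depth = depth

    def transaction_depth(self):
        return self._depth

class SelectRouter(object):
    def __init__(self, master, read_slaves):
        self.master = master
        self.read_slaves = read_slaves
        self._idx = -1

    def pick_for_select(self):
        # Stay on the master when no slaves are configured or a transaction is open.
        if not self.read_slaves or self.master.transaction_depth() > 0:
            return self.master
        # Otherwise round-robin across the slaves.
        self._idx = (self._idx + 1) % len(self.read_slaves)
        return self.read_slaves[self._idx]

router = SelectRouter(FakeDatabase('master'),
                      [FakeDatabase('slave-a'), FakeDatabase('slave-b')])
print([router.pick_for_select().name for _ in range(3)])  # ['slave-a', 'slave-b', 'slave-a']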
diff --git a/data/readreplica.py b/data/readreplica.py
deleted file mode 100644
index 33abff2ed..000000000
--- a/data/readreplica.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import random
-
-from collections import namedtuple
-
-from peewee import Model, SENTINEL, OperationalError, Proxy
-
-ReadOnlyConfig = namedtuple('ReadOnlyConfig', ['is_readonly', 'read_replicas'])
-
-class ReadOnlyModeException(Exception):
- """ Exception raised if a write operation was attempted when in read only mode.
- """
-
-
-class AutomaticFailoverWrapper(object):
- """ Class which wraps a peewee database driver and (optionally) a second driver.
- When executing SQL, if an OperationalError occurs and a second driver is given,
- the query is attempted again on the fallback DB. Otherwise, the exception is raised.
- """
- def __init__(self, primary_db, fallback_db=None):
- self._primary_db = primary_db
- self._fallback_db = fallback_db
-
- def __getattr__(self, attribute):
- if attribute != 'execute_sql' and hasattr(self._primary_db, attribute):
- return getattr(self._primary_db, attribute)
-
- return getattr(self, attribute)
-
- def execute(self, query, commit=SENTINEL, **context_options):
- ctx = self.get_sql_context(**context_options)
- sql, params = ctx.sql(query).query()
- return self.execute_sql(sql, params, commit=commit)
-
- def execute_sql(self, sql, params=None, commit=SENTINEL):
- try:
- return self._primary_db.execute_sql(sql, params, commit)
- except OperationalError:
- if self._fallback_db is not None:
- try:
- return self._fallback_db.execute_sql(sql, params, commit)
- except OperationalError:
- raise
-
-
-class ReadReplicaSupportedModel(Model):
- """ Base model for peewee data models that support using a read replica for SELECT
- requests not under transactions, and automatic failover to the master if the
- read replica fails.
-
- Read-only queries are initially attempted on one of the read replica databases
- being used; if an OperationalError occurs when attempting to invoke the query,
- then the failure is logged and the query is retried on the database master.
-
- Queries that are non-SELECTs (or under transactions) are always tried on the
- master.
-
- If the system is configured into read only mode, then all non-read-only queries
- will raise a ReadOnlyModeException.
- """
- @classmethod
- def _read_only_config(cls):
- read_only_config = getattr(cls._meta, 'read_only_config', None)
- if read_only_config is None:
- return ReadOnlyConfig(False, [])
-
- if isinstance(read_only_config, Proxy) and read_only_config.obj is None:
- return ReadOnlyConfig(False, [])
-
- return read_only_config.obj or ReadOnlyConfig(False, [])
-
- @classmethod
- def _in_readonly_mode(cls):
- return cls._read_only_config().is_readonly
-
- @classmethod
- def _select_database(cls):
- """ Selects a read replica database if we're configured to support read replicas.
- Otherwise, selects the master database.
- """
- # Select the master DB if read replica support is not enabled.
- read_only_config = cls._read_only_config()
- if not read_only_config.read_replicas:
- return cls._meta.database
-
- # Select the master DB if we're ever under a transaction.
- if cls._meta.database.transaction_depth() > 0:
- return cls._meta.database
-
- # Otherwise, return a read replica database with auto-retry onto the main database.
- replicas = read_only_config.read_replicas
- selected_read_replica = replicas[random.randrange(len(replicas))]
- return AutomaticFailoverWrapper(selected_read_replica, cls._meta.database)
-
- @classmethod
- def select(cls, *args, **kwargs):
- query = super(ReadReplicaSupportedModel, cls).select(*args, **kwargs)
- query._database = cls._select_database()
- return query
-
- @classmethod
- def insert(cls, *args, **kwargs):
- query = super(ReadReplicaSupportedModel, cls).insert(*args, **kwargs)
- if cls._in_readonly_mode():
- raise ReadOnlyModeException()
- return query
-
- @classmethod
- def update(cls, *args, **kwargs):
- query = super(ReadReplicaSupportedModel, cls).update(*args, **kwargs)
- if cls._in_readonly_mode():
- raise ReadOnlyModeException()
- return query
-
- @classmethod
- def delete(cls, *args, **kwargs):
- query = super(ReadReplicaSupportedModel, cls).delete(*args, **kwargs)
- if cls._in_readonly_mode():
- raise ReadOnlyModeException()
- return query
-
- @classmethod
- def raw(cls, *args, **kwargs):
- query = super(ReadReplicaSupportedModel, cls).raw(*args, **kwargs)
- if query._sql.lower().startswith('select '):
- query._database = cls._select_database()
- elif cls._in_readonly_mode():
- raise ReadOnlyModeException()
-
- return query
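The removed AutomaticFailoverWrapper retries a query on the primary database when the read replica raises an error. The sketch below captures only that retry shape; the callables stand in for peewee drivers, and the broad except is for illustration only (the original catches OperationalError specifically).

class FailoverExecutor(object):
    def __init__(self, primary, fallback=None):
        self._primary = primary
        self._fallback = fallback

    def execute_sql(self, sql):
        try:
            return self._primary(sql)
        except Exception:
            # No fallback configured: surface the original error.
            if self._fallback is None:
                raise
            # Otherwise retry once against the fallback (the master, in Quay's case).
            return self._fallback(sql)

def broken_replica(sql):
    raise RuntimeError('replica unavailable')

def master(sql):
    return 'result for %r from master' % sql

print(FailoverExecutor(broken_replica, master).execute_sql('SELECT 1'))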
diff --git a/data/registry_model/__init__.py b/data/registry_model/__init__.py
deleted file mode 100644
index ffac9dd59..000000000
--- a/data/registry_model/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import os
-import logging
-
-from data.registry_model.registry_pre_oci_model import pre_oci_model
-from data.registry_model.registry_oci_model import oci_model
-from data.registry_model.modelsplitter import SplitModel
-
-logger = logging.getLogger(__name__)
-
-
-class RegistryModelProxy(object):
- def __init__(self):
- self._model = oci_model if os.getenv('OCI_DATA_MODEL') == 'true' else pre_oci_model
-
- def setup_split(self, oci_model_proportion, oci_whitelist, v22_whitelist, upgrade_mode):
- if os.getenv('OCI_DATA_MODEL') == 'true':
- return
-
- if upgrade_mode == 'complete':
- logger.info('===============================')
- logger.info('Full V2_2 + OCI model is enabled')
- logger.info('===============================')
- self._model = oci_model
- return
-
- logger.info('===============================')
- logger.info('Split registry model: OCI %s proportion and whitelist `%s` and V22 whitelist `%s`',
- oci_model_proportion, oci_whitelist, v22_whitelist)
- logger.info('===============================')
- self._model = SplitModel(oci_model_proportion, oci_whitelist, v22_whitelist,
- upgrade_mode == 'post-oci-rollout')
-
- def set_for_testing(self, use_oci_model):
- self._model = oci_model if use_oci_model else pre_oci_model
- logger.debug('Changed registry model to `%s` for testing', self._model)
-
- def __getattr__(self, attr):
- return getattr(self._model, attr)
-
-registry_model = RegistryModelProxy()
-logger.info('===============================')
-logger.info('Using registry model `%s`', registry_model._model)
-logger.info('===============================')
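RegistryModelProxy above is a plain delegation proxy: it keeps a reference to the active backend and forwards attribute lookups to it, so callers can keep importing registry_model while the backend is swapped at runtime or in tests. A toy version of that pattern, with invented backend classes:

class OldModel(object):
    def lookup(self, name):
        return 'old:%s' % name

class NewModel(object):
    def lookup(self, name):
        return 'new:%s' % name

class ModelProxy(object):
    def __init__(self, backend):
        self._model = backend

    def set_for_testing(self, backend):
        self._model = backend

    def __getattr__(self, attr):
        # Only called for names not found on the proxy itself, so _model and
        # set_for_testing never recurse through here.
        return getattr(self._model, attr)

registry = ModelProxy(OldModel())
print(registry.lookup('ubuntu'))       # old:ubuntu
registry.set_for_testing(NewModel())
print(registry.lookup('ubuntu'))       # new:ubuntu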
diff --git a/data/registry_model/blobuploader.py b/data/registry_model/blobuploader.py
deleted file mode 100644
index 5f99d3ec8..000000000
--- a/data/registry_model/blobuploader.py
+++ /dev/null
@@ -1,335 +0,0 @@
-import logging
-import time
-
-from contextlib import contextmanager
-from collections import namedtuple
-
-import bitmath
-import resumablehashlib
-
-from data.registry_model import registry_model
-from data.database import CloseForLongOperation, db_transaction
-from digest import digest_tools
-from util.registry.filelike import wrap_with_handler, StreamSlice
-from util.registry.gzipstream import calculate_size_handler
-from util.registry.torrent import PieceHasher
-
-
-logger = logging.getLogger(__name__)
-
-
-BLOB_CONTENT_TYPE = 'application/octet-stream'
-
-
-class BlobUploadException(Exception):
- """ Base for all exceptions raised when uploading blobs. """
-
-class BlobRangeMismatchException(BlobUploadException):
- """ Exception raised if the range to be uploaded does not match. """
-
-class BlobDigestMismatchException(BlobUploadException):
- """ Exception raised if the digest requested does not match that of the contents uploaded. """
-
-class BlobTooLargeException(BlobUploadException):
- """ Exception raised if the data uploaded exceeds the maximum_blob_size. """
- def __init__(self, uploaded, max_allowed):
- super(BlobTooLargeException, self).__init__()
- self.uploaded = uploaded
- self.max_allowed = max_allowed
-
-
-BlobUploadSettings = namedtuple('BlobUploadSettings', ['maximum_blob_size', 'bittorrent_piece_size',
- 'committed_blob_expiration'])
-
-
-def create_blob_upload(repository_ref, storage, settings, extra_blob_stream_handlers=None):
- """ Creates a new blob upload in the specified repository and returns a manager for interacting
- with that upload. Returns None if a new blob upload could not be started.
- """
- location_name = storage.preferred_locations[0]
- new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name)
- blob_upload = registry_model.create_blob_upload(repository_ref, new_upload_uuid, location_name,
- upload_metadata)
- if blob_upload is None:
- return None
-
- return _BlobUploadManager(repository_ref, blob_upload, settings, storage,
- extra_blob_stream_handlers)
-
-
-def retrieve_blob_upload_manager(repository_ref, blob_upload_id, storage, settings):
- """ Retrieves the manager for an in-progress blob upload with the specified ID under the given
- repository or None if none.
- """
- blob_upload = registry_model.lookup_blob_upload(repository_ref, blob_upload_id)
- if blob_upload is None:
- return None
-
- return _BlobUploadManager(repository_ref, blob_upload, settings, storage)
-
-@contextmanager
-def complete_when_uploaded(blob_upload):
- """ Wraps the given blob upload in a context manager that completes the upload when the context
- closes.
- """
- try:
- yield blob_upload
- except Exception as ex:
- logger.exception('Exception when uploading blob `%s`', blob_upload.blob_upload_id)
- raise ex
- finally:
- # Cancel the upload if something went wrong or it was not committed to a blob.
- if blob_upload.committed_blob is None:
- blob_upload.cancel_upload()
-
-@contextmanager
-def upload_blob(repository_ref, storage, settings, extra_blob_stream_handlers=None):
- """ Starts a new blob upload in the specified repository and yields a manager for interacting
- with that upload. When the context manager completes, the blob upload is deleted, whether
- committed to a blob or not. Yields None if a blob upload could not be started.
- """
- created = create_blob_upload(repository_ref, storage, settings, extra_blob_stream_handlers)
- if not created:
- yield None
- return
-
- try:
- yield created
- except Exception as ex:
- logger.exception('Exception when uploading blob `%s`', created.blob_upload_id)
- raise ex
- finally:
- # Cancel the upload if something went wrong or it was not committed to a blob.
- if created.committed_blob is None:
- created.cancel_upload()
-
-
-class _BlobUploadManager(object):
- """ Defines a helper class for easily interacting with blob uploads in progress, including
- handling of database and storage calls.
- """
- def __init__(self, repository_ref, blob_upload, settings, storage,
- extra_blob_stream_handlers=None):
- assert repository_ref is not None
- assert blob_upload is not None
-
- self.repository_ref = repository_ref
- self.blob_upload = blob_upload
- self.settings = settings
- self.storage = storage
- self.extra_blob_stream_handlers = extra_blob_stream_handlers
- self.committed_blob = None
-
- @property
- def blob_upload_id(self):
- """ Returns the unique ID for the blob upload. """
- return self.blob_upload.upload_id
-
- def upload_chunk(self, app_config, input_fp, start_offset=0, length=-1, metric_queue=None):
- """ Uploads a chunk of data found in the given input file-like interface. start_offset and
- length are optional and should match a range header if any was given.
-
- If metric_queue is given, the upload time and chunk size are written into the metrics in
- the queue.
-
- Returns the total number of bytes uploaded after this upload has completed. Raises
- a BlobUploadException if the upload failed.
- """
- assert start_offset is not None
- assert length is not None
-
- if start_offset > 0 and start_offset > self.blob_upload.byte_count:
- logger.error('start_offset provided greater than blob_upload.byte_count')
- raise BlobRangeMismatchException()
-
- # Ensure that we won't go over the allowed maximum size for blobs.
- max_blob_size = bitmath.parse_string_unsafe(self.settings.maximum_blob_size)
- uploaded = bitmath.Byte(length + start_offset)
- if length > -1 and uploaded > max_blob_size:
- raise BlobTooLargeException(uploaded=uploaded.bytes, max_allowed=max_blob_size.bytes)
-
- location_set = {self.blob_upload.location_name}
- upload_error = None
- with CloseForLongOperation(app_config):
- if start_offset > 0 and start_offset < self.blob_upload.byte_count:
- # Skip the bytes which were received on a previous push, which are already stored and
- # included in the sha calculation
- overlap_size = self.blob_upload.byte_count - start_offset
- input_fp = StreamSlice(input_fp, overlap_size)
-
- # Update our upload bounds to reflect the skipped portion of the overlap
- start_offset = self.blob_upload.byte_count
- length = max(length - overlap_size, 0)
-
- # We use this to escape early in case we have already processed all of the bytes the user
- # wants to upload.
- if length == 0:
- return self.blob_upload.byte_count
-
- input_fp = wrap_with_handler(input_fp, self.blob_upload.sha_state.update)
-
- if self.extra_blob_stream_handlers:
- for handler in self.extra_blob_stream_handlers:
- input_fp = wrap_with_handler(input_fp, handler)
-
- # Add a hasher for calculating SHA1s for torrents if this is the first chunk and/or we have
- # already calculated hash data for the previous chunk(s).
- piece_hasher = None
- if self.blob_upload.chunk_count == 0 or self.blob_upload.piece_sha_state:
- initial_sha1_value = self.blob_upload.piece_sha_state or resumablehashlib.sha1()
- initial_sha1_pieces_value = self.blob_upload.piece_hashes or ''
-
- piece_hasher = PieceHasher(self.settings.bittorrent_piece_size, start_offset,
- initial_sha1_pieces_value, initial_sha1_value)
- input_fp = wrap_with_handler(input_fp, piece_hasher.update)
-
- # If this is the first chunk and we're starting at the 0 offset, add a handler to gunzip the
- # stream so we can determine the uncompressed size. We'll throw out this data if another chunk
- # comes in, but in the common case the docker client only sends one chunk.
- size_info = None
- if start_offset == 0 and self.blob_upload.chunk_count == 0:
- size_info, fn = calculate_size_handler()
- input_fp = wrap_with_handler(input_fp, fn)
-
- start_time = time.time()
- length_written, new_metadata, upload_error = self.storage.stream_upload_chunk(
- location_set,
- self.blob_upload.upload_id,
- start_offset,
- length,
- input_fp,
- self.blob_upload.storage_metadata,
- content_type=BLOB_CONTENT_TYPE,
- )
-
- if upload_error is not None:
- logger.error('storage.stream_upload_chunk returned error %s', upload_error)
- raise BlobUploadException(upload_error)
-
- # Update the chunk upload time and push bytes metrics.
- if metric_queue is not None:
- metric_queue.chunk_upload_time.Observe(time.time() - start_time, labelvalues=[
- length_written, list(location_set)[0]])
-
- metric_queue.push_byte_count.Inc(length_written)
-
- # Ensure we have not gone beyond the max layer size.
- new_blob_bytes = self.blob_upload.byte_count + length_written
- new_blob_size = bitmath.Byte(new_blob_bytes)
- if new_blob_size > max_blob_size:
- raise BlobTooLargeException(uploaded=new_blob_size, max_allowed=max_blob_size.bytes)
-
- # If we determined an uncompressed size and this is the first chunk, add it to the blob.
- # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks.
- uncompressed_byte_count = self.blob_upload.uncompressed_byte_count
- if size_info is not None and self.blob_upload.chunk_count == 0 and size_info.is_valid:
- uncompressed_byte_count = size_info.uncompressed_size
- elif length_written > 0:
- # Otherwise, if we wrote some bytes and the above conditions were not met, then we don't
- # know the uncompressed size.
- uncompressed_byte_count = None
-
- piece_hashes = None
- piece_sha_state = None
- if piece_hasher is not None:
- piece_hashes = piece_hasher.piece_hashes
- piece_sha_state = piece_hasher.hash_fragment
-
- self.blob_upload = registry_model.update_blob_upload(self.blob_upload,
- uncompressed_byte_count,
- piece_hashes,
- piece_sha_state,
- new_metadata,
- new_blob_bytes,
- self.blob_upload.chunk_count + 1,
- self.blob_upload.sha_state)
- if self.blob_upload is None:
- raise BlobUploadException('Could not complete upload of chunk')
-
- return new_blob_bytes
-
- def cancel_upload(self):
- """ Cancels the blob upload, deleting any data uploaded and removing the upload itself. """
- if self.blob_upload is None:
- return
-
- # Tell storage to cancel the chunked upload, deleting its contents.
- self.storage.cancel_chunked_upload({self.blob_upload.location_name},
- self.blob_upload.upload_id,
- self.blob_upload.storage_metadata)
-
- # Remove the blob upload record itself.
- registry_model.delete_blob_upload(self.blob_upload)
-
- def commit_to_blob(self, app_config, expected_digest=None):
- """ Commits the blob upload to a blob under the repository. The resulting blob will be marked
- to not be GCed for some period of time (as configured by `committed_blob_expiration`).
-
- If expected_digest is specified, the content digest of the data uploaded for the blob is
- compared to that given and, if it does not match, a BlobDigestMismatchException is
- raised. The digest given must be of type `Digest` and not a string.
- """
- # Compare the content digest.
- if expected_digest is not None:
- self._validate_digest(expected_digest)
-
- # Finalize the storage.
- storage_already_existed = self._finalize_blob_storage(app_config)
-
- # Convert the upload to a blob.
- computed_digest_str = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)
-
- with db_transaction():
- blob = registry_model.commit_blob_upload(self.blob_upload, computed_digest_str,
- self.settings.committed_blob_expiration)
- if blob is None:
- return None
-
- # Save torrent hash information (if available).
- if self.blob_upload.piece_sha_state is not None and not storage_already_existed:
- piece_bytes = self.blob_upload.piece_hashes + self.blob_upload.piece_sha_state.digest()
- registry_model.set_torrent_info(blob, self.settings.bittorrent_piece_size, piece_bytes)
-
- self.committed_blob = blob
- return blob
-
- def _validate_digest(self, expected_digest):
- """
- Verifies that the digest's SHA matches that of the uploaded data.
- """
- computed_digest = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)
- if not digest_tools.digests_equal(computed_digest, expected_digest):
- logger.error('Digest mismatch for upload %s: Expected digest %s, found digest %s',
- self.blob_upload.upload_id, expected_digest, computed_digest)
- raise BlobDigestMismatchException()
-
- def _finalize_blob_storage(self, app_config):
- """
- When an upload is successful, this ends the uploading process from the
- storage's perspective.
-
- Returns True if the blob already existed.
- """
- computed_digest = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)
- final_blob_location = digest_tools.content_path(computed_digest)
-
- # Close the database connection before we perform this operation, as it can take a while
- # and we shouldn't hold the connection during that time.
- with CloseForLongOperation(app_config):
- # Move the storage into place, or if this was a re-upload, cancel it
- already_existed = self.storage.exists({self.blob_upload.location_name}, final_blob_location)
- if already_existed:
- # It already existed, clean up our upload which served as proof that the
- # uploader had the blob.
- self.storage.cancel_chunked_upload({self.blob_upload.location_name},
- self.blob_upload.upload_id,
- self.blob_upload.storage_metadata)
- else:
- # We were the first ones to upload this image (at least to this location)
- # Let's copy it into place
- self.storage.complete_chunked_upload({self.blob_upload.location_name},
- self.blob_upload.upload_id,
- final_blob_location,
- self.blob_upload.storage_metadata)
-
- return already_existed
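The removed upload_blob context manager guarantees that an upload which was never committed gets cancelled when the block exits, whether it exits normally or via an exception. Below is a reduced sketch of that contract, with a hypothetical FakeUpload class standing in for _BlobUploadManager and the storage engine.

from contextlib import contextmanager

class FakeUpload(object):
    """ Hypothetical stand-in for _BlobUploadManager. """
    def __init__(self):
        self.committed_blob = None
        self.cancelled = False

    def commit(self, digest):
        self.committed_blob = digest

    def cancel_upload(self):
        self.cancelled = True

@contextmanager
def upload_blob():
    created = FakeUpload()
    try:
        yield created
    finally:
        # Cancel the upload if it was never committed to a blob, even on error.
        if created.committed_blob is None:
            created.cancel_upload()

with upload_blob() as upload:
    pass  # simulate a push that fails before committing

print(upload.cancelled)  # True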
diff --git a/data/registry_model/datatype.py b/data/registry_model/datatype.py
deleted file mode 100644
index 091776bb1..000000000
--- a/data/registry_model/datatype.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# pylint: disable=protected-access
-
-from functools import wraps, total_ordering
-
-class FromDictionaryException(Exception):
- """ Exception raised if constructing a data type from a dictionary fails due to
- missing data.
- """
-
-def datatype(name, static_fields):
- """ Defines a base class for a datatype that will represent a row from the database,
- in an abstracted form.
- """
- @total_ordering
- class DataType(object):
- __name__ = name
-
- def __init__(self, **kwargs):
- self._db_id = kwargs.pop('db_id', None)
- self._inputs = kwargs.pop('inputs', None)
- self._fields = kwargs
-
- for name in static_fields:
- assert name in self._fields, 'Missing field %s' % name
-
- def __eq__(self, other):
- return self._db_id == other._db_id
-
- def __lt__(self, other):
- return self._db_id < other._db_id
-
- def __getattr__(self, name):
- if name in static_fields:
- return self._fields[name]
-
- raise AttributeError('Unknown field `%s`' % name)
-
- def __repr__(self):
- return '<%s> #%s' % (name, self._db_id)
-
- @classmethod
- def from_dict(cls, dict_data):
- try:
- return cls(**dict_data)
- except:
- raise FromDictionaryException()
-
- def asdict(self):
- dictionary_rep = dict(self._fields)
- assert ('db_id' not in dictionary_rep and
- 'inputs' not in dictionary_rep)
-
- dictionary_rep['db_id'] = self._db_id
- dictionary_rep['inputs'] = self._inputs
- return dictionary_rep
-
- return DataType
-
-
-def requiresinput(input_name):
- """ Marks a property on the data type as requiring an input to be invoked. """
- def inner(func):
- @wraps(func)
- def wrapper(self, *args, **kwargs):
- if self._inputs.get(input_name) is None:
- raise Exception('Cannot invoke function with missing input `%s`' % input_name)
-
- kwargs[input_name] = self._inputs[input_name]
- result = func(self, *args, **kwargs)
- return result
-
- return wrapper
- return inner
-
-
-def optionalinput(input_name):
- """ Marks a property on the data type as having an input be optional when invoked. """
- def inner(func):
- @wraps(func)
- def wrapper(self, *args, **kwargs):
- kwargs[input_name] = self._inputs.get(input_name)
- result = func(self, *args, **kwargs)
- return result
-
- return wrapper
- return inner
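The datatype() factory above builds small value objects that carry a database id plus named fields and compare by id alone. The self-contained snippet below re-implements just enough of that idea to run standalone; it is an illustration of the shape, not the factory itself.

class Label(object):
    """ Toy equivalent of datatype('Label', ['key', 'value']). """
    _static_fields = ('key', 'value')

    def __init__(self, db_id, **kwargs):
        self._db_id = db_id
        for field in self._static_fields:
            assert field in kwargs, 'Missing field %s' % field
        self._fields = kwargs

    def __getattr__(self, name):
        if name in self._static_fields:
            return self._fields[name]
        raise AttributeError('Unknown field `%s`' % name)

    def __eq__(self, other):
        # Equality is by database id, mirroring the factory above.
        return self._db_id == other._db_id

a = Label(1, key='arch', value='amd64')
b = Label(1, key='arch', value='amd64')
print('%s %s' % (a.key, a == b))  # arch True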
diff --git a/data/registry_model/datatypes.py b/data/registry_model/datatypes.py
deleted file mode 100644
index b732fbefc..000000000
--- a/data/registry_model/datatypes.py
+++ /dev/null
@@ -1,504 +0,0 @@
-import hashlib
-
-from collections import namedtuple
-from enum import Enum, unique
-
-from cachetools.func import lru_cache
-
-from data import model
-from data.database import Manifest as ManifestTable
-from data.registry_model.datatype import datatype, requiresinput, optionalinput
-from image.docker import ManifestException
-from image.docker.schemas import parse_manifest_from_bytes
-from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
-from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
-from util.bytes import Bytes
-
-
-class RepositoryReference(datatype('Repository', [])):
- """ RepositoryReference is a reference to a repository, passed to registry interface methods. """
- @classmethod
- def for_repo_obj(cls, repo_obj, namespace_name=None, repo_name=None, is_free_namespace=None,
- state=None):
- if repo_obj is None:
- return None
-
- return RepositoryReference(db_id=repo_obj.id,
- inputs=dict(
- kind=model.repository.get_repo_kind_name(repo_obj),
- is_public=model.repository.is_repository_public(repo_obj),
- namespace_name=namespace_name,
- repo_name=repo_name,
- is_free_namespace=is_free_namespace,
- state=state
- ))
-
- @classmethod
- def for_id(cls, repo_id, namespace_name=None, repo_name=None, is_free_namespace=None, state=None):
- return RepositoryReference(db_id=repo_id,
- inputs=dict(
- kind=None,
- is_public=None,
- namespace_name=namespace_name,
- repo_name=repo_name,
- is_free_namespace=is_free_namespace,
- state=state
- ))
-
- @property
- @lru_cache(maxsize=1)
- def _repository_obj(self):
- return model.repository.lookup_repository(self._db_id)
-
- @property
- @optionalinput('kind')
- def kind(self, kind):
- """ Returns the kind of the repository. """
- return kind or model.repository.get_repo_kind_name(self._repository_obj)
-
- @property
- @optionalinput('is_public')
- def is_public(self, is_public):
- """ Returns whether the repository is public. """
- if is_public is not None:
- return is_public
-
- return model.repository.is_repository_public(self._repository_obj)
-
- @property
- def trust_enabled(self):
- """ Returns whether trust is enabled in this repository. """
- repository = self._repository_obj
- if repository is None:
- return None
-
- return repository.trust_enabled
-
- @property
- def id(self):
- """ Returns the database ID of the repository. """
- return self._db_id
-
- @property
- @optionalinput('namespace_name')
- def namespace_name(self, namespace_name=None):
- """ Returns the namespace name of this repository.
- """
- if namespace_name is not None:
- return namespace_name
-
- repository = self._repository_obj
- if repository is None:
- return None
-
- return repository.namespace_user.username
-
- @property
- @optionalinput('is_free_namespace')
- def is_free_namespace(self, is_free_namespace=None):
- """ Returns whether the namespace of the repository is on a free plan.
- """
- if is_free_namespace is not None:
- return is_free_namespace
-
- repository = self._repository_obj
- if repository is None:
- return None
-
- return repository.namespace_user.stripe_id is None
-
- @property
- @optionalinput('repo_name')
- def name(self, repo_name=None):
- """ Returns the name of this repository.
- """
- if repo_name is not None:
- return repo_name
-
- repository = self._repository_obj
- if repository is None:
- return None
-
- return repository.name
-
- @property
- @optionalinput('state')
- def state(self, state=None):
- """ Return the state of the Repository. """
- if state is not None:
- return state
-
- repository = self._repository_obj
- if repository is None:
- return None
-
- return repository.state
-
-
-class Label(datatype('Label', ['key', 'value', 'uuid', 'source_type_name', 'media_type_name'])):
- """ Label represents a label on a manifest. """
- @classmethod
- def for_label(cls, label):
- if label is None:
- return None
-
- return Label(db_id=label.id, key=label.key, value=label.value,
- uuid=label.uuid, media_type_name=label.media_type.name,
- source_type_name=label.source_type.name)
-
-
-class ShallowTag(datatype('ShallowTag', ['name'])):
- """ ShallowTag represents a tag in a repository, but only contains basic information. """
- @classmethod
- def for_tag(cls, tag):
- if tag is None:
- return None
-
- return ShallowTag(db_id=tag.id, name=tag.name)
-
- @classmethod
- def for_repository_tag(cls, repository_tag):
- if repository_tag is None:
- return None
-
- return ShallowTag(db_id=repository_tag.id, name=repository_tag.name)
-
- @property
- def id(self):
- """ The ID of this tag for pagination purposes only. """
- return self._db_id
-
-
-class Tag(datatype('Tag', ['name', 'reversion', 'manifest_digest', 'lifetime_start_ts',
- 'lifetime_end_ts', 'lifetime_start_ms', 'lifetime_end_ms'])):
- """ Tag represents a tag in a repository, which points to a manifest or image. """
- @classmethod
- def for_tag(cls, tag, legacy_image=None):
- if tag is None:
- return None
-
- return Tag(db_id=tag.id,
- name=tag.name,
- reversion=tag.reversion,
- lifetime_start_ms=tag.lifetime_start_ms,
- lifetime_end_ms=tag.lifetime_end_ms,
- lifetime_start_ts=tag.lifetime_start_ms / 1000,
- lifetime_end_ts=tag.lifetime_end_ms / 1000 if tag.lifetime_end_ms else None,
- manifest_digest=tag.manifest.digest,
- inputs=dict(legacy_image=legacy_image,
- manifest=tag.manifest,
- repository=RepositoryReference.for_id(tag.repository_id)))
-
- @classmethod
- def for_repository_tag(cls, repository_tag, manifest_digest=None, legacy_image=None):
- if repository_tag is None:
- return None
-
- return Tag(db_id=repository_tag.id,
- name=repository_tag.name,
- reversion=repository_tag.reversion,
- lifetime_start_ts=repository_tag.lifetime_start_ts,
- lifetime_end_ts=repository_tag.lifetime_end_ts,
- lifetime_start_ms=repository_tag.lifetime_start_ts * 1000,
- lifetime_end_ms=(repository_tag.lifetime_end_ts * 1000
- if repository_tag.lifetime_end_ts else None),
- manifest_digest=manifest_digest,
- inputs=dict(legacy_image=legacy_image,
- repository=RepositoryReference.for_id(repository_tag.repository_id)))
-
- @property
- @requiresinput('manifest')
- def _manifest(self, manifest):
- """ Returns the manifest for this tag. Will only apply to new-style OCI tags. """
- return manifest
-
- @property
- @optionalinput('manifest')
- def manifest(self, manifest):
- """ Returns the manifest for this tag or None if none. Will only apply to new-style OCI tags.
- """
- return Manifest.for_manifest(manifest, self.legacy_image_if_present)
-
- @property
- @requiresinput('repository')
- def repository(self, repository):
- """ Returns the repository under which this tag lives.
- """
- return repository
-
- @property
- @requiresinput('legacy_image')
- def legacy_image(self, legacy_image):
- """ Returns the legacy Docker V1-style image for this tag. Note that this
- will be None for tags whose manifests point to other manifests instead of images.
- """
- return legacy_image
-
- @property
- @optionalinput('legacy_image')
- def legacy_image_if_present(self, legacy_image):
- """ Returns the legacy Docker V1-style image for this tag. Note that this
- will be None for tags whose manifests point to other manifests instead of images.
- """
- return legacy_image
-
- @property
- def id(self):
- """ The ID of this tag for pagination purposes only. """
- return self._db_id
-
-
-class Manifest(datatype('Manifest', ['digest', 'media_type', 'internal_manifest_bytes'])):
- """ Manifest represents a manifest in a repository. """
- @classmethod
- def for_tag_manifest(cls, tag_manifest, legacy_image=None):
- if tag_manifest is None:
- return None
-
- return Manifest(db_id=tag_manifest.id, digest=tag_manifest.digest,
- internal_manifest_bytes=Bytes.for_string_or_unicode(tag_manifest.json_data),
- media_type=DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE, # Always in legacy.
- inputs=dict(legacy_image=legacy_image, tag_manifest=True))
-
- @classmethod
- def for_manifest(cls, manifest, legacy_image):
- if manifest is None:
- return None
-
- # NOTE: `manifest_bytes` will be None if not selected by certain join queries.
- manifest_bytes = (Bytes.for_string_or_unicode(manifest.manifest_bytes)
- if manifest.manifest_bytes is not None else None)
- return Manifest(db_id=manifest.id,
- digest=manifest.digest,
- internal_manifest_bytes=manifest_bytes,
- media_type=ManifestTable.media_type.get_name(manifest.media_type_id),
- inputs=dict(legacy_image=legacy_image, tag_manifest=False))
-
- @property
- @requiresinput('tag_manifest')
- def _is_tag_manifest(self, tag_manifest):
- return tag_manifest
-
- @property
- @requiresinput('legacy_image')
- def legacy_image(self, legacy_image):
- """ Returns the legacy Docker V1-style image for this manifest.
- """
- return legacy_image
-
- @property
- @optionalinput('legacy_image')
- def legacy_image_if_present(self, legacy_image):
- """ Returns the legacy Docker V1-style image for this manifest. Note that this
- will be None for manifests that point to other manifests instead of images.
- """
- return legacy_image
-
- def get_parsed_manifest(self, validate=True):
- """ Returns the parsed manifest for this manifest. """
- assert self.internal_manifest_bytes
- return parse_manifest_from_bytes(self.internal_manifest_bytes, self.media_type,
- validate=validate)
-
- @property
- def layers_compressed_size(self):
- """ Returns the total compressed size of the layers in the manifest or None if this could not
- be computed.
- """
- try:
- return self.get_parsed_manifest().layers_compressed_size
- except ManifestException:
- return None
-
- @property
- def is_manifest_list(self):
- """ Returns True if this manifest points to a list (instead of an image). """
- return self.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
-
-
-class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'comment', 'command',
- 'image_size', 'aggregate_size', 'uploading',
- 'v1_metadata_string'])):
- """ LegacyImage represents a Docker V1-style image found in a repository. """
- @classmethod
- def for_image(cls, image, images_map=None, tags_map=None, blob=None):
- if image is None:
- return None
-
- return LegacyImage(db_id=image.id,
- inputs=dict(images_map=images_map, tags_map=tags_map,
- ancestor_id_list=image.ancestor_id_list(),
- blob=blob),
- docker_image_id=image.docker_image_id,
- created=image.created,
- comment=image.comment,
- command=image.command,
- v1_metadata_string=image.v1_json_metadata,
- image_size=image.storage.image_size,
- aggregate_size=image.aggregate_size,
- uploading=image.storage.uploading)
-
- @property
- def id(self):
- """ Returns the database ID of the legacy image. """
- return self._db_id
-
- @property
- @requiresinput('images_map')
- @requiresinput('ancestor_id_list')
- def parents(self, images_map, ancestor_id_list):
- """ Returns the parent images for this image. Raises an exception if the parents have
- not been loaded before this property is invoked. Parents are returned starting at the
- leaf image.
- """
- return [LegacyImage.for_image(images_map[ancestor_id], images_map=images_map)
- for ancestor_id in reversed(ancestor_id_list)
- if images_map.get(ancestor_id)]
-
- @property
- @requiresinput('blob')
- def blob(self, blob):
- """ Returns the blob for this image. Raises an exception if the blob has
- not been loaded before this property is invoked.
- """
- return blob
-
- @property
- @requiresinput('tags_map')
- def tags(self, tags_map):
- """ Returns the tags pointing to this image. Raises an exception if the tags have
- not been loaded before this property is invoked.
- """
- tags = tags_map.get(self._db_id)
- if not tags:
- return []
-
- return [Tag.for_repository_tag(tag) for tag in tags]
-
-
-@unique
-class SecurityScanStatus(Enum):
- """ Security scan status enum """
- SCANNED = 'scanned'
- FAILED = 'failed'
- QUEUED = 'queued'
- UNSUPPORTED = 'unsupported'
-
-
-class ManifestLayer(namedtuple('ManifestLayer', ['layer_info', 'blob'])):
- """ Represents a single layer in a manifest. The `layer_info` data will be manifest-type specific,
- but will have a few expected fields (such as `digest`). The `blob` represents the associated
- blob for this layer, optionally with placements. If the layer is a remote layer, the blob will
- be None.
- """
-
- def estimated_size(self, estimate_multiplier):
- """ Returns the estimated size of this layer. If the layers' blob has an uncompressed size,
- it is used. Otherwise, the compressed_size field in the layer is multiplied by the
- multiplier.
- """
- if self.blob.uncompressed_size:
- return self.blob.uncompressed_size
-
- return (self.layer_info.compressed_size or 0) * estimate_multiplier
-
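The estimate above is easiest to see with concrete numbers. A small, self-contained sketch follows; the blob and layer_info stand-ins are hypothetical, whereas real callers receive a datatypes Blob and a parsed manifest layer.

from collections import namedtuple
from data.registry_model.datatypes import ManifestLayer

# Hypothetical stand-ins for the real blob and manifest-layer objects.
FakeBlob = namedtuple('FakeBlob', ['uncompressed_size'])
FakeLayerInfo = namedtuple('FakeLayerInfo', ['compressed_size'])

# No uncompressed size recorded: fall back to compressed_size * multiplier (10 MiB * 3).
layer = ManifestLayer(layer_info=FakeLayerInfo(compressed_size=10 * 1024 * 1024),
                      blob=FakeBlob(uncompressed_size=None))
assert layer.estimated_size(estimate_multiplier=3) == 30 * 1024 * 1024

# Uncompressed size known: it is returned directly and the multiplier is ignored.
layer = ManifestLayer(layer_info=FakeLayerInfo(compressed_size=10 * 1024 * 1024),
                      blob=FakeBlob(uncompressed_size=42))
assert layer.estimated_size(estimate_multiplier=3) == 42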
-
-class Blob(datatype('Blob', ['uuid', 'digest', 'compressed_size', 'uncompressed_size',
- 'uploading'])):
- """ Blob represents a content-addressable piece of storage. """
- @classmethod
- def for_image_storage(cls, image_storage, storage_path, placements=None):
- if image_storage is None:
- return None
-
- return Blob(db_id=image_storage.id,
- uuid=image_storage.uuid,
- inputs=dict(placements=placements, storage_path=storage_path),
- digest=image_storage.content_checksum,
- compressed_size=image_storage.image_size,
- uncompressed_size=image_storage.uncompressed_size,
- uploading=image_storage.uploading)
-
- @property
- @requiresinput('storage_path')
- def storage_path(self, storage_path):
- """ Returns the path of this blob in storage. """
- # TODO: change this to take in the storage engine?
- return storage_path
-
- @property
- @requiresinput('placements')
- def placements(self, placements):
- """ Returns all the storage placements at which the Blob can be found. """
- return placements
-
-
-class DerivedImage(datatype('DerivedImage', ['verb', 'varying_metadata', 'blob'])):
- """ DerivedImage represents an image derived from a manifest via some form of verb. """
- @classmethod
- def for_derived_storage(cls, derived, verb, varying_metadata, blob):
- return DerivedImage(db_id=derived.id,
- verb=verb,
- varying_metadata=varying_metadata,
- blob=blob)
-
- @property
- def unique_id(self):
- """ Returns a unique ID for this derived image. This call will consistently produce the same
- unique ID across calls in the same code base.
- """
- return hashlib.sha256('%s:%s' % (self.verb, self._db_id)).hexdigest()
-
-
-class TorrentInfo(datatype('TorrentInfo', ['pieces', 'piece_length'])):
- """ TorrentInfo represents information to pull a blob via torrent. """
- @classmethod
- def for_torrent_info(cls, torrent_info):
- return TorrentInfo(db_id=torrent_info.id,
- pieces=torrent_info.pieces,
- piece_length=torrent_info.piece_length)
-
-
-class BlobUpload(datatype('BlobUpload', ['upload_id', 'byte_count', 'uncompressed_byte_count',
- 'chunk_count', 'sha_state', 'location_name',
- 'storage_metadata', 'piece_sha_state', 'piece_hashes'])):
- """ BlobUpload represents information about an in-progress upload to create a blob. """
- @classmethod
- def for_upload(cls, blob_upload, location_name=None):
- return BlobUpload(db_id=blob_upload.id,
- upload_id=blob_upload.uuid,
- byte_count=blob_upload.byte_count,
- uncompressed_byte_count=blob_upload.uncompressed_byte_count,
- chunk_count=blob_upload.chunk_count,
- sha_state=blob_upload.sha_state,
- location_name=location_name or blob_upload.location.name,
- storage_metadata=blob_upload.storage_metadata,
- piece_sha_state=blob_upload.piece_sha_state,
- piece_hashes=blob_upload.piece_hashes)
-
-
-class LikelyVulnerableTag(datatype('LikelyVulnerableTag', ['layer_id', 'name'])):
- """ LikelyVulnerableTag represents a tag in a repository that is likely vulnerable to a notified
- vulnerability.
- """
- # TODO: Remove all of this once we're on the new security model exclusively.
- @classmethod
- def for_tag(cls, tag, repository, docker_image_id, storage_uuid):
- layer_id = '%s.%s' % (docker_image_id, storage_uuid)
- return LikelyVulnerableTag(db_id=tag.id,
- name=tag.name,
- layer_id=layer_id,
- inputs=dict(repository=repository))
-
- @classmethod
- def for_repository_tag(cls, tag, repository):
- tag_layer_id = '%s.%s' % (tag.image.docker_image_id, tag.image.storage.uuid)
- return LikelyVulnerableTag(db_id=tag.id,
- name=tag.name,
- layer_id=tag_layer_id,
- inputs=dict(repository=repository))
-
- @property
- @requiresinput('repository')
- def repository(self, repository):
- return RepositoryReference.for_repo_obj(repository)
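Taken together, these wrappers are built from database rows plus whatever precomputed inputs the caller chose to load; properties guarded by requiresinput then depend on that input having been supplied. A minimal consumer-side sketch, assuming the decorator semantics described in the docstrings above; the image_storage row is supplied by the caller.

from data import model
from data.registry_model.datatypes import Blob

def load_blob(image_storage, with_placements=False):
  # Placements must be resolved up front; the Blob wrapper only carries them along.
  placements = None
  if with_placements:
    placements = list(model.storage.get_storage_locations(image_storage.uuid))

  blob = Blob.for_image_storage(image_storage,
                                storage_path=model.storage.get_layer_path(image_storage),
                                placements=placements)

  # blob.digest and blob.storage_path are always safe to read here; blob.placements is meant
  # to be read only when with_placements was True, since it is a requiresinput-guarded property.
  return blob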
diff --git a/data/registry_model/interface.py b/data/registry_model/interface.py
deleted file mode 100644
index 8862f88bc..000000000
--- a/data/registry_model/interface.py
+++ /dev/null
@@ -1,384 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-@add_metaclass(ABCMeta)
-class RegistryDataInterface(object):
- """ Interface for code to work with the registry data model. The registry data model consists
- of all tables that store registry-specific information, such as Manifests, Blobs, Images,
- and Labels.
- """
- @abstractmethod
- def supports_schema2(self, namespace_name):
- """ Returns whether the implementation of the data interface supports schema 2 format
- manifests. """
-
- @abstractmethod
- def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
- """ Returns the legacy image ID for the tag with a legacy images in
- the repository. Returns None if None.
- """
-
- @abstractmethod
- def get_legacy_tags_map(self, repository_ref, storage):
- """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
- the repository. Note that this can be a *very* heavy operation.
- """
-
- @abstractmethod
- def find_matching_tag(self, repository_ref, tag_names):
- """ Finds an alive tag in the repository matching one of the given tag names and returns it
- or None if none.
- """
-
- @abstractmethod
- def get_most_recent_tag(self, repository_ref):
- """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
- None.
- """
-
- @abstractmethod
- def lookup_repository(self, namespace_name, repo_name, kind_filter=None):
- """ Looks up and returns a reference to the repository with the given namespace and name,
- or None if none. """
-
- @abstractmethod
- def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
- """ Returns the manifest associated with the given tag. """
-
- @abstractmethod
- def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False,
- include_legacy_image=False, require_available=False):
- """ Looks up the manifest with the given digest under the given repository and returns it
- or None if none. If allow_dead is True, manifests pointed to by dead tags will also
- be returned. If require_available is True, a temporary tag will be added onto the
- returned manifest (before it is returned) to ensure it is available until another
- tagging or manifest operation is taken.
- """
-
- @abstractmethod
- def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
- storage, raise_on_error=False):
- """ Creates a manifest in a repository, adding all of the necessary data in the model.
-
- The `manifest_interface_instance` parameter must be an instance of the manifest
- interface as returned by the image/docker package.
-
- Note that all blobs referenced by the manifest must exist under the repository or this
- method will fail and return None.
-
- Returns a reference to the (created manifest, tag) or (None, None) on error.
- """
-
- @abstractmethod
- def get_legacy_images(self, repository_ref):
- """
- Returns an iterator of all the LegacyImages defined in the matching repository.
- """
-
- @abstractmethod
- def get_legacy_image(self, repository_ref, docker_image_id, include_parents=False,
- include_blob=False):
- """
- Returns the matching LegacyImage under the matching repository, if any. If none,
- returns None.
- """
-
- @abstractmethod
- def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
- """ Creates a label on the manifest with the given key and value.
-
- Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
- on the validation errors.
- """
-
- @abstractmethod
- def batch_create_manifest_labels(self, manifest):
- """ Returns a context manager for batch creation of labels on a manifest.
-
- Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
- on the validation errors.
- """
-
- @abstractmethod
- def list_manifest_labels(self, manifest, key_prefix=None):
- """ Returns all labels found on the manifest. If specified, the key_prefix will filter the
- labels returned to those keys that start with the given prefix.
- """
-
- @abstractmethod
- def get_manifest_label(self, manifest, label_uuid):
- """ Returns the label with the specified UUID on the manifest or None if none. """
-
- @abstractmethod
- def delete_manifest_label(self, manifest, label_uuid):
- """ Delete the label with the specified UUID on the manifest. Returns the label deleted
- or None if none.
- """
-
- @abstractmethod
- def lookup_cached_active_repository_tags(self, model_cache, repository_ref, start_pagination_id,
- limit):
- """
- Returns a page of active tags in a repository. Note that the tags returned by this method
- are ShallowTag objects, which only contain the tag name. This method will automatically cache
- the result and check the cache before making a call.
- """
-
- @abstractmethod
- def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
- """
- Returns a page of active tags in a repository. Note that the tags returned by this method
- are ShallowTag objects, which only contain the tag name.
- """
-
- @abstractmethod
- def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False):
- """
- Returns a list of all the active tags in the repository. Note that this is a *HEAVY*
- operation on repositories with a lot of tags, and should only be used for testing or
- where other more specific operations are not possible.
- """
-
- @abstractmethod
- def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
- active_tags_only=False, since_time_ms=None):
- """
- Returns the history of all tags in the repository (unless filtered). This includes tags that
- have been made inactive due to newer versions of those tags coming into service.
- """
-
- @abstractmethod
- def get_most_recent_tag_lifetime_start(self, repository_refs):
- """
- Returns a map from repository ID to the last modified time (seconds from epoch, UTC)
- for each repository in the given repository reference list.
- """
-
- @abstractmethod
- def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
- """
- Returns the latest, *active* tag found in the repository, with the matching name
- or None if none.
- """
-
- @abstractmethod
- def has_expired_tag(self, repository_ref, tag_name):
- """
- Returns true if and only if the repository contains a tag with the given name that is expired.
- """
-
- @abstractmethod
- def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image,
- storage, legacy_manifest_key, is_reversion=False):
- """
- Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
- legacy image specified. If is_reversion is set to True, this operation is considered a
- reversion over a previous tag move operation. Returns the updated Tag or None on error.
- """
-
- @abstractmethod
- def delete_tag(self, repository_ref, tag_name):
- """
- Deletes the latest, *active* tag with the given name in the repository.
- """
-
- @abstractmethod
- def delete_tags_for_manifest(self, manifest):
- """
- Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling.
- Returns the tags deleted, if any. Returns None on error.
- """
-
- @abstractmethod
- def change_repository_tag_expiration(self, tag, expiration_date):
- """ Sets the expiration date of the tag under the matching repository to that given. If the
- expiration date is None, then the tag will not expire. Returns a tuple of the previous
- expiration timestamp in seconds (if any), and whether the operation succeeded.
- """
-
- @abstractmethod
- def get_legacy_images_owned_by_tag(self, tag):
- """ Returns all legacy images *solely owned and used* by the given tag. """
-
- @abstractmethod
- def get_security_status(self, manifest_or_legacy_image):
- """ Returns the security status for the given manifest or legacy image or None if none. """
-
- @abstractmethod
- def reset_security_status(self, manifest_or_legacy_image):
- """ Resets the security status for the given manifest or legacy image, ensuring that it will
- get re-indexed.
- """
-
- @abstractmethod
- def backfill_manifest_for_tag(self, tag):
- """ Backfills a manifest for the V1 tag specified.
- If a manifest already exists for the tag, returns that manifest.
-
- NOTE: This method will only be necessary until we've completed the backfill, at which point
- it should be removed.
- """
-
- @abstractmethod
- def is_existing_disabled_namespace(self, namespace_name):
- """ Returns whether the given namespace exists and is disabled. """
-
- @abstractmethod
- def is_namespace_enabled(self, namespace_name):
- """ Returns whether the given namespace exists and is enabled. """
-
- @abstractmethod
- def get_manifest_local_blobs(self, manifest, include_placements=False):
- """ Returns the set of local blobs for the given manifest or None if none. """
-
- @abstractmethod
- def list_manifest_layers(self, manifest, storage, include_placements=False):
- """ Returns an *ordered list* of the layers found in the manifest, starting at the base
- and working towards the leaf, including the associated Blob and its placements
- (if specified). The layer information in `layer_info` will be of type
- `image.docker.types.ManifestImageLayer`. Should not be called for a manifest list.
- """
-
- @abstractmethod
- def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
- include_placements=False):
- """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
- and working towards the leaf, including the associated Blob and its placements
- (if specified). The layer information in `layer_info` will be of type
- `image.docker.types.ManifestImageLayer`. Should not be called for a manifest list.
- """
-
- @abstractmethod
- def lookup_derived_image(self, manifest, verb, storage, varying_metadata=None,
- include_placements=False):
- """
- Looks up the derived image for the given manifest, verb and optional varying metadata and
- returns it or None if none.
- """
-
- @abstractmethod
- def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
- varying_metadata=None, include_placements=False):
- """
- Looks up the derived image for the given manifest, verb and optional varying metadata
- and returns it. If none exists, a new derived image is created.
- """
-
- @abstractmethod
- def get_derived_image_signature(self, derived_image, signer_name):
- """
- Returns the signature associated with the derived image and a specific signer or None if none.
- """
-
- @abstractmethod
- def set_derived_image_signature(self, derived_image, signer_name, signature):
- """
- Sets the calculated signature for the given derived image and signer to that specified.
- """
-
- @abstractmethod
- def delete_derived_image(self, derived_image):
- """
- Deletes a derived image and all of its storage.
- """
-
- @abstractmethod
- def set_derived_image_size(self, derived_image, compressed_size):
- """
- Sets the compressed size on the given derived image.
- """
-
- @abstractmethod
- def get_torrent_info(self, blob):
- """
- Returns the torrent information associated with the given blob or None if none.
- """
-
- @abstractmethod
- def set_torrent_info(self, blob, piece_length, pieces):
- """
- Sets the torrent information associated with the given blob to that specified.
- """
-
- @abstractmethod
- def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
- """
- Returns the blob in the repository with the given digest, or None if none. Note that
- there may be multiple records in the same repository for the same blob digest, so the return
- value of this function may change.
- """
-
- @abstractmethod
- def create_blob_upload(self, repository_ref, upload_id, location_name, storage_metadata):
- """ Creates a new blob upload and returns a reference. If the blob upload could not be
- created, returns None. """
-
- @abstractmethod
- def lookup_blob_upload(self, repository_ref, blob_upload_id):
- """ Looks up the blob upload with the given ID under the specified repository and returns it
- or None if none.
- """
-
- @abstractmethod
- def update_blob_upload(self, blob_upload, uncompressed_byte_count, piece_hashes, piece_sha_state,
- storage_metadata, byte_count, chunk_count, sha_state):
- """ Updates the fields of the blob upload to match those given. Returns the updated blob upload
- or None if the record does not exist.
- """
-
- @abstractmethod
- def delete_blob_upload(self, blob_upload):
- """ Deletes a blob upload record. """
-
- @abstractmethod
- def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds):
- """ Commits the blob upload into a blob and sets an expiration before that blob will be GCed.
- """
-
- @abstractmethod
- def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec):
- """
- Mounts the blob from another repository into the specified target repository, and adds an
- expiration before that blob is automatically GCed. This function is useful during push
- operations if an existing blob from another repository is being pushed. Returns False if
- the mounting fails. Note that this function does *not* check security for mounting the blob
- and the caller is responsible for doing this check (an example can be found in
- endpoints/v2/blob.py).
- """
-
- @abstractmethod
- def set_tags_expiration_for_manifest(self, manifest, expiration_sec):
- """
- Sets the expiration on all tags that point to the given manifest to that specified.
- """
-
- @abstractmethod
- def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
- """ Returns the schema 1 version of this manifest, or None if none. """
-
- @abstractmethod
- def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
- expiration_sec, storage):
- """ Creates a manifest under the repository and sets a temporary tag to point to it.
- Returns the manifest object created or None on error.
- """
-
- @abstractmethod
- def get_cached_namespace_region_blacklist(self, model_cache, namespace_name):
- """ Returns a cached set of ISO country codes blacklisted for pulls for the namespace
- or None if the list could not be loaded.
- """
-
- @abstractmethod
- def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes,
- storage):
- """ Attempts to convert the specified into a parsed manifest with a media type
- in the allowed_mediatypes set. If not possible, or an error occurs, returns None.
- """
-
- @abstractmethod
- def yield_tags_for_vulnerability_notification(self, layer_id_pairs):
- """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories
- which have been registered for vulnerability_found notifications. Returns an iterator
- of LikelyVulnerableTag instances.
- """
diff --git a/data/registry_model/label_handlers.py b/data/registry_model/label_handlers.py
deleted file mode 100644
index 96afe0d94..000000000
--- a/data/registry_model/label_handlers.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import logging
-
-from util.timedeltastring import convert_to_timedelta
-
-logger = logging.getLogger(__name__)
-
-def _expires_after(label_dict, manifest, model):
- """ Sets the expiration of a manifest based on the quay.expires-in label. """
- try:
- timedelta = convert_to_timedelta(label_dict['value'])
- except ValueError:
- logger.exception('Could not convert %s to timedeltastring', label_dict['value'])
- return
-
- total_seconds = timedelta.total_seconds()
- logger.debug('Labeling manifest %s with expiration of %s', manifest, total_seconds)
- model.set_tags_expiration_for_manifest(manifest, total_seconds)
-
-
-_LABEL_HANDLERS = {
- 'quay.expires-after': _expires_after,
-}
-
-def apply_label_to_manifest(label_dict, manifest, model):
- """ Runs the handler defined, if any, for the given label. """
- handler = _LABEL_HANDLERS.get(label_dict['key'])
- if handler is not None:
- handler(label_dict, manifest, model)
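A short usage sketch (not part of the deleted module): a push endpoint passes each parsed label through apply_label_to_manifest, and only keys present in _LABEL_HANDLERS have side effects. The '1d' value assumes convert_to_timedelta accepts duration strings of that form, and `manifest` is assumed to be a datatypes.Manifest produced by the surrounding endpoint.

from data.registry_model import registry_model
from data.registry_model.label_handlers import apply_label_to_manifest

def apply_labels(manifest, label_dicts):
  for label_dict in label_dicts:
    # For {'key': 'quay.expires-after', 'value': '1d'} this ends up calling
    # registry_model.set_tags_expiration_for_manifest(manifest, 86400.0); keys with no
    # registered handler fall through without side effects.
    apply_label_to_manifest(label_dict, manifest, registry_model)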
diff --git a/data/registry_model/manifestbuilder.py b/data/registry_model/manifestbuilder.py
deleted file mode 100644
index 384ecb604..000000000
--- a/data/registry_model/manifestbuilder.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import logging
-import json
-import uuid
-
-from collections import namedtuple
-
-from flask import session
-
-from data import model
-from data.database import db_transaction, ImageStorage, ImageStoragePlacement
-from data.registry_model import registry_model
-from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST
-
-logger = logging.getLogger(__name__)
-
-ManifestLayer = namedtuple('ManifestLayer', ['layer_id', 'v1_metadata_string', 'db_id'])
-_BuilderState = namedtuple('_BuilderState', ['builder_id', 'images', 'tags', 'checksums',
- 'temp_storages'])
-
-_SESSION_KEY = '__manifestbuilder'
-
-
-def create_manifest_builder(repository_ref, storage, legacy_signing_key):
- """ Creates a new manifest builder for populating manifests under the specified repository
- and returns it. Returns None if the builder could not be constructed.
- """
- builder_id = str(uuid.uuid4())
- builder = _ManifestBuilder(repository_ref, _BuilderState(builder_id, {}, {}, {}, []), storage,
- legacy_signing_key)
- builder._save_to_session()
- return builder
-
-
-def lookup_manifest_builder(repository_ref, builder_id, storage, legacy_signing_key):
- """ Looks up the manifest builder with the given ID under the specified repository and returns
- it or None if none.
- """
- builder_state_tuple = session.get(_SESSION_KEY)
- if builder_state_tuple is None:
- return None
-
- builder_state = _BuilderState(*builder_state_tuple)
- if builder_state.builder_id != builder_id:
- return None
-
- return _ManifestBuilder(repository_ref, builder_state, storage, legacy_signing_key)
-
-
-class _ManifestBuilder(object):
- """ Helper class which provides an interface for bookkeeping the layers and configuration of
- manifests being constructed.
- """
- def __init__(self, repository_ref, builder_state, storage, legacy_signing_key):
- self._repository_ref = repository_ref
- self._builder_state = builder_state
- self._storage = storage
- self._legacy_signing_key = legacy_signing_key
-
- @property
- def builder_id(self):
- """ Returns the unique ID for this builder. """
- return self._builder_state.builder_id
-
- @property
- def committed_tags(self):
- """ Returns the tags committed by this builder, if any. """
- return [registry_model.get_repo_tag(self._repository_ref, tag_name, include_legacy_image=True)
- for tag_name in self._builder_state.tags.keys()]
-
- def start_layer(self, layer_id, v1_metadata_string, location_name, calling_user,
- temp_tag_expiration):
- """ Starts a new layer with the given ID to be placed into a manifest. Returns the layer
- started or None if an error occurred.
- """
- # Ensure the repository still exists.
- repository = model.repository.lookup_repository(self._repository_ref._db_id)
- if repository is None:
- return None
-
- namespace_name = repository.namespace_user.username
- repo_name = repository.name
-
- try:
- v1_metadata = json.loads(v1_metadata_string)
- except (ValueError, TypeError):
- logger.exception('Exception when trying to parse V1 metadata JSON for layer %s', layer_id)
- return None
-
- # Sanity check that the ID matches the v1 metadata.
- if layer_id != v1_metadata['id']:
- return None
-
- # Ensure the parent already exists in the repository.
- parent_id = v1_metadata.get('parent', None)
- parent_image = None
-
- if parent_id is not None:
- parent_image = model.image.get_repo_image(namespace_name, repo_name, parent_id)
- if parent_image is None:
- return None
-
- # Check to see if this layer already exists in the repository. If so, we can skip the creation.
- existing_image = registry_model.get_legacy_image(self._repository_ref, layer_id)
- if existing_image is not None:
- self._builder_state.images[layer_id] = existing_image.id
- self._save_to_session()
- return ManifestLayer(layer_id, v1_metadata_string, existing_image.id)
-
- with db_transaction():
- # Otherwise, create a new legacy image and point a temporary tag at it.
- created = model.image.find_create_or_link_image(layer_id, repository, calling_user, {},
- location_name)
- model.tag.create_temporary_hidden_tag(repository, created, temp_tag_expiration)
-
- # Save its V1 metadata.
- command_list = v1_metadata.get('container_config', {}).get('Cmd', None)
- command = json.dumps(command_list) if command_list else None
-
- model.image.set_image_metadata(layer_id, namespace_name, repo_name,
- v1_metadata.get('created'),
- v1_metadata.get('comment'),
- command, v1_metadata_string,
- parent=parent_image)
-
- # Save the changes to the builder.
- self._builder_state.images[layer_id] = created.id
- self._save_to_session()
-
- return ManifestLayer(layer_id, v1_metadata_string, created.id)
-
- def lookup_layer(self, layer_id):
- """ Returns a layer with the given ID under this builder. If none exists, returns None. """
- if layer_id not in self._builder_state.images:
- return None
-
- image = model.image.get_image_by_db_id(self._builder_state.images[layer_id])
- if image is None:
- return None
-
- return ManifestLayer(layer_id, image.v1_json_metadata, image.id)
-
- def assign_layer_blob(self, layer, blob, computed_checksums):
- """ Assigns a blob to a layer. """
- assert blob
- assert not blob.uploading
-
- repo_image = model.image.get_image_by_db_id(layer.db_id)
- if repo_image is None:
- return None
-
- with db_transaction():
- existing_storage = repo_image.storage
- repo_image.storage = blob._db_id
- repo_image.save()
-
- if existing_storage.uploading:
- self._builder_state.temp_storages.append(existing_storage.id)
-
- self._builder_state.checksums[layer.layer_id] = computed_checksums
- self._save_to_session()
- return True
-
- def validate_layer_checksum(self, layer, checksum):
- """ Returns whether the checksum for a layer matches that specified.
- """
- return checksum in self.get_layer_checksums(layer)
-
- def get_layer_checksums(self, layer):
- """ Returns the registered defined for the layer, if any. """
- return self._builder_state.checksums.get(layer.layer_id) or []
-
- def save_precomputed_checksum(self, layer, checksum):
- """ Saves a precomputed checksum for a layer. """
- checksums = self._builder_state.checksums.get(layer.layer_id) or []
- checksums.append(checksum)
- self._builder_state.checksums[layer.layer_id] = checksums
- self._save_to_session()
-
- def commit_tag_and_manifest(self, tag_name, layer):
- """ Commits a new tag + manifest for that tag to the repository with the given name,
- pointing to the given layer.
- """
- legacy_image = registry_model.get_legacy_image(self._repository_ref, layer.layer_id)
- if legacy_image is None:
- return None
-
- tag = registry_model.retarget_tag(self._repository_ref, tag_name, legacy_image, self._storage,
- self._legacy_signing_key)
- if tag is None:
- return None
-
- self._builder_state.tags[tag_name] = tag._db_id
- self._save_to_session()
- return tag
-
- def done(self):
- """ Marks the manifest builder as complete and disposes of any state. This call is optional
- and it is expected manifest builders will eventually time out if unused for an
- extended period of time.
- """
- temp_storages = self._builder_state.temp_storages
- for storage_id in temp_storages:
- try:
- storage = ImageStorage.get(id=storage_id)
- if storage.uploading and storage.content_checksum != EMPTY_LAYER_BLOB_DIGEST:
- # Delete all the placements pointing to the storage.
- ImageStoragePlacement.delete().where(ImageStoragePlacement.storage == storage).execute()
-
- # Delete the storage.
- storage.delete_instance()
- except ImageStorage.DoesNotExist:
- pass
-
- session.pop(_SESSION_KEY, None)
-
- def _save_to_session(self):
- session[_SESSION_KEY] = self._builder_state
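For orientation, here is a condensed sketch of the builder lifecycle as the V1 push endpoints are expected to drive it. Everything below is hypothetical glue: repository_ref, storage, signing_key, user, the uploaded blob and its checksum, the 'local_us' location name and the 'abc123' layer ID all stand in for values supplied by the caller, and a Flask request context is assumed because the builder state lives in the session.

import json
from data.registry_model.manifestbuilder import create_manifest_builder

def push_single_layer_tag(repository_ref, storage, signing_key, user, blob, checksum):
  builder = create_manifest_builder(repository_ref, storage, signing_key)
  if builder is None:
    return None

  v1_metadata = json.dumps({'id': 'abc123', 'container_config': {'Cmd': ['/bin/sh']}})
  layer = builder.start_layer('abc123', v1_metadata, 'local_us', user, temp_tag_expiration=3600)
  if layer is None:
    return None

  # Attach the uploaded blob and record its checksum so later validation can succeed.
  builder.assign_layer_blob(layer, blob, [checksum])
  assert builder.validate_layer_checksum(layer, checksum)

  # Point the tag at the layer, then let the builder clean up any temporary storage it tracked.
  tag = builder.commit_tag_and_manifest('latest', layer)
  builder.done()
  return tag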
diff --git a/data/registry_model/modelsplitter.py b/data/registry_model/modelsplitter.py
deleted file mode 100644
index 675a66928..000000000
--- a/data/registry_model/modelsplitter.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import inspect
-import logging
-import hashlib
-
-from data.database import DerivedStorageForImage, TagManifest, Manifest, Image
-from data.registry_model.registry_oci_model import back_compat_oci_model, oci_model
-from data.registry_model.registry_pre_oci_model import pre_oci_model
-from data.registry_model.datatypes import LegacyImage, Manifest as ManifestDataType
-
-
-logger = logging.getLogger(__name__)
-
-
-class SplitModel(object):
- def __init__(self, oci_model_proportion, oci_namespace_whitelist, v22_namespace_whitelist,
- oci_only_mode):
- self.v22_namespace_whitelist = set(v22_namespace_whitelist)
-
- self.oci_namespace_whitelist = set(oci_namespace_whitelist)
- self.oci_namespace_whitelist.update(v22_namespace_whitelist)
-
- self.oci_model_proportion = oci_model_proportion
- self.oci_only_mode = oci_only_mode
-
- def supports_schema2(self, namespace_name):
- """ Returns whether the implementation of the data interface supports schema 2 format
- manifests. """
- return namespace_name in self.v22_namespace_whitelist
-
- def _namespace_from_kwargs(self, args_dict):
- if 'namespace_name' in args_dict:
- return args_dict['namespace_name']
-
- if 'repository_ref' in args_dict:
- return args_dict['repository_ref'].namespace_name
-
- if 'tag' in args_dict:
- return args_dict['tag'].repository.namespace_name
-
- if 'manifest' in args_dict:
- manifest = args_dict['manifest']
- if manifest._is_tag_manifest:
- return TagManifest.get(id=manifest._db_id).tag.repository.namespace_user.username
- else:
- return Manifest.get(id=manifest._db_id).repository.namespace_user.username
-
- if 'manifest_or_legacy_image' in args_dict:
- manifest_or_legacy_image = args_dict['manifest_or_legacy_image']
- if isinstance(manifest_or_legacy_image, LegacyImage):
- return Image.get(id=manifest_or_legacy_image._db_id).repository.namespace_user.username
- else:
- manifest = manifest_or_legacy_image
- if manifest._is_tag_manifest:
- return TagManifest.get(id=manifest._db_id).tag.repository.namespace_user.username
- else:
- return Manifest.get(id=manifest._db_id).repository.namespace_user.username
-
- if 'derived_image' in args_dict:
- return (DerivedStorageForImage
- .get(id=args_dict['derived_image']._db_id)
- .source_image
- .repository
- .namespace_user
- .username)
-
- if 'blob' in args_dict:
- return '' # Blob functions are shared, so no need to do anything.
-
- if 'blob_upload' in args_dict:
- return '' # Blob functions are shared, so no need to do anything.
-
- raise Exception('Unknown namespace for dict `%s`' % args_dict)
-
- def __getattr__(self, attr):
- def method(*args, **kwargs):
- if self.oci_model_proportion >= 1.0:
- if self.oci_only_mode:
- logger.debug('Calling method `%s` under full OCI data model for all namespaces', attr)
- return getattr(oci_model, attr)(*args, **kwargs)
- else:
- logger.debug('Calling method `%s` under compat OCI data model for all namespaces', attr)
- return getattr(back_compat_oci_model, attr)(*args, **kwargs)
-
- argnames = inspect.getargspec(getattr(back_compat_oci_model, attr))[0]
- if not argnames and isinstance(args[0], ManifestDataType):
- args_dict = dict(manifest=args[0])
- else:
- args_dict = {argnames[index + 1]: value for index, value in enumerate(args)}
-
- if attr in ['yield_tags_for_vulnerability_notification', 'get_most_recent_tag_lifetime_start']:
- use_oci = self.oci_model_proportion >= 1.0
- namespace_name = '(implicit for ' + attr + ')'
- else:
- namespace_name = self._namespace_from_kwargs(args_dict)
- use_oci = namespace_name in self.oci_namespace_whitelist
-
- if not use_oci and self.oci_model_proportion:
- # Hash the namespace name and see if it falls into the proportion bucket.
- bucket = (int(hashlib.md5(namespace_name).hexdigest(), 16) % 100)
- if bucket <= int(self.oci_model_proportion * 100):
- logger.debug('Enabling OCI for namespace `%s` in proportional bucket',
- namespace_name)
- use_oci = True
-
- if use_oci:
- logger.debug('Calling method `%s` under OCI data model for namespace `%s`',
- attr, namespace_name)
- return getattr(back_compat_oci_model, attr)(*args, **kwargs)
- else:
- return getattr(pre_oci_model, attr)(*args, **kwargs)
-
- return method
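The proportional-rollout check inside __getattr__ above is worth isolating: a namespace is routed to the OCI path when the MD5 of its name, taken modulo 100, falls at or below the configured percentage (the whitelist checks still win first). A standalone sketch under Python 2 string semantics, matching the deleted code:

import hashlib

def falls_in_oci_bucket(namespace_name, oci_model_proportion):
  # Same arithmetic as SplitModel.__getattr__: hash the namespace into one of 100 buckets.
  bucket = int(hashlib.md5(namespace_name).hexdigest(), 16) % 100
  return bucket <= int(oci_model_proportion * 100)

# With oci_model_proportion = 0.25, namespaces hashing to buckets 0..25 use the OCI model,
# so roughly a quarter of namespaces are migrated while the rest stay on the pre-OCI model.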
diff --git a/data/registry_model/registry_oci_model.py b/data/registry_model/registry_oci_model.py
deleted file mode 100644
index 8821a747b..000000000
--- a/data/registry_model/registry_oci_model.py
+++ /dev/null
@@ -1,668 +0,0 @@
-# pylint: disable=protected-access
-import logging
-
-from contextlib import contextmanager
-from peewee import fn
-
-from data import database
-from data import model
-from data.model import oci, DataModelException
-from data.model.oci.retriever import RepositoryContentRetriever
-from data.database import db_transaction, Image, IMAGE_NOT_SCANNED_ENGINE_VERSION
-from data.registry_model.interface import RegistryDataInterface
-from data.registry_model.datatypes import (Tag, Manifest, LegacyImage, Label, SecurityScanStatus,
- Blob, ShallowTag, LikelyVulnerableTag)
-from data.registry_model.shared import SharedModel
-from data.registry_model.label_handlers import apply_label_to_manifest
-from image.docker import ManifestException
-from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
-from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
-
-
-logger = logging.getLogger(__name__)
-
-
-class OCIModel(SharedModel, RegistryDataInterface):
- """
- OCIModel implements the data model for the registry API using a database schema
- after it was changed to support the OCI specification.
- """
- def __init__(self, oci_model_only=True):
- self.oci_model_only = oci_model_only
-
- def supports_schema2(self, namespace_name):
- """ Returns whether the implementation of the data interface supports schema 2 format
- manifests. """
- return True
-
- def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
- """ Returns the legacy image ID for the tag with a legacy images in
- the repository. Returns None if None.
- """
- tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
- if tag is None:
- return None
-
- if tag.legacy_image_if_present is not None:
- return tag.legacy_image_if_present.docker_image_id
-
- if tag.manifest.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
- # See if we can lookup a schema1 legacy image.
- v1_compatible = self.get_schema1_parsed_manifest(tag.manifest, '', '', '', storage)
- if v1_compatible is not None:
- return v1_compatible.leaf_layer_v1_image_id
-
- return None
-
- def get_legacy_tags_map(self, repository_ref, storage):
- """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
- the repository. Note that this can be a *very* heavy operation.
- """
- tags = oci.tag.list_alive_tags(repository_ref._db_id)
- legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
-
- tags_map = {}
- for tag in tags:
- legacy_image = legacy_images_map.get(tag.id)
- if legacy_image is not None:
- tags_map[tag.name] = legacy_image.docker_image_id
- else:
- manifest = Manifest.for_manifest(tag.manifest, None)
- if legacy_image is None and manifest.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
- # See if we can lookup a schema1 legacy image.
- v1_compatible = self.get_schema1_parsed_manifest(manifest, '', '', '', storage)
- if v1_compatible is not None:
- v1_id = v1_compatible.leaf_layer_v1_image_id
- if v1_id is not None:
- tags_map[tag.name] = v1_id
-
- return tags_map
-
- def _get_legacy_compatible_image_for_manifest(self, manifest, storage):
- # Check for a legacy image directly on the manifest.
- if manifest.media_type != DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
- return oci.shared.get_legacy_image_for_manifest(manifest._db_id)
-
- # Otherwise, lookup a legacy image associated with the v1-compatible manifest
- # in the list.
- try:
- manifest_obj = database.Manifest.get(id=manifest._db_id)
- except database.Manifest.DoesNotExist:
- logger.exception('Could not find manifest for manifest `%s`', manifest._db_id)
- return None
-
- # See if we can lookup a schema1 legacy image.
- v1_compatible = self.get_schema1_parsed_manifest(manifest, '', '', '', storage)
- if v1_compatible is None:
- return None
-
- v1_id = v1_compatible.leaf_layer_v1_image_id
- if v1_id is None:
- return None
-
- return model.image.get_image(manifest_obj.repository_id, v1_id)
-
- def find_matching_tag(self, repository_ref, tag_names):
- """ Finds an alive tag in the repository matching one of the given tag names and returns it
- or None if none.
- """
- found_tag = oci.tag.find_matching_tag(repository_ref._db_id, tag_names)
- assert found_tag is None or not found_tag.hidden
- return Tag.for_tag(found_tag)
-
- def get_most_recent_tag(self, repository_ref):
- """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
- None.
- """
- found_tag = oci.tag.get_most_recent_tag(repository_ref._db_id)
- assert found_tag is None or not found_tag.hidden
- return Tag.for_tag(found_tag)
-
- def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
- """ Returns the manifest associated with the given tag. """
- legacy_image = None
- if include_legacy_image:
- legacy_image = oci.shared.get_legacy_image_for_manifest(tag._manifest)
-
- return Manifest.for_manifest(tag._manifest, LegacyImage.for_image(legacy_image))
-
- def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False,
- include_legacy_image=False, require_available=False):
- """ Looks up the manifest with the given digest under the given repository and returns it
- or None if none. """
- manifest = oci.manifest.lookup_manifest(repository_ref._db_id, manifest_digest,
- allow_dead=allow_dead,
- require_available=require_available)
- if manifest is None:
- return None
-
- legacy_image = None
- if include_legacy_image:
- try:
- legacy_image_id = database.ManifestLegacyImage.get(manifest=manifest).image.docker_image_id
- legacy_image = self.get_legacy_image(repository_ref, legacy_image_id, include_parents=True)
- except database.ManifestLegacyImage.DoesNotExist:
- pass
-
- return Manifest.for_manifest(manifest, legacy_image)
-
- def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
- """ Creates a label on the manifest with the given key and value. """
- label_data = dict(key=key, value=value, source_type_name=source_type_name,
- media_type_name=media_type_name)
-
- # Create the label itself.
- label = oci.label.create_manifest_label(manifest._db_id, key, value, source_type_name,
- media_type_name,
- adjust_old_model=not self.oci_model_only)
- if label is None:
- return None
-
- # Apply any changes to the manifest that the label prescribes.
- apply_label_to_manifest(label_data, manifest, self)
-
- return Label.for_label(label)
-
- @contextmanager
- def batch_create_manifest_labels(self, manifest):
- """ Returns a context manager for batch creation of labels on a manifest.
-
- Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
- on the validation errors.
- """
- labels_to_add = []
- def add_label(key, value, source_type_name, media_type_name=None):
- labels_to_add.append(dict(key=key, value=value, source_type_name=source_type_name,
- media_type_name=media_type_name))
-
- yield add_label
-
- # TODO: make this truly batch once we've fully transitioned to V2_2 and no longer need
- # the mapping tables.
- for label_data in labels_to_add:
- with db_transaction():
- # Create the label itself.
- oci.label.create_manifest_label(manifest._db_id, **label_data)
-
- # Apply any changes to the manifest that the label prescribes.
- apply_label_to_manifest(label_data, manifest, self)
-
- def list_manifest_labels(self, manifest, key_prefix=None):
- """ Returns all labels found on the manifest. If specified, the key_prefix will filter the
- labels returned to those keys that start with the given prefix.
- """
- labels = oci.label.list_manifest_labels(manifest._db_id, prefix_filter=key_prefix)
- return [Label.for_label(l) for l in labels]
-
- def get_manifest_label(self, manifest, label_uuid):
- """ Returns the label with the specified UUID on the manifest or None if none. """
- return Label.for_label(oci.label.get_manifest_label(label_uuid, manifest._db_id))
-
- def delete_manifest_label(self, manifest, label_uuid):
- """ Delete the label with the specified UUID on the manifest. Returns the label deleted
- or None if none.
- """
- return Label.for_label(oci.label.delete_manifest_label(label_uuid, manifest._db_id))
-
- def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
- """
- Returns a page of active tags in a repository. Note that the tags returned by this method
- are ShallowTag objects, which only contain the tag name.
- """
- tags = oci.tag.lookup_alive_tags_shallow(repository_ref._db_id, start_pagination_id, limit)
- return [ShallowTag.for_tag(tag) for tag in tags]
-
- def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False):
- """
- Returns a list of all the active tags in the repository. Note that this is a *HEAVY*
- operation on repositories with a lot of tags, and should only be used for testing or
- where other more specific operations are not possible.
- """
- tags = list(oci.tag.list_alive_tags(repository_ref._db_id))
- legacy_images_map = {}
- if include_legacy_images:
- legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
-
- return [Tag.for_tag(tag, legacy_image=LegacyImage.for_image(legacy_images_map.get(tag.id)))
- for tag in tags]
-
- def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
- active_tags_only=False, since_time_ms=None):
- """
- Returns the history of all tags in the repository (unless filtered). This includes tags that
- have been made inactive due to newer versions of those tags coming into service.
- """
- tags, has_more = oci.tag.list_repository_tag_history(repository_ref._db_id,
- page, size,
- specific_tag_name,
- active_tags_only,
- since_time_ms)
-
- # TODO: do we need legacy images here?
- legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
- return [Tag.for_tag(tag, LegacyImage.for_image(legacy_images_map.get(tag.id))) for tag in tags], has_more
-
- def has_expired_tag(self, repository_ref, tag_name):
- """
- Returns true if and only if the repository contains a tag with the given name that is expired.
- """
- return bool(oci.tag.get_expired_tag(repository_ref._db_id, tag_name))
-
- def get_most_recent_tag_lifetime_start(self, repository_refs):
- """
- Returns a map from repository ID to the last modified time (in seconds) for each repository in the
- given repository reference list.
- """
- if not repository_refs:
- return {}
-
- toSeconds = lambda ms: ms / 1000 if ms is not None else None
- last_modified = oci.tag.get_most_recent_tag_lifetime_start([r.id for r in repository_refs])
-
- return {repo_id: toSeconds(ms) for repo_id, ms in last_modified.items()}
-
- def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
- """
- Returns the latest, *active* tag found in the repository, with the matching name
- or None if none.
- """
- assert isinstance(tag_name, basestring)
-
- tag = oci.tag.get_tag(repository_ref._db_id, tag_name)
- if tag is None:
- return None
-
- legacy_image = None
- if include_legacy_image:
- legacy_images = oci.tag.get_legacy_images_for_tags([tag])
- legacy_image = legacy_images.get(tag.id)
-
- return Tag.for_tag(tag, legacy_image=LegacyImage.for_image(legacy_image))
-
- def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
- storage, raise_on_error=False):
- """ Creates a manifest in a repository, adding all of the necessary data in the model.
-
- The `manifest_interface_instance` parameter must be an instance of the manifest
- interface as returned by the image/docker package.
-
- Note that all blobs referenced by the manifest must exist under the repository or this
- method will fail and return None.
-
- Returns a reference to the (created manifest, tag) or (None, None) on error, unless
- raise_on_error is set to True, in which case a CreateManifestException may also be
- raised.
- """
- # Get or create the manifest itself.
- created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id,
- manifest_interface_instance,
- storage,
- for_tagging=True,
- raise_on_error=raise_on_error)
- if created_manifest is None:
- return (None, None)
-
- # Re-target the tag to it.
- tag = oci.tag.retarget_tag(tag_name, created_manifest.manifest,
- adjust_old_model=not self.oci_model_only)
- if tag is None:
- return (None, None)
-
- legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
- li = LegacyImage.for_image(legacy_image)
- wrapped_manifest = Manifest.for_manifest(created_manifest.manifest, li)
-
- # Apply any labels that should modify the created tag.
- if created_manifest.labels_to_apply:
- for key, value in created_manifest.labels_to_apply.iteritems():
- apply_label_to_manifest(dict(key=key, value=value), wrapped_manifest, self)
-
- # Reload the tag in case any updates were applied.
- tag = database.Tag.get(id=tag.id)
-
- return (wrapped_manifest, Tag.for_tag(tag, li))
-
- def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage,
- legacy_manifest_key, is_reversion=False):
- """
- Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
- legacy image specified. If is_reversion is set to True, this operation is considered a
- reversion over a previous tag move operation. Returns the updated Tag or None on error.
- """
- assert legacy_manifest_key is not None
- manifest_id = manifest_or_legacy_image._db_id
- if isinstance(manifest_or_legacy_image, LegacyImage):
- # If a legacy image was required, build a new manifest for it and move the tag to that.
- try:
- image_row = database.Image.get(id=manifest_or_legacy_image._db_id)
- except database.Image.DoesNotExist:
- return None
-
- manifest_instance = self._build_manifest_for_legacy_image(tag_name, image_row)
- if manifest_instance is None:
- return None
-
- created = oci.manifest.get_or_create_manifest(repository_ref._db_id, manifest_instance,
- storage)
- if created is None:
- return None
-
- manifest_id = created.manifest.id
- else:
- # If the manifest is a schema 1 manifest and its tag name does not match that
- # specified, then we need to create a new manifest, but with that tag name.
- if manifest_or_legacy_image.media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
- try:
- parsed = manifest_or_legacy_image.get_parsed_manifest()
- except ManifestException:
- logger.exception('Could not parse manifest `%s` in retarget_tag',
- manifest_or_legacy_image._db_id)
- return None
-
- if parsed.tag != tag_name:
- logger.debug('Rewriting manifest `%s` for tag named `%s`',
- manifest_or_legacy_image._db_id, tag_name)
-
- repository_id = repository_ref._db_id
- updated = parsed.with_tag_name(tag_name, legacy_manifest_key)
- assert updated.is_signed
-
- created = oci.manifest.get_or_create_manifest(repository_id, updated, storage)
- if created is None:
- return None
-
- manifest_id = created.manifest.id
-
- tag = oci.tag.retarget_tag(tag_name, manifest_id, is_reversion=is_reversion)
- legacy_image = LegacyImage.for_image(oci.shared.get_legacy_image_for_manifest(manifest_id))
- return Tag.for_tag(tag, legacy_image)
-
- def delete_tag(self, repository_ref, tag_name):
- """
- Deletes the latest, *active* tag with the given name in the repository.
- """
- deleted_tag = oci.tag.delete_tag(repository_ref._db_id, tag_name)
- if deleted_tag is None:
- # TODO: This is only needed because preoci raises an exception. Remove and fix
- # expected status codes once PreOCIModel is gone.
- msg = ('Invalid repository tag \'%s\' on repository' % tag_name)
- raise DataModelException(msg)
-
- return Tag.for_tag(deleted_tag)
-
- def delete_tags_for_manifest(self, manifest):
- """
- Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling.
- Returns the tags deleted, if any. Returns None on error.
- """
- deleted_tags = oci.tag.delete_tags_for_manifest(manifest._db_id)
- return [Tag.for_tag(tag) for tag in deleted_tags]
-
- def change_repository_tag_expiration(self, tag, expiration_date):
- """ Sets the expiration date of the tag under the matching repository to that given. If the
- expiration date is None, then the tag will not expire. Returns a tuple of the previous
- expiration timestamp in seconds (if any), and whether the operation succeeded.
- """
- return oci.tag.change_tag_expiration(tag._db_id, expiration_date)
-
- def get_legacy_images_owned_by_tag(self, tag):
- """ Returns all legacy images *solely owned and used* by the given tag. """
- tag_obj = oci.tag.get_tag_by_id(tag._db_id)
- if tag_obj is None:
- return None
-
- tags = oci.tag.list_alive_tags(tag_obj.repository_id)
- legacy_images = oci.tag.get_legacy_images_for_tags(tags)
-
- tag_legacy_image = legacy_images.get(tag._db_id)
- if tag_legacy_image is None:
- return None
-
- assert isinstance(tag_legacy_image, Image)
-
- # Collect the IDs of all images that the tag uses.
- tag_image_ids = set()
- tag_image_ids.add(tag_legacy_image.id)
- tag_image_ids.update(tag_legacy_image.ancestor_id_list())
-
- # Remove any images shared by other tags.
- for current in tags:
- if current == tag_obj:
- continue
-
- current_image = legacy_images.get(current.id)
- if current_image is None:
- continue
-
- tag_image_ids.discard(current_image.id)
- tag_image_ids = tag_image_ids.difference(current_image.ancestor_id_list())
- if not tag_image_ids:
- return []
-
- if not tag_image_ids:
- return []
-
- # Load the images we need to return.
- images = database.Image.select().where(database.Image.id << list(tag_image_ids))
- all_image_ids = set()
- for image in images:
- all_image_ids.add(image.id)
- all_image_ids.update(image.ancestor_id_list())
-
- # Build a map of all the images and their parents.
- images_map = {}
- all_images = database.Image.select().where(database.Image.id << list(all_image_ids))
- for image in all_images:
- images_map[image.id] = image
-
- return [LegacyImage.for_image(image, images_map=images_map) for image in images]
-
- def get_security_status(self, manifest_or_legacy_image):
- """ Returns the security status for the given manifest or legacy image or None if none. """
- image = None
-
- if isinstance(manifest_or_legacy_image, Manifest):
- image = oci.shared.get_legacy_image_for_manifest(manifest_or_legacy_image._db_id)
- if image is None:
- return SecurityScanStatus.UNSUPPORTED
- else:
- try:
- image = database.Image.get(id=manifest_or_legacy_image._db_id)
- except database.Image.DoesNotExist:
- return None
-
- if image.security_indexed_engine is not None and image.security_indexed_engine >= 0:
- return SecurityScanStatus.SCANNED if image.security_indexed else SecurityScanStatus.FAILED
-
- return SecurityScanStatus.QUEUED
-
- def reset_security_status(self, manifest_or_legacy_image):
- """ Resets the security status for the given manifest or legacy image, ensuring that it will
- get re-indexed.
- """
- image = None
-
- if isinstance(manifest_or_legacy_image, Manifest):
- image = oci.shared.get_legacy_image_for_manifest(manifest_or_legacy_image._db_id)
- if image is None:
- return None
- else:
- try:
- image = database.Image.get(id=manifest_or_legacy_image._db_id)
- except database.Image.DoesNotExist:
- return None
-
- assert image
- image.security_indexed = False
- image.security_indexed_engine = IMAGE_NOT_SCANNED_ENGINE_VERSION
- image.save()
-
- def backfill_manifest_for_tag(self, tag):
- """ Backfills a manifest for the V1 tag specified.
- If a manifest already exists for the tag, returns that manifest.
-
- NOTE: This method will only be necessary until we've completed the backfill, at which point
- it should be removed.
- """
- # Nothing to do for OCI tags.
- manifest = tag.manifest
- if manifest is None:
- return None
-
- legacy_image = oci.shared.get_legacy_image_for_manifest(manifest)
- return Manifest.for_manifest(manifest, LegacyImage.for_image(legacy_image))
-
- def list_manifest_layers(self, manifest, storage, include_placements=False):
- try:
- manifest_obj = database.Manifest.get(id=manifest._db_id)
- except database.Manifest.DoesNotExist:
- logger.exception('Could not find manifest for manifest `%s`', manifest._db_id)
- return None
-
- try:
- parsed = manifest.get_parsed_manifest()
- except ManifestException:
- logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
- return None
-
- return self._list_manifest_layers(manifest_obj.repository_id, parsed, storage,
- include_placements, by_manifest=True)
-
- def lookup_derived_image(self, manifest, verb, storage, varying_metadata=None,
- include_placements=False):
- """
- Looks up the derived image for the given manifest, verb and optional varying metadata and
- returns it or None if none.
- """
- legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage)
- if legacy_image is None:
- return None
-
- derived = model.image.find_derived_storage_for_image(legacy_image, verb, varying_metadata)
- return self._build_derived(derived, verb, varying_metadata, include_placements)
-
- def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
- varying_metadata=None,
- include_placements=False):
- """
- Looks up the derived image for the given manifest, verb and optional varying metadata
- and returns it. If none exists, a new derived image is created.
- """
- legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage)
- if legacy_image is None:
- return None
-
- derived = model.image.find_or_create_derived_storage(legacy_image, verb, storage_location,
- varying_metadata)
- return self._build_derived(derived, verb, varying_metadata, include_placements)
-
- def set_tags_expiration_for_manifest(self, manifest, expiration_sec):
- """
- Sets the expiration on all tags that point to the given manifest to that specified.
- """
- oci.tag.set_tag_expiration_sec_for_manifest(manifest._db_id, expiration_sec)
-
- def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
- """ Returns the schema 1 manifest for this manifest, or None if none. """
- try:
- parsed = manifest.get_parsed_manifest()
- except ManifestException:
- return None
-
- try:
- manifest_row = database.Manifest.get(id=manifest._db_id)
- except database.Manifest.DoesNotExist:
- return None
-
- retriever = RepositoryContentRetriever(manifest_row.repository_id, storage)
- return parsed.get_schema1_manifest(namespace_name, repo_name, tag_name, retriever)
-
- def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes,
- storage):
- try:
- parsed = manifest.get_parsed_manifest()
- except ManifestException:
- return None
-
- try:
- manifest_row = database.Manifest.get(id=manifest._db_id)
- except database.Manifest.DoesNotExist:
- return None
-
- retriever = RepositoryContentRetriever(manifest_row.repository_id, storage)
- return parsed.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name,
- retriever)
-
- def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
- expiration_sec, storage):
- """ Creates a manifest under the repository and sets a temporary tag to point to it.
- Returns the manifest object created or None on error.
- """
- # Get or create the manifest itself. get_or_create_manifest will take care of the
- # temporary tag work.
- created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id,
- manifest_interface_instance,
- storage,
- temp_tag_expiration_sec=expiration_sec)
- if created_manifest is None:
- return None
-
- legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
- li = LegacyImage.for_image(legacy_image)
- return Manifest.for_manifest(created_manifest.manifest, li)
-
- def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
- """
-    Returns the blob in the repository with the given digest, if any, or None if none. Note that
- there may be multiple records in the same repository for the same blob digest, so the return
- value of this function may change.
- """
- image_storage = self._get_shared_storage(blob_digest)
- if image_storage is None:
- image_storage = oci.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
- if image_storage is None:
- return None
-
- assert image_storage.cas_path is not None
-
- placements = None
- if include_placements:
- placements = list(model.storage.get_storage_locations(image_storage.uuid))
-
- return Blob.for_image_storage(image_storage,
- storage_path=model.storage.get_layer_path(image_storage),
- placements=placements)
-
- def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
- include_placements=False):
- """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
- and working towards the leaf, including the associated Blob and its placements
- (if specified).
- """
- return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage,
- include_placements=include_placements,
- by_manifest=True)
-
- def get_manifest_local_blobs(self, manifest, include_placements=False):
- """ Returns the set of local blobs for the given manifest or None if none. """
- try:
- manifest_row = database.Manifest.get(id=manifest._db_id)
- except database.Manifest.DoesNotExist:
- return None
-
- return self._get_manifest_local_blobs(manifest, manifest_row.repository_id, include_placements,
- by_manifest=True)
-
- def yield_tags_for_vulnerability_notification(self, layer_id_pairs):
- """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories
- which have been registered for vulnerability_found notifications. Returns an iterator
- of LikelyVulnerableTag instances.
- """
- for docker_image_id, storage_uuid in layer_id_pairs:
- tags = oci.tag.lookup_notifiable_tags_for_legacy_image(docker_image_id, storage_uuid,
- 'vulnerability_found')
- for tag in tags:
- yield LikelyVulnerableTag.for_tag(tag, tag.repository, docker_image_id, storage_uuid)
-
-oci_model = OCIModel()
-back_compat_oci_model = OCIModel(oci_model_only=False)
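For orientation while reviewing this removal: a minimal sketch of how a notification worker might have consumed yield_tags_for_vulnerability_notification on the oci_model singleton defined above. The layer ID pairs and the print call are illustrative placeholders, not taken from this diff.

    # Hypothetical caller; each pair is (docker_image_id, storage_uuid).
    layer_id_pairs = [('abc123', 'storage-uuid-1'), ('def456', 'storage-uuid-2')]
    for likely_vulnerable_tag in oci_model.yield_tags_for_vulnerability_notification(layer_id_pairs):
        # Each yielded item is a LikelyVulnerableTag in a repository registered
        # for 'vulnerability_found' notifications.
        print(likely_vulnerable_tag)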
diff --git a/data/registry_model/registry_pre_oci_model.py b/data/registry_model/registry_pre_oci_model.py
deleted file mode 100644
index ec69328d5..000000000
--- a/data/registry_model/registry_pre_oci_model.py
+++ /dev/null
@@ -1,694 +0,0 @@
-# pylint: disable=protected-access
-import logging
-
-from contextlib import contextmanager
-
-from peewee import IntegrityError, fn
-
-from data import database
-from data import model
-from data.database import db_transaction, IMAGE_NOT_SCANNED_ENGINE_VERSION
-from data.registry_model.interface import RegistryDataInterface
-from data.registry_model.datatypes import (Tag, Manifest, LegacyImage, Label, SecurityScanStatus,
- Blob, RepositoryReference, ShallowTag,
- LikelyVulnerableTag)
-from data.registry_model.shared import SharedModel
-from data.registry_model.label_handlers import apply_label_to_manifest
-from image.docker.schema1 import ManifestException, DockerSchema1Manifest
-from util.validation import is_json
-
-
-logger = logging.getLogger(__name__)
-
-
-class PreOCIModel(SharedModel, RegistryDataInterface):
- """
- PreOCIModel implements the data model for the registry API using a database schema
- before it was changed to support the OCI specification.
- """
- def supports_schema2(self, namespace_name):
- """ Returns whether the implementation of the data interface supports schema 2 format
- manifests. """
- return False
-
- def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
- """ Returns the legacy image ID for the tag with a legacy images in
- the repository. Returns None if None.
- """
- tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
- if tag is None:
- return None
-
- return tag.legacy_image.docker_image_id
-
- def get_legacy_tags_map(self, repository_ref, storage):
- """ Returns a map from tag name to its legacy image, for all tags with legacy images in
- the repository.
- """
- tags = self.list_all_active_repository_tags(repository_ref, include_legacy_images=True)
- return {tag.name: tag.legacy_image.docker_image_id for tag in tags}
-
- def find_matching_tag(self, repository_ref, tag_names):
- """ Finds an alive tag in the repository matching one of the given tag names and returns it
- or None if none.
- """
- found_tag = model.tag.find_matching_tag(repository_ref._db_id, tag_names)
- assert found_tag is None or not found_tag.hidden
- return Tag.for_repository_tag(found_tag)
-
- def get_most_recent_tag(self, repository_ref):
- """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
- None.
- """
- found_tag = model.tag.get_most_recent_tag(repository_ref._db_id)
- assert found_tag is None or not found_tag.hidden
- return Tag.for_repository_tag(found_tag)
-
- def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
- """ Returns the manifest associated with the given tag. """
- try:
- tag_manifest = database.TagManifest.get(tag_id=tag._db_id)
- except database.TagManifest.DoesNotExist:
- if backfill_if_necessary:
- return self.backfill_manifest_for_tag(tag)
-
- return None
-
- return Manifest.for_tag_manifest(tag_manifest)
-
- def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False,
- include_legacy_image=False, require_available=False):
- """ Looks up the manifest with the given digest under the given repository and returns it
- or None if none. """
- repo = model.repository.lookup_repository(repository_ref._db_id)
- if repo is None:
- return None
-
- try:
- tag_manifest = model.tag.load_manifest_by_digest(repo.namespace_user.username,
- repo.name,
- manifest_digest,
- allow_dead=allow_dead)
- except model.tag.InvalidManifestException:
- return None
-
- legacy_image = None
- if include_legacy_image:
- legacy_image = self.get_legacy_image(repository_ref, tag_manifest.tag.image.docker_image_id,
- include_parents=True)
-
- return Manifest.for_tag_manifest(tag_manifest, legacy_image)
-
- def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
- storage, raise_on_error=False):
- """ Creates a manifest in a repository, adding all of the necessary data in the model.
-
- The `manifest_interface_instance` parameter must be an instance of the manifest
- interface as returned by the image/docker package.
-
- Note that all blobs referenced by the manifest must exist under the repository or this
- method will fail and return None.
-
- Returns a reference to the (created manifest, tag) or (None, None) on error.
- """
- # NOTE: Only Schema1 is supported by the pre_oci_model.
- assert isinstance(manifest_interface_instance, DockerSchema1Manifest)
- if not manifest_interface_instance.layers:
- return None, None
-
- # Ensure all the blobs in the manifest exist.
- digests = manifest_interface_instance.checksums
- query = self._lookup_repo_storages_by_content_checksum(repository_ref._db_id, digests)
- blob_map = {s.content_checksum: s for s in query}
- for layer in manifest_interface_instance.layers:
- digest_str = str(layer.digest)
- if digest_str not in blob_map:
- return None, None
-
- # Lookup all the images and their parent images (if any) inside the manifest.
- # This will let us know which v1 images we need to synthesize and which ones are invalid.
- docker_image_ids = list(manifest_interface_instance.legacy_image_ids)
- images_query = model.image.lookup_repository_images(repository_ref._db_id, docker_image_ids)
- image_storage_map = {i.docker_image_id: i.storage for i in images_query}
-
- # Rewrite any v1 image IDs that do not match the checksum in the database.
- try:
- rewritten_images = manifest_interface_instance.rewrite_invalid_image_ids(image_storage_map)
- rewritten_images = list(rewritten_images)
- parent_image_map = {}
-
- for rewritten_image in rewritten_images:
- if not rewritten_image.image_id in image_storage_map:
- parent_image = None
- if rewritten_image.parent_image_id:
- parent_image = parent_image_map.get(rewritten_image.parent_image_id)
- if parent_image is None:
- parent_image = model.image.get_image(repository_ref._db_id,
- rewritten_image.parent_image_id)
- if parent_image is None:
- return None, None
-
- synthesized = model.image.synthesize_v1_image(
- repository_ref._db_id,
- blob_map[rewritten_image.content_checksum].id,
- blob_map[rewritten_image.content_checksum].image_size,
- rewritten_image.image_id,
- rewritten_image.created,
- rewritten_image.comment,
- rewritten_image.command,
- rewritten_image.compat_json,
- parent_image,
- )
-
- parent_image_map[rewritten_image.image_id] = synthesized
- except ManifestException:
- logger.exception("exception when rewriting v1 metadata")
- return None, None
-
- # Store the manifest pointing to the tag.
- leaf_layer_id = rewritten_images[-1].image_id
- tag_manifest, newly_created = model.tag.store_tag_manifest_for_repo(repository_ref._db_id,
- tag_name,
- manifest_interface_instance,
- leaf_layer_id,
- blob_map)
-
- manifest = Manifest.for_tag_manifest(tag_manifest)
-
- # Save the labels on the manifest.
- repo_tag = tag_manifest.tag
- if newly_created:
- has_labels = False
- with self.batch_create_manifest_labels(manifest) as add_label:
- if add_label is None:
- return None, None
-
- for key, value in manifest_interface_instance.layers[-1].v1_metadata.labels.iteritems():
- media_type = 'application/json' if is_json(value) else 'text/plain'
- add_label(key, value, 'manifest', media_type)
- has_labels = True
-
- # Reload the tag in case any updates were applied.
- if has_labels:
- repo_tag = database.RepositoryTag.get(id=repo_tag.id)
-
- return manifest, Tag.for_repository_tag(repo_tag)
-
- def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
- """ Creates a label on the manifest with the given key and value. """
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- return None
-
- label_data = dict(key=key, value=value, source_type_name=source_type_name,
- media_type_name=media_type_name)
-
- with db_transaction():
- # Create the label itself.
- label = model.label.create_manifest_label(tag_manifest, key, value, source_type_name,
- media_type_name)
-
- # Apply any changes to the manifest that the label prescribes.
- apply_label_to_manifest(label_data, manifest, self)
-
- return Label.for_label(label)
-
- @contextmanager
- def batch_create_manifest_labels(self, manifest):
- """ Returns a context manager for batch creation of labels on a manifest.
-
- Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
- on the validation errors.
- """
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- yield None
- return
-
- labels_to_add = []
- def add_label(key, value, source_type_name, media_type_name=None):
- labels_to_add.append(dict(key=key, value=value, source_type_name=source_type_name,
- media_type_name=media_type_name))
-
- yield add_label
-
- # TODO: make this truly batch once we've fully transitioned to V2_2 and no longer need
- # the mapping tables.
- for label in labels_to_add:
- with db_transaction():
- # Create the label itself.
- model.label.create_manifest_label(tag_manifest, **label)
-
- # Apply any changes to the manifest that the label prescribes.
- apply_label_to_manifest(label, manifest, self)
-
- def list_manifest_labels(self, manifest, key_prefix=None):
- """ Returns all labels found on the manifest. If specified, the key_prefix will filter the
- labels returned to those keys that start with the given prefix.
- """
- labels = model.label.list_manifest_labels(manifest._db_id, prefix_filter=key_prefix)
- return [Label.for_label(l) for l in labels]
-
- def get_manifest_label(self, manifest, label_uuid):
- """ Returns the label with the specified UUID on the manifest or None if none. """
- return Label.for_label(model.label.get_manifest_label(label_uuid, manifest._db_id))
-
- def delete_manifest_label(self, manifest, label_uuid):
- """ Delete the label with the specified UUID on the manifest. Returns the label deleted
- or None if none.
- """
- return Label.for_label(model.label.delete_manifest_label(label_uuid, manifest._db_id))
-
- def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
- """
-    Returns a page of active tags in a repository. Note that the tags returned by this method
- are ShallowTag objects, which only contain the tag name.
- """
- tags = model.tag.list_active_repo_tags(repository_ref._db_id, include_images=False,
- start_id=start_pagination_id, limit=limit)
- return [ShallowTag.for_repository_tag(tag) for tag in tags]
-
- def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False):
- """
- Returns a list of all the active tags in the repository. Note that this is a *HEAVY*
- operation on repositories with a lot of tags, and should only be used for testing or
- where other more specific operations are not possible.
- """
- if not include_legacy_images:
- tags = model.tag.list_active_repo_tags(repository_ref._db_id, include_images=False)
- return [Tag.for_repository_tag(tag) for tag in tags]
-
- tags = model.tag.list_active_repo_tags(repository_ref._db_id)
- return [Tag.for_repository_tag(tag,
- legacy_image=LegacyImage.for_image(tag.image),
- manifest_digest=(tag.tagmanifest.digest
- if hasattr(tag, 'tagmanifest')
- else None))
- for tag in tags]
-
- def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
- active_tags_only=False, since_time_ms=None):
- """
- Returns the history of all tags in the repository (unless filtered). This includes tags that
-    have been made inactive due to newer versions of those tags coming into service.
- """
-
- # Only available on OCI model
- if since_time_ms is not None:
- raise NotImplementedError
-
- tags, manifest_map, has_more = model.tag.list_repository_tag_history(repository_ref._db_id,
- page, size,
- specific_tag_name,
- active_tags_only)
- return [Tag.for_repository_tag(tag, manifest_map.get(tag.id),
- legacy_image=LegacyImage.for_image(tag.image))
- for tag in tags], has_more
-
- def has_expired_tag(self, repository_ref, tag_name):
- """
- Returns true if and only if the repository contains a tag with the given name that is expired.
- """
- try:
- model.tag.get_expired_tag_in_repo(repository_ref._db_id, tag_name)
- return True
- except database.RepositoryTag.DoesNotExist:
- return False
-
- def get_most_recent_tag_lifetime_start(self, repository_refs):
- """
- Returns a map from repository ID to the last modified time (in s) for each repository in the
- given repository reference list.
- """
- if not repository_refs:
- return {}
-
- tuples = (database.RepositoryTag.select(database.RepositoryTag.repository,
- fn.Max(database.RepositoryTag.lifetime_start_ts))
- .where(database.RepositoryTag.repository << [r.id for r in repository_refs])
- .group_by(database.RepositoryTag.repository)
- .tuples())
-
- return {repo_id: seconds for repo_id, seconds in tuples}
-
- def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
- """
- Returns the latest, *active* tag found in the repository, with the matching name
- or None if none.
- """
- assert isinstance(tag_name, basestring)
- tag = model.tag.get_active_tag_for_repo(repository_ref._db_id, tag_name)
- if tag is None:
- return None
-
- legacy_image = LegacyImage.for_image(tag.image) if include_legacy_image else None
- tag_manifest = model.tag.get_tag_manifest(tag)
- manifest_digest = tag_manifest.digest if tag_manifest else None
- return Tag.for_repository_tag(tag, legacy_image=legacy_image, manifest_digest=manifest_digest)
-
- def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage,
- legacy_manifest_key, is_reversion=False):
- """
- Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
- legacy image specified. If is_reversion is set to True, this operation is considered a
- reversion over a previous tag move operation. Returns the updated Tag or None on error.
- """
- # TODO: unify this.
- assert legacy_manifest_key is not None
- if not is_reversion:
- if isinstance(manifest_or_legacy_image, Manifest):
- raise NotImplementedError('Not yet implemented')
- else:
- model.tag.create_or_update_tag_for_repo(repository_ref._db_id, tag_name,
- manifest_or_legacy_image.docker_image_id)
- else:
- if isinstance(manifest_or_legacy_image, Manifest):
- model.tag.restore_tag_to_manifest(repository_ref._db_id, tag_name,
- manifest_or_legacy_image.digest)
- else:
- model.tag.restore_tag_to_image(repository_ref._db_id, tag_name,
- manifest_or_legacy_image.docker_image_id)
-
- # Generate a manifest for the tag, if necessary.
- tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
- if tag is None:
- return None
-
- self.backfill_manifest_for_tag(tag)
- return tag
-
- def delete_tag(self, repository_ref, tag_name):
- """
- Deletes the latest, *active* tag with the given name in the repository.
- """
- repo = model.repository.lookup_repository(repository_ref._db_id)
- if repo is None:
- return None
-
- deleted_tag = model.tag.delete_tag(repo.namespace_user.username, repo.name, tag_name)
- return Tag.for_repository_tag(deleted_tag)
-
- def delete_tags_for_manifest(self, manifest):
- """
- Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling.
- Returns the tags deleted, if any. Returns None on error.
- """
- try:
- tagmanifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- return None
-
- namespace_name = tagmanifest.tag.repository.namespace_user.username
- repo_name = tagmanifest.tag.repository.name
- tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, manifest.digest)
- return [Tag.for_repository_tag(tag) for tag in tags]
-
- def change_repository_tag_expiration(self, tag, expiration_date):
- """ Sets the expiration date of the tag under the matching repository to that given. If the
- expiration date is None, then the tag will not expire. Returns a tuple of the previous
- expiration timestamp in seconds (if any), and whether the operation succeeded.
- """
- try:
- tag_obj = database.RepositoryTag.get(id=tag._db_id)
- except database.RepositoryTag.DoesNotExist:
- return (None, False)
-
- return model.tag.change_tag_expiration(tag_obj, expiration_date)
-
- def get_legacy_images_owned_by_tag(self, tag):
- """ Returns all legacy images *solely owned and used* by the given tag. """
- try:
- tag_obj = database.RepositoryTag.get(id=tag._db_id)
- except database.RepositoryTag.DoesNotExist:
- return None
-
- # Collect the IDs of all images that the tag uses.
- tag_image_ids = set()
- tag_image_ids.add(tag_obj.image.id)
- tag_image_ids.update(tag_obj.image.ancestor_id_list())
-
- # Remove any images shared by other tags.
- for current_tag in model.tag.list_active_repo_tags(tag_obj.repository_id):
- if current_tag == tag_obj:
- continue
-
- tag_image_ids.discard(current_tag.image.id)
- tag_image_ids = tag_image_ids.difference(current_tag.image.ancestor_id_list())
- if not tag_image_ids:
- return []
-
- if not tag_image_ids:
- return []
-
- # Load the images we need to return.
- images = database.Image.select().where(database.Image.id << list(tag_image_ids))
- all_image_ids = set()
- for image in images:
- all_image_ids.add(image.id)
- all_image_ids.update(image.ancestor_id_list())
-
- # Build a map of all the images and their parents.
- images_map = {}
- all_images = database.Image.select().where(database.Image.id << list(all_image_ids))
- for image in all_images:
- images_map[image.id] = image
-
- return [LegacyImage.for_image(image, images_map=images_map) for image in images]
-
- def get_security_status(self, manifest_or_legacy_image):
- """ Returns the security status for the given manifest or legacy image or None if none. """
- image = None
-
- if isinstance(manifest_or_legacy_image, Manifest):
- try:
- tag_manifest = database.TagManifest.get(id=manifest_or_legacy_image._db_id)
- image = tag_manifest.tag.image
- except database.TagManifest.DoesNotExist:
- return None
- else:
- try:
- image = database.Image.get(id=manifest_or_legacy_image._db_id)
- except database.Image.DoesNotExist:
- return None
-
- if image.security_indexed_engine is not None and image.security_indexed_engine >= 0:
- return SecurityScanStatus.SCANNED if image.security_indexed else SecurityScanStatus.FAILED
-
- return SecurityScanStatus.QUEUED
-
- def reset_security_status(self, manifest_or_legacy_image):
- """ Resets the security status for the given manifest or legacy image, ensuring that it will
- get re-indexed.
- """
- image = None
-
- if isinstance(manifest_or_legacy_image, Manifest):
- try:
- tag_manifest = database.TagManifest.get(id=manifest_or_legacy_image._db_id)
- image = tag_manifest.tag.image
- except database.TagManifest.DoesNotExist:
- return None
- else:
- try:
- image = database.Image.get(id=manifest_or_legacy_image._db_id)
- except database.Image.DoesNotExist:
- return None
-
- assert image
- image.security_indexed = False
- image.security_indexed_engine = IMAGE_NOT_SCANNED_ENGINE_VERSION
- image.save()
-
- def backfill_manifest_for_tag(self, tag):
- """ Backfills a manifest for the V1 tag specified.
- If a manifest already exists for the tag, returns that manifest.
-
- NOTE: This method will only be necessary until we've completed the backfill, at which point
- it should be removed.
- """
- # Ensure that there isn't already a manifest for the tag.
- tag_manifest = model.tag.get_tag_manifest(tag._db_id)
- if tag_manifest is not None:
- return Manifest.for_tag_manifest(tag_manifest)
-
- # Create the manifest.
- try:
- tag_obj = database.RepositoryTag.get(id=tag._db_id)
- except database.RepositoryTag.DoesNotExist:
- return None
-
- assert not tag_obj.hidden
-
- repo = tag_obj.repository
-
- # Write the manifest to the DB.
- manifest = self._build_manifest_for_legacy_image(tag_obj.name, tag_obj.image)
- if manifest is None:
- return None
-
- blob_query = self._lookup_repo_storages_by_content_checksum(repo, manifest.checksums)
- storage_map = {blob.content_checksum: blob.id for blob in blob_query}
- try:
- tag_manifest = model.tag.associate_generated_tag_manifest_with_tag(tag_obj, manifest,
- storage_map)
- assert tag_manifest
- except IntegrityError:
- tag_manifest = model.tag.get_tag_manifest(tag_obj)
-
- return Manifest.for_tag_manifest(tag_manifest)
-
- def list_manifest_layers(self, manifest, storage, include_placements=False):
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
- return None
-
- try:
- parsed = manifest.get_parsed_manifest()
- except ManifestException:
- logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
- return None
-
- repo_ref = RepositoryReference.for_id(tag_manifest.tag.repository_id)
- return self.list_parsed_manifest_layers(repo_ref, parsed, storage, include_placements)
-
- def lookup_derived_image(self, manifest, verb, storage, varying_metadata=None,
- include_placements=False):
- """
- Looks up the derived image for the given manifest, verb and optional varying metadata and
- returns it or None if none.
- """
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
- return None
-
- repo_image = tag_manifest.tag.image
- derived = model.image.find_derived_storage_for_image(repo_image, verb, varying_metadata)
- return self._build_derived(derived, verb, varying_metadata, include_placements)
-
- def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
- varying_metadata=None, include_placements=False):
- """
-    Looks up the derived image for the given manifest, verb and optional varying metadata
- and returns it. If none exists, a new derived image is created.
- """
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
- return None
-
- repo_image = tag_manifest.tag.image
- derived = model.image.find_or_create_derived_storage(repo_image, verb, storage_location,
- varying_metadata)
- return self._build_derived(derived, verb, varying_metadata, include_placements)
-
- def set_tags_expiration_for_manifest(self, manifest, expiration_sec):
- """
- Sets the expiration on all tags that point to the given manifest to that specified.
- """
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- return
-
- model.tag.set_tag_expiration_for_manifest(tag_manifest, expiration_sec)
-
- def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
- """ Returns the schema 1 version of this manifest, or None if none. """
- try:
- return manifest.get_parsed_manifest()
- except ManifestException:
- return None
-
- def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes,
- storage):
- try:
- parsed = manifest.get_parsed_manifest()
- except ManifestException:
- return None
-
- try:
- return parsed.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name, None)
- except ManifestException:
- return None
-
- def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
- expiration_sec, storage):
- """ Creates a manifest under the repository and sets a temporary tag to point to it.
- Returns the manifest object created or None on error.
- """
- raise NotImplementedError('Unsupported in pre OCI model')
-
- def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
- """
-    Returns the blob in the repository with the given digest, if any, or None if none. Note that
- there may be multiple records in the same repository for the same blob digest, so the return
- value of this function may change.
- """
- image_storage = self._get_shared_storage(blob_digest)
- if image_storage is None:
- try:
- image_storage = model.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
- except model.BlobDoesNotExist:
- return None
-
- assert image_storage.cas_path is not None
-
- placements = None
- if include_placements:
- placements = list(model.storage.get_storage_locations(image_storage.uuid))
-
- return Blob.for_image_storage(image_storage,
- storage_path=model.storage.get_layer_path(image_storage),
- placements=placements)
-
- def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
- include_placements=False):
- """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
- and working towards the leaf, including the associated Blob and its placements
- (if specified).
- """
- return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage,
- include_placements=include_placements)
-
- def get_manifest_local_blobs(self, manifest, include_placements=False):
- """ Returns the set of local blobs for the given manifest or None if none. """
- try:
- tag_manifest = database.TagManifest.get(id=manifest._db_id)
- except database.TagManifest.DoesNotExist:
- return None
-
- return self._get_manifest_local_blobs(manifest, tag_manifest.tag.repository_id,
- include_placements)
-
- def yield_tags_for_vulnerability_notification(self, layer_id_pairs):
- """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories
- which have been registered for vulnerability_found notifications. Returns an iterator
- of LikelyVulnerableTag instances.
- """
- event = database.ExternalNotificationEvent.get(name='vulnerability_found')
-
- def filter_notifying_repos(query):
- return model.tag.filter_has_repository_event(query, event)
-
- def filter_and_order(query):
- return model.tag.filter_tags_have_repository_event(query, event)
-
- # Find the matching tags.
- tags = model.tag.get_matching_tags_for_images(layer_id_pairs,
- selections=[database.RepositoryTag,
- database.Image,
- database.ImageStorage],
- filter_images=filter_notifying_repos,
- filter_tags=filter_and_order)
- for tag in tags:
- yield LikelyVulnerableTag.for_repository_tag(tag, tag.repository)
-
-
-pre_oci_model = PreOCIModel()
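A minimal sketch of the batch-label pattern that batch_create_manifest_labels exposes, mirroring how create_manifest_and_retarget_tag uses it above; the manifest variable and the label key/value here are placeholders rather than values from this diff.

    # Hypothetical usage against the pre_oci_model singleton defined above.
    with pre_oci_model.batch_create_manifest_labels(manifest) as add_label:
        if add_label is not None:
            # add_label is None when the backing TagManifest row no longer exists.
            # Each call queues a label; the queued labels are written (and any
            # label-driven manifest changes applied) after the context exits.
            add_label('maintainer', 'dev@example.com', 'manifest', 'text/plain')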
diff --git a/data/registry_model/shared.py b/data/registry_model/shared.py
deleted file mode 100644
index 82a01aa67..000000000
--- a/data/registry_model/shared.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# pylint: disable=protected-access
-import logging
-
-from abc import abstractmethod
-from collections import defaultdict
-
-from data import database
-from data import model
-from data.cache import cache_key
-from data.model.oci.retriever import RepositoryContentRetriever
-from data.model.blob import get_shared_blob
-from data.registry_model.datatype import FromDictionaryException
-from data.registry_model.datatypes import (RepositoryReference, Blob, TorrentInfo, BlobUpload,
- LegacyImage, ManifestLayer, DerivedImage, ShallowTag)
-from image.docker.schema1 import ManifestException, DockerSchema1ManifestBuilder
-from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST
-
-logger = logging.getLogger(__name__)
-
-# The maximum size for generated manifest after which we remove extra metadata.
-MAXIMUM_GENERATED_MANIFEST_SIZE = 3 * 1024 * 1024 # 3 MB
-
-class SharedModel:
- """
- SharedModel implements those data model operations for the registry API that are unchanged
- between the old and new data models.
- """
- def lookup_repository(self, namespace_name, repo_name, kind_filter=None):
- """ Looks up and returns a reference to the repository with the given namespace and name,
- or None if none. """
- repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter)
- state = repo.state if repo is not None else None
- return RepositoryReference.for_repo_obj(repo, namespace_name, repo_name,
- repo.namespace_user.stripe_id is None if repo else None,
- state=state)
-
- def is_existing_disabled_namespace(self, namespace_name):
- """ Returns whether the given namespace exists and is disabled. """
- namespace = model.user.get_namespace_user(namespace_name)
- return namespace is not None and not namespace.enabled
-
- def is_namespace_enabled(self, namespace_name):
- """ Returns whether the given namespace exists and is enabled. """
- namespace = model.user.get_namespace_user(namespace_name)
- return namespace is not None and namespace.enabled
-
- def get_derived_image_signature(self, derived_image, signer_name):
- """
- Returns the signature associated with the derived image and a specific signer or None if none.
- """
- try:
- derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
- except database.DerivedStorageForImage.DoesNotExist:
- return None
-
- storage = derived_storage.derivative
- signature_entry = model.storage.lookup_storage_signature(storage, signer_name)
- if signature_entry is None:
- return None
-
- return signature_entry.signature
-
- def set_derived_image_signature(self, derived_image, signer_name, signature):
- """
- Sets the calculated signature for the given derived image and signer to that specified.
- """
- try:
- derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
- except database.DerivedStorageForImage.DoesNotExist:
- return None
-
- storage = derived_storage.derivative
- signature_entry = model.storage.find_or_create_storage_signature(storage, signer_name)
- signature_entry.signature = signature
- signature_entry.uploading = False
- signature_entry.save()
-
- def delete_derived_image(self, derived_image):
- """
- Deletes a derived image and all of its storage.
- """
- try:
- derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
- except database.DerivedStorageForImage.DoesNotExist:
- return None
-
- model.image.delete_derived_storage(derived_storage)
-
- def set_derived_image_size(self, derived_image, compressed_size):
- """
- Sets the compressed size on the given derived image.
- """
- try:
- derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
- except database.DerivedStorageForImage.DoesNotExist:
- return None
-
- storage_entry = derived_storage.derivative
- storage_entry.image_size = compressed_size
- storage_entry.uploading = False
- storage_entry.save()
-
- def get_torrent_info(self, blob):
- """
- Returns the torrent information associated with the given blob or None if none.
- """
- try:
- image_storage = database.ImageStorage.get(id=blob._db_id)
- except database.ImageStorage.DoesNotExist:
- return None
-
- try:
- torrent_info = model.storage.get_torrent_info(image_storage)
- except model.TorrentInfoDoesNotExist:
- return None
-
- return TorrentInfo.for_torrent_info(torrent_info)
-
- def set_torrent_info(self, blob, piece_length, pieces):
- """
-    Sets the torrent information associated with the given blob to that specified.
- """
- try:
- image_storage = database.ImageStorage.get(id=blob._db_id)
- except database.ImageStorage.DoesNotExist:
- return None
-
- torrent_info = model.storage.save_torrent_info(image_storage, piece_length, pieces)
- return TorrentInfo.for_torrent_info(torrent_info)
-
- @abstractmethod
- def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
- pass
-
- def lookup_cached_active_repository_tags(self, model_cache, repository_ref, start_pagination_id,
- limit):
- """
- Returns a page of active tags in a repository. Note that the tags returned by this method
- are ShallowTag objects, which only contain the tag name. This method will automatically cache
- the result and check the cache before making a call.
- """
- def load_tags():
- tags = self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit)
- return [tag.asdict() for tag in tags]
-
- tags_cache_key = cache_key.for_active_repo_tags(repository_ref._db_id, start_pagination_id,
- limit)
- result = model_cache.retrieve(tags_cache_key, load_tags)
-
- try:
- return [ShallowTag.from_dict(tag_dict) for tag_dict in result]
- except FromDictionaryException:
- return self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit)
-
- def get_cached_namespace_region_blacklist(self, model_cache, namespace_name):
- """ Returns a cached set of ISO country codes blacklisted for pulls for the namespace
- or None if the list could not be loaded.
- """
-
- def load_blacklist():
- restrictions = model.user.list_namespace_geo_restrictions(namespace_name)
- if restrictions is None:
- return None
-
- return [restriction.restricted_region_iso_code for restriction in restrictions]
-
- blacklist_cache_key = cache_key.for_namespace_geo_restrictions(namespace_name)
- result = model_cache.retrieve(blacklist_cache_key, load_blacklist)
- if result is None:
- return None
-
- return set(result)
-
- def get_cached_repo_blob(self, model_cache, namespace_name, repo_name, blob_digest):
- """
-    Returns the blob in the repository with the given digest, if any, or None if none.
- Caches the result in the caching system.
- """
- def load_blob():
- repository_ref = self.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- return None
-
- blob_found = self.get_repo_blob_by_digest(repository_ref, blob_digest,
- include_placements=True)
- if blob_found is None:
- return None
-
- return blob_found.asdict()
-
- blob_cache_key = cache_key.for_repository_blob(namespace_name, repo_name, blob_digest, 2)
- blob_dict = model_cache.retrieve(blob_cache_key, load_blob)
-
- try:
- return Blob.from_dict(blob_dict) if blob_dict is not None else None
- except FromDictionaryException:
- # The data was stale in some way. Simply reload.
- repository_ref = self.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- return None
-
- return self.get_repo_blob_by_digest(repository_ref, blob_digest, include_placements=True)
-
- @abstractmethod
- def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
- pass
-
- def create_blob_upload(self, repository_ref, new_upload_id, location_name, storage_metadata):
- """ Creates a new blob upload and returns a reference. If the blob upload could not be
- created, returns None. """
- repo = model.repository.lookup_repository(repository_ref._db_id)
- if repo is None:
- return None
-
- try:
- upload_record = model.blob.initiate_upload_for_repo(repo, new_upload_id, location_name,
- storage_metadata)
- return BlobUpload.for_upload(upload_record, location_name=location_name)
- except database.Repository.DoesNotExist:
- return None
-
- def lookup_blob_upload(self, repository_ref, blob_upload_id):
- """ Looks up the blob upload withn the given ID under the specified repository and returns it
- or None if none.
- """
- upload_record = model.blob.get_blob_upload_by_uuid(blob_upload_id)
- if upload_record is None:
- return None
-
- return BlobUpload.for_upload(upload_record)
-
- def update_blob_upload(self, blob_upload, uncompressed_byte_count, piece_hashes, piece_sha_state,
- storage_metadata, byte_count, chunk_count, sha_state):
- """ Updates the fields of the blob upload to match those given. Returns the updated blob upload
-        or None if the record does not exist.
- """
- upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
- if upload_record is None:
- return None
-
- upload_record.uncompressed_byte_count = uncompressed_byte_count
- upload_record.piece_hashes = piece_hashes
- upload_record.piece_sha_state = piece_sha_state
- upload_record.storage_metadata = storage_metadata
- upload_record.byte_count = byte_count
- upload_record.chunk_count = chunk_count
- upload_record.sha_state = sha_state
- upload_record.save()
- return BlobUpload.for_upload(upload_record)
-
- def delete_blob_upload(self, blob_upload):
- """ Deletes a blob upload record. """
- upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
- if upload_record is not None:
- upload_record.delete_instance()
-
- def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds):
- """ Commits the blob upload into a blob and sets an expiration before that blob will be GCed.
- """
- upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
- if upload_record is None:
- return None
-
- repository_id = upload_record.repository_id
-
- # Create the blob and temporarily tag it.
- location_obj = model.storage.get_image_location_for_name(blob_upload.location_name)
- blob_record = model.blob.store_blob_record_and_temp_link_in_repo(
- repository_id, blob_digest_str, location_obj.id, blob_upload.byte_count,
- blob_expiration_seconds, blob_upload.uncompressed_byte_count)
-
- # Delete the blob upload.
- upload_record.delete_instance()
- return Blob.for_image_storage(blob_record,
- storage_path=model.storage.get_layer_path(blob_record))
-
- def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec):
- """
- Mounts the blob from another repository into the specified target repository, and adds an
- expiration before that blob is automatically GCed. This function is useful during push
- operations if an existing blob from another repository is being pushed. Returns False if
- the mounting fails.
- """
- storage = model.blob.temp_link_blob(target_repository_ref._db_id, blob.digest, expiration_sec)
- return bool(storage)
-
- def get_legacy_images(self, repository_ref):
- """
-    Returns an iterator of all the LegacyImages defined in the matching repository.
- """
- repo = model.repository.lookup_repository(repository_ref._db_id)
- if repo is None:
- return None
-
- all_images = model.image.get_repository_images_without_placements(repo)
- all_images_map = {image.id: image for image in all_images}
-
- all_tags = model.tag.list_repository_tags(repo.namespace_user.username, repo.name)
- tags_by_image_id = defaultdict(list)
- for tag in all_tags:
- tags_by_image_id[tag.image_id].append(tag)
-
- return [LegacyImage.for_image(image, images_map=all_images_map, tags_map=tags_by_image_id)
- for image in all_images]
-
- def get_legacy_image(self, repository_ref, docker_image_id, include_parents=False,
- include_blob=False):
- """
-    Returns the matching LegacyImage under the matching repository, if any. If none,
- returns None.
- """
- repo = model.repository.lookup_repository(repository_ref._db_id)
- if repo is None:
- return None
-
- image = model.image.get_image(repository_ref._db_id, docker_image_id)
- if image is None:
- return None
-
- parent_images_map = None
- if include_parents:
- parent_images = model.image.get_parent_images(repo.namespace_user.username, repo.name, image)
- parent_images_map = {image.id: image for image in parent_images}
-
- blob = None
- if include_blob:
- placements = list(model.storage.get_storage_locations(image.storage.uuid))
- blob = Blob.for_image_storage(image.storage,
- storage_path=model.storage.get_layer_path(image.storage),
- placements=placements)
-
- return LegacyImage.for_image(image, images_map=parent_images_map, blob=blob)
-
- def _get_manifest_local_blobs(self, manifest, repo_id, include_placements=False,
- by_manifest=False):
- parsed = manifest.get_parsed_manifest()
- if parsed is None:
- return None
-
- local_blob_digests = list(set(parsed.local_blob_digests))
- if not len(local_blob_digests):
- return []
-
- blob_query = self._lookup_repo_storages_by_content_checksum(repo_id, local_blob_digests,
- by_manifest=by_manifest)
- blobs = []
- for image_storage in blob_query:
- placements = None
- if include_placements:
- placements = list(model.storage.get_storage_locations(image_storage.uuid))
-
- blob = Blob.for_image_storage(image_storage,
- storage_path=model.storage.get_layer_path(image_storage),
- placements=placements)
- blobs.append(blob)
-
- return blobs
-
- def _list_manifest_layers(self, repo_id, parsed, storage, include_placements=False,
- by_manifest=False):
- """ Returns an *ordered list* of the layers found in the manifest, starting at the base and
- working towards the leaf, including the associated Blob and its placements (if specified).
- Returns None if the manifest could not be parsed and validated.
- """
- assert not parsed.is_manifest_list
-
- retriever = RepositoryContentRetriever(repo_id, storage)
- requires_empty_blob = parsed.get_requires_empty_layer_blob(retriever)
-
- storage_map = {}
- blob_digests = list(parsed.local_blob_digests)
- if requires_empty_blob:
- blob_digests.append(EMPTY_LAYER_BLOB_DIGEST)
-
- if blob_digests:
- blob_query = self._lookup_repo_storages_by_content_checksum(repo_id, blob_digests,
- by_manifest=by_manifest)
- storage_map = {blob.content_checksum: blob for blob in blob_query}
-
-
- layers = parsed.get_layers(retriever)
- if layers is None:
- logger.error('Could not load layers for manifest `%s`', parsed.digest)
- return None
-
- manifest_layers = []
- for layer in layers:
- if layer.is_remote:
- manifest_layers.append(ManifestLayer(layer, None))
- continue
-
- digest_str = str(layer.blob_digest)
- if digest_str not in storage_map:
- logger.error('Missing digest `%s` for manifest `%s`', layer.blob_digest, parsed.digest)
- return None
-
- image_storage = storage_map[digest_str]
- assert image_storage.cas_path is not None
- assert image_storage.image_size is not None
-
- placements = None
- if include_placements:
- placements = list(model.storage.get_storage_locations(image_storage.uuid))
-
- blob = Blob.for_image_storage(image_storage,
- storage_path=model.storage.get_layer_path(image_storage),
- placements=placements)
- manifest_layers.append(ManifestLayer(layer, blob))
-
- return manifest_layers
-
- def _build_derived(self, derived, verb, varying_metadata, include_placements):
- if derived is None:
- return None
-
- derived_storage = derived.derivative
- placements = None
- if include_placements:
- placements = list(model.storage.get_storage_locations(derived_storage.uuid))
-
- blob = Blob.for_image_storage(derived_storage,
- storage_path=model.storage.get_layer_path(derived_storage),
- placements=placements)
-
- return DerivedImage.for_derived_storage(derived, verb, varying_metadata, blob)
-
- def _build_manifest_for_legacy_image(self, tag_name, legacy_image_row):
- import features
-
- from app import app, docker_v2_signing_key
-
- repo = legacy_image_row.repository
- namespace_name = repo.namespace_user.username
- repo_name = repo.name
-
- # Find the v1 metadata for this image and its parents.
- try:
- parents = model.image.get_parent_images(namespace_name, repo_name, legacy_image_row)
- except model.DataModelException:
- logger.exception('Could not load parent images for legacy image %s', legacy_image_row.id)
- return None
-
- # If the manifest is being generated under the library namespace, then we make its namespace
- # empty.
- manifest_namespace = namespace_name
- if features.LIBRARY_SUPPORT and namespace_name == app.config['LIBRARY_NAMESPACE']:
- manifest_namespace = ''
-
- # Create and populate the manifest builder
- builder = DockerSchema1ManifestBuilder(manifest_namespace, repo_name, tag_name)
-
- # Add the leaf layer
- builder.add_layer(legacy_image_row.storage.content_checksum, legacy_image_row.v1_json_metadata)
- if legacy_image_row.storage.uploading:
- logger.error('Cannot add an uploading storage row: %s', legacy_image_row.storage.id)
- return None
-
- for parent_image in parents:
- if parent_image.storage.uploading:
-        logger.error('Cannot add an uploading storage row: %s', parent_image.storage.id)
- return None
-
- builder.add_layer(parent_image.storage.content_checksum, parent_image.v1_json_metadata)
-
- try:
- built_manifest = builder.build(docker_v2_signing_key)
-
- # If the generated manifest is greater than the maximum size, regenerate it with
- # intermediate metadata layers stripped down to their bare essentials.
- if len(built_manifest.bytes.as_encoded_str()) > MAXIMUM_GENERATED_MANIFEST_SIZE:
- built_manifest = builder.with_metadata_removed().build(docker_v2_signing_key)
-
- if len(built_manifest.bytes.as_encoded_str()) > MAXIMUM_GENERATED_MANIFEST_SIZE:
- logger.error('Legacy image is too large to generate manifest')
- return None
-
- return built_manifest
- except ManifestException as me:
- logger.exception('Got exception when trying to build manifest for legacy image %s',
- legacy_image_row)
- return None
-
- def _get_shared_storage(self, blob_digest):
- """ Returns an ImageStorage row for the blob digest if it is a globally shared storage. """
- # If the EMPTY_LAYER_BLOB_DIGEST is in the checksums, look it up directly. Since we have
- # so many duplicate copies in the database currently, looking it up bound to a repository
- # can be incredibly slow, and, since it is defined as a globally shared layer, this is extra
- # work we don't need to do.
- if blob_digest == EMPTY_LAYER_BLOB_DIGEST:
- return get_shared_blob(EMPTY_LAYER_BLOB_DIGEST)
-
- return None
-
- def _lookup_repo_storages_by_content_checksum(self, repo, checksums, by_manifest=False):
- checksums = set(checksums)
-
- # Load any shared storages first.
- extra_storages = []
- for checksum in list(checksums):
- shared_storage = self._get_shared_storage(checksum)
- if shared_storage is not None:
- extra_storages.append(shared_storage)
- checksums.remove(checksum)
-
- found = []
- if checksums:
- found = list(model.storage.lookup_repo_storages_by_content_checksum(repo, checksums,
- by_manifest=by_manifest))
- return found + extra_storages
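A condensed sketch of the size-fallback logic used in _build_manifest_for_legacy_image above: build the schema 1 manifest with full metadata, retry with metadata stripped if the result exceeds MAXIMUM_GENERATED_MANIFEST_SIZE, and give up if it is still too large. The helper name is illustrative; the builder methods and constant are those from the deleted shared.py.

    def build_within_size_limit(builder, signing_key):
        # First attempt: keep the full v1 metadata on every layer.
        built = builder.build(signing_key)
        if len(built.bytes.as_encoded_str()) <= MAXIMUM_GENERATED_MANIFEST_SIZE:
            return built
        # Second attempt: strip intermediate metadata down to the essentials.
        built = builder.with_metadata_removed().build(signing_key)
        if len(built.bytes.as_encoded_str()) <= MAXIMUM_GENERATED_MANIFEST_SIZE:
            return built
        # Still too large: the caller treats this as an error.
        return None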
diff --git a/data/registry_model/test/test_blobuploader.py b/data/registry_model/test/test_blobuploader.py
deleted file mode 100644
index 8b539c617..000000000
--- a/data/registry_model/test/test_blobuploader.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import hashlib
-import os
-import tarfile
-
-from io import BytesIO
-from contextlib import closing
-
-import pytest
-
-from data.registry_model.blobuploader import (retrieve_blob_upload_manager,
- upload_blob, BlobUploadException,
- BlobDigestMismatchException, BlobTooLargeException,
- BlobUploadSettings)
-from data.registry_model.registry_pre_oci_model import PreOCIModel
-
-from storage.distributedstorage import DistributedStorage
-from storage.fakestorage import FakeStorage
-from test.fixtures import *
-
-@pytest.fixture()
-def pre_oci_model(initialized_db):
- return PreOCIModel()
-
-@pytest.mark.parametrize('chunk_count', [
- 0,
- 1,
- 2,
- 10,
-])
-@pytest.mark.parametrize('subchunk', [
- True,
- False,
-])
-def test_basic_upload_blob(chunk_count, subchunk, pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- settings = BlobUploadSettings('2M', 512 * 1024, 3600)
- app_config = {'TESTING': True}
-
- data = ''
- with upload_blob(repository_ref, storage, settings) as manager:
- assert manager
- assert manager.blob_upload_id
-
- for index in range(0, chunk_count):
- chunk_data = os.urandom(100)
- data += chunk_data
-
- if subchunk:
- manager.upload_chunk(app_config, BytesIO(chunk_data))
- manager.upload_chunk(app_config, BytesIO(chunk_data), (index * 100) + 50)
- else:
- manager.upload_chunk(app_config, BytesIO(chunk_data))
-
- blob = manager.commit_to_blob(app_config)
-
- # Check the blob.
- assert blob.compressed_size == len(data)
- assert not blob.uploading
- assert blob.digest == 'sha256:' + hashlib.sha256(data).hexdigest()
-
- # Ensure the blob exists in storage and has the expected data.
- assert storage.get_content(['local_us'], blob.storage_path) == data
-
-
-def test_cancel_upload(pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- settings = BlobUploadSettings('2M', 512 * 1024, 3600)
- app_config = {'TESTING': True}
-
- blob_upload_id = None
- with upload_blob(repository_ref, storage, settings) as manager:
- blob_upload_id = manager.blob_upload_id
- assert pre_oci_model.lookup_blob_upload(repository_ref, blob_upload_id) is not None
-
- manager.upload_chunk(app_config, BytesIO('hello world'))
-
-  # Since the blob was not committed, the upload should be deleted.
- assert blob_upload_id
- assert pre_oci_model.lookup_blob_upload(repository_ref, blob_upload_id) is None
-
-
-def test_too_large(pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- settings = BlobUploadSettings('1K', 512 * 1024, 3600)
- app_config = {'TESTING': True}
-
- with upload_blob(repository_ref, storage, settings) as manager:
- with pytest.raises(BlobTooLargeException):
- manager.upload_chunk(app_config, BytesIO(os.urandom(1024 * 1024 * 2)))
-
-
-def test_extra_blob_stream_handlers(pre_oci_model):
- handler1_result = []
- handler2_result = []
-
- def handler1(bytes):
- handler1_result.append(bytes)
-
- def handler2(bytes):
- handler2_result.append(bytes)
-
- repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- settings = BlobUploadSettings('1K', 512 * 1024, 3600)
- app_config = {'TESTING': True}
-
- with upload_blob(repository_ref, storage, settings,
- extra_blob_stream_handlers=[handler1, handler2]) as manager:
- manager.upload_chunk(app_config, BytesIO('hello '))
- manager.upload_chunk(app_config, BytesIO('world'))
-
- assert ''.join(handler1_result) == 'hello world'
- assert ''.join(handler2_result) == 'hello world'
-
-
-def valid_tar_gz(contents):
- with closing(BytesIO()) as layer_data:
- with closing(tarfile.open(fileobj=layer_data, mode='w|gz')) as tar_file:
- tar_file_info = tarfile.TarInfo(name='somefile')
- tar_file_info.type = tarfile.REGTYPE
- tar_file_info.size = len(contents)
- tar_file_info.mtime = 1
- tar_file.addfile(tar_file_info, BytesIO(contents))
-
- layer_bytes = layer_data.getvalue()
- return layer_bytes
-
-
-def test_uncompressed_size(pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- settings = BlobUploadSettings('1K', 512 * 1024, 3600)
- app_config = {'TESTING': True}
-
- with upload_blob(repository_ref, storage, settings) as manager:
- manager.upload_chunk(app_config, BytesIO(valid_tar_gz('hello world')))
-
- blob = manager.commit_to_blob(app_config)
-
- assert blob.compressed_size is not None
- assert blob.uncompressed_size is not None
-
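A minimal restatement of the upload flow these tests exercise: open an upload context, stream chunks, then commit; if commit_to_blob is never called, the upload record is discarded when the context exits, which is the behavior test_cancel_upload checks. The names below reuse the fixtures from the deleted tests (repository_ref, storage, settings, app_config, BytesIO from io) and are illustrative here.

    with upload_blob(repository_ref, storage, settings) as manager:
        manager.upload_chunk(app_config, BytesIO('first chunk '))
        manager.upload_chunk(app_config, BytesIO('second chunk'))
        blob = manager.commit_to_blob(app_config)
    # After commit, blob.digest, blob.compressed_size and blob.storage_path are populated.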
diff --git a/data/registry_model/test/test_interface.py b/data/registry_model/test/test_interface.py
deleted file mode 100644
index 8255ade6d..000000000
--- a/data/registry_model/test/test_interface.py
+++ /dev/null
@@ -1,1095 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import hashlib
-import json
-import uuid
-
-from datetime import datetime, timedelta
-from io import BytesIO
-
-import pytest
-
-from mock import patch
-from playhouse.test_utils import assert_query_count
-
-from app import docker_v2_signing_key, storage
-from data import model
-from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest, ManifestBlob,
- ManifestLegacyImage, ManifestLabel,
- TagManifest, TagManifestLabel, DerivedStorageForImage,
- TorrentInfo, Tag, TagToRepositoryTag, ImageStorageLocation)
-from data.cache.impl import InMemoryDataModelCache
-from data.registry_model.registry_pre_oci_model import PreOCIModel
-from data.registry_model.registry_oci_model import OCIModel
-from data.registry_model.datatypes import RepositoryReference
-from data.registry_model.blobuploader import upload_blob, BlobUploadSettings
-from data.registry_model.modelsplitter import SplitModel
-from data.model.blob import store_blob_record_and_temp_link
-from image.docker.types import ManifestImageLayer
-from image.docker.schema1 import (DockerSchema1ManifestBuilder, DOCKER_SCHEMA1_CONTENT_TYPES,
- DockerSchema1Manifest)
-from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
-from image.docker.schema2.list import DockerSchema2ManifestListBuilder
-from util.bytes import Bytes
-
-from test.fixtures import *
-
-
-@pytest.fixture(params=[PreOCIModel(), OCIModel(), OCIModel(oci_model_only=False),
- SplitModel(0, {'devtable'}, {'buynlarge'}, False),
- SplitModel(1.0, {'devtable'}, {'buynlarge'}, False),
- SplitModel(1.0, {'devtable'}, {'buynlarge'}, True)])
-def registry_model(request, initialized_db):
- return request.param
-
-@pytest.fixture()
-def pre_oci_model(initialized_db):
- return PreOCIModel()
-
-@pytest.fixture()
-def oci_model(initialized_db):
- return OCIModel()
-
-
-@pytest.mark.parametrize('names, expected', [
- (['unknown'], None),
- (['latest'], {'latest'}),
- (['latest', 'prod'], {'latest', 'prod'}),
- (['latest', 'prod', 'another'], {'latest', 'prod'}),
- (['foo', 'prod'], {'prod'}),
-])
-def test_find_matching_tag(names, expected, registry_model):
- repo = model.repository.get_repository('devtable', 'simple')
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found = registry_model.find_matching_tag(repository_ref, names)
- if expected is None:
- assert found is None
- else:
- assert found.name in expected
- assert found.repository.namespace_name == 'devtable'
- assert found.repository.name == 'simple'
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name, expected', [
- ('devtable', 'simple', {'latest', 'prod'}),
- ('buynlarge', 'orgrepo', {'latest', 'prod'}),
-])
-def test_get_most_recent_tag(repo_namespace, repo_name, expected, registry_model):
- repo = model.repository.get_repository(repo_namespace, repo_name)
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found = registry_model.get_most_recent_tag(repository_ref)
- if expected is None:
- assert found is None
- else:
- assert found.name in expected
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name, expected', [
- ('devtable', 'simple', True),
- ('buynlarge', 'orgrepo', True),
- ('buynlarge', 'unknownrepo', False),
-])
-def test_lookup_repository(repo_namespace, repo_name, expected, registry_model):
- repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- if expected:
- assert repo_ref
- else:
- assert repo_ref is None
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('buynlarge', 'orgrepo'),
-])
-def test_lookup_manifests(repo_namespace, repo_name, registry_model):
- repo = model.repository.get_repository(repo_namespace, repo_name)
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found_tag = registry_model.find_matching_tag(repository_ref, ['latest'])
- found_manifest = registry_model.get_manifest_for_tag(found_tag)
- found = registry_model.lookup_manifest_by_digest(repository_ref, found_manifest.digest,
- include_legacy_image=True)
- assert found._db_id == found_manifest._db_id
- assert found.digest == found_manifest.digest
- assert found.legacy_image
- assert found.legacy_image.parents
-
- schema1_parsed = registry_model.get_schema1_parsed_manifest(found, 'foo', 'bar', 'baz', storage)
- assert schema1_parsed is not None
-
-
-def test_lookup_unknown_manifest(registry_model):
- repo = model.repository.get_repository('devtable', 'simple')
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found = registry_model.lookup_manifest_by_digest(repository_ref, 'sha256:deadbeef')
- assert found is None
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
- ('devtable', 'history'),
- ('buynlarge', 'orgrepo'),
-])
-def test_legacy_images(repo_namespace, repo_name, registry_model):
- repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- legacy_images = registry_model.get_legacy_images(repository_ref)
- assert len(legacy_images)
-
- found_tags = set()
- for image in legacy_images:
- found_image = registry_model.get_legacy_image(repository_ref, image.docker_image_id,
- include_parents=True)
-
- with assert_query_count(5 if found_image.parents else 4):
- found_image = registry_model.get_legacy_image(repository_ref, image.docker_image_id,
- include_parents=True, include_blob=True)
- assert found_image.docker_image_id == image.docker_image_id
- assert found_image.parents == image.parents
- assert found_image.blob
- assert found_image.blob.placements
-
- # Check that the tags list can be retrieved.
- assert image.tags is not None
- found_tags.update({tag.name for tag in image.tags})
-
- # Check against the actual DB row.
- model_image = model.image.get_image(repository_ref._db_id, found_image.docker_image_id)
- assert model_image.id == found_image._db_id
- assert ([pid for pid in reversed(model_image.ancestor_id_list())] ==
- [p._db_id for p in found_image.parents])
-
- # Try without parents and ensure it raises an exception.
- found_image = registry_model.get_legacy_image(repository_ref, image.docker_image_id,
- include_parents=False)
- with pytest.raises(Exception):
- assert not found_image.parents
-
- assert found_tags
-
- unknown = registry_model.get_legacy_image(repository_ref, 'unknown', include_parents=True)
- assert unknown is None
-
-
-def test_manifest_labels(registry_model):
- repo = model.repository.get_repository('devtable', 'simple')
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found_tag = registry_model.find_matching_tag(repository_ref, ['latest'])
- found_manifest = registry_model.get_manifest_for_tag(found_tag)
-
- # Create a new label.
- created = registry_model.create_manifest_label(found_manifest, 'foo', 'bar', 'api')
- assert created.key == 'foo'
- assert created.value == 'bar'
- assert created.source_type_name == 'api'
- assert created.media_type_name == 'text/plain'
-
- # Ensure we can look it up.
- assert registry_model.get_manifest_label(found_manifest, created.uuid) == created
-
- # Ensure it is in our list of labels.
- assert created in registry_model.list_manifest_labels(found_manifest)
- assert created in registry_model.list_manifest_labels(found_manifest, key_prefix='fo')
-
- # Ensure it is *not* in our filtered list.
- assert created not in registry_model.list_manifest_labels(found_manifest, key_prefix='ba')
-
- # Delete the label and ensure it is gone.
- assert registry_model.delete_manifest_label(found_manifest, created.uuid)
- assert registry_model.get_manifest_label(found_manifest, created.uuid) is None
- assert created not in registry_model.list_manifest_labels(found_manifest)
-
-
-def test_manifest_label_handlers(registry_model):
- repo = model.repository.get_repository('devtable', 'simple')
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found_tag = registry_model.get_repo_tag(repository_ref, 'latest')
- found_manifest = registry_model.get_manifest_for_tag(found_tag)
-
- # Ensure the tag has no expiration.
- assert found_tag.lifetime_end_ts is None
-
- # Create a new label with an expires-after.
- registry_model.create_manifest_label(found_manifest, 'quay.expires-after', '2h', 'api')
-
- # Ensure the tag now has an expiration.
- updated_tag = registry_model.get_repo_tag(repository_ref, 'latest')
- assert updated_tag.lifetime_end_ts == (updated_tag.lifetime_start_ts + (60 * 60 * 2))
-
-
-def test_batch_labels(registry_model):
- repo = model.repository.get_repository('devtable', 'history')
- repository_ref = RepositoryReference.for_repo_obj(repo)
- found_tag = registry_model.find_matching_tag(repository_ref, ['latest'])
- found_manifest = registry_model.get_manifest_for_tag(found_tag)
-
- with registry_model.batch_create_manifest_labels(found_manifest) as add_label:
- add_label('foo', '1', 'api')
- add_label('bar', '2', 'api')
- add_label('baz', '3', 'api')
-
- # Ensure we can look them up.
- assert len(registry_model.list_manifest_labels(found_manifest)) == 3
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
- ('devtable', 'history'),
- ('buynlarge', 'orgrepo'),
-])
-def test_repository_tags(repo_namespace, repo_name, registry_model):
- repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- tags = registry_model.list_all_active_repository_tags(repository_ref, include_legacy_images=True)
- assert len(tags)
-
- tags_map = registry_model.get_legacy_tags_map(repository_ref, storage)
-
- for tag in tags:
- found_tag = registry_model.get_repo_tag(repository_ref, tag.name, include_legacy_image=True)
- assert found_tag == tag
-
- if found_tag.legacy_image is None:
- continue
-
- found_image = registry_model.get_legacy_image(repository_ref,
- found_tag.legacy_image.docker_image_id)
- assert found_image == found_tag.legacy_image
- assert tag.name in tags_map
- assert tags_map[tag.name] == found_image.docker_image_id
-
-
-@pytest.mark.parametrize('namespace, name, expected_tag_count, has_expired', [
- ('devtable', 'simple', 2, False),
- ('devtable', 'history', 2, True),
- ('devtable', 'gargantuan', 8, False),
- ('public', 'publicrepo', 1, False),
-])
-def test_repository_tag_history(namespace, name, expected_tag_count, has_expired, registry_model):
- # Pre-cache media type loads to ensure consistent query count.
- Manifest.media_type.get_name(1)
-
- repository_ref = registry_model.lookup_repository(namespace, name)
- with assert_query_count(2):
- history, has_more = registry_model.list_repository_tag_history(repository_ref)
- assert not has_more
- assert len(history) == expected_tag_count
-
- for tag in history:
- # Retrieve the manifest to ensure it doesn't issue extra queries.
- tag.manifest
-
- if has_expired:
-    # Ensure 'latest' is reported as having an expired tag, since its history contains one.
- with assert_query_count(1):
- assert registry_model.has_expired_tag(repository_ref, 'latest')
-
-
-@pytest.mark.parametrize('repositories, expected_tag_count', [
- ([], 0),
- ([('devtable', 'simple'), ('devtable', 'building')], 1),
-])
-def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, registry_model):
- last_modified_map = registry_model.get_most_recent_tag_lifetime_start(
- [registry_model.lookup_repository(name, namespace) for name, namespace in repositories]
- )
-
- assert len(last_modified_map) == expected_tag_count
- for repo_id, last_modified in last_modified_map.items():
- tag = registry_model.get_most_recent_tag(RepositoryReference.for_id(repo_id))
- assert last_modified == tag.lifetime_start_ms / 1000
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
- ('devtable', 'history'),
- ('buynlarge', 'orgrepo'),
-])
-@pytest.mark.parametrize('via_manifest', [
- False,
- True,
-])
-def test_delete_tags(repo_namespace, repo_name, via_manifest, registry_model):
- repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- tags = registry_model.list_all_active_repository_tags(repository_ref)
- assert len(tags)
-
- # Save history before the deletions.
- previous_history, _ = registry_model.list_repository_tag_history(repository_ref, size=1000)
- assert len(previous_history) >= len(tags)
-
- # Delete every tag in the repository.
- for tag in tags:
- if via_manifest:
- assert registry_model.delete_tag(repository_ref, tag.name)
- else:
- manifest = registry_model.get_manifest_for_tag(tag)
- if manifest is not None:
- assert registry_model.delete_tags_for_manifest(manifest)
-
- # Make sure the tag is no longer found.
- # TODO: Uncomment once we're done with the SplitModel.
- #with assert_query_count(1):
- found_tag = registry_model.get_repo_tag(repository_ref, tag.name, include_legacy_image=True)
- assert found_tag is None
-
- # Ensure all tags have been deleted.
- tags = registry_model.list_all_active_repository_tags(repository_ref)
- assert not len(tags)
-
- # Ensure that the tags all live in history.
- history, _ = registry_model.list_repository_tag_history(repository_ref, size=1000)
- assert len(history) == len(previous_history)
-
-
-@pytest.mark.parametrize('use_manifest', [
- True,
- False,
-])
-def test_retarget_tag_history(use_manifest, registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'history')
- history, _ = registry_model.list_repository_tag_history(repository_ref)
-
- if use_manifest:
- manifest_or_legacy_image = registry_model.lookup_manifest_by_digest(repository_ref,
- history[0].manifest_digest,
- allow_dead=True)
- else:
- manifest_or_legacy_image = history[0].legacy_image
-
- # Retarget the tag.
- assert manifest_or_legacy_image
- updated_tag = registry_model.retarget_tag(repository_ref, 'latest', manifest_or_legacy_image,
- storage, docker_v2_signing_key, is_reversion=True)
-
- # Ensure the tag has changed targets.
- if use_manifest:
- assert updated_tag.manifest_digest == manifest_or_legacy_image.digest
- else:
- assert updated_tag.legacy_image == manifest_or_legacy_image
-
- # Ensure history has been updated.
- new_history, _ = registry_model.list_repository_tag_history(repository_ref)
- assert len(new_history) == len(history) + 1
-
-
-def test_retarget_tag_schema1(oci_model):
- repository_ref = oci_model.lookup_repository('devtable', 'simple')
- latest_tag = oci_model.get_repo_tag(repository_ref, 'latest')
- manifest = oci_model.get_manifest_for_tag(latest_tag)
-
- existing_parsed = manifest.get_parsed_manifest()
-
- # Retarget a new tag to the manifest.
- updated_tag = oci_model.retarget_tag(repository_ref, 'somenewtag', manifest, storage,
- docker_v2_signing_key)
- assert updated_tag
- assert updated_tag.name == 'somenewtag'
-
- updated_manifest = oci_model.get_manifest_for_tag(updated_tag)
- parsed = updated_manifest.get_parsed_manifest()
- assert parsed.namespace == 'devtable'
- assert parsed.repo_name == 'simple'
- assert parsed.tag == 'somenewtag'
-
- assert parsed.layers == existing_parsed.layers
-
- # Ensure the tag has changed targets.
- assert oci_model.get_repo_tag(repository_ref, 'somenewtag') == updated_tag
-
-
-def test_change_repository_tag_expiration(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- tag = registry_model.get_repo_tag(repository_ref, 'latest')
- assert tag.lifetime_end_ts is None
-
- new_datetime = datetime.utcnow() + timedelta(days=2)
- previous, okay = registry_model.change_repository_tag_expiration(tag, new_datetime)
-
- assert okay
- assert previous is None
-
- tag = registry_model.get_repo_tag(repository_ref, 'latest')
- assert tag.lifetime_end_ts is not None
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name, expected_non_empty', [
- ('devtable', 'simple', []),
- ('devtable', 'complex', ['prod', 'v2.0']),
- ('devtable', 'history', ['latest']),
- ('buynlarge', 'orgrepo', []),
- ('devtable', 'gargantuan', ['v2.0', 'v3.0', 'v4.0', 'v5.0', 'v6.0']),
-])
-def test_get_legacy_images_owned_by_tag(repo_namespace, repo_name, expected_non_empty,
- registry_model):
- repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- tags = registry_model.list_all_active_repository_tags(repository_ref)
- assert len(tags)
-
- non_empty = set()
- for tag in tags:
- if registry_model.get_legacy_images_owned_by_tag(tag):
- non_empty.add(tag.name)
-
- assert non_empty == set(expected_non_empty)
-
-
-def test_get_security_status(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- tags = registry_model.list_all_active_repository_tags(repository_ref, include_legacy_images=True)
- assert len(tags)
-
- for tag in tags:
- assert registry_model.get_security_status(tag.legacy_image)
- registry_model.reset_security_status(tag.legacy_image)
- assert registry_model.get_security_status(tag.legacy_image)
-
-
-@pytest.fixture()
-def clear_rows(initialized_db):
- # Remove all new-style rows so we can backfill.
- TagToRepositoryTag.delete().execute()
- Tag.delete().execute()
- TagManifestLabelMap.delete().execute()
- ManifestLabel.delete().execute()
- ManifestBlob.delete().execute()
- ManifestLegacyImage.delete().execute()
- TagManifestToManifest.delete().execute()
- Manifest.delete().execute()
- TagManifestLabel.delete().execute()
- TagManifest.delete().execute()
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
- ('devtable', 'history'),
- ('buynlarge', 'orgrepo'),
-])
-def test_backfill_manifest_for_tag(repo_namespace, repo_name, clear_rows, pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository(repo_namespace, repo_name)
- tags, has_more = pre_oci_model.list_repository_tag_history(repository_ref, size=2500)
- assert tags
- assert not has_more
-
- for tag in tags:
- assert not tag.manifest_digest
- assert pre_oci_model.backfill_manifest_for_tag(tag)
-
- tags, _ = pre_oci_model.list_repository_tag_history(repository_ref)
- assert tags
- for tag in tags:
- assert tag.manifest_digest
-
- manifest = pre_oci_model.get_manifest_for_tag(tag)
- assert manifest
-
- legacy_image = pre_oci_model.get_legacy_image(repository_ref, tag.legacy_image.docker_image_id,
- include_parents=True)
-
- parsed_manifest = manifest.get_parsed_manifest()
- assert parsed_manifest.leaf_layer_v1_image_id == legacy_image.docker_image_id
- assert parsed_manifest.parent_image_ids == {p.docker_image_id for p in legacy_image.parents}
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
- ('devtable', 'history'),
- ('buynlarge', 'orgrepo'),
-])
-def test_backfill_manifest_on_lookup(repo_namespace, repo_name, clear_rows, pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository(repo_namespace, repo_name)
- tags = pre_oci_model.list_all_active_repository_tags(repository_ref)
- assert tags
-
- for tag in tags:
- assert not tag.manifest_digest
- assert not pre_oci_model.get_manifest_for_tag(tag)
-
- manifest = pre_oci_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
- assert manifest
-
- updated_tag = pre_oci_model.get_repo_tag(repository_ref, tag.name)
- assert updated_tag.manifest_digest == manifest.digest
-
-
-@pytest.mark.parametrize('namespace, expect_enabled', [
- ('devtable', True),
- ('buynlarge', True),
-
- ('disabled', False),
-])
-def test_is_namespace_enabled(namespace, expect_enabled, registry_model):
- assert registry_model.is_namespace_enabled(namespace) == expect_enabled
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name', [
- ('devtable', 'simple'),
- ('devtable', 'complex'),
- ('devtable', 'history'),
- ('buynlarge', 'orgrepo'),
-])
-def test_layers_and_blobs(repo_namespace, repo_name, registry_model):
- repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- tags = registry_model.list_all_active_repository_tags(repository_ref)
- assert tags
-
- for tag in tags:
- manifest = registry_model.get_manifest_for_tag(tag)
- assert manifest
-
- parsed = manifest.get_parsed_manifest()
- assert parsed
-
- layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage)
- assert layers
-
- layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage,
- include_placements=True)
- assert layers
-
- for index, manifest_layer in enumerate(layers):
- assert manifest_layer.blob.storage_path
- assert manifest_layer.blob.placements
-
- repo_blob = registry_model.get_repo_blob_by_digest(repository_ref, manifest_layer.blob.digest)
- assert repo_blob.digest == manifest_layer.blob.digest
-
- assert manifest_layer.estimated_size(1) is not None
- assert isinstance(manifest_layer.layer_info, ManifestImageLayer)
-
- blobs = registry_model.get_manifest_local_blobs(manifest, include_placements=True)
- assert {b.digest for b in blobs} == set(parsed.local_blob_digests)
-
-
-def test_manifest_remote_layers(oci_model):
- # Create a config blob for testing.
- config_json = json.dumps({
- 'config': {},
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- app_config = {'TESTING': True}
- repository_ref = oci_model.lookup_repository('devtable', 'simple')
- with upload_blob(repository_ref, storage, BlobUploadSettings(500, 500, 500)) as upload:
- upload.upload_chunk(app_config, BytesIO(config_json))
- blob = upload.commit_to_blob(app_config)
-
- # Create the manifest in the repo.
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(blob.digest, blob.compressed_size)
- builder.add_layer('sha256:abcd', 1234, urls=['http://hello/world'])
- manifest = builder.build()
-
- created_manifest, _ = oci_model.create_manifest_and_retarget_tag(repository_ref, manifest,
- 'sometag', storage)
- assert created_manifest
-
- layers = oci_model.list_parsed_manifest_layers(repository_ref,
- created_manifest.get_parsed_manifest(),
- storage)
- assert len(layers) == 1
- assert layers[0].layer_info.is_remote
- assert layers[0].layer_info.urls == ['http://hello/world']
- assert layers[0].blob is None
-
-
-def test_derived_image(registry_model):
- # Clear all existing derived storage.
- DerivedStorageForImage.delete().execute()
-
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- tag = registry_model.get_repo_tag(repository_ref, 'latest')
- manifest = registry_model.get_manifest_for_tag(tag)
-
- # Ensure the squashed image doesn't exist.
- assert registry_model.lookup_derived_image(manifest, 'squash', storage, {}) is None
-
- # Create a new one.
- squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash',
- 'local_us', storage, {})
- assert registry_model.lookup_or_create_derived_image(manifest, 'squash',
- 'local_us', storage, {}) == squashed
- assert squashed.unique_id
-
- # Check and set the size.
- assert squashed.blob.compressed_size is None
- registry_model.set_derived_image_size(squashed, 1234)
-
- found = registry_model.lookup_derived_image(manifest, 'squash', storage, {})
- assert found.blob.compressed_size == 1234
- assert found.unique_id == squashed.unique_id
-
-  # Ensure it's returned now.
- assert found == squashed
-
- # Ensure different metadata results in a different derived image.
- found = registry_model.lookup_derived_image(manifest, 'squash', storage, {'foo': 'bar'})
- assert found is None
-
- squashed_foo = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us',
- storage, {'foo': 'bar'})
- assert squashed_foo != squashed
-
- found = registry_model.lookup_derived_image(manifest, 'squash', storage, {'foo': 'bar'})
- assert found == squashed_foo
-
- assert squashed.unique_id != squashed_foo.unique_id
-
- # Lookup with placements.
- squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us',
- storage, {}, include_placements=True)
- assert squashed.blob.placements
-
- # Delete the derived image.
- registry_model.delete_derived_image(squashed)
- assert registry_model.lookup_derived_image(manifest, 'squash', storage, {}) is None
-
-
-def test_derived_image_signatures(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- tag = registry_model.get_repo_tag(repository_ref, 'latest')
- manifest = registry_model.get_manifest_for_tag(tag)
-
- derived = registry_model.lookup_derived_image(manifest, 'squash', storage, {})
- assert derived
-
- signature = registry_model.get_derived_image_signature(derived, 'gpg2')
- assert signature is None
-
- registry_model.set_derived_image_signature(derived, 'gpg2', 'foo')
- assert registry_model.get_derived_image_signature(derived, 'gpg2') == 'foo'
-
-
-def test_derived_image_for_manifest_list(oci_model):
- # Clear all existing derived storage.
- DerivedStorageForImage.delete().execute()
-
- # Create a config blob for testing.
- config_json = json.dumps({
- 'config': {},
- "rootfs": {
- "type": "layers",
- "diff_ids": []
- },
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "do something",
- },
- ],
- })
-
- app_config = {'TESTING': True}
- repository_ref = oci_model.lookup_repository('devtable', 'simple')
- with upload_blob(repository_ref, storage, BlobUploadSettings(500, 500, 500)) as upload:
- upload.upload_chunk(app_config, BytesIO(config_json))
- blob = upload.commit_to_blob(app_config)
-
- # Create the manifest in the repo.
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(blob.digest, blob.compressed_size)
- builder.add_layer(blob.digest, blob.compressed_size)
- amd64_manifest = builder.build()
-
- oci_model.create_manifest_and_retarget_tag(repository_ref, amd64_manifest, 'submanifest', storage)
-
- # Create a manifest list, pointing to at least one amd64+linux manifest.
- builder = DockerSchema2ManifestListBuilder()
- builder.add_manifest(amd64_manifest, 'amd64', 'linux')
- manifestlist = builder.build()
-
- oci_model.create_manifest_and_retarget_tag(repository_ref, manifestlist, 'listtag', storage)
- manifest = oci_model.get_manifest_for_tag(oci_model.get_repo_tag(repository_ref, 'listtag'))
- assert manifest
- assert manifest.get_parsed_manifest().is_manifest_list
-
- # Ensure the squashed image doesn't exist.
- assert oci_model.lookup_derived_image(manifest, 'squash', storage, {}) is None
-
- # Create a new one.
- squashed = oci_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', storage, {})
- assert squashed.unique_id
- assert oci_model.lookup_or_create_derived_image(manifest, 'squash',
- 'local_us', storage, {}) == squashed
-
- # Perform lookup.
- assert oci_model.lookup_derived_image(manifest, 'squash', storage, {}) == squashed
-
-
-def test_torrent_info(registry_model):
- # Remove all existing info.
- TorrentInfo.delete().execute()
-
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- tag = registry_model.get_repo_tag(repository_ref, 'latest')
- manifest = registry_model.get_manifest_for_tag(tag)
-
- blobs = registry_model.get_manifest_local_blobs(manifest)
- assert blobs
-
- assert registry_model.get_torrent_info(blobs[0]) is None
- registry_model.set_torrent_info(blobs[0], 2, 'foo')
-
- # Set it again exactly, which should be a no-op.
- registry_model.set_torrent_info(blobs[0], 2, 'foo')
-
- # Check the information we've set.
- torrent_info = registry_model.get_torrent_info(blobs[0])
- assert torrent_info is not None
- assert torrent_info.piece_length == 2
- assert torrent_info.pieces == 'foo'
-
- # Try setting it again. Nothing should happen.
- registry_model.set_torrent_info(blobs[0], 3, 'bar')
-
- torrent_info = registry_model.get_torrent_info(blobs[0])
- assert torrent_info is not None
- assert torrent_info.piece_length == 2
- assert torrent_info.pieces == 'foo'
-
-
-def test_blob_uploads(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
-
- blob_upload = registry_model.create_blob_upload(repository_ref, str(uuid.uuid4()),
- 'local_us', {'some': 'metadata'})
- assert blob_upload
- assert blob_upload.storage_metadata == {'some': 'metadata'}
- assert blob_upload.location_name == 'local_us'
-
- # Ensure we can find the blob upload.
- assert registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id) == blob_upload
-
- # Update and ensure the changes are saved.
- assert registry_model.update_blob_upload(blob_upload, 1, 'the-pieces_hash',
- blob_upload.piece_sha_state,
- {'new': 'metadata'}, 2, 3,
- blob_upload.sha_state)
-
- updated = registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
- assert updated
- assert updated.uncompressed_byte_count == 1
- assert updated.piece_hashes == 'the-pieces_hash'
- assert updated.storage_metadata == {'new': 'metadata'}
- assert updated.byte_count == 2
- assert updated.chunk_count == 3
-
- # Delete the upload.
- registry_model.delete_blob_upload(blob_upload)
-
- # Ensure it can no longer be found.
- assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
-
-
-def test_commit_blob_upload(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- blob_upload = registry_model.create_blob_upload(repository_ref, str(uuid.uuid4()),
- 'local_us', {'some': 'metadata'})
-
- # Commit the blob upload and make sure it is written as a blob.
- digest = 'sha256:' + hashlib.sha256('hello').hexdigest()
- blob = registry_model.commit_blob_upload(blob_upload, digest, 60)
- assert blob.digest == digest
-
- # Ensure the upload can no longer be found.
- assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
-
-
-# TODO: Re-enable for OCI model once we have a new table for temporary blobs.
-def test_mount_blob_into_repository(pre_oci_model):
- repository_ref = pre_oci_model.lookup_repository('devtable', 'simple')
- latest_tag = pre_oci_model.get_repo_tag(repository_ref, 'latest')
- manifest = pre_oci_model.get_manifest_for_tag(latest_tag)
-
- target_repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
-
- blobs = pre_oci_model.get_manifest_local_blobs(manifest, include_placements=True)
- assert blobs
-
- for blob in blobs:
- # Ensure the blob doesn't exist under the repository.
- assert not pre_oci_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
-
- # Mount the blob into the repository.
- assert pre_oci_model.mount_blob_into_repository(blob, target_repository_ref, 60)
-
- # Ensure it now exists.
- found = pre_oci_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
- assert found == blob
-
-
-class SomeException(Exception):
- pass
-
-
-def test_get_cached_repo_blob(registry_model):
- model_cache = InMemoryDataModelCache()
-
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- latest_tag = registry_model.get_repo_tag(repository_ref, 'latest')
- manifest = registry_model.get_manifest_for_tag(latest_tag)
-
- blobs = registry_model.get_manifest_local_blobs(manifest, include_placements=True)
- assert blobs
-
- blob = blobs[0]
-
- # Load a blob to add it to the cache.
- found = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
- assert found.digest == blob.digest
- assert found.uuid == blob.uuid
- assert found.compressed_size == blob.compressed_size
- assert found.uncompressed_size == blob.uncompressed_size
- assert found.uploading == blob.uploading
- assert found.placements == blob.placements
-
-  # Simulate a disconnected database by patching the blob lookup functions to raise.
- def fail(x, y):
- raise SomeException('Not connected!')
-
- with patch('data.registry_model.registry_pre_oci_model.model.blob.get_repository_blob_by_digest',
- fail):
- with patch('data.registry_model.registry_oci_model.model.oci.blob.get_repository_blob_by_digest',
- fail):
- # Make sure we can load again, which should hit the cache.
- cached = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
- assert cached.digest == blob.digest
- assert cached.uuid == blob.uuid
- assert cached.compressed_size == blob.compressed_size
- assert cached.uncompressed_size == blob.uncompressed_size
- assert cached.uploading == blob.uploading
- assert cached.placements == blob.placements
-
- # Try another blob, which should fail since the DB is not connected and the cache
- # does not contain the blob.
- with pytest.raises(SomeException):
- registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', 'some other digest')
-
-
-def test_create_manifest_and_retarget_tag(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
- manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()
-
- builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
- builder.add_layer(manifest.blob_digests[0],
- '{"id": "%s"}' % latest_tag.legacy_image.docker_image_id)
- sample_manifest = builder.build(docker_v2_signing_key)
- assert sample_manifest is not None
-
- another_manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref,
- sample_manifest,
- 'anothertag',
- storage)
- assert another_manifest is not None
- assert tag is not None
-
- assert tag.name == 'anothertag'
- assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
-
-
-def test_get_schema1_parsed_manifest(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
- manifest = registry_model.get_manifest_for_tag(latest_tag)
- assert registry_model.get_schema1_parsed_manifest(manifest, '', '', '', storage)
-
-
-def test_convert_manifest(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
- manifest = registry_model.get_manifest_for_tag(latest_tag)
-
- mediatypes = DOCKER_SCHEMA1_CONTENT_TYPES
- assert registry_model.convert_manifest(manifest, '', '', '', mediatypes, storage)
-
- mediatypes = []
- assert registry_model.convert_manifest(manifest, '', '', '', mediatypes, storage) is None
-
-
-def test_create_manifest_and_retarget_tag_with_labels(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
- manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()
-
- json_metadata = {
- 'id': latest_tag.legacy_image.docker_image_id,
- 'config': {
- 'Labels': {
- 'quay.expires-after': '2w',
- },
- },
- }
-
- builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
- builder.add_layer(manifest.blob_digests[0], json.dumps(json_metadata))
- sample_manifest = builder.build(docker_v2_signing_key)
- assert sample_manifest is not None
-
- another_manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref,
- sample_manifest,
- 'anothertag',
- storage)
- assert another_manifest is not None
- assert tag is not None
-
- assert tag.name == 'anothertag'
- assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
-
- # Ensure the labels were applied.
- assert tag.lifetime_end_ms is not None
-
-
-
-def _populate_blob(digest):
- location = ImageStorageLocation.get(name='local_us')
- store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 120)
-
-
-def test_known_issue_schema1(registry_model):
- test_dir = os.path.dirname(os.path.abspath(__file__))
- path = os.path.join(test_dir, '../../../image/docker/test/validate_manifest_known_issue.json')
- with open(path, 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
-
- for blob_digest in manifest.local_blob_digests:
- _populate_blob(blob_digest)
-
- digest = manifest.digest
- assert digest == 'sha256:44518f5a4d1cb5b7a6347763116fb6e10f6a8563b6c40bb389a0a982f0a9f47a'
-
- # Create the manifest in the database.
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- created_manifest, _ = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest,
- 'latest', storage)
- assert created_manifest
- assert created_manifest.digest == manifest.digest
- assert (created_manifest.internal_manifest_bytes.as_encoded_str() ==
- manifest.bytes.as_encoded_str())
-
- # Look it up again and validate.
- found = registry_model.lookup_manifest_by_digest(repository_ref, manifest.digest, allow_dead=True)
- assert found
- assert found.digest == digest
- assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
- assert found.get_parsed_manifest().digest == digest
-
-
-def test_unicode_emoji(registry_model):
- builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'latest')
- builder.add_layer('sha256:abcde', json.dumps({
- 'id': 'someid',
- 'author': u'😱',
- }, ensure_ascii=False))
-
- manifest = builder.build(ensure_ascii=False)
- manifest._validate()
-
- for blob_digest in manifest.local_blob_digests:
- _populate_blob(blob_digest)
-
- # Create the manifest in the database.
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- created_manifest, _ = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest,
- 'latest', storage)
- assert created_manifest
- assert created_manifest.digest == manifest.digest
- assert (created_manifest.internal_manifest_bytes.as_encoded_str() ==
- manifest.bytes.as_encoded_str())
-
- # Look it up again and validate.
- found = registry_model.lookup_manifest_by_digest(repository_ref, manifest.digest, allow_dead=True)
- assert found
- assert found.digest == manifest.digest
- assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
- assert found.get_parsed_manifest().digest == manifest.digest
-
-
-def test_lookup_active_repository_tags(oci_model):
- repository_ref = oci_model.lookup_repository('devtable', 'simple')
- latest_tag = oci_model.get_repo_tag(repository_ref, 'latest')
- manifest = oci_model.get_manifest_for_tag(latest_tag)
-
- tag_count = 500
-
- # Create a bunch of tags.
- tags_expected = set()
- for index in range(0, tag_count):
- tags_expected.add('somenewtag%s' % index)
- oci_model.retarget_tag(repository_ref, 'somenewtag%s' % index, manifest, storage,
- docker_v2_signing_key)
-
- assert tags_expected
-
- # List the tags.
- tags_found = set()
- tag_id = None
- while True:
- tags = oci_model.lookup_active_repository_tags(repository_ref, tag_id, 11)
- assert len(tags) <= 11
- for tag in tags[0:10]:
- assert tag.name not in tags_found
- if tag.name in tags_expected:
- tags_found.add(tag.name)
- tags_expected.remove(tag.name)
-
- if len(tags) < 11:
- break
-
- tag_id = tags[10].id
-
- # Make sure we've found all the tags.
- assert tags_found
- assert not tags_expected
-
-
-def test_yield_tags_for_vulnerability_notification(registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'complex')
-
-  # Check each legacy image under the tags and ensure no tags are yielded, because
-  # no notification is yet registered.
- for tag in registry_model.list_all_active_repository_tags(repository_ref,
- include_legacy_images=True):
- image = registry_model.get_legacy_image(repository_ref, tag.legacy_image.docker_image_id,
- include_blob=True)
- pairs = [(image.docker_image_id, image.blob.uuid)]
- results = list(registry_model.yield_tags_for_vulnerability_notification(pairs))
- assert not len(results)
-
- # Register a notification.
- model.notification.create_repo_notification(repository_ref.id, 'vulnerability_found', 'email',
- {}, {})
-
- # Check again.
- for tag in registry_model.list_all_active_repository_tags(repository_ref,
- include_legacy_images=True):
- image = registry_model.get_legacy_image(repository_ref, tag.legacy_image.docker_image_id,
- include_blob=True, include_parents=True)
-
- # Check for every parent of the image.
- for current in image.parents:
- img = registry_model.get_legacy_image(repository_ref, current.docker_image_id,
- include_blob=True)
- pairs = [(img.docker_image_id, img.blob.uuid)]
- results = list(registry_model.yield_tags_for_vulnerability_notification(pairs))
- assert len(results) > 0
- assert tag.name in {t.name for t in results}
-
- # Check for the image itself.
- pairs = [(image.docker_image_id, image.blob.uuid)]
- results = list(registry_model.yield_tags_for_vulnerability_notification(pairs))
- assert len(results) > 0
- assert tag.name in {t.name for t in results}
diff --git a/data/registry_model/test/test_manifestbuilder.py b/data/registry_model/test/test_manifestbuilder.py
deleted file mode 100644
index 538731b8d..000000000
--- a/data/registry_model/test/test_manifestbuilder.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import hashlib
-import json
-
-from io import BytesIO
-
-import pytest
-
-from mock import patch
-
-from app import docker_v2_signing_key
-
-from data.registry_model.blobuploader import BlobUploadSettings, upload_blob
-from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
-from data.registry_model.registry_pre_oci_model import PreOCIModel
-from data.registry_model.registry_oci_model import OCIModel
-
-from storage.distributedstorage import DistributedStorage
-from storage.fakestorage import FakeStorage
-from test.fixtures import *
-
-
-@pytest.fixture(params=[PreOCIModel, OCIModel])
-def registry_model(request, initialized_db):
- return request.param()
-
-
-@pytest.fixture()
-def fake_session():
- with patch('data.registry_model.manifestbuilder.session', {}):
- yield
-
-
-@pytest.mark.parametrize('layers', [
- pytest.param([('someid', None, 'some data')], id='Single layer'),
- pytest.param([('parentid', None, 'some parent data'),
- ('someid', 'parentid', 'some data')],
- id='Multi layer'),
-])
-def test_build_manifest(layers, fake_session, registry_model):
- repository_ref = registry_model.lookup_repository('devtable', 'complex')
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- settings = BlobUploadSettings('2M', 512 * 1024, 3600)
- app_config = {'TESTING': True}
-
- builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
- assert lookup_manifest_builder(repository_ref, 'anotherid', storage,
- docker_v2_signing_key) is None
- assert lookup_manifest_builder(repository_ref, builder.builder_id, storage,
- docker_v2_signing_key) is not None
-
- blobs_by_layer = {}
- for layer_id, parent_id, layer_bytes in layers:
- # Start a new layer.
- assert builder.start_layer(layer_id, json.dumps({'id': layer_id, 'parent': parent_id}),
- 'local_us', None, 60)
-
- checksum = hashlib.sha1(layer_bytes).hexdigest()
-
- # Assign it a blob.
- with upload_blob(repository_ref, storage, settings) as uploader:
- uploader.upload_chunk(app_config, BytesIO(layer_bytes))
- blob = uploader.commit_to_blob(app_config)
- blobs_by_layer[layer_id] = blob
- builder.assign_layer_blob(builder.lookup_layer(layer_id), blob, [checksum])
-
- # Validate the checksum.
- assert builder.validate_layer_checksum(builder.lookup_layer(layer_id), checksum)
-
- # Commit the manifest to a tag.
- tag = builder.commit_tag_and_manifest('somenewtag', builder.lookup_layer(layers[-1][0]))
- assert tag
- assert tag in builder.committed_tags
-
- # Mark the builder as done.
- builder.done()
-
- # Verify the legacy image for the tag.
- found = registry_model.get_repo_tag(repository_ref, 'somenewtag', include_legacy_image=True)
- assert found
- assert found.name == 'somenewtag'
- assert found.legacy_image.docker_image_id == layers[-1][0]
-
- # Verify the blob and manifest.
- manifest = registry_model.get_manifest_for_tag(found)
- assert manifest
-
- parsed = manifest.get_parsed_manifest()
- assert len(list(parsed.layers)) == len(layers)
-
- for index, (layer_id, parent_id, layer_bytes) in enumerate(layers):
- assert list(parsed.blob_digests)[index] == blobs_by_layer[layer_id].digest
- assert list(parsed.layers)[index].v1_metadata.image_id == layer_id
- assert list(parsed.layers)[index].v1_metadata.parent_image_id == parent_id
-
- assert parsed.leaf_layer_v1_image_id == layers[-1][0]
-
-
-def test_build_manifest_missing_parent(fake_session, registry_model):
- storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
- repository_ref = registry_model.lookup_repository('devtable', 'complex')
- builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
-
- assert builder.start_layer('somelayer', json.dumps({'id': 'somelayer', 'parent': 'someparent'}),
- 'local_us', None, 60) is None
diff --git a/data/runmigration.py b/data/runmigration.py
index f4126aba1..b06cf861d 100644
--- a/data/runmigration.py
+++ b/data/runmigration.py
@@ -5,19 +5,12 @@ from alembic.script import ScriptDirectory
from alembic.environment import EnvironmentContext
from alembic.migration import __name__ as migration_name

-def run_alembic_migration(db_uri, log_handler=None, setup_app=True):
+def run_alembic_migration(log_handler=None):
if log_handler:
    logging.getLogger(migration_name).addHandler(log_handler)

config = Config()
config.set_main_option("script_location", "data:migrations")
- config.set_main_option("db_uri", db_uri)
-
- if setup_app:
- config.set_main_option('alembic_setup_app', 'True')
- else:
- config.set_main_option('alembic_setup_app', '')
-
  script = ScriptDirectory.from_config(config)

def fn(rev, context):
diff --git a/data/test/test_encryption.py b/data/test/test_encryption.py
deleted file mode 100644
index f6ec8a94b..000000000
--- a/data/test/test_encryption.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pytest
-
-from data.encryption import FieldEncrypter, _VERSIONS, DecryptionFailureException
-
-@pytest.mark.parametrize('test_data', [
- '',
- 'hello world',
- 'wassup?!',
- 'IGZ2Y8KUN3EFWAZZXR3D7U4V5NXDVYZI5VGU6STPB6KM83PAB8WRGM32RD9FW0C0',
- 'JLRFBYS1EHKUE73S99HWOQWNPGLUZTBRF5HQEFUJS5BK3XVB54RNXYV4AUMJXCMC',
- 'a' * 3,
- 'a' * 4,
- 'a' * 5,
- 'a' * 31,
- 'a' * 32,
- 'a' * 33,
- 'a' * 150,
- u'😇',
-])
-@pytest.mark.parametrize('version', _VERSIONS.keys())
-@pytest.mark.parametrize('secret_key', [
- u'test1234',
- 'test1234',
- 'thisisanothercoolsecretkeyhere',
- '107383705745765174750346070528443780244192102846031525796571939503548634055845',
-])
-@pytest.mark.parametrize('use_valid_key', [
- True,
- False,
-])
-def test_encryption(test_data, version, secret_key, use_valid_key):
- encrypter = FieldEncrypter(secret_key, version)
- encrypted = encrypter.encrypt_value(test_data, field_max_length=255)
- assert encrypted != test_data
-
- if use_valid_key:
- decrypted = encrypter.decrypt_value(encrypted)
- assert decrypted == test_data
-
- with pytest.raises(DecryptionFailureException):
- encrypter.decrypt_value('somerandomvalue')
- else:
- decrypter = FieldEncrypter('some other key', version)
- with pytest.raises(DecryptionFailureException):
- decrypter.decrypt_value(encrypted)
diff --git a/data/test/test_queue.py b/data/test/test_queue.py
deleted file mode 100644
index 36f61b502..000000000
--- a/data/test/test_queue.py
+++ /dev/null
@@ -1,420 +0,0 @@
-import json
-import time
-
-import pytest
-
-from contextlib import contextmanager
-from datetime import datetime, timedelta
-from functools import wraps
-
-from data.database import QueueItem
-from data.queue import WorkQueue, MINIMUM_EXTENSION
-
-from test.fixtures import *
-
-QUEUE_NAME = 'testqueuename'
-
-
-class SaveLastCountReporter(object):
- def __init__(self):
- self.currently_processing = None
- self.running_count = None
- self.total = None
-
- def __call__(self, currently_processing, running_count, total_jobs):
- self.currently_processing = currently_processing
- self.running_count = running_count
- self.total = total_jobs
-
-
-class AutoUpdatingQueue(object):
- def __init__(self, queue_to_wrap):
- self._queue = queue_to_wrap
-
- def _wrapper(self, func):
- @wraps(func)
- def wrapper(*args, **kwargs):
- to_return = func(*args, **kwargs)
- self._queue.update_metrics()
- return to_return
- return wrapper
-
- def __getattr__(self, attr_name):
- method_or_attr = getattr(self._queue, attr_name)
- if callable(method_or_attr):
- return self._wrapper(method_or_attr)
- else:
- return method_or_attr
-
-
-TEST_MESSAGE_1 = json.dumps({'data': 1})
-TEST_MESSAGE_2 = json.dumps({'data': 2})
-TEST_MESSAGES = [json.dumps({'data': str(i)}) for i in range(1, 101)]
-
-
-@contextmanager
-def fake_transaction(arg):
- yield
-
-@pytest.fixture()
-def reporter():
- return SaveLastCountReporter()
-
-
-@pytest.fixture()
-def transaction_factory():
- return fake_transaction
-
-
-@pytest.fixture()
-def queue(reporter, transaction_factory, initialized_db):
- return AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, reporter=reporter))
-
-
-def test_get_single_item(queue, reporter, transaction_factory):
- # Add a single item to the queue.
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
-
- # Have two "instances" retrieve an item to claim. Since there is only one, both calls should
- # return the same item.
- now = datetime.utcnow()
- first_item = queue._select_available_item(False, now)
- second_item = queue._select_available_item(False, now)
-
- assert first_item.id == second_item.id
- assert first_item.state_id == second_item.state_id
-
- # Have both "instances" now try to claim the item. Only one should succeed.
- first_claimed = queue._attempt_to_claim_item(first_item, now, 300)
- second_claimed = queue._attempt_to_claim_item(first_item, now, 300)
-
- assert first_claimed
- assert not second_claimed
-
- # Ensure the item is no longer available.
- assert queue.get() is None
-
- # Ensure the item's state ID has changed.
- assert first_item.state_id != QueueItem.get().state_id
-
-def test_extend_processing(queue, reporter, transaction_factory):
- # Add and retrieve a queue item.
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- queue_item = queue.get(processing_time=10)
- assert queue_item is not None
-
- existing_db_item = QueueItem.get(id=queue_item.id)
-
- # Call extend processing with a timedelta less than the minimum and ensure its
- # processing_expires and state_id do not change.
- changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1)
- assert not changed
-
- updated_db_item = QueueItem.get(id=queue_item.id)
-
- assert existing_db_item.processing_expires == updated_db_item.processing_expires
- assert existing_db_item.state_id == updated_db_item.state_id
-
- # Call extend processing with a timedelta greater than the minimum and ensure its
- # processing_expires and state_id are changed.
- changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() + 1)
- assert changed
-
- updated_db_item = QueueItem.get(id=queue_item.id)
-
- assert existing_db_item.processing_expires != updated_db_item.processing_expires
- assert existing_db_item.state_id != updated_db_item.state_id
-
- # Call extend processing with a timedelta less than the minimum but also with new data and
- # ensure its processing_expires and state_id are changed.
- changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1,
- updated_data='newbody')
- assert changed
-
- updated_db_item = QueueItem.get(id=queue_item.id)
-
- assert existing_db_item.processing_expires != updated_db_item.processing_expires
- assert existing_db_item.state_id != updated_db_item.state_id
- assert updated_db_item.body == 'newbody'
-
-def test_same_canonical_names(queue, reporter, transaction_factory):
- assert reporter.currently_processing is None
- assert reporter.running_count is None
- assert reporter.total is None
-
- id_1 = int(queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1))
- id_2 = int(queue.put(['abc', 'def'], TEST_MESSAGE_2, available_after=-1))
- assert id_1 + 1 == id_2
- assert not reporter.currently_processing
- assert reporter.running_count == 0
- assert reporter.total == 1
-
- one = queue.get(ordering_required=True)
- assert one is not None
- assert one.body == TEST_MESSAGE_1
- assert reporter.currently_processing
- assert reporter.running_count == 1
- assert reporter.total == 1
-
- two_fail = queue.get(ordering_required=True)
- assert two_fail is None
- assert reporter.running_count == 1
- assert reporter.total == 1
-
- queue.complete(one)
- assert not reporter.currently_processing
- assert reporter.running_count == 0
- assert reporter.total == 1
-
- two = queue.get(ordering_required=True)
- assert two is not None
- assert reporter.currently_processing
- assert two.body == TEST_MESSAGE_2
- assert reporter.running_count == 1
- assert reporter.total == 1
-
-def test_different_canonical_names(queue, reporter, transaction_factory):
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- queue.put(['abc', 'ghi'], TEST_MESSAGE_2, available_after=-1)
- assert reporter.running_count == 0
- assert reporter.total == 2
-
- one = queue.get(ordering_required=True)
- assert one is not None
- assert one.body == TEST_MESSAGE_1
- assert reporter.running_count == 1
- assert reporter.total == 2
-
- two = queue.get(ordering_required=True)
- assert two is not None
- assert two.body == TEST_MESSAGE_2
- assert reporter.running_count == 2
- assert reporter.total == 2
-
-def test_canonical_name(queue, reporter, transaction_factory):
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- queue.put(['abc', 'def', 'ghi'], TEST_MESSAGE_1, available_after=-1)
-
- one = queue.get(ordering_required=True)
- assert QUEUE_NAME + '/abc/def/' != one
-
- two = queue.get(ordering_required=True)
- assert QUEUE_NAME + '/abc/def/ghi/' != two
-
-def test_expiration(queue, reporter, transaction_factory):
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- assert reporter.running_count == 0
- assert reporter.total == 1
-
- one = queue.get(processing_time=0.5, ordering_required=True)
- assert one is not None
- assert reporter.running_count == 1
- assert reporter.total == 1
-
- one_fail = queue.get(ordering_required=True)
- assert one_fail is None
-
- time.sleep(1)
- queue.update_metrics()
- assert reporter.running_count == 0
- assert reporter.total == 1
-
- one_again = queue.get(ordering_required=True)
- assert one_again is not None
- assert reporter.running_count == 1
- assert reporter.total == 1
-
-def test_alive(queue, reporter, transaction_factory):
- # No queue item = not alive.
- assert not queue.alive(['abc', 'def'])
-
- # Add a queue item.
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- assert queue.alive(['abc', 'def'])
-
- # Retrieve the queue item.
- queue_item = queue.get()
- assert queue_item is not None
- assert queue.alive(['abc', 'def'])
-
- # Make sure it is running by trying to retrieve it again.
- assert queue.get() is None
-
- # Delete the queue item.
- queue.complete(queue_item)
- assert not queue.alive(['abc', 'def'])
-
-def test_specialized_queue(queue, reporter, transaction_factory):
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- queue.put(['def', 'def'], TEST_MESSAGE_2, available_after=-1)
-
- my_queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, ['def']))
-
- two = my_queue.get(ordering_required=True)
- assert two is not None
- assert two.body == TEST_MESSAGE_2
-
- one_fail = my_queue.get(ordering_required=True)
- assert one_fail is None
-
- one = queue.get(ordering_required=True)
- assert one is not None
- assert one.body == TEST_MESSAGE_1
-
-def test_random_queue_no_duplicates(queue, reporter, transaction_factory):
- for msg in TEST_MESSAGES:
- queue.put(['abc', 'def'], msg, available_after=-1)
- seen = set()
-
- for _ in range(1, 101):
- item = queue.get()
- json_body = json.loads(item.body)
- msg = str(json_body['data'])
- assert msg not in seen
- seen.add(msg)
-
- for body in TEST_MESSAGES:
- json_body = json.loads(body)
- msg = str(json_body['data'])
- assert msg in seen
-
-def test_bulk_insert(queue, reporter, transaction_factory):
- assert reporter.currently_processing is None
- assert reporter.running_count is None
- assert reporter.total is None
-
- with queue.batch_insert() as queue_put:
- queue_put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
- queue_put(['abc', 'def'], TEST_MESSAGE_2, available_after=-1)
-
- queue.update_metrics()
- assert not reporter.currently_processing
- assert reporter.running_count == 0
- assert reporter.total == 1
-
- with queue.batch_insert() as queue_put:
- queue_put(['abd', 'def'], TEST_MESSAGE_1, available_after=-1)
- queue_put(['abd', 'ghi'], TEST_MESSAGE_2, available_after=-1)
-
- queue.update_metrics()
- assert not reporter.currently_processing
- assert reporter.running_count == 0
- assert reporter.total == 3
-
-def test_num_available_between(queue, reporter, transaction_factory):
- now = datetime.utcnow()
- queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-10)
- queue.put(['abc', 'ghi'], TEST_MESSAGE_2, available_after=-5)
-
- # Partial results
- count = queue.num_available_jobs_between(now-timedelta(seconds=8), now, ['abc'])
- assert count == 1
-
- # All results
- count = queue.num_available_jobs_between(now-timedelta(seconds=20), now, ['/abc'])
- assert count == 2
-
- # No results
- count = queue.num_available_jobs_between(now, now, 'abc')
- assert count == 0
-
-def test_incomplete(queue, reporter, transaction_factory):
- # Add an item.
- queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
-
- now = datetime.utcnow()
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 1
-
- # Retrieve it.
- item = queue.get()
- assert item is not None
- assert reporter.currently_processing
-
- # Mark it as incomplete.
- queue.incomplete(item, retry_after=-1)
- assert not reporter.currently_processing
-
- # Retrieve again to ensure it is once again available.
- same_item = queue.get()
- assert same_item is not None
- assert reporter.currently_processing
-
- assert item.id == same_item.id
-
-def test_complete(queue, reporter, transaction_factory):
- # Add an item.
- queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
-
- now = datetime.utcnow()
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 1
-
- # Retrieve it.
- item = queue.get()
- assert item is not None
- assert reporter.currently_processing
-
- # Mark it as complete.
- queue.complete(item)
- assert not reporter.currently_processing
-
-def test_cancel(queue, reporter, transaction_factory):
- # Add an item.
- queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
- queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_2, available_after=-5)
-
- now = datetime.utcnow()
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 2
-
- # Retrieve it.
- item = queue.get()
- assert item is not None
-
- # Make sure we can cancel it.
- assert queue.cancel(item.id)
-
- now = datetime.utcnow()
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 1
-
- # Make sure it is gone.
- assert not queue.cancel(item.id)
-
-def test_deleted_namespaced_items(queue, reporter, transaction_factory):
- queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory,
- reporter=reporter,
- has_namespace=True))
-
- queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
- queue.put(['somenamespace', 'abc', 'ghi'], TEST_MESSAGE_2, available_after=-5)
- queue.put(['anothernamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
-
- # Ensure we have 2 items under `somenamespace` and 1 item under `anothernamespace`.
- now = datetime.utcnow()
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 2
-
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/anothernamespace'])
- assert count == 1
-
- # Delete all `somenamespace` items.
- queue.delete_namespaced_items('somenamespace')
-
- # Check the updated counts.
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 0
-
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/anothernamespace'])
- assert count == 1
-
- # Delete all `anothernamespace` items.
- queue.delete_namespaced_items('anothernamespace')
-
- # Check the updated counts.
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
- assert count == 0
-
- count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/anothernamespace'])
- assert count == 0
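The deleted queue tests above exercise the core WorkQueue semantics: an item becomes visible once its availability delay passes, get() claims it, incomplete() returns it for a retry, complete() removes it, and cancel() only succeeds while the item still exists. For reference, here is a minimal in-memory sketch of those semantics; SimpleQueue and its fields are illustrative assumptions, not the WorkQueue API these tests ran against.

import time
import uuid


class SimpleQueue(object):
    """Tiny in-memory queue illustrating get/incomplete/complete/cancel semantics."""

    def __init__(self):
        self._items = {}  # id -> dict(body=..., available_at=..., claimed=...)

    def put(self, body, available_after=0):
        item_id = str(uuid.uuid4())
        self._items[item_id] = {
            'body': body,
            'available_at': time.time() + available_after,
            'claimed': False,
        }
        return item_id

    def get(self):
        now = time.time()
        for item_id, item in self._items.items():
            if not item['claimed'] and item['available_at'] <= now:
                item['claimed'] = True
                return item_id, item['body']
        return None

    def incomplete(self, item_id, retry_after=0):
        # Return the item to the queue so a later get() can claim it again.
        item = self._items[item_id]
        item['claimed'] = False
        item['available_at'] = time.time() + retry_after

    def complete(self, item_id):
        self._items.pop(item_id, None)

    def cancel(self, item_id):
        # True only if the item still existed when cancelled.
        return self._items.pop(item_id, None) is not None


if __name__ == '__main__':
    q = SimpleQueue()
    q.put('message-1', available_after=-1)
    claimed = q.get()
    assert claimed is not None
    q.incomplete(claimed[0], retry_after=-1)
    assert q.get()[0] == claimed[0]   # the same item is available again
    q.complete(claimed[0])
    assert q.get() is None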
diff --git a/data/test/test_readreplica.py b/data/test/test_readreplica.py
deleted file mode 100644
index 7f7111d2a..000000000
--- a/data/test/test_readreplica.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import os
-import shutil
-
-import pytest
-
-from peewee import OperationalError
-
-from data.database import configure, User, read_only_config
-from data.readreplica import ReadOnlyModeException
-from test.testconfig import FakeTransaction
-from test.fixtures import *
-
-
-@pytest.mark.skipif(bool(os.environ.get('TEST_DATABASE_URI')), reason='Testing requires SQLite')
-def test_readreplica(init_db_path, tmpdir_factory):
- primary_file = str(tmpdir_factory.mktemp("data").join("primary.db"))
- replica_file = str(tmpdir_factory.mktemp("data").join("replica.db"))
-
- # Copy the initialized database to two different locations.
- shutil.copy2(init_db_path, primary_file)
- shutil.copy2(init_db_path, replica_file)
-
- db_config = {
- 'DB_URI': 'sqlite:///{0}'.format(primary_file),
- 'DB_READ_REPLICAS': [
- {'DB_URI': 'sqlite:///{0}'.format(replica_file)},
- ],
- "DB_CONNECTION_ARGS": {
- 'threadlocals': True,
- 'autorollback': True,
- },
- "DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(),
- "FOR_TESTING": True,
- "DATABASE_SECRET_KEY": "anothercrazykey!",
- }
-
- # Initialize the DB with the primary and the replica.
- configure(db_config)
- assert not read_only_config.obj.is_readonly
- assert read_only_config.obj.read_replicas
-
- # Ensure we can read the data.
- devtable_user = User.get(username='devtable')
- assert devtable_user.username == 'devtable'
-
- # Configure with a bad primary. Reading should still work since we're hitting the replica.
- db_config['DB_URI'] = 'sqlite:///does/not/exist'
- configure(db_config)
-
- assert not read_only_config.obj.is_readonly
- assert read_only_config.obj.read_replicas
-
- devtable_user = User.get(username='devtable')
- assert devtable_user.username == 'devtable'
-
- # Try to change some data. This should fail because the primary is broken.
- with pytest.raises(OperationalError):
- devtable_user.email = 'newlychanged'
- devtable_user.save()
-
- # Fix the primary and try again.
- db_config['DB_URI'] = 'sqlite:///{0}'.format(primary_file)
- configure(db_config)
-
- assert not read_only_config.obj.is_readonly
- assert read_only_config.obj.read_replicas
-
- devtable_user.email = 'newlychanged'
- devtable_user.save()
-
- # Mark the system as readonly.
- db_config['DB_URI'] = 'sqlite:///{0}'.format(primary_file)
- db_config['REGISTRY_STATE'] = 'readonly'
- configure(db_config)
-
- assert read_only_config.obj.is_readonly
- assert read_only_config.obj.read_replicas
-
- # Ensure all write operations raise a readonly mode exception.
- with pytest.raises(ReadOnlyModeException):
- devtable_user.email = 'newlychanged2'
- devtable_user.save()
-
- with pytest.raises(ReadOnlyModeException):
- User.create(username='foo')
-
- with pytest.raises(ReadOnlyModeException):
- User.delete().where(User.username == 'foo').execute()
-
- with pytest.raises(ReadOnlyModeException):
- User.update(username='bar').where(User.username == 'foo').execute()
-
- # Reset the config on the DB, so we don't mess up other tests.
- configure({
- 'DB_URI': 'sqlite:///{0}'.format(primary_file),
- "DB_CONNECTION_ARGS": {
- 'threadlocals': True,
- 'autorollback': True,
- },
- "DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(),
- "DATABASE_SECRET_KEY": "anothercrazykey!",
- })
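test_readreplica above checks three behaviors: reads are served by replicas even when the primary is unreachable, writes fail against a broken primary, and every write raises once REGISTRY_STATE is 'readonly'. Below is a minimal, framework-free sketch of that routing pattern; ReplicaRouter and the execute() interface are assumptions for illustration, not the peewee-based implementation the test exercises.

class ReadOnlyModeError(Exception):
    """Raised when a write is attempted while the system is in read-only mode."""


class ReplicaRouter(object):
    """Route reads to replicas and writes to the primary, assuming both expose
    an execute(sql) method."""

    def __init__(self, primary, replicas, is_readonly=False):
        self._primary = primary
        self._replicas = list(replicas)
        self._is_readonly = is_readonly
        self._next = 0

    def read(self, sql):
        # Round-robin reads across the replicas, if any are configured.
        if self._replicas:
            replica = self._replicas[self._next % len(self._replicas)]
            self._next += 1
            return replica.execute(sql)
        return self._primary.execute(sql)

    def write(self, sql):
        if self._is_readonly:
            raise ReadOnlyModeError('registry is in read-only mode')
        return self._primary.execute(sql)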
diff --git a/data/test/test_text.py b/data/test/test_text.py
deleted file mode 100644
index 14b4519d1..000000000
--- a/data/test/test_text.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import pytest
-
-from data.text import match_mysql, match_like
-from data.database import Repository
-from test.fixtures import *
-
-@pytest.mark.parametrize('input', [
- ('hello world'),
- ('hello \' world'),
- ('hello " world'),
- ('hello ` world'),
-])
-def test_mysql_text_escaping(input):
- query, values = Repository.select().where(match_mysql(Repository.description, input)).sql()
- assert input not in query
-
-
-@pytest.mark.parametrize('input, expected', [
- ('hello world', 'hello world'),
- ('hello \'world', 'hello world'),
- ('hello "world', 'hello world'),
- ('hello `world', 'hello world'),
- ('hello !world', 'hello !!world'),
- ('hello %world', 'hello !%world'),
-])
-def test_postgres_text_escaping(input, expected):
- query, values = Repository.select().where(match_like(Repository.description, input)).sql()
- assert input not in query
- assert values[0] == '%' + expected + '%'
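The deleted test_text.py cases pin down the LIKE-escaping rules: '!' doubles to '!!', '%' and '_' gain a '!' escape, and quote characters are stripped entirely. A standalone sketch of the same rules, mirroring _escape_wildcard from data/text.py (also removed in this diff):

def escape_like_query(search_query):
    """Escape LIKE/ILIKE wildcards using '!' as the escape character and strip
    quoting characters, matching the expectations of the deleted tests."""
    escaped = (search_query
               .replace('!', '!!')
               .replace('%', '!%')
               .replace('_', '!_')
               .replace('[', '!['))
    for ch in ("'", '"', '`'):
        escaped = escaped.replace(ch, '')
    return escaped


assert escape_like_query('hello !world') == 'hello !!world'
assert escape_like_query('hello %world') == 'hello !%world'
assert escape_like_query("hello 'world") == 'hello world'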
diff --git a/data/test/test_userfiles.py b/data/test/test_userfiles.py
deleted file mode 100644
index 671011e58..000000000
--- a/data/test/test_userfiles.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import pytest
-
-from mock import Mock
-from io import BytesIO
-
-from data.userfiles import DelegateUserfiles, Userfiles
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('prefix,path,expected', [
- ('test', 'foo', 'test/foo'),
- ('test', 'bar', 'test/bar'),
- ('test', '/bar', 'test/bar'),
- ('test', '../foo', 'test/foo'),
- ('test', 'foo/bar/baz', 'test/baz'),
- ('test', 'foo/../baz', 'test/baz'),
-
- (None, 'foo', 'foo'),
- (None, 'foo/bar/baz', 'baz'),
-])
-def test_filepath(prefix, path, expected):
- userfiles = DelegateUserfiles(None, None, 'local_us', prefix)
- assert userfiles.get_file_id_path(path) == expected
-
-
-def test_lookup_userfile(app, client):
- uuid = 'deadbeef-dead-beef-dead-beefdeadbeef'
- bad_uuid = 'deadduck-dead-duck-dead-duckdeadduck'
- upper_uuid = 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
-
- def _stream_read_file(locations, path):
- if path.find(uuid) > 0 or path.find(upper_uuid) > 0:
- return BytesIO("hello world")
-
- raise IOError('Not found!')
-
- storage_mock = Mock()
- storage_mock.stream_read_file = _stream_read_file
-
- app.config['USERFILES_PATH'] = 'foo'
- Userfiles(app, distributed_storage=storage_mock, path='mockuserfiles',
- handler_name='mockuserfiles')
-
- rv = client.open('/mockuserfiles/' + uuid, method='GET')
- assert rv.status_code == 200
-
- rv = client.open('/mockuserfiles/' + upper_uuid, method='GET')
- assert rv.status_code == 200
-
- rv = client.open('/mockuserfiles/' + bad_uuid, method='GET')
- assert rv.status_code == 404
-
- rv = client.open('/mockuserfiles/foo/bar/baz', method='GET')
- assert rv.status_code == 404
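The test_filepath cases assert that any user-supplied file id collapses to its basename under the configured prefix, so '..' segments and nested paths cannot escape it. A small sketch of that sanitization, mirroring the get_file_id_path behavior shown later in this diff:

import os


def sanitized_file_path(prefix, file_id):
    """Collapse a user-supplied file id to its basename so '..' and nested
    segments cannot escape the prefix."""
    return os.path.join(prefix or '', os.path.basename(file_id))


assert sanitized_file_path('test', '../foo') == 'test/foo'
assert sanitized_file_path('test', 'foo/bar/baz') == 'test/baz'
assert sanitized_file_path(None, 'foo/bar/baz') == 'baz'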
diff --git a/data/text.py b/data/text.py
deleted file mode 100644
index 9fa6bbf3e..000000000
--- a/data/text.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from peewee import NodeList, SQL, fn, TextField, Field
-
-def _escape_wildcard(search_query):
- """ Escapes the wildcards found in the given search query so that they are treated as *characters*
- rather than wildcards when passed to a LIKE or ILIKE clause with an ESCAPE '!'.
- """
- search_query = (search_query
- .replace('!', '!!')
- .replace('%', '!%')
- .replace('_', '!_')
- .replace('[', '!['))
-
- # Just to be absolutely sure.
- search_query = search_query.replace('\'', '')
- search_query = search_query.replace('"', '')
- search_query = search_query.replace('`', '')
-
- return search_query
-
-
-def prefix_search(field, prefix_query):
- """ Returns the wildcard match for searching for the given prefix query. """
- # Escape the known wildcard characters.
- prefix_query = _escape_wildcard(prefix_query)
- return Field.__pow__(field, NodeList((prefix_query + '%', SQL("ESCAPE '!'"))))
-
-
-def match_mysql(field, search_query):
- """ Generates a full-text match query using a Match operation, which is needed for MySQL.
- """
- if field.name.find('`') >= 0: # Just to be safe.
- raise Exception("How did field name '%s' end up containing a backtick?" % field.name)
-
- # Note: There is a known bug in MySQL (https://bugs.mysql.com/bug.php?id=78485) that causes
- # queries of the form `*` to raise a parsing error. If found, simply filter out.
- search_query = search_query.replace('*', '')
-
- # Just to be absolutely sure.
- search_query = search_query.replace('\'', '')
- search_query = search_query.replace('"', '')
- search_query = search_query.replace('`', '')
-
- return NodeList((fn.MATCH(SQL("`%s`" % field.name)), fn.AGAINST(SQL('%s', [search_query]))),
- parens=True)
-
-
-def match_like(field, search_query):
- """ Generates a full-text match query using an ILIKE operation, which is needed for SQLite and
- Postgres.
- """
- escaped_query = _escape_wildcard(search_query)
- clause = NodeList(('%' + escaped_query + '%', SQL("ESCAPE '!'")))
- return Field.__pow__(field, clause)
diff --git a/data/userevent.py b/data/userevent.py
index b4f340e5e..aea34226a 100644
--- a/data/userevent.py
+++ b/data/userevent.py
@@ -1,14 +1,10 @@
+import redis
import json
import threading
import logging
-import redis
-
logger = logging.getLogger(__name__)
-class CannotReadUserEventsException(Exception):
- """ Exception raised if user events cannot be read. """
-
class UserEventBuilder(object):
"""
Defines a helper class for constructing UserEvent and UserEventListener
@@ -57,7 +53,7 @@ class UserEvent(object):
as backed by Redis.
"""
def __init__(self, redis_config, username):
- self._redis = redis.StrictRedis(socket_connect_timeout=2, socket_timeout=2, **redis_config)
+ self._redis = redis.StrictRedis(socket_connect_timeout=5, **redis_config)
self._username = username
@staticmethod
@@ -76,7 +72,7 @@ class UserEvent(object):
try:
self.publish_event_data_sync(event_id, data_obj)
logger.debug('Published user event %s: %s', event_id, data_obj)
- except redis.RedisError:
+ except Exception:
logger.exception('Could not publish user event')
thread = threading.Thread(target=conduct)
@@ -88,21 +84,12 @@ class UserEventListener(object):
Defines a helper class for subscribing to realtime user events as
backed by Redis.
"""
- def __init__(self, redis_config, username, events=None):
- events = events or set([])
+ def __init__(self, redis_config, username, events=set([])):
channels = [self._user_event_key(username, e) for e in events]
- args = dict(redis_config)
- args.update({'socket_connect_timeout': 5,
- 'single_connection_client': True})
-
- try:
- self._redis = redis.StrictRedis(**args)
- self._pubsub = self._redis.pubsub(ignore_subscribe_messages=True)
- self._pubsub.subscribe(channels)
- except redis.RedisError as re:
- logger.exception('Could not reach user events redis: %s', re)
- raise CannotReadUserEventsException
+ self._redis = redis.StrictRedis(socket_connect_timeout=5, **redis_config)
+ self._pubsub = self._redis.pubsub()
+ self._pubsub.subscribe(channels)
@staticmethod
def _user_event_key(username, event_id):
@@ -111,44 +98,24 @@ class UserEventListener(object):
def event_stream(self):
"""
Starts listening for events on the channel(s), yielding for each event
- found. Will yield a synthetic "pulse" event as a heartbeat every few seconds
- when no real event has arrived.
+ found.
"""
- while True:
- pubsub = self._pubsub
- if pubsub is None:
- raise StopIteration
+ for item in self._pubsub.listen():
+ channel = item['channel']
+ event_id = channel.split('/')[3] # user/{username}/{events}/{id}
+ data = None
try:
- item = pubsub.get_message(ignore_subscribe_messages=True, timeout=5)
- except redis.RedisError:
- item = None
+ data = json.loads(item['data'] or '{}')
+ except:
+ pass
- if item is None:
- yield 'pulse', {}
- else:
- channel = item['channel']
- event_id = channel.split('/')[3] # user/{username}/{events}/{id}
- data = None
-
- try:
- data = json.loads(item['data'] or '{}')
- except ValueError:
- continue
-
- if data:
- yield event_id, data
+ if data:
+ yield event_id, data
def stop(self):
"""
Unsubscribes from the channel(s). Should be called once the connection
has terminated.
"""
- if self._pubsub is not None:
- self._pubsub.unsubscribe()
- self._pubsub.close()
- if self._redis is not None:
- self._redis.close()
-
- self._pubsub = None
- self._redis = None
+ self._pubsub.unsubscribe()
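The removed event_stream() polls Redis pub/sub with a timeout and yields a synthetic 'pulse' heartbeat whenever no message arrives. A condensed sketch of that loop using the same redis-py calls visible above; the channel name and localhost connection settings are placeholders:

import json

import redis


def listen_with_pulse(channel, timeout=5):
    """Yield (channel, data) pairs from a Redis pub/sub channel, emitting a
    ('pulse', {}) heartbeat whenever no message arrives within `timeout`
    seconds. Assumes a Redis server on localhost; illustrative only."""
    client = redis.StrictRedis(host='localhost', socket_connect_timeout=5,
                               decode_responses=True)
    pubsub = client.pubsub(ignore_subscribe_messages=True)
    pubsub.subscribe(channel)

    try:
        while True:
            message = pubsub.get_message(ignore_subscribe_messages=True, timeout=timeout)
            if message is None:
                yield 'pulse', {}
                continue
            try:
                data = json.loads(message['data'] or '{}')
            except ValueError:
                continue
            if data:
                yield message['channel'], data
    finally:
        pubsub.unsubscribe()
        pubsub.close()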
diff --git a/data/userfiles.py b/data/userfiles.py
index 1803c94ef..f4b786df5 100644
--- a/data/userfiles.py
+++ b/data/userfiles.py
@@ -1,15 +1,11 @@
import os
import logging
-import urlparse
-
-from uuid import uuid4
-from _pyio import BufferedReader
-
import magic
+from uuid import uuid4
from flask import url_for, request, send_file, make_response, abort
from flask.views import View
-from util import get_app_url
+from _pyio import BufferedReader
logger = logging.getLogger(__name__)
@@ -30,10 +26,8 @@ class UserfilesHandlers(View):
file_stream = self._storage.stream_read_file(self._locations, path)
buffered = BufferedReader(file_stream)
file_header_bytes = buffered.peek(1024)
- return send_file(buffered, mimetype=self._magic.from_buffer(file_header_bytes),
- as_attachment=True, attachment_filename=file_id)
+ return send_file(buffered, mimetype=self._magic.from_buffer(file_header_bytes))
except IOError:
- logger.exception('Error reading user file')
abort(404)
def put(self, file_id):
@@ -57,12 +51,8 @@ class UserfilesHandlers(View):
return self.put(file_id)
-class MissingHandlerException(Exception):
- pass
-
-
class DelegateUserfiles(object):
- def __init__(self, app, distributed_storage, location, path, handler_name=None):
+ def __init__(self, app, distributed_storage, location, path, handler_name):
self._app = app
self._storage = distributed_storage
self._locations = {location}
@@ -75,8 +65,7 @@ class DelegateUserfiles(object):
url_scheme=self._app.config['PREFERRED_URL_SCHEME'])
def get_file_id_path(self, file_id):
- # Note: We use basename here to prevent paths with ..'s and absolute paths.
- return os.path.join(self._prefix or '', os.path.basename(file_id))
+ return os.path.join(self._prefix, file_id)
def prepare_for_drop(self, mime_type, requires_cors=True):
""" Returns a signed URL to upload a file to our bucket. """
@@ -86,14 +75,9 @@ class DelegateUserfiles(object):
url = self._storage.get_direct_upload_url(self._locations, path, mime_type, requires_cors)
if url is None:
- if self._handler_name is None:
- raise MissingHandlerException()
-
with self._app.app_context() as ctx:
ctx.url_adapter = self._build_url_adapter()
- file_relative_url = url_for(self._handler_name, file_id=file_id)
- file_url = urlparse.urljoin(get_app_url(self._app.config), file_relative_url)
- return (file_url, file_id)
+ return (url_for(self._handler_name, file_id=file_id, _external=True), file_id)
return (url, file_id)
@@ -106,19 +90,14 @@ class DelegateUserfiles(object):
content_encoding)
return file_id
- def get_file_url(self, file_id, remote_ip, expires_in=300, requires_cors=False):
+ def get_file_url(self, file_id, expires_in=300, requires_cors=False):
path = self.get_file_id_path(file_id)
- url = self._storage.get_direct_download_url(self._locations, path, remote_ip, expires_in,
- requires_cors)
+ url = self._storage.get_direct_download_url(self._locations, path, expires_in, requires_cors)
if url is None:
- if self._handler_name is None:
- raise MissingHandlerException()
-
with self._app.app_context() as ctx:
ctx.url_adapter = self._build_url_adapter()
- file_relative_url = url_for(self._handler_name, file_id=file_id)
- return urlparse.urljoin(get_app_url(self._app.config), file_relative_url)
+ return url_for(self._handler_name, file_id=file_id, _external=True)
return url
@@ -128,32 +107,30 @@ class DelegateUserfiles(object):
class Userfiles(object):
- def __init__(self, app=None, distributed_storage=None, path='userfiles',
- handler_name='userfiles_handler'):
+ def __init__(self, app=None, distributed_storage=None):
self.app = app
if app is not None:
- self.state = self.init_app(app, distributed_storage, path=path, handler_name=handler_name)
+ self.state = self.init_app(app, distributed_storage)
else:
self.state = None
- def init_app(self, app, distributed_storage, path='userfiles', handler_name='userfiles_handler'):
+ def init_app(self, app, distributed_storage):
location = app.config.get('USERFILES_LOCATION')
- userfiles_path = app.config.get('USERFILES_PATH', None)
+ path = app.config.get('USERFILES_PATH', None)
- if userfiles_path is not None:
- userfiles = DelegateUserfiles(app, distributed_storage, location, userfiles_path,
- handler_name=handler_name)
+ handler_name = 'userfiles_handlers'
- app.add_url_rule('/%s/' % path,
- view_func=UserfilesHandlers.as_view(handler_name,
- distributed_storage=distributed_storage,
- location=location,
- files=userfiles))
+ userfiles = DelegateUserfiles(app, distributed_storage, location, path, handler_name)
- # register extension with app
- app.extensions = getattr(app, 'extensions', {})
- app.extensions['userfiles'] = userfiles
+ app.add_url_rule('/userfiles/',
+ view_func=UserfilesHandlers.as_view(handler_name,
+ distributed_storage=distributed_storage,
+ location=location,
+ files=userfiles))
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['userfiles'] = userfiles
return userfiles
def __getattr__(self, name):
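Both get_file_url and prepare_for_drop follow the same pattern: ask the storage engine for a direct (signed) URL and fall back to a URL served by the application's own userfiles handler when the backend cannot provide one. A minimal sketch of that decision, with storage, locations and build_handler_url as illustrative stand-ins rather than the exact Quay interfaces:

def resolve_download_url(storage, locations, path, build_handler_url):
    """Return a direct download URL from the storage backend when it can
    produce one (e.g. a signed URL), otherwise a URL served by the app's own
    userfiles handler."""
    direct_url = storage.get_direct_download_url(locations, path)
    if direct_url is not None:
        return direct_url
    # Fall back to a route handled by the application itself.
    return build_handler_url(path)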
diff --git a/data/users/__init__.py b/data/users/__init__.py
index 78e025028..220c0b6f8 100644
--- a/data/users/__init__.py
+++ b/data/users/__init__.py
@@ -9,11 +9,8 @@ from data import model
from data.users.database import DatabaseUsers
from data.users.externalldap import LDAPUsers
from data.users.externaljwt import ExternalJWTAuthN
-from data.users.keystone import get_keystone_users
-from data.users.apptoken import AppTokenInternalAuth
+from data.users.keystone import KeystoneUsers
from util.security.aes import AESCipher
-from util.security.secret import convert_secret_key
-
logger = logging.getLogger(__name__)
@@ -27,88 +24,50 @@ def get_federated_service_name(authentication_type):
if authentication_type == 'Keystone':
return 'keystone'
- if authentication_type == 'AppToken':
- return None
-
- if authentication_type == 'Database':
- return None
-
raise Exception('Unknown auth type: %s' % authentication_type)
-LDAP_CERT_FILENAME = 'ldap.crt'
-
-def get_users_handler(config, _, override_config_dir):
- """ Returns a users handler for the authentication configured in the given config object. """
- authentication_type = config.get('AUTHENTICATION_TYPE', 'Database')
-
- if authentication_type == 'Database':
- return DatabaseUsers()
-
- if authentication_type == 'LDAP':
- ldap_uri = config.get('LDAP_URI', 'ldap://localhost')
- base_dn = config.get('LDAP_BASE_DN')
- admin_dn = config.get('LDAP_ADMIN_DN')
- admin_passwd = config.get('LDAP_ADMIN_PASSWD')
- user_rdn = config.get('LDAP_USER_RDN', [])
- uid_attr = config.get('LDAP_UID_ATTR', 'uid')
- email_attr = config.get('LDAP_EMAIL_ATTR', 'mail')
- secondary_user_rdns = config.get('LDAP_SECONDARY_USER_RDNS', [])
- timeout = config.get('LDAP_TIMEOUT')
- network_timeout = config.get('LDAP_NETWORK_TIMEOUT')
-
- allow_tls_fallback = config.get('LDAP_ALLOW_INSECURE_FALLBACK', False)
- return LDAPUsers(ldap_uri, base_dn, admin_dn, admin_passwd, user_rdn, uid_attr, email_attr,
- allow_tls_fallback, secondary_user_rdns=secondary_user_rdns,
- requires_email=features.MAILING, timeout=timeout,
- network_timeout=network_timeout)
-
- if authentication_type == 'JWT':
- verify_url = config.get('JWT_VERIFY_ENDPOINT')
- issuer = config.get('JWT_AUTH_ISSUER')
- max_fresh_s = config.get('JWT_AUTH_MAX_FRESH_S', 300)
-
- query_url = config.get('JWT_QUERY_ENDPOINT', None)
- getuser_url = config.get('JWT_GETUSER_ENDPOINT', None)
-
- return ExternalJWTAuthN(verify_url, query_url, getuser_url, issuer, override_config_dir,
- config['HTTPCLIENT'], max_fresh_s,
- requires_email=features.MAILING)
-
- if authentication_type == 'Keystone':
- auth_url = config.get('KEYSTONE_AUTH_URL')
- auth_version = int(config.get('KEYSTONE_AUTH_VERSION', 2))
- timeout = config.get('KEYSTONE_AUTH_TIMEOUT')
- keystone_admin_username = config.get('KEYSTONE_ADMIN_USERNAME')
- keystone_admin_password = config.get('KEYSTONE_ADMIN_PASSWORD')
- keystone_admin_tenant = config.get('KEYSTONE_ADMIN_TENANT')
- return get_keystone_users(auth_version, auth_url, keystone_admin_username,
- keystone_admin_password, keystone_admin_tenant, timeout,
- requires_email=features.MAILING)
-
- if authentication_type == 'AppToken':
- if features.DIRECT_LOGIN:
- raise Exception('Direct login feature must be disabled to use AppToken internal auth')
-
- if not features.APP_SPECIFIC_TOKENS:
- raise Exception('AppToken internal auth requires app specific token support to be enabled')
-
- return AppTokenInternalAuth()
-
- raise RuntimeError('Unknown authentication type: %s' % authentication_type)
-
class UserAuthentication(object):
- def __init__(self, app=None, config_provider=None, override_config_dir=None):
- self.secret_key = None
+ def __init__(self, app=None, override_config_dir=None):
+ self.app_secret_key = None
self.app = app
if app is not None:
- self.state = self.init_app(app, config_provider, override_config_dir)
+ self.state = self.init_app(app, override_config_dir)
else:
self.state = None
- def init_app(self, app, config_provider, override_config_dir):
- self.secret_key = convert_secret_key(app.config['SECRET_KEY'])
- users = get_users_handler(app.config, config_provider, override_config_dir)
+ def init_app(self, app, override_config_dir):
+ self.app_secret_key = app.config['SECRET_KEY']
+
+ authentication_type = app.config.get('AUTHENTICATION_TYPE', 'Database')
+
+ if authentication_type == 'Database':
+ users = DatabaseUsers()
+ elif authentication_type == 'LDAP':
+ ldap_uri = app.config.get('LDAP_URI', 'ldap://localhost')
+ base_dn = app.config.get('LDAP_BASE_DN')
+ admin_dn = app.config.get('LDAP_ADMIN_DN')
+ admin_passwd = app.config.get('LDAP_ADMIN_PASSWD')
+ user_rdn = app.config.get('LDAP_USER_RDN', [])
+ uid_attr = app.config.get('LDAP_UID_ATTR', 'uid')
+ email_attr = app.config.get('LDAP_EMAIL_ATTR', 'mail')
+
+ users = LDAPUsers(ldap_uri, base_dn, admin_dn, admin_passwd, user_rdn, uid_attr, email_attr)
+ elif authentication_type == 'JWT':
+ verify_url = app.config.get('JWT_VERIFY_ENDPOINT')
+ issuer = app.config.get('JWT_AUTH_ISSUER')
+ max_fresh_s = app.config.get('JWT_AUTH_MAX_FRESH_S', 300)
+ users = ExternalJWTAuthN(verify_url, issuer, override_config_dir,
+ app.config['HTTPCLIENT'], max_fresh_s)
+ elif authentication_type == 'Keystone':
+ auth_url = app.config.get('KEYSTONE_AUTH_URL')
+ keystone_admin_username = app.config.get('KEYSTONE_ADMIN_USERNAME')
+ keystone_admin_password = app.config.get('KEYSTONE_ADMIN_PASSWORD')
+ keystone_admin_tenant = app.config.get('KEYSTONE_ADMIN_TENANT')
+ users = KeystoneUsers(auth_url, keystone_admin_username, keystone_admin_password,
+ keystone_admin_tenant)
+ else:
+ raise RuntimeError('Unknown authentication type: %s' % authentication_type)
# register extension with app
app.extensions = getattr(app, 'extensions', {})
@@ -116,6 +75,30 @@ class UserAuthentication(object):
return users
+ def _get_secret_key(self):
+ """ Returns the secret key to use for encrypting and decrypting. """
+ secret_key = None
+
+ # First try parsing the key as an int.
+ try:
+ big_int = int(self.app_secret_key)
+ secret_key = str(bytearray.fromhex('{:02x}'.format(big_int)))
+ except ValueError:
+ pass
+
+ # Next try parsing it as a UUID.
+ if secret_key is None:
+ try:
+ secret_key = uuid.UUID(self.app_secret_key).bytes
+ except ValueError:
+ pass
+
+ # Otherwise, use the raw secret key bytes directly.
+ if secret_key is None:
+ secret_key = str(bytearray(map(ord, self.app_secret_key)))
+
+ # Cycle the key material so the result is always exactly 32 bytes long.
+ return ''.join(itertools.islice(itertools.cycle(secret_key), 32))
+
def encrypt_user_password(self, password):
""" Returns an encrypted version of the user's password. """
data = {
@@ -123,12 +106,12 @@ class UserAuthentication(object):
}
message = json.dumps(data)
- cipher = AESCipher(self.secret_key)
+ cipher = AESCipher(self._get_secret_key())
return cipher.encrypt(message)
def _decrypt_user_password(self, encrypted):
""" Attempts to decrypt the given password and returns it. """
- cipher = AESCipher(self.secret_key)
+ cipher = AESCipher(self._get_secret_key())
try:
message = cipher.decrypt(encrypted)
@@ -144,64 +127,6 @@ class UserAuthentication(object):
return data.get('password', encrypted)
- def ping(self):
- """ Returns whether the authentication engine is reachable and working. """
- return self.state.ping()
-
- @property
- def federated_service(self):
- """ Returns the name of the federated service for the auth system. If none, should return None.
- """
- return self.state.federated_service
-
- @property
- def requires_distinct_cli_password(self):
- """ Returns whether this auth system requires a distinct CLI password to be created,
- in-system, before the CLI can be used. """
- return self.state.requires_distinct_cli_password
-
- @property
- def supports_encrypted_credentials(self):
- """ Returns whether this auth system supports using encrypted credentials. """
- return self.state.supports_encrypted_credentials
-
- def has_password_set(self, username):
- """ Returns whether the user has a password set in the auth system. """
- return self.state.has_password_set(username)
-
- @property
- def supports_fresh_login(self):
- """ Returns whether this auth system supports the fresh login check. """
- return self.state.supports_fresh_login
-
- def query_users(self, query, limit=20):
- """ Performs a lookup against the user system for the specified query. The returned tuple
- will be of the form (results, federated_login_id, err_msg). If the method is unsupported,
- the results portion of the tuple will be None instead of empty list.
-
- Note that this method can and will return results for users not yet found within the
- database; it is the responsibility of the caller to call link_user if they need the
- database row for the user system record.
-
- Results will be in the form of objects with username and email fields.
- """
- return self.state.query_users(query, limit)
-
- def link_user(self, username_or_email):
- """ Returns a tuple containing the database user record linked to the given username/email
- and any error that occurred when trying to link the user.
- """
- return self.state.link_user(username_or_email)
-
- def get_and_link_federated_user_info(self, user_info, internal_create=False):
- """ Returns a tuple containing the database user record linked to the given UserInformation
- pair and any error that occurred when trying to link the user.
-
- If `internal_create` is True, the caller is an internal user creation process (such
- as team syncing), and the "can a user be created" check will be bypassed.
- """
- return self.state.get_and_link_federated_user_info(user_info, internal_create=internal_create)
-
def confirm_existing_user(self, username, password):
""" Verifies that the given password matches to the given DB username. Unlike
verify_credentials, this call first translates the DB user via the FederatedLogin table
@@ -213,28 +138,6 @@ class UserAuthentication(object):
""" Verifies that the given username and password credentials are valid. """
return self.state.verify_credentials(username_or_email, password)
- def check_group_lookup_args(self, group_lookup_args):
- """ Verifies that the given group lookup args point to a valid group. Returns a tuple consisting
- of a boolean status and an error message (if any).
- """
- return self.state.check_group_lookup_args(group_lookup_args)
-
- def service_metadata(self):
- """ Returns a dictionary of extra metadata to present to *superusers* about this auth engine.
- For example, LDAP returns the base DN so we can display to the user during sync setup.
- """
- return self.state.service_metadata()
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- """ Returns a tuple of an iterator over all the members of the group matching the given lookup
- args dictionary, or the error that occurred if the initial call failed or is unsupported.
- The format of the lookup args dictionary is specific to the implementation.
- Each result in the iterator is a tuple of (UserInformation, error_message), and only
- one will be not-None.
- """
- return self.state.iterate_group_members(group_lookup_args, page_size=page_size,
- disable_pagination=disable_pagination)
-
def verify_and_link_user(self, username_or_email, password, basic_auth=False):
""" Verifies that the given username and password credentials are valid and, if so,
creates or links the database user to the federated identity. """
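The added _get_secret_key() derives AES key material from an arbitrary SECRET_KEY by trying an integer interpretation, then a UUID, then the raw characters, and finally cycling the result to 32 bytes. Below is a Python 3 adaptation sketch of the same derivation (the original returns a byte string via str()); derive_aes_key is an illustrative name, not part of the codebase.

import itertools
import uuid


def derive_aes_key(app_secret_key):
    """Derive a fixed 32-byte key from an arbitrary SECRET_KEY string by
    interpreting it as a big integer, a UUID, or raw characters, then cycling
    the bytes to length 32."""
    secret_key = None

    # First try parsing the key as an int (hex-encode its value).
    try:
        big_int = int(app_secret_key)
        secret_key = bytearray.fromhex('{:02x}'.format(big_int))
    except ValueError:
        pass

    # Next try parsing it as a UUID.
    if secret_key is None:
        try:
            secret_key = bytearray(uuid.UUID(app_secret_key).bytes)
        except ValueError:
            pass

    # Otherwise, use the raw character bytes directly.
    if secret_key is None:
        secret_key = bytearray(app_secret_key.encode('utf-8'))

    # Cycle (or truncate) the key material to exactly 32 bytes.
    return bytes(bytearray(itertools.islice(itertools.cycle(secret_key), 32)))


key = derive_aes_key('anothercrazykey!')
assert len(key) == 32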
diff --git a/data/users/apptoken.py b/data/users/apptoken.py
deleted file mode 100644
index c306e7064..000000000
--- a/data/users/apptoken.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import logging
-
-from data import model
-from oauth.loginmanager import OAuthLoginManager
-from oauth.oidc import PublicKeyLoadException
-from util.security.jwtutil import InvalidTokenError
-
-
-logger = logging.getLogger(__name__)
-
-class AppTokenInternalAuth(object):
- """ Forces all internal credential login to go through an app token, by disabling all other
- access.
- """
- @property
- def supports_fresh_login(self):
- # Since there is no password.
- return False
-
- @property
- def federated_service(self):
- return None
-
- @property
- def requires_distinct_cli_password(self):
- # Since there is no supported "password".
- return False
-
- def has_password_set(self, username):
- # Since there is no supported "password".
- return False
-
- @property
- def supports_encrypted_credentials(self):
- # Since there is no supported "password".
- return False
-
- def verify_credentials(self, username_or_email, id_token):
- return (None, 'An application specific token is required to login')
-
- def verify_and_link_user(self, username_or_email, password):
- return self.verify_credentials(username_or_email, password)
-
- def confirm_existing_user(self, username, password):
- return self.verify_credentials(username, password)
-
- def link_user(self, username_or_email):
- return (None, 'Unsupported for this authentication system')
-
- def get_and_link_federated_user_info(self, user_info):
- return (None, 'Unsupported for this authentication system')
-
- def query_users(self, query, limit):
- return (None, '', '')
-
- def check_group_lookup_args(self, group_lookup_args):
- return (False, 'Not supported')
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- return (None, 'Not supported')
-
- def service_metadata(self):
- return {}
-
- def ping(self):
- """ Always assumed to be working. If the DB is broken, other checks will handle it. """
- return (True, None)
diff --git a/data/users/database.py b/data/users/database.py
index 2a1780429..e8cb3faad 100644
--- a/data/users/database.py
+++ b/data/users/database.py
@@ -1,31 +1,6 @@
from data import model
class DatabaseUsers(object):
- @property
- def federated_service(self):
- return None
-
- @property
- def supports_fresh_login(self):
- return True
-
- def ping(self):
- """ Always assumed to be working. If the DB is broken, other checks will handle it. """
- return (True, None)
-
- @property
- def supports_encrypted_credentials(self):
- return True
-
- def has_password_set(self, username):
- user = model.user.get_user(username)
- return user and user.password_hash is not None
-
- @property
- def requires_distinct_cli_password(self):
- # Since the database stores its own password.
- return True
-
def verify_credentials(self, username_or_email, password):
""" Simply delegate to the model implementation. """
result = model.user.verify_user(username_or_email, password)
@@ -41,26 +16,3 @@ class DatabaseUsers(object):
def confirm_existing_user(self, username, password):
return self.verify_credentials(username, password)
- def link_user(self, username_or_email):
- """ Never used since all users being added are already, by definition, in the database. """
- return (None, 'Unsupported for this authentication system')
-
- def get_and_link_federated_user_info(self, user_info, internal_create=False):
- """ Never used since all users being added are already, by definition, in the database. """
- return (None, 'Unsupported for this authentication system')
-
- def query_users(self, query, limit):
- """ No need to implement, as we already query for users directly in the database. """
- return (None, '', '')
-
- def check_group_lookup_args(self, group_lookup_args):
- """ Never used since all groups, by definition, are in the database. """
- return (False, 'Not supported')
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- """ Never used since all groups, by definition, are in the database. """
- return (None, 'Not supported')
-
- def service_metadata(self):
- """ Never used since database has no metadata """
- return {}
diff --git a/data/users/externaljwt.py b/data/users/externaljwt.py
index 7f2fea255..241cfa947 100644
--- a/data/users/externaljwt.py
+++ b/data/users/externaljwt.py
@@ -1,128 +1,72 @@
import logging
import json
import os
+import jwt
-from data.users.federated import FederatedUsers, UserInformation
-from util.security import jwtutil
-
+from datetime import datetime, timedelta
+from data.users.federated import FederatedUsers, VerifiedCredentials
logger = logging.getLogger(__name__)
-
class ExternalJWTAuthN(FederatedUsers):
""" Delegates authentication to a REST endpoint that returns JWTs. """
PUBLIC_KEY_FILENAME = 'jwt-authn.cert'
- def __init__(self, verify_url, query_url, getuser_url, issuer, override_config_dir, http_client,
- max_fresh_s, public_key_path=None, requires_email=True):
- super(ExternalJWTAuthN, self).__init__('jwtauthn', requires_email)
+ def __init__(self, verify_url, issuer, override_config_dir, http_client, max_fresh_s,
+ public_key_path=None):
+ super(ExternalJWTAuthN, self).__init__('jwtauthn')
self.verify_url = verify_url
- self.query_url = query_url
- self.getuser_url = getuser_url
-
self.issuer = issuer
self.client = http_client
self.max_fresh_s = max_fresh_s
- self.requires_email = requires_email
default_key_path = os.path.join(override_config_dir, ExternalJWTAuthN.PUBLIC_KEY_FILENAME)
public_key_path = public_key_path or default_key_path
if not os.path.exists(public_key_path):
- error_message = ('JWT Authentication public key file "%s" not found' % public_key_path)
+ error_message = ('JWT Authentication public key file "%s" not found in directory %s' %
+ (ExternalJWTAuthN.PUBLIC_KEY_FILENAME, override_config_dir))
raise Exception(error_message)
- self.public_key_path = public_key_path
-
with open(public_key_path) as public_key_file:
self.public_key = public_key_file.read()
- def has_password_set(self, username):
- return True
-
- def ping(self):
- result = self.client.get(self.getuser_url, timeout=2)
- # We expect a 401 or 403 of some kind, since we explicitly don't send an auth header
- if result.status_code // 100 != 4:
- return (False, result.text or 'Could not reach JWT authn endpoint')
-
- return (True, None)
-
- def get_user(self, username_or_email):
- if self.getuser_url is None:
- return (None, 'No endpoint defined for retrieving user')
-
- (payload, err_msg) = self._execute_call(self.getuser_url, 'quay.io/jwtauthn/getuser',
- params=dict(username=username_or_email))
- if err_msg is not None:
- return (None, err_msg)
-
- if not 'sub' in payload:
- raise Exception('Missing sub field in JWT')
-
- if self.requires_email and not 'email' in payload:
- raise Exception('Missing email field in JWT')
-
- # Parse out the username and email.
- user_info = UserInformation(username=payload['sub'], email=payload.get('email'),
- id=payload['sub'])
- return (user_info, None)
-
-
- def query_users(self, query, limit=20):
- if self.query_url is None:
- return (None, self.federated_service, 'No endpoint defined for querying users')
-
- (payload, err_msg) = self._execute_call(self.query_url, 'quay.io/jwtauthn/query',
- params=dict(query=query, limit=limit))
- if err_msg is not None:
- return (None, self.federated_service, err_msg)
-
- query_results = []
- for result in payload['results'][0:limit]:
- user_info = UserInformation(username=result['username'], email=result.get('email'),
- id=result['username'])
- query_results.append(user_info)
-
- return (query_results, self.federated_service, None)
-
-
def verify_credentials(self, username_or_email, password):
- (payload, err_msg) = self._execute_call(self.verify_url, 'quay.io/jwtauthn',
- auth=(username_or_email, password))
- if err_msg is not None:
- return (None, err_msg)
+ result = self.client.get(self.verify_url, timeout=2, auth=(username_or_email, password))
- if not 'sub' in payload:
- raise Exception('Missing sub field in JWT')
-
- if self.requires_email and not 'email' in payload:
- raise Exception('Missing email field in JWT')
-
- user_info = UserInformation(username=payload['sub'], email=payload.get('email'),
- id=payload['sub'])
- return (user_info, None)
-
-
- def _execute_call(self, url, aud, auth=None, params=None):
- """ Executes a call to the external JWT auth provider. """
- result = self.client.get(url, timeout=2, auth=auth, params=params)
if result.status_code != 200:
- return (None, result.text or 'Could not make JWT auth call')
+ return (None, result.text or 'Invalid username or password')
try:
result_data = json.loads(result.text)
except ValueError:
- raise Exception('Returned JWT body for url %s does not contain JSON' % url)
+ raise Exception('Returned JWT Authentication body does not contain JSON')
# Load the JWT returned.
encoded = result_data.get('token', '')
- exp_limit_options = jwtutil.exp_max_s_option(self.max_fresh_s)
try:
- payload = jwtutil.decode(encoded, self.public_key, algorithms=['RS256'],
- audience=aud, issuer=self.issuer,
- options=exp_limit_options)
- return (payload, None)
- except jwtutil.InvalidTokenError:
- logger.exception('Exception when decoding returned JWT for url %s', url)
- return (None, 'Exception when decoding returned JWT')
+ payload = jwt.decode(encoded, self.public_key, algorithms=['RS256'],
+ audience='quay.io/jwtauthn', issuer=self.issuer)
+ except jwt.InvalidTokenError:
+ logger.exception('Exception when decoding returned JWT')
+ return (None, 'Invalid username or password')
+
+ if not 'sub' in payload:
+ raise Exception('Missing username field in JWT')
+
+ if not 'email' in payload:
+ raise Exception('Missing email field in JWT')
+
+ if not 'exp' in payload:
+ raise Exception('Missing exp field in JWT')
+
+ # Verify that the expiration is no more than self.max_fresh_s seconds in the future.
+ expiration = datetime.utcfromtimestamp(payload['exp'])
+ if expiration > datetime.utcnow() + timedelta(seconds=self.max_fresh_s):
+ logger.debug('Payload expiration is outside of the %s second window: %s', self.max_fresh_s,
+ payload['exp'])
+ return (None, 'Invalid username or password')
+
+ # Parse out the username and email.
+ return (VerifiedCredentials(username=payload['sub'], email=payload['email']), None)
+
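The restored verify_credentials validates the JWT returned by the endpoint with PyJWT, then additionally rejects tokens whose exp lies more than max_fresh_s seconds in the future. A standalone sketch of that validation step; the 'quay.io/jwtauthn' audience is taken from the code above, while the function name and return shape are illustrative.

from datetime import datetime, timedelta

import jwt


def verify_authn_token(encoded, public_key, issuer, max_fresh_s=300):
    """Decode an RS256 JWT from the external auth endpoint and reject tokens
    whose expiration lies further than max_fresh_s seconds in the future."""
    try:
        payload = jwt.decode(encoded, public_key, algorithms=['RS256'],
                             audience='quay.io/jwtauthn', issuer=issuer)
    except jwt.InvalidTokenError:
        return None, 'Invalid username or password'

    for field in ('sub', 'email', 'exp'):
        if field not in payload:
            return None, 'Missing %s field in JWT' % field

    # Enforce the freshness window: exp must be at most max_fresh_s away.
    expiration = datetime.utcfromtimestamp(payload['exp'])
    if expiration > datetime.utcnow() + timedelta(seconds=max_fresh_s):
        return None, 'Token expiration is outside the allowed window'

    return {'username': payload['sub'], 'email': payload['email']}, None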
diff --git a/data/users/externalldap.py b/data/users/externalldap.py
index c1242e5d1..9a488b283 100644
--- a/data/users/externalldap.py
+++ b/data/users/externalldap.py
@@ -2,60 +2,31 @@ import ldap
import logging
import os
-from ldap.controls import SimplePagedResultsControl
-from ldap.filter import filter_format, escape_filter_chars
-
from collections import namedtuple
-from data.users.federated import FederatedUsers, UserInformation
-from util.itertoolrecipes import take
+from datetime import datetime
+from data.users.federated import FederatedUsers, VerifiedCredentials
logger = logging.getLogger(__name__)
-_DEFAULT_NETWORK_TIMEOUT = 10.0 # seconds
-_DEFAULT_TIMEOUT = 10.0 # seconds
-_DEFAULT_PAGE_SIZE = 1000
-
-
-class LDAPConnectionBuilder(object):
- def __init__(self, ldap_uri, user_dn, user_pw, allow_tls_fallback=False,
- timeout=None, network_timeout=None):
- self._ldap_uri = ldap_uri
- self._user_dn = user_dn
- self._user_pw = user_pw
- self._allow_tls_fallback = allow_tls_fallback
- self._timeout = timeout
- self._network_timeout = network_timeout
-
- def get_connection(self):
- return LDAPConnection(self._ldap_uri, self._user_dn, self._user_pw, self._allow_tls_fallback,
- self._timeout, self._network_timeout)
-
class LDAPConnection(object):
- def __init__(self, ldap_uri, user_dn, user_pw, allow_tls_fallback=False,
- timeout=None, network_timeout=None):
+ def __init__(self, ldap_uri, user_dn, user_pw):
self._ldap_uri = ldap_uri
self._user_dn = user_dn
self._user_pw = user_pw
- self._allow_tls_fallback = allow_tls_fallback
- self._timeout = timeout
- self._network_timeout = network_timeout
self._conn = None
def __enter__(self):
trace_level = 2 if os.environ.get('USERS_DEBUG') == '1' else 0
-
self._conn = ldap.initialize(self._ldap_uri, trace_level=trace_level)
self._conn.set_option(ldap.OPT_REFERRALS, 1)
- self._conn.set_option(ldap.OPT_NETWORK_TIMEOUT,
- self._network_timeout or _DEFAULT_NETWORK_TIMEOUT)
- self._conn.set_option(ldap.OPT_TIMEOUT, self._timeout or _DEFAULT_TIMEOUT)
- if self._allow_tls_fallback:
- logger.debug('TLS Fallback enabled in LDAP')
- self._conn.set_option(ldap.OPT_X_TLS_TRY, 1)
+ try:
+ self._conn.simple_bind_s(self._user_dn, self._user_pw)
+ except ldap.INVALID_CREDENTIALS:
+ logger.exception('LDAP admin dn or password are invalid')
+ return None
- self._conn.simple_bind_s(self._user_dn, self._user_pw)
return self._conn
def __exit__(self, exc_type, value, tb):
@@ -65,31 +36,14 @@ class LDAPConnection(object):
class LDAPUsers(FederatedUsers):
_LDAPResult = namedtuple('LDAPResult', ['dn', 'attrs'])
- def __init__(self, ldap_uri, base_dn, admin_dn, admin_passwd, user_rdn, uid_attr, email_attr,
- allow_tls_fallback=False, secondary_user_rdns=None, requires_email=True,
- timeout=None, network_timeout=None, force_no_pagination=False):
- super(LDAPUsers, self).__init__('ldap', requires_email)
-
- self._ldap = LDAPConnectionBuilder(ldap_uri, admin_dn, admin_passwd, allow_tls_fallback,
- timeout, network_timeout)
+ def __init__(self, ldap_uri, base_dn, admin_dn, admin_passwd, user_rdn, uid_attr, email_attr):
+ super(LDAPUsers, self).__init__('ldap')
+ self._ldap_conn = LDAPConnection(ldap_uri, admin_dn, admin_passwd)
self._ldap_uri = ldap_uri
+ self._base_dn = base_dn
+ self._user_rdn = user_rdn
self._uid_attr = uid_attr
self._email_attr = email_attr
- self._allow_tls_fallback = allow_tls_fallback
- self._requires_email = requires_email
- self._force_no_pagination = force_no_pagination
-
- # Note: user_rdn is a list of RDN pieces (for historical reasons), and secondary_user_rds
- # is a list of RDN strings.
- relative_user_dns = [','.join(user_rdn)] + (secondary_user_rdns or [])
-
- def get_full_rdn(relative_dn):
- prefix = relative_dn.split(',') if relative_dn else []
- return ','.join(prefix + base_dn)
-
- # Create the set of full DN paths.
- self._user_dns = [get_full_rdn(relative_dn) for relative_dn in relative_user_dns]
- self._base_dn = ','.join(base_dn)
def _get_ldap_referral_dn(self, referral_exception):
logger.debug('Got referral: %s', referral_exception.args[0])
@@ -110,160 +64,53 @@ class LDAPUsers(FederatedUsers):
referral_dn = referral_uri[len('ldap:///'):]
return referral_dn
- def _ldap_user_search_with_rdn(self, conn, username_or_email, user_search_dn, suffix=''):
- query = u'(|({0}={2}{3})({1}={2}{3}))'.format(self._uid_attr, self._email_attr,
- escape_filter_chars(username_or_email),
- suffix)
- logger.debug('Conducting user search: %s under %s', query, user_search_dn)
- try:
- return (conn.search_s(user_search_dn, ldap.SCOPE_SUBTREE, query.encode('utf-8')), None)
- except ldap.REFERRAL as re:
- referral_dn = self._get_ldap_referral_dn(re)
- if not referral_dn:
- return (None, 'Failed to follow referral when looking up username')
+ def _ldap_user_search(self, username_or_email):
+ with self._ldap_conn as conn:
+ if conn is None:
+ return (None, 'LDAP Admin dn or password is invalid')
+ logger.debug('Incoming username or email param: %s', username_or_email.__repr__())
+ user_search_dn = ','.join(self._user_rdn + self._base_dn)
+ query = u'(|({0}={2})({1}={2}))'.format(self._uid_attr, self._email_attr,
+ username_or_email)
+
+ logger.debug('Conducting user search: %s under %s', query, user_search_dn)
try:
- subquery = u'(%s=%s)' % (self._uid_attr, username_or_email)
- return (conn.search_s(referral_dn, ldap.SCOPE_BASE, subquery), None)
+ pairs = conn.search_s(user_search_dn, ldap.SCOPE_SUBTREE, query.encode('utf-8'))
+ except ldap.REFERRAL as re:
+ referral_dn = self._get_ldap_referral_dn(re)
+ if not referral_dn:
+ return (None, 'Failed to follow referral when looking up username')
+
+ try:
+ subquery = u'(%s=%s)' % (self._uid_attr, username_or_email)
+ pairs = conn.search_s(referral_dn, ldap.SCOPE_BASE, subquery)
+ except ldap.LDAPError:
+ logger.exception('LDAP referral search exception')
+ return (None, 'Username not found')
+
except ldap.LDAPError:
- logger.debug('LDAP referral search exception')
+ logger.exception('LDAP search exception')
return (None, 'Username not found')
- except ldap.LDAPError:
- logger.debug('LDAP search exception')
- return (None, 'Username not found')
-
- def _ldap_user_search(self, username_or_email, limit=20, suffix=''):
- if not username_or_email:
- return (None, 'Empty username/email')
-
- # Verify the admin connection works first. We do this here to avoid wrapping
- # the entire block in the INVALID CREDENTIALS check.
- try:
- with self._ldap.get_connection():
- pass
- except ldap.INVALID_CREDENTIALS:
- return (None, 'LDAP Admin dn or password is invalid')
-
- with self._ldap.get_connection() as conn:
- logger.debug('Incoming username or email param: %s', username_or_email.__repr__())
-
- for user_search_dn in self._user_dns:
- (pairs, err_msg) = self._ldap_user_search_with_rdn(conn, username_or_email, user_search_dn,
- suffix=suffix)
- if pairs is not None and len(pairs) > 0:
- break
-
- if err_msg is not None:
- return (None, err_msg)
-
logger.debug('Found matching pairs: %s', pairs)
- results = [LDAPUsers._LDAPResult(*pair) for pair in take(limit, pairs)]
- # Filter out pairs without DNs. Some LDAP impls will return such pairs.
+ results = [LDAPUsers._LDAPResult(*pair) for pair in pairs]
+
+ # Filter out pairs without DNs. Some LDAP impls will return such
+ # pairs.
with_dns = [result for result in results if result.dn]
- return (with_dns, None)
+ if len(with_dns) < 1:
+ return (None, 'Username not found')
- def _ldap_single_user_search(self, username_or_email):
- with_dns, err_msg = self._ldap_user_search(username_or_email)
- if err_msg is not None:
- return (None, err_msg)
+ # If we have found a single pair, then return it.
+ if len(with_dns) == 1:
+ return (with_dns[0], None)
- # Make sure we have at least one result.
- if len(with_dns) < 1:
- return (None, 'Username not found')
-
- # If we have found a single pair, then return it.
- if len(with_dns) == 1:
- return (with_dns[0], None)
-
- # Otherwise, there are multiple pairs with DNs, so find the one with the mail
- # attribute (if any).
- with_mail = [result for result in with_dns if result.attrs.get(self._email_attr)]
- return (with_mail[0] if with_mail else with_dns[0], None)
-
- def _build_user_information(self, response):
- if not response.get(self._uid_attr):
- return (None, 'Missing uid field "%s" in user record' % self._uid_attr)
-
- if self._requires_email and not response.get(self._email_attr):
- return (None, 'Missing mail field "%s" in user record' % self._email_attr)
-
- username = response[self._uid_attr][0].decode('utf-8')
- email = response.get(self._email_attr, [None])[0]
- return (UserInformation(username=username, email=email, id=username), None)
-
- def ping(self):
- try:
- with self._ldap.get_connection():
- pass
- except ldap.INVALID_CREDENTIALS:
- return (False, 'LDAP Admin dn or password is invalid')
- except ldap.LDAPError as lde:
- logger.exception('Exception when trying to health check LDAP')
- return (False, lde.message)
-
- return (True, None)
-
- def at_least_one_user_exists(self):
- logger.debug('Checking if any users exist in LDAP')
- try:
- with self._ldap.get_connection():
- pass
- except ldap.INVALID_CREDENTIALS:
- return (None, 'LDAP Admin dn or password is invalid')
-
- has_pagination = not self._force_no_pagination
- with self._ldap.get_connection() as conn:
- for user_search_dn in self._user_dns:
- lc = ldap.controls.libldap.SimplePagedResultsControl(criticality=True, size=1, cookie='')
- try:
- if has_pagination:
- msgid = conn.search_ext(user_search_dn, ldap.SCOPE_SUBTREE, serverctrls=[lc])
- _, rdata, _, serverctrls = conn.result3(msgid)
- else:
- msgid = conn.search(user_search_dn, ldap.SCOPE_SUBTREE)
- _, rdata = conn.result(msgid)
-
- for entry in rdata: # Handles both lists and iterators.
- return (True, None)
-
- except ldap.LDAPError as lde:
- return (False, str(lde) or 'Could not find DN %s' % user_search_dn)
-
- return (False, None)
-
- def get_user(self, username_or_email):
- """ Looks up a username or email in LDAP. """
- logger.debug('Looking up LDAP username or email %s', username_or_email)
- (found_user, err_msg) = self._ldap_single_user_search(username_or_email)
- if err_msg is not None:
- return (None, err_msg)
-
- logger.debug('Found user for LDAP username or email %s', username_or_email)
- _, found_response = found_user
- return self._build_user_information(found_response)
-
- def query_users(self, query, limit=20):
- """ Queries LDAP for matching users. """
- if not query:
- return (None, self.federated_service, 'Empty query')
-
- logger.debug('Got query %s with limit %s', query, limit)
- (results, err_msg) = self._ldap_user_search(query, limit=limit, suffix='*')
- if err_msg is not None:
- return (None, self.federated_service, err_msg)
-
- final_results = []
- for result in results[0:limit]:
- credentials, err_msg = self._build_user_information(result.attrs)
- if err_msg is not None:
- continue
-
- final_results.append(credentials)
-
- logger.debug('For query %s found results %s', query, final_results)
- return (final_results, self.federated_service, None)
+ # Otherwise, there are multiple pairs with DNs, so find the one with the mail
+ # attribute (if any).
+ with_mail = [result for result in results if result.attrs.get(self._email_attr)]
+ return (with_mail[0] if with_mail else with_dns[0], None)
def verify_credentials(self, username_or_email, password):
""" Verify the credentials with LDAP. """
@@ -271,7 +118,7 @@ class LDAPUsers(FederatedUsers):
if not password:
return (None, 'Anonymous binding not allowed')
- (found_user, err_msg) = self._ldap_single_user_search(username_or_email)
+ (found_user, err_msg) = self._ldap_user_search(username_or_email)
if found_user is None:
return (None, err_msg)
@@ -281,8 +128,7 @@ class LDAPUsers(FederatedUsers):
# First validate the password by binding as the user
try:
- with LDAPConnection(self._ldap_uri, found_dn, password.encode('utf-8'),
- self._allow_tls_fallback):
+ with LDAPConnection(self._ldap_uri, found_dn, password.encode('utf-8')):
pass
except ldap.REFERRAL as re:
referral_dn = self._get_ldap_referral_dn(re)
@@ -290,124 +136,24 @@ class LDAPUsers(FederatedUsers):
return (None, 'Invalid username')
try:
- with LDAPConnection(self._ldap_uri, referral_dn, password.encode('utf-8'),
- self._allow_tls_fallback):
+ with LDAPConnection(self._ldap_uri, referral_dn, password.encode('utf-8')):
pass
except ldap.INVALID_CREDENTIALS:
- logger.debug('Invalid LDAP credentials')
+ logger.exception('Invalid LDAP credentials')
return (None, 'Invalid password')
except ldap.INVALID_CREDENTIALS:
- logger.debug('Invalid LDAP credentials')
+ logger.exception('Invalid LDAP credentials')
return (None, 'Invalid password')
- return self._build_user_information(found_response)
+ # Now check if we have a federated login for this user
+ if not found_response.get(self._uid_attr):
+ return (None, 'Missing uid field "%s" in user record' % self._uid_attr)
- def service_metadata(self):
- return {
- 'base_dn': self._base_dn,
- }
+ if not found_response.get(self._email_attr):
+ return (None, 'Missing mail field "%s" in user record' % self._email_attr)
- def check_group_lookup_args(self, group_lookup_args, disable_pagination=False):
- if not group_lookup_args.get('group_dn'):
- return (False, 'Missing group_dn')
+ username = found_response[self._uid_attr][0].decode('utf-8')
+ email = found_response[self._email_attr][0]
+ return (VerifiedCredentials(username=username, email=email), None)
- (it, err) = self.iterate_group_members(group_lookup_args, page_size=1,
- disable_pagination=disable_pagination)
- if err is not None:
- return (False, err)
-
- if not next(it, False):
- return (False, 'Group does not exist or is empty')
-
- return (True, None)
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- try:
- with self._ldap.get_connection():
- pass
- except ldap.INVALID_CREDENTIALS:
- return (None, 'LDAP Admin dn or password is invalid')
-
- group_dn = group_lookup_args['group_dn']
- page_size = page_size or _DEFAULT_PAGE_SIZE
- return (self._iterate_members(group_dn, page_size, disable_pagination), None)
-
- def _iterate_members(self, group_dn, page_size, disable_pagination):
- has_pagination = not(self._force_no_pagination or disable_pagination)
- with self._ldap.get_connection() as conn:
- search_flt = filter_format('(memberOf=%s,%s)', (group_dn, self._base_dn))
- attributes = [self._uid_attr, self._email_attr]
-
- for user_search_dn in self._user_dns:
- lc = ldap.controls.libldap.SimplePagedResultsControl(criticality=True, size=page_size,
- cookie='')
-
- # Conduct the initial search for users that are a member of the group.
- logger.debug('Conducting LDAP search of DN: %s and filter %s', user_search_dn, search_flt)
- try:
- if has_pagination:
- msgid = conn.search_ext(user_search_dn, ldap.SCOPE_SUBTREE, search_flt,
- serverctrls=[lc], attrlist=attributes)
- else:
- msgid = conn.search(user_search_dn, ldap.SCOPE_SUBTREE, search_flt, attrlist=attributes)
- except ldap.LDAPError as lde:
- logger.exception('Got error when trying to search %s with filter %s: %s',
- user_search_dn, search_flt, lde.message)
- break
-
- while True:
- try:
- if has_pagination:
- _, rdata, _, serverctrls = conn.result3(msgid)
- else:
- _, rdata = conn.result(msgid)
-
- # Yield any users found.
- found_results = 0
- for userdata in rdata:
- found_results = found_results + 1
- yield self._build_user_information(userdata[1])
-
- logger.debug('Found %s users in group %s; %s', found_results, user_search_dn,
- search_flt)
- except ldap.NO_SUCH_OBJECT as nsoe:
- logger.debug('NSO when trying to lookup results of search %s with filter %s: %s',
- user_search_dn, search_flt, nsoe.message)
- except ldap.LDAPError as lde:
- logger.exception('Error when trying to lookup results of search %s with filter %s: %s',
- user_search_dn, search_flt, lde.message)
- break
-
- # If no additional results, nothing more to do.
- if not found_results:
- break
-
- # If pagination is disabled, nothing more to do.
- if not has_pagination:
- logger.debug('Pagination is disabled, no further queries')
- break
-
- # Filter down the controls with which the server responded, looking for the paging
- # control type. If not found, then the server does not support pagination and we already
- # got all of the results.
- pctrls = [control for control in serverctrls
- if control.controlType == ldap.controls.SimplePagedResultsControl.controlType]
-
- if pctrls:
- # Server supports pagination. Update the cookie so the next search finds the next page,
- # then conduct the next search.
- cookie = lc.cookie = pctrls[0].cookie
- if cookie:
- logger.debug('Pagination is supported for this LDAP server; trying next page')
- msgid = conn.search_ext(user_search_dn, ldap.SCOPE_SUBTREE, search_flt,
- serverctrls=[lc], attrlist=attributes)
- continue
- else:
- # No additional results.
- logger.debug('Pagination is supported for this LDAP server but on last page')
- break
- else:
- # Pagination is not supported.
- logger.debug('Pagination is not supported for this LDAP server')
- break
diff --git a/data/users/federated.py b/data/users/federated.py
index 87210bccd..e70740917 100644
--- a/data/users/federated.py
+++ b/data/users/federated.py
@@ -1,71 +1,46 @@
import logging
-import features
from collections import namedtuple
from data import model
-from data.users.shared import can_create_user
from util.validation import generate_valid_usernames
logger = logging.getLogger(__name__)
-UserInformation = namedtuple('UserInformation', ['username', 'email', 'id'])
-
-DISABLED_MESSAGE = 'User creation is disabled. Please contact your administrator to gain access.'
+VerifiedCredentials = namedtuple('VerifiedCredentials', ['username', 'email'])
class FederatedUsers(object):
""" Base class for all federated users systems. """
- def __init__(self, federated_service, requires_email):
+ def __init__(self, federated_service):
self._federated_service = federated_service
- self._requires_email = requires_email
-
- @property
- def federated_service(self):
- return self._federated_service
-
- @property
- def supports_fresh_login(self):
- return True
-
- @property
- def supports_encrypted_credentials(self):
- return True
-
- def has_password_set(self, username):
- return True
-
- @property
- def requires_distinct_cli_password(self):
- # Since the federated auth provides a password which works on the CLI.
- return False
-
- def get_user(self, username_or_email):
- """ Retrieves the user with the given username or email, returning a tuple containing
- a UserInformation (if success) and the error message (on failure).
- """
- raise NotImplementedError
def verify_credentials(self, username_or_email, password):
""" Verifies the given credentials against the backing federated service, returning
- a tuple containing a UserInformation (on success) and the error message (on failure).
- """
+        a tuple containing a VerifiedCredentials (on success) and the error message (on failure). """
raise NotImplementedError
- def query_users(self, query, limit=20):
- """ If implemented, get_user must be implemented as well. """
- return (None, 'Not supported')
+ def _get_federated_user(self, username, email):
+ db_user = model.user.verify_federated_login(self._federated_service, username)
+ if not db_user:
+ # We must create the user in our db
+ valid_username = None
+ for valid_username in generate_valid_usernames(username):
+ if model.user.is_username_unique(valid_username):
+ break
- def link_user(self, username_or_email):
- (user_info, err_msg) = self.get_user(username_or_email)
- if user_info is None:
- return (None, err_msg)
+ if not valid_username:
+ logger.error('Unable to pick a username for user: %s', username)
+ return (None, 'Unable to pick a username. Please report this to your administrator.')
- return self.get_and_link_federated_user_info(user_info)
+ db_user = model.user.create_federated_user(valid_username, email, self._federated_service,
+ username, set_password_notification=False)
+ else:
+ # Update the db attributes from the federated service.
+ db_user.email = email
+ db_user.save()
- def get_and_link_federated_user_info(self, user_info, internal_create=False):
- return self._get_and_link_federated_user_info(user_info.username, user_info.email,
- internal_create=internal_create)
+ return (db_user, None)
def verify_and_link_user(self, username_or_email, password):
""" Verifies the given credentials and, if valid, creates/links a database user to the
@@ -75,7 +50,7 @@ class FederatedUsers(object):
if credentials is None:
return (None, err_msg)
- return self._get_and_link_federated_user_info(credentials.username, credentials.email)
+ return self._get_federated_user(credentials.username, credentials.email)
def confirm_existing_user(self, username, password):
""" Confirms that the given *database* username and service password are valid for the linked
@@ -94,61 +69,3 @@ class FederatedUsers(object):
return (None, err_msg)
return (db_user, None)
-
- def service_metadata(self):
- """ Returns a dictionary of extra metadata to present to *superusers* about this auth engine.
- For example, LDAP returns the base DN so we can display to the user during sync setup.
- """
- return {}
-
- def check_group_lookup_args(self, group_lookup_args):
- """ Verifies that the given group lookup args point to a valid group. Returns a tuple consisting
- of a boolean status and an error message (if any).
- """
- return (False, 'Not supported')
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- """ Returns an iterator over all the members of the group matching the given lookup args
- dictionary. The format of the lookup args dictionary is specific to the implementation.
- """
- return (None, 'Not supported')
-
- def _get_and_link_federated_user_info(self, username, email, internal_create=False):
- db_user = model.user.verify_federated_login(self._federated_service, username)
- if not db_user:
-
- # Fetch list of blacklisted domains
- blacklisted_domains = model.config.app_config.get('BLACKLISTED_EMAIL_DOMAINS')
-
- # We must create the user in our db. Check to see if this is allowed (except for internal
- # creation, which is always allowed).
- if not internal_create and not can_create_user(email, blacklisted_domains):
- return (None, DISABLED_MESSAGE)
-
- valid_username = None
- for valid_username in generate_valid_usernames(username):
- if model.user.is_username_unique(valid_username):
- break
-
- if not valid_username:
- logger.error('Unable to pick a username for user: %s', username)
- return (None, 'Unable to pick a username. Please report this to your administrator.')
-
- prompts = model.user.get_default_user_prompts(features)
- try:
- db_user = model.user.create_federated_user(valid_username, email, self._federated_service,
- username,
- set_password_notification=False,
- email_required=self._requires_email,
- confirm_username=features.USERNAME_CONFIRMATION,
- prompts=prompts)
- except model.InvalidEmailAddressException as iae:
- return (None, str(iae))
-
- else:
- # Update the db attributes from the federated service.
- if email and db_user.email != email:
- db_user.email = email
- db_user.save()
-
- return (db_user, None)
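The refactored FederatedUsers base class above keeps only two responsibilities: verify_credentials(), which each backend must implement, and _get_federated_user(), which creates or links the Quay database user. A minimal sketch of a hypothetical backend shows how the pieces fit together; the StaticUsers class and its _DIRECTORY dict are invented for illustration and are not part of this change.

```python
# Illustration only -- not part of the diff. A toy FederatedUsers backend that
# authenticates against an in-memory directory, following the contract above:
# verify_credentials() returns (VerifiedCredentials, None) on success or
# (None, error_message) on failure; verify_and_link_user() in the base class
# then calls _get_federated_user() to create or link the Quay database user.
from data.users.federated import FederatedUsers, VerifiedCredentials

class StaticUsers(FederatedUsers):
  """ Hypothetical backend backed by a static in-memory directory. """
  _DIRECTORY = {
    'someuser': ('somepass', 'someuser@example.com'),
  }

  def __init__(self):
    super(StaticUsers, self).__init__('static')

  def verify_credentials(self, username_or_email, password):
    entry = self._DIRECTORY.get(username_or_email)
    if entry is None or entry[0] != password:
      return (None, 'Invalid username or password')
    return (VerifiedCredentials(username=username_or_email, email=entry[1]), None)

# Usage (requires an initialized database):
# (db_user, err) = StaticUsers().verify_and_link_user('someuser', 'somepass')
```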
diff --git a/data/users/keystone.py b/data/users/keystone.py
index b8e581e77..a7feb9a4f 100644
--- a/data/users/keystone.py
+++ b/data/users/keystone.py
@@ -1,300 +1,39 @@
import logging
-import os
-from keystoneauth1.identity import v2 as keystone_v2_auth
-from keystoneauth1.identity import v3 as keystone_v3_auth
-from keystoneauth1 import session
-from keystoneauth1.exceptions import ClientException
-from keystoneclient.v2_0 import client as client_v2
-from keystoneclient.v3 import client as client_v3
+from keystoneclient.v2_0 import client as kclient
from keystoneclient.exceptions import AuthorizationFailure as KeystoneAuthorizationFailure
from keystoneclient.exceptions import Unauthorized as KeystoneUnauthorized
-from keystoneclient.exceptions import NotFound as KeystoneNotFound
-from data.users.federated import FederatedUsers, UserInformation
-from util.itertoolrecipes import take
+from data.users.federated import FederatedUsers, VerifiedCredentials
logger = logging.getLogger(__name__)
-DEFAULT_TIMEOUT = 10 # seconds
-
-def get_keystone_users(auth_version, auth_url, admin_username, admin_password, admin_tenant,
- timeout=None, requires_email=True):
- if auth_version == 3:
- return KeystoneV3Users(auth_url, admin_username, admin_password, admin_tenant, timeout,
- requires_email)
- else:
- return KeystoneV2Users(auth_url, admin_username, admin_password, admin_tenant, timeout,
- requires_email)
-
-
-class KeystoneV2Users(FederatedUsers):
- """ Delegates authentication to OpenStack Keystone V2. """
- def __init__(self, auth_url, admin_username, admin_password, admin_tenant, timeout=None,
- requires_email=True):
- super(KeystoneV2Users, self).__init__('keystone', requires_email)
+class KeystoneUsers(FederatedUsers):
+ """ Delegates authentication to OpenStack Keystone. """
+ def __init__(self, auth_url, admin_username, admin_password, admin_tenant):
+ super(KeystoneUsers, self).__init__('keystone')
self.auth_url = auth_url
self.admin_username = admin_username
self.admin_password = admin_password
self.admin_tenant = admin_tenant
- self.timeout = timeout or DEFAULT_TIMEOUT
- self.debug = os.environ.get('USERS_DEBUG') == '1'
- self.requires_email = requires_email
-
- def _get_client(self, username, password, tenant_name=None):
- if tenant_name:
- auth = keystone_v2_auth.Password(auth_url=self.auth_url,
- username=username,
- password=password,
- tenant_name=tenant_name)
- else:
- auth = keystone_v2_auth.Password(auth_url=self.auth_url,
- username=username,
- password=password)
-
- sess = session.Session(auth=auth)
- client = client_v2.Client(session=sess,
- timeout=self.timeout,
- debug=self.debug)
- return client, sess
-
- def ping(self):
- try:
- _, sess = self._get_client(self.admin_username, self.admin_password, self.admin_tenant)
- assert sess.get_user_id() # Make sure we loaded a valid user.
- except KeystoneUnauthorized as kut:
- logger.exception('Keystone unauthorized admin')
- return (False, 'Keystone admin credentials are invalid: %s' % kut.message)
- except ClientException as e:
- logger.exception('Keystone unauthorized admin')
- return (False, 'Keystone ping check failed: %s' % e.message)
-
- return (True, None)
-
- def at_least_one_user_exists(self):
- logger.debug('Checking if any users exist in Keystone')
- try:
- keystone_client, _ = self._get_client(self.admin_username, self.admin_password,
- self.admin_tenant)
- user_list = keystone_client.users.list(tenant_id=self.admin_tenant, limit=1)
-
- if len(user_list) < 1:
- return (False, None)
-
- return (True, None)
- except ClientException as e:
- # Catch exceptions to give the user our custom error message
- logger.exception('Unable to list users in Keystone')
- return (False, e.message)
def verify_credentials(self, username_or_email, password):
try:
- _, sess = self._get_client(username_or_email, password)
- user_id = sess.get_user_id()
+ keystone_client = kclient.Client(username=username_or_email, password=password,
+ auth_url=self.auth_url)
+ user_id = keystone_client.user_id
except KeystoneAuthorizationFailure as kaf:
logger.exception('Keystone auth failure for user: %s', username_or_email)
- return (None, 'Invalid username or password')
+ return (None, kaf.message or 'Invalid username or password')
except KeystoneUnauthorized as kut:
logger.exception('Keystone unauthorized for user: %s', username_or_email)
- return (None, 'Invalid username or password')
- except ClientException as ex:
- logger.exception('Keystone unauthorized for user: %s', username_or_email)
- return (None, 'Invalid username or password')
-
- if user_id is None:
- return (None, 'Invalid username or password')
+ return (None, kut.message or 'Invalid username or password')
try:
- admin_client, _ = self._get_client(self.admin_username, self.admin_password,
- self.admin_tenant)
+ admin_client = kclient.Client(username=self.admin_username, password=self.admin_password,
+ tenant_name=self.admin_tenant, auth_url=self.auth_url)
user = admin_client.users.get(user_id)
except KeystoneUnauthorized as kut:
logger.exception('Keystone unauthorized admin')
return (None, 'Keystone admin credentials are invalid: %s' % kut.message)
- if self.requires_email and not hasattr(user, 'email'):
- return (None, 'Missing email field for user %s' % user_id)
-
- email = user.email if hasattr(user, 'email') else None
- return (UserInformation(username=username_or_email, email=email, id=user_id), None)
-
- def query_users(self, query, limit=20):
- return (None, self.federated_service, 'Unsupported in Keystone V2')
-
- def get_user(self, username_or_email):
- return (None, 'Unsupported in Keystone V2')
-
-
-class KeystoneV3Users(FederatedUsers):
- """ Delegates authentication to OpenStack Keystone V3. """
- def __init__(self, auth_url, admin_username, admin_password, admin_tenant, timeout=None,
- requires_email=True, project_domain_id='default', user_domain_id='default'):
- super(KeystoneV3Users, self).__init__('keystone', requires_email)
- self.auth_url = auth_url
- self.admin_username = admin_username
- self.admin_password = admin_password
- self.admin_tenant = admin_tenant
- self.project_domain_id = project_domain_id
- self.user_domain_id = user_domain_id
- self.timeout = timeout or DEFAULT_TIMEOUT
- self.debug = os.environ.get('USERS_DEBUG') == '1'
- self.requires_email = requires_email
-
- def _get_client(self, username, password, project_name=None):
- if project_name:
- auth = keystone_v3_auth.Password(auth_url=self.auth_url,
- username=username,
- password=password,
- project_name=project_name,
- project_domain_id=self.project_domain_id,
- user_domain_id=self.user_domain_id)
- else:
- auth = keystone_v3_auth.Password(auth_url=self.auth_url,
- username=username,
- password=password,
- user_domain_id=self.user_domain_id)
-
- sess = session.Session(auth=auth)
- client = client_v3.Client(session=sess,
- timeout=self.timeout,
- debug=self.debug)
- return client, sess
-
- def ping(self):
- try:
- _, sess = self._get_client(self.admin_username, self.admin_password)
- assert sess.get_user_id() # Make sure we loaded a valid user.
- except KeystoneUnauthorized as kut:
- logger.exception('Keystone unauthorized admin')
- return (False, 'Keystone admin credentials are invalid: %s' % kut.message)
- except ClientException as cle:
- logger.exception('Keystone unauthorized admin')
- return (False, 'Keystone ping check failed: %s' % cle.message)
-
- return (True, None)
-
- def at_least_one_user_exists(self):
- logger.debug('Checking if any users exist in admin tenant in Keystone')
- try:
- # Just make sure the admin can connect to the project.
- self._get_client(self.admin_username, self.admin_password, self.admin_tenant)
- return (True, None)
- except ClientException as cle:
- # Catch exceptions to give the user our custom error message
- logger.exception('Unable to list users in Keystone')
- return (False, cle.message)
-
- def verify_credentials(self, username_or_email, password):
- try:
- keystone_client, sess = self._get_client(username_or_email, password)
- user_id = sess.get_user_id()
- assert user_id
-
- keystone_client, sess = self._get_client(self.admin_username, self.admin_password,
- self.admin_tenant)
- user = keystone_client.users.get(user_id)
- if self.requires_email and not hasattr(user, 'email'):
- return (None, 'Missing email field for user %s' % user_id)
-
- return (self._user_info(user), None)
- except KeystoneAuthorizationFailure as kaf:
- logger.exception('Keystone auth failure for user: %s', username_or_email)
- return (None, 'Invalid username or password')
- except KeystoneUnauthorized as kut:
- logger.exception('Keystone unauthorized for user: %s', username_or_email)
- return (None, 'Invalid username or password')
- except ClientException as cle:
- logger.exception('Keystone unauthorized for user: %s', username_or_email)
- return (None, 'Invalid username or password')
-
- def get_user(self, username_or_email):
- users_found, _, err_msg = self.query_users(username_or_email)
- if err_msg is not None:
- return (None, err_msg)
-
- if len(users_found) != 1:
- return (None, 'Single user not found')
-
- user = users_found[0]
- if self.requires_email and not user.email:
- return (None, 'Missing email field for user %s' % user.id)
-
- return (user, None)
-
- def check_group_lookup_args(self, group_lookup_args):
- if not group_lookup_args.get('group_id'):
- return (False, 'Missing group_id')
-
- group_id = group_lookup_args['group_id']
- return self._check_group(group_id)
-
- def _check_group(self, group_id):
- try:
- admin_client, _ = self._get_client(self.admin_username, self.admin_password,
- self.admin_tenant)
- return (bool(admin_client.groups.get(group_id)), None)
- except KeystoneNotFound:
- return (False, 'Group not found')
- except KeystoneAuthorizationFailure as kaf:
- logger.exception('Keystone auth failure for admin user for group lookup %s', group_id)
- return (False, kaf.message or 'Invalid admin username or password')
- except KeystoneUnauthorized as kut:
- logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
- return (False, kut.message or 'Invalid admin username or password')
- except ClientException as cle:
- logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
- return (False, cle.message or 'Invalid admin username or password')
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- group_id = group_lookup_args['group_id']
-
- (status, err) = self._check_group(group_id)
- if not status:
- return (None, err)
-
- try:
- admin_client, _ = self._get_client(self.admin_username, self.admin_password,
- self.admin_tenant)
- user_info_iterator = admin_client.users.list(group=group_id)
- def iterator():
- for user in user_info_iterator:
- yield (self._user_info(user), None)
-
- return (iterator(), None)
- except KeystoneAuthorizationFailure as kaf:
- logger.exception('Keystone auth failure for admin user for group lookup %s', group_id)
- return (False, kaf.message or 'Invalid admin username or password')
- except KeystoneUnauthorized as kut:
- logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
- return (False, kut.message or 'Invalid admin username or password')
- except ClientException as cle:
- logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
- return (False, cle.message or 'Invalid admin username or password')
-
- @staticmethod
- def _user_info(user):
- email = user.email if hasattr(user, 'email') else None
- return UserInformation(user.name, email, user.id)
-
- def query_users(self, query, limit=20):
- if len(query) < 3:
- return ([], self.federated_service, None)
-
- try:
- admin_client, _ = self._get_client(self.admin_username, self.admin_password,
- self.admin_tenant)
-
- found_users = list(take(limit, admin_client.users.list(name=query)))
- logger.debug('For Keystone query %s found users: %s', query, found_users)
- if not found_users:
- return ([], self.federated_service, None)
-
- return ([self._user_info(user) for user in found_users], self.federated_service, None)
- except KeystoneAuthorizationFailure as kaf:
- logger.exception('Keystone auth failure for admin user for query %s', query)
- return (None, self.federated_service, kaf.message or 'Invalid admin username or password')
- except KeystoneUnauthorized as kut:
- logger.exception('Keystone unauthorized for admin user for query %s', query)
- return (None, self.federated_service, kut.message or 'Invalid admin username or password')
- except ClientException as cle:
- logger.exception('Keystone unauthorized for admin user for query %s', query)
- return (None, self.federated_service, cle.message or 'Invalid admin username or password')
+ return (VerifiedCredentials(username=username_or_email, email=user.email), None)
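A short usage sketch for the simplified KeystoneUsers class above; the endpoint, tenant and credentials are placeholders, and the calls assume a reachable Keystone v2.0 service.

```python
# Usage sketch (placeholders throughout) for the KeystoneUsers class above.
from data.users.keystone import KeystoneUsers

users = KeystoneUsers(auth_url='http://keystone.example.com:5000/v2.0',
                      admin_username='admin',
                      admin_password='admin-password',
                      admin_tenant='admin')

# verify_credentials() returns (VerifiedCredentials, None) on success,
# or (None, error_message) when Keystone rejects the login.
(verified, err) = users.verify_credentials('cool.user', 'password')
if err is None:
  assert verified.username == 'cool.user'
  assert verified.email is not None

# verify_and_link_user() additionally creates or links the Quay database user:
# (db_user, err) = users.verify_and_link_user('cool.user', 'password')
```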
diff --git a/data/users/shared.py b/data/users/shared.py
deleted file mode 100644
index 8f1cc09df..000000000
--- a/data/users/shared.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-import tldextract
-
-import features
-
-from data import model
-
-
-def can_create_user(email_address, blacklisted_domains=None):
- """ Returns true if a user with the specified e-mail address can be created. """
-
- if features.BLACKLISTED_EMAILS and email_address and '@' in email_address:
- blacklisted_domains = blacklisted_domains or []
- _, email_domain = email_address.split('@', 1)
- extracted = tldextract.extract(email_domain)
- if extracted.registered_domain.lower() in blacklisted_domains:
- return False
-
- if not features.USER_CREATION:
- return False
-
- if features.INVITE_ONLY_USER_CREATION:
- if not email_address:
- return False
-
- # Check to see that there is an invite for the e-mail address.
- return bool(model.team.lookup_team_invites_by_email(email_address))
-
- # Otherwise the user can be created (assuming it doesn't already exist, of course)
- return True
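The deleted can_create_user() helper gates self-service account creation on the USER_CREATION, INVITE_ONLY_USER_CREATION and BLACKLISTED_EMAILS feature flags. The usage sketch below assumes open user creation with e-mail blacklisting enabled; it mirrors the parametrized cases in data/users/test/test_shared.py, also deleted further below.

```python
# Usage sketch for the deleted can_create_user() helper. Results assume
# features.USER_CREATION and features.BLACKLISTED_EMAILS are enabled and
# features.INVITE_ONLY_USER_CREATION is disabled.
from data.users.shared import can_create_user

blacklisted = ['blacklisted.com', 'blacklisted.org']

can_create_user('foo@blacklisted.com', blacklisted)       # False: registered domain is blacklisted
can_create_user('foo@mail.blacklisted.com', blacklisted)  # False: subdomains resolve to the same registered domain
can_create_user('foo@myblacklisted.com', blacklisted)     # True: partial domain matches are ignored
can_create_user('foo@example.com', blacklisted)           # True: open creation for non-blacklisted domains
```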
diff --git a/data/users/teamsync.py b/data/users/teamsync.py
deleted file mode 100644
index 2ab0fea10..000000000
--- a/data/users/teamsync.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import logging
-import json
-
-from data import model
-
-logger = logging.getLogger(__name__)
-
-
-MAX_TEAMS_PER_ITERATION = 500
-
-
-def sync_teams_to_groups(authentication, stale_cutoff):
- """ Performs team syncing by looking up any stale team(s) found, and performing the sync
- operation on them.
- """
- logger.debug('Looking up teams to sync to groups')
-
- sync_team_tried = set()
- while len(sync_team_tried) < MAX_TEAMS_PER_ITERATION:
- # Find a stale team.
- stale_team_sync = model.team.get_stale_team(stale_cutoff)
- if not stale_team_sync:
- logger.debug('No additional stale team found; sleeping')
- return
-
- # Make sure we don't try to reprocess a team on this iteration.
- if stale_team_sync.id in sync_team_tried:
- break
-
- sync_team_tried.add(stale_team_sync.id)
-
- # Sync the team.
- sync_successful = sync_team(authentication, stale_team_sync)
- if not sync_successful:
- return
-
-
-def sync_team(authentication, stale_team_sync):
- """ Performs synchronization of a team (as referenced by the TeamSync stale_team_sync).
- Returns True on success and False otherwise.
- """
- sync_config = json.loads(stale_team_sync.config)
- logger.info('Syncing team `%s` under organization %s via %s (#%s)', stale_team_sync.team.name,
- stale_team_sync.team.organization.username, sync_config, stale_team_sync.team_id,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
-
- # Load all the existing members of the team in Quay that are bound to the auth service.
- existing_users = model.team.get_federated_team_member_mapping(stale_team_sync.team,
- authentication.federated_service)
-
- logger.debug('Existing membership of %s for team `%s` under organization %s via %s (#%s)',
- len(existing_users), stale_team_sync.team.name,
- stale_team_sync.team.organization.username, sync_config, stale_team_sync.team_id,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
- 'existing_member_count': len(existing_users)})
-
-  # Load all the members of the team from the authentication system.
- (member_iterator, err) = authentication.iterate_group_members(sync_config)
- if err is not None:
- logger.error('Got error when trying to iterate group members with config %s: %s',
- sync_config, err)
- return False
-
- # Collect all the members currently found in the group, adding them to the team as we go
- # along.
- group_membership = set()
- for (member_info, err) in member_iterator:
- if err is not None:
- logger.error('Got error when trying to construct a member: %s', err)
- continue
-
- # If the member is already in the team, nothing more to do.
- if member_info.username in existing_users:
- logger.debug('Member %s already in team `%s` under organization %s via %s (#%s)',
- member_info.username, stale_team_sync.team.name,
- stale_team_sync.team.organization.username, sync_config,
- stale_team_sync.team_id,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
- 'member': member_info.username})
-
- group_membership.add(existing_users[member_info.username])
- continue
-
- # Retrieve the Quay user associated with the member info.
- (quay_user, err) = authentication.get_and_link_federated_user_info(member_info,
- internal_create=True)
- if err is not None:
- logger.error('Could not link external user %s to an internal user: %s',
- member_info.username, err,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
- 'member': member_info.username, 'error': err})
- continue
-
- # Add the user to the membership set.
- group_membership.add(quay_user.id)
-
- # Add the user to the team.
- try:
- logger.info('Adding member %s to team `%s` under organization %s via %s (#%s)',
- quay_user.username, stale_team_sync.team.name,
- stale_team_sync.team.organization.username, sync_config,
- stale_team_sync.team_id,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
- 'member': quay_user.username})
-
- model.team.add_user_to_team(quay_user, stale_team_sync.team)
- except model.UserAlreadyInTeam:
- # If the user is already present, nothing more to do for them.
- pass
-
- # Update the transaction and last_updated time of the team sync. Only if it matches
- # the current value will we then perform the deletion step.
- got_transaction_handle = model.team.update_sync_status(stale_team_sync)
- if not got_transaction_handle:
- # Another worker updated this team. Nothing more to do.
- logger.debug('Another worker synced team `%s` under organization %s via %s (#%s)',
- stale_team_sync.team.name,
- stale_team_sync.team.organization.username, sync_config,
- stale_team_sync.team_id,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
- return True
-
- # Delete any team members not found in the backing auth system.
- logger.debug('Deleting stale members for team `%s` under organization %s via %s (#%s)',
- stale_team_sync.team.name, stale_team_sync.team.organization.username,
- sync_config, stale_team_sync.team_id,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
-
- deleted = model.team.delete_members_not_present(stale_team_sync.team, group_membership)
-
- # Done!
- logger.info('Finishing sync for team `%s` under organization %s via %s (#%s): %s deleted',
- stale_team_sync.team.name, stale_team_sync.team.organization.username,
- sync_config, stale_team_sync.team_id, deleted,
- extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
- return True
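At its core, the deleted sync_team() reconciles two membership sets: the external users reported by the auth system's group and the team's current federated members. A condensed sketch of that reconciliation follows; reconcile(), add_member() and remove_member() are hypothetical stand-ins for the model.team calls used above, and robot accounts, which the real code preserves, are ignored here.

```python
# Illustration only: the set reconciliation at the heart of the deleted sync_team().
def reconcile(existing_users, group_members, add_member, remove_member):
  """ existing_users: dict mapping external username -> Quay user id (current team members).
      group_members: iterable of external usernames reported by the backing auth system.
      add_member(username) links/creates the Quay user, adds it to the team and returns its id.
      remove_member(user_id) removes a stale member from the team. """
  kept_ids = set()
  for username in group_members:
    if username in existing_users:
      # Already in the team; just mark the member as still valid.
      kept_ids.add(existing_users[username])
    else:
      kept_ids.add(add_member(username))

  # Drop any team member that was not seen in the group during this pass.
  for username, user_id in existing_users.items():
    if user_id not in kept_ids:
      remove_member(user_id)
```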
diff --git a/data/users/test/test_shared.py b/data/users/test/test_shared.py
deleted file mode 100644
index d211fb485..000000000
--- a/data/users/test/test_shared.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import pytest
-
-from mock import patch
-
-from data.database import model
-from data.users.shared import can_create_user
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('open_creation, invite_only, email, has_invite, can_create', [
- # Open user creation => always allowed.
- (True, False, None, False, True),
-
- # Open user creation => always allowed.
- (True, False, 'foo@example.com', False, True),
-
- # Invite only user creation + no invite => disallowed.
- (True, True, None, False, False),
-
- # Invite only user creation + no invite => disallowed.
- (True, True, 'foo@example.com', False, False),
-
- # Invite only user creation + invite => allowed.
- (True, True, 'foo@example.com', True, True),
-
- # No open creation => Disallowed.
- (False, True, 'foo@example.com', False, False),
- (False, True, 'foo@example.com', True, False),
-
- # Blacklisted emails => Disallowed.
- (True, False, 'foo@blacklisted.com', False, False),
- (True, False, 'foo@blacklisted.org', False, False),
- (True, False, 'foo@BlAcKlIsTeD.CoM', False, False), # Verify Capitalization
- (True, False, u'foo@mail.bLacklisted.Com', False, False), # Verify unicode
- (True, False, 'foo@blacklisted.net', False, True), # Avoid False Positives
- (True, False, 'foo@myblacklisted.com', False, True), # Avoid partial domain matches
- (True, False, 'fooATblacklisted.com', False, True), # Ignore invalid email addresses
-])
-@pytest.mark.parametrize('blacklisting_enabled', [True, False])
-def test_can_create_user(open_creation, invite_only, email, has_invite, can_create, blacklisting_enabled, app):
-
- # Mock list of blacklisted domains
- blacklisted_domains = ['blacklisted.com', 'blacklisted.org']
-
- if has_invite:
- inviter = model.user.get_user('devtable')
- team = model.team.get_organization_team('buynlarge', 'owners')
- model.team.add_or_invite_to_team(inviter, team, email=email)
-
- with patch('features.USER_CREATION', open_creation):
- with patch('features.INVITE_ONLY_USER_CREATION', invite_only):
- with patch('features.BLACKLISTED_EMAILS', blacklisting_enabled):
- if email and any(domain in email.lower() for domain in blacklisted_domains) and not blacklisting_enabled:
- can_create = True # blacklisted domains can be used, if blacklisting is disabled
- assert can_create_user(email, blacklisted_domains) == can_create
diff --git a/data/users/test/test_teamsync.py b/data/users/test/test_teamsync.py
deleted file mode 100644
index 470c31707..000000000
--- a/data/users/test/test_teamsync.py
+++ /dev/null
@@ -1,332 +0,0 @@
-import os
-
-from datetime import datetime, timedelta
-
-import pytest
-
-from mock import patch
-
-from data import model, database
-from data.users.federated import FederatedUsers, UserInformation
-from data.users.teamsync import sync_team, sync_teams_to_groups
-from test.test_ldap import mock_ldap
-from test.test_keystone_auth import fake_keystone
-from util.names import parse_robot_username
-
-from test.fixtures import *
-
-_FAKE_AUTH = 'fake'
-
-class FakeUsers(FederatedUsers):
- def __init__(self, group_members):
- super(FakeUsers, self).__init__(_FAKE_AUTH, False)
- self.group_tuples = [(m, None) for m in group_members]
-
- def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
- return (self.group_tuples, None)
-
-
-@pytest.fixture(params=[True, False])
-def user_creation(request):
- with patch('features.USER_CREATION', request.param):
- yield
-
-
-@pytest.fixture(params=[True, False])
-def invite_only_user_creation(request):
- with patch('features.INVITE_ONLY_USER_CREATION', request.param):
- yield
-
-
-@pytest.fixture(params=[True, False])
-def blacklisted_emails(request):
- mock_blacklisted_domains = {'BLACKLISTED_EMAIL_DOMAINS': ['blacklisted.com', 'blacklisted.net']}
- with patch('features.BLACKLISTED_EMAILS', request.param):
- with patch.dict('data.model.config.app_config', mock_blacklisted_domains):
- yield
-
-
-@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('postgres') >= 0,
- reason="Postgres fails when existing members are added under the savepoint")
-@pytest.mark.parametrize('starting_membership,group_membership,expected_membership', [
- # Empty team + single member in group => Single member in team.
- ([],
- [
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- ],
- ['someuser']),
-
- # Team with a Quay user + empty group => empty team.
- ([('someuser', None)],
- [],
- []),
-
- # Team with an existing external user + user is in the group => no changes.
- ([
- ('someuser', 'someuser'),
- ],
- [
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- ],
- ['someuser']),
-
- # Team with an existing external user (with a different Quay username) + user is in the group.
- # => no changes
- ([
- ('anotherquayname', 'someuser'),
- ],
- [
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- ],
- ['someuser']),
-
- # Team missing a few members that are in the group => members added.
- ([('someuser', 'someuser')],
- [
- UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- UserInformation('thirduser', 'thirduser', 'thirduser@devtable.com'),
- ],
- ['anotheruser', 'someuser', 'thirduser']),
-
- # Team has a few extra members no longer in the group => members removed.
- ([
- ('anotheruser', 'anotheruser'),
- ('someuser', 'someuser'),
- ('thirduser', 'thirduser'),
- ('nontestuser', None),
- ],
- [
- UserInformation('thirduser', 'thirduser', 'thirduser@devtable.com'),
- ],
- ['thirduser']),
-
- # Team has different membership than the group => members added and removed.
- ([
- ('anotheruser', 'anotheruser'),
- ('someuser', 'someuser'),
- ('nontestuser', None),
- ],
- [
- UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
- UserInformation('missinguser', 'missinguser', 'missinguser@devtable.com'),
- ],
- ['anotheruser', 'missinguser']),
-
- # Team has same membership but some robots => robots remain and no other changes.
- ([
- ('someuser', 'someuser'),
- ('buynlarge+anotherbot', None),
- ('buynlarge+somerobot', None),
- ],
- [
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- ],
- ['someuser', 'buynlarge+somerobot', 'buynlarge+anotherbot']),
-
- # Team has an extra member and some robots => member removed and robots remain.
- ([
- ('someuser', 'someuser'),
- ('buynlarge+anotherbot', None),
- ('buynlarge+somerobot', None),
- ],
- [
- # No members.
- ],
- ['buynlarge+somerobot', 'buynlarge+anotherbot']),
-
- # Team has a different member and some robots => member changed and robots remain.
- ([
- ('someuser', 'someuser'),
- ('buynlarge+anotherbot', None),
- ('buynlarge+somerobot', None),
- ],
- [
- UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
- ],
- ['anotheruser', 'buynlarge+somerobot', 'buynlarge+anotherbot']),
-
- # Team with an existing external user (with a different Quay username) + user is in the group.
- # => no changes and robots remain.
- ([
- ('anotherquayname', 'someuser'),
- ('buynlarge+anotherbot', None),
- ],
- [
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- ],
- ['someuser', 'buynlarge+anotherbot']),
-
- # Team which returns the same member twice, as pagination in some engines (like LDAP) is not
- # stable.
- ([],
- [
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
- UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
- ],
- ['anotheruser', 'someuser']),
-])
-def test_syncing(user_creation, invite_only_user_creation, starting_membership, group_membership,
- expected_membership, blacklisted_emails, app):
- org = model.organization.get_organization('buynlarge')
-
- # Necessary for the fake auth entries to be created in FederatedLogin.
- database.LoginService.create(name=_FAKE_AUTH)
-
- # Assert the team is empty, so we have a clean slate.
- sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced')
- assert len(list(model.team.list_team_users(sync_team_info.team))) == 0
-
- # Add the existing starting members to the team.
- for starting_member in starting_membership:
- (quay_username, fakeauth_username) = starting_member
- if '+' in quay_username:
- # Add a robot.
- (_, shortname) = parse_robot_username(quay_username)
- robot, _ = model.user.create_robot(shortname, org)
- model.team.add_user_to_team(robot, sync_team_info.team)
- else:
- email = quay_username + '@devtable.com'
-
- if fakeauth_username is None:
- quay_user = model.user.create_user_noverify(quay_username, email)
- else:
- quay_user = model.user.create_federated_user(quay_username, email, _FAKE_AUTH,
- fakeauth_username, False)
-
- model.team.add_user_to_team(quay_user, sync_team_info.team)
-
- # Call syncing on the team.
- fake_auth = FakeUsers(group_membership)
- assert sync_team(fake_auth, sync_team_info)
-
- # Ensure the last updated time and transaction_id's have changed.
- updated_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
- assert updated_sync_info.last_updated is not None
- assert updated_sync_info.transaction_id != sync_team_info.transaction_id
-
- users_expected = set([name for name in expected_membership if '+' not in name])
- robots_expected = set([name for name in expected_membership if '+' in name])
- assert len(users_expected) + len(robots_expected) == len(expected_membership)
-
- # Check that the team's users match those expected.
- service_user_map = model.team.get_federated_team_member_mapping(sync_team_info.team,
- _FAKE_AUTH)
- assert set(service_user_map.keys()) == users_expected
-
- quay_users = model.team.list_team_users(sync_team_info.team)
- assert len(quay_users) == len(users_expected)
-
- for quay_user in quay_users:
- fakeauth_record = model.user.lookup_federated_login(quay_user, _FAKE_AUTH)
- assert fakeauth_record is not None
- assert fakeauth_record.service_ident in users_expected
- assert service_user_map[fakeauth_record.service_ident] == quay_user.id
-
- # Check that the team's robots match those expected.
- robots_found = set([r.username for r in model.team.list_team_robots(sync_team_info.team)])
- assert robots_expected == robots_found
-
-
-def test_sync_teams_to_groups(user_creation, invite_only_user_creation, blacklisted_emails, app):
- # Necessary for the fake auth entries to be created in FederatedLogin.
- database.LoginService.create(name=_FAKE_AUTH)
-
- # Assert the team has not yet been updated.
- sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced')
- assert sync_team_info.last_updated is None
-
- # Call to sync all teams.
- fake_auth = FakeUsers([])
- sync_teams_to_groups(fake_auth, timedelta(seconds=1))
-
- # Ensure the team was synced.
- updated_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
- assert updated_sync_info.last_updated is not None
- assert updated_sync_info.transaction_id != sync_team_info.transaction_id
-
- # Set the stale threshold to a high amount and ensure the team is not resynced.
- current_info = model.team.get_team_sync_information('buynlarge', 'synced')
- current_info.last_updated = datetime.now() - timedelta(seconds=2)
- current_info.save()
-
- sync_teams_to_groups(fake_auth, timedelta(seconds=120))
-
- third_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
- assert third_sync_info.transaction_id == updated_sync_info.transaction_id
-
- # Set the stale threshold to 10 seconds, and ensure the team is resynced, after making it
- # "updated" 20s ago.
- current_info = model.team.get_team_sync_information('buynlarge', 'synced')
- current_info.last_updated = datetime.now() - timedelta(seconds=20)
- current_info.save()
-
- sync_teams_to_groups(fake_auth, timedelta(seconds=10))
-
- fourth_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
- assert fourth_sync_info.transaction_id != updated_sync_info.transaction_id
-
-
-@pytest.mark.parametrize('auth_system_builder,config', [
- (mock_ldap, {'group_dn': 'cn=AwesomeFolk'}),
- (fake_keystone, {'group_id': 'somegroupid'}),
-])
-def test_teamsync_end_to_end(user_creation, invite_only_user_creation, auth_system_builder, config,
- blacklisted_emails, app):
- with auth_system_builder() as auth:
-    # Create a new team to sync.
- org = model.organization.get_organization('buynlarge')
- new_synced_team = model.team.create_team('synced2', org, 'member', 'Some synced team.')
- sync_team_info = model.team.set_team_syncing(new_synced_team, auth.federated_service, config)
-
- # Sync the team.
- assert sync_team(auth, sync_team_info)
-
- # Ensure we now have members.
- msg = 'Auth system: %s' % auth.federated_service
- sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced2')
- team_members = list(model.team.list_team_users(sync_team_info.team))
- assert len(team_members) > 1, msg
-
- it, _ = auth.iterate_group_members(config)
- assert len(team_members) == len(list(it)), msg
-
- sync_team_info.last_updated = datetime.now() - timedelta(hours=6)
- sync_team_info.save()
-
- # Remove one of the members and force a sync again to ensure we re-link the correct users.
- first_member = team_members[0]
- model.team.remove_user_from_team('buynlarge', 'synced2', first_member.username, 'devtable')
-
- team_members2 = list(model.team.list_team_users(sync_team_info.team))
- assert len(team_members2) == 1, msg
- assert sync_team(auth, sync_team_info)
-
- team_members3 = list(model.team.list_team_users(sync_team_info.team))
- assert len(team_members3) > 1, msg
- assert set([m.id for m in team_members]) == set([m.id for m in team_members3])
-
-
-@pytest.mark.parametrize('auth_system_builder,config', [
- (mock_ldap, {'group_dn': 'cn=AwesomeFolk'}),
- (fake_keystone, {'group_id': 'somegroupid'}),
-])
-def test_teamsync_existing_email(user_creation, invite_only_user_creation, auth_system_builder,
- blacklisted_emails, config, app):
- with auth_system_builder() as auth:
-    # Create a new team to sync.
- org = model.organization.get_organization('buynlarge')
- new_synced_team = model.team.create_team('synced2', org, 'member', 'Some synced team.')
- sync_team_info = model.team.set_team_syncing(new_synced_team, auth.federated_service, config)
-
- # Add a new *unlinked* user with the same email address as one of the team members.
- it, _ = auth.iterate_group_members(config)
- members = list(it)
- model.user.create_user_noverify('someusername', members[0][0].email)
-
- # Sync the team and ensure it doesn't fail.
- assert sync_team(auth, sync_team_info)
-
- team_members = list(model.team.list_team_users(sync_team_info.team))
- assert len(team_members) > 0
diff --git a/data/users/test/test_users.py b/data/users/test/test_users.py
deleted file mode 100644
index 81f6660bd..000000000
--- a/data/users/test/test_users.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import pytest
-
-from contextlib import contextmanager
-from mock import patch
-
-from data.database import model
-from data.users.federated import DISABLED_MESSAGE
-from test.test_ldap import mock_ldap
-from test.test_keystone_auth import fake_keystone
-from test.test_external_jwt_authn import fake_jwt
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('auth_system_builder, user1, user2', [
- (mock_ldap, ('someuser', 'somepass'), ('testy', 'password')),
- (fake_keystone, ('cool.user', 'password'), ('some.neat.user', 'foobar')),
-])
-def test_auth_createuser(auth_system_builder, user1, user2, config, app):
- with auth_system_builder() as auth:
- # Login as a user and ensure a row in the database is created for them.
- user, err = auth.verify_and_link_user(*user1)
- assert err is None
- assert user
-
- federated_info = model.user.lookup_federated_login(user, auth.federated_service)
- assert federated_info is not None
-
- # Disable user creation.
- with patch('features.USER_CREATION', False):
- # Ensure that the existing user can login.
- user_again, err = auth.verify_and_link_user(*user1)
- assert err is None
- assert user_again.id == user.id
-
- # Ensure that a new user cannot.
- new_user, err = auth.verify_and_link_user(*user2)
- assert new_user is None
- assert err == DISABLED_MESSAGE
-
-
-@pytest.mark.parametrize(
- 'email, blacklisting_enabled, can_create',
- [
- # Blacklisting Enabled, Blacklisted Domain => Blocked
- ('foo@blacklisted.net', True, False),
- ('foo@blacklisted.com', True, False),
-
- # Blacklisting Enabled, similar to blacklisted domain => Allowed
- ('foo@notblacklisted.com', True, True),
- ('foo@blacklisted.org', True, True),
-
- # Blacklisting *Disabled*, Blacklisted Domain => Allowed
- ('foo@blacklisted.com', False, True),
- ('foo@blacklisted.net', False, True),
- ]
-)
-@pytest.mark.parametrize('auth_system_builder', [mock_ldap, fake_keystone, fake_jwt])
-def test_createuser_with_blacklist(auth_system_builder, email, blacklisting_enabled, can_create, config, app):
- """Verify email blacklisting with User Creation"""
-
- MOCK_CONFIG = {'BLACKLISTED_EMAIL_DOMAINS': ['blacklisted.com', 'blacklisted.net']}
- MOCK_PASSWORD = 'somepass'
-
- with auth_system_builder() as auth:
- with patch('features.BLACKLISTED_EMAILS', blacklisting_enabled):
- with patch.dict('data.model.config.app_config', MOCK_CONFIG):
- with patch('features.USER_CREATION', True):
- new_user, err = auth.verify_and_link_user(email, MOCK_PASSWORD)
- if can_create:
- assert err is None
- assert new_user
- else:
- assert err
- assert new_user is None
-
-
-@pytest.mark.parametrize('auth_system_builder,auth_kwargs', [
- (mock_ldap, {}),
- (fake_keystone, {'version': 3}),
- (fake_keystone, {'version': 2}),
- (fake_jwt, {}),
-])
-def test_ping(auth_system_builder, auth_kwargs, app):
- with auth_system_builder(**auth_kwargs) as auth:
- status, err = auth.ping()
- assert status
- assert err is None
-
-
-@pytest.mark.parametrize('auth_system_builder,auth_kwargs', [
- (mock_ldap, {}),
- (fake_keystone, {'version': 3}),
- (fake_keystone, {'version': 2}),
-])
-def test_at_least_one_user_exists(auth_system_builder, auth_kwargs, app):
- with auth_system_builder(**auth_kwargs) as auth:
- status, err = auth.at_least_one_user_exists()
- assert status
- assert err is None
diff --git a/deploy/README.md b/deploy/README.md
deleted file mode 100644
index 6885fa85f..000000000
--- a/deploy/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Quay Deployment Manifests for Kubernetes/OpenShift
-
-OpenShift deployments should use the Quay Setup Operator. Manifests are provided here for manual deployment or for situations where the Operator cannot be used.
-
-Instructions for Deploying on OpenShift
-
-
-
diff --git a/deploy/k8s/clair-config.yaml b/deploy/k8s/clair-config.yaml
deleted file mode 100644
index 6c69579db..000000000
--- a/deploy/k8s/clair-config.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-clair:
- database:
- type: pgsql
- options:
- # Check that the database options match those set earlier in postgres-clair-deployment.yaml.
- source: host=postgres-clair port=5432 dbname=clair user=clair password=test123 sslmode=disable
- cachesize: 16384
- api:
- # The port at which Clair will report its health status. For example, if Clair is running at
- # https://clair.mycompany.com, the health will be reported at
- # http://clair.mycompany.com:6061/health.
- healthport: 6061
-
- port: 6062
- timeout: 900s
-
- # paginationkey can be any random set of characters. *Must be the same across all Clair
- # instances*.
- paginationkey: "XxoPtCUzrUv4JV5dS+yQ+MdW7yLEJnRMwigVY/bpgtQ="
-
- updater:
- # interval defines how often Clair will check for updates from its upstream vulnerability databases.
- interval: 6h
- notifier:
- attempts: 3
- renotifyinterval: 1h
- http:
- # QUAY_ENDPOINT defines the endpoint at which Quay Enterprise is running.
- # For example: https://myregistry.mycompany.com
- endpoint: http://quay-enterprise-clusterip/secscan/notify
- proxy: http://localhost:6063
-
-jwtproxy:
- signer_proxy:
- enabled: true
- listen_addr: :6063
- ca_key_file: /certificates/mitm.key # Generated internally, do not change.
- ca_crt_file: /certificates/mitm.crt # Generated internally, do not change.
- signer:
- issuer: security_scanner
- expiration_time: 5m
- max_skew: 1m
- nonce_length: 32
- private_key:
- type: preshared
- options:
- # The ID of the service key generated for Clair. The ID is returned when setting up
- # the key in [Quay Enterprise Setup](security-scanning.md)
- key_id: cd40f1c6a63f574c68ce882258925374882fac2b2f535ae5f8157c429e0c4b2e
- private_key_path: /clair/config/security_scanner.pem
-
- verifier_proxies:
- - enabled: true
- # The port at which Clair will listen.
- listen_addr: :6060
-
- # If Clair is to be served via TLS, uncomment these lines. See the "Running Clair under TLS"
- # section below for more information.
- # key_file: /config/clair.key
- # crt_file: /config/clair.crt
-
- verifier:
- # CLAIR_ENDPOINT is the endpoint at which this Clair will be accessible. Note that the port
- # specified here must match the listen_addr port a few lines above this.
- # Example: https://myclair.mycompany.com:6060
- audience: http://clair-service:6060
-
- upstream: http://localhost:6062
- key_server:
- type: keyregistry
- options:
- # QUAY_ENDPOINT defines the endpoint at which Quay Enterprise is running.
- # Example: https://myregistry.mycompany.com
- registry: http://quay-enterprise-clusterip/keys/
diff --git a/deploy/k8s/clair-deployment.yaml b/deploy/k8s/clair-deployment.yaml
deleted file mode 100644
index 994c3770c..000000000
--- a/deploy/k8s/clair-deployment.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- labels:
- quay-enterprise-component: clair-scanner
- name: clair-scanner
- namespace: quay-enterprise
-spec:
- replicas: 1
- selector:
- matchLabels:
- quay-enterprise-component: clair-scanner
- template:
- metadata:
- labels:
- quay-enterprise-component: clair-scanner
- namespace: quay-enterprise
- spec:
- containers:
- - image: quay.io/redhat/clair-jwt:v3.0.4
- imagePullPolicy: IfNotPresent
- name: clair-scanner
- ports:
- - containerPort: 6060
- name: clair-api
- protocol: TCP
- - containerPort: 6061
- name: clair-health
- protocol: TCP
- volumeMounts:
- - mountPath: /clair/config
- name: configvolume
- imagePullSecrets:
- - name: redhat-quay-pull-secret
- restartPolicy: Always
- volumes:
- - name: configvolume
- secret:
- secretName: clair-scanner-config-secret
\ No newline at end of file
diff --git a/deploy/k8s/clair-service.yaml b/deploy/k8s/clair-service.yaml
deleted file mode 100644
index cfb8c0cb4..000000000
--- a/deploy/k8s/clair-service.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: clair-service
- namespace: quay-enterprise
-spec:
- ports:
- - name: clair-api
- port: 6060
- protocol: TCP
- targetPort: 6060
- - name: clair-health
- port: 6061
- protocol: TCP
- targetPort: 6061
- selector:
- quay-enterprise-component: clair-scanner
- type: ClusterIP
\ No newline at end of file
diff --git a/deploy/k8s/db-pvc.yaml b/deploy/k8s/db-pvc.yaml
deleted file mode 100644
index 30a9f1d56..000000000
--- a/deploy/k8s/db-pvc.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: postgres-storage
- namespace: quay-enterprise
-spec:
- accessModes:
- - ReadWriteOnce
- volumeMode: Filesystem
- resources:
- requests:
- # The 10Gi creates 10 gigabytes of storage for use by the Postgres database.
- storage: 10Gi
- storageClassName: quay-storageclass
diff --git a/deploy/k8s/postgres-clair-deployment.yaml b/deploy/k8s/postgres-clair-deployment.yaml
deleted file mode 100644
index 66cd62c66..000000000
--- a/deploy/k8s/postgres-clair-deployment.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- labels:
- app: postgres-clair
- name: postgres-clair
- namespace: quay-enterprise
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: postgres-clair
- template:
- metadata:
- labels:
- app: postgres-clair
- spec:
- containers:
- - env:
- - name: POSTGRESQL_USER
- # Set the username for the Clair postgres database (clair by default)
- value: clair
- - name: POSTGRESQL_DATABASE
- # Set the name of the Clair postgres database
- value: clair
- - name: POSTGRESQL_PASSWORD
-          # Set the password for the Clair postgres user
- value: test123
- image: registry.access.redhat.com/rhscl/postgresql-10-rhel7:1-35
- imagePullPolicy: IfNotPresent
- name: postgres-clair
- ports:
- - containerPort: 5432
- protocol: TCP
- volumeMounts:
- - mountPath: /var/lib/pgsql/data
- name: postgredb
- serviceAccount: postgres
- serviceAccountName: postgres
- volumes:
- - name: postgredb
- persistentVolumeClaim:
- claimName: postgres-clair-storage
\ No newline at end of file
diff --git a/deploy/k8s/postgres-clair-service.yaml b/deploy/k8s/postgres-clair-service.yaml
deleted file mode 100644
index a44ee33de..000000000
--- a/deploy/k8s/postgres-clair-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- app: postgres-clair
- name: postgres-clair
- namespace: quay-enterprise
-spec:
- ports:
- - nodePort: 30680
- port: 5432
- protocol: TCP
- targetPort: 5432
- selector:
- app: postgres-clair
- type: NodePort
\ No newline at end of file
diff --git a/deploy/k8s/postgres-clair-storage.yaml b/deploy/k8s/postgres-clair-storage.yaml
deleted file mode 100644
index ba941ffce..000000000
--- a/deploy/k8s/postgres-clair-storage.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: postgres-clair-storage
- namespace: quay-enterprise
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 5Gi
- storageClassName: quay-storageclass
\ No newline at end of file
diff --git a/deploy/k8s/postgres-deployment.yaml b/deploy/k8s/postgres-deployment.yaml
deleted file mode 100644
index c2f38bb1f..000000000
--- a/deploy/k8s/postgres-deployment.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: postgres
- namespace: quay-enterprise
-spec:
- # Only one instance of the postgres database is defined here. Adjust replicas based on demand.
- replicas: 1
- template:
- metadata:
- labels:
- app: postgres
- spec:
- containers:
- - name: postgres
- image: registry.access.redhat.com/rhscl/postgresql-10-rhel7:1-35
- imagePullPolicy: "IfNotPresent"
- ports:
- - containerPort: 5432
- env:
- - name: POSTGRESQL_USER
- # Replace "username" with a name for your Postgres user
- value: "username"
- - name: POSTGRESQL_DATABASE
-          # Replace "quay" with a name for your Postgres database
- value: "quay"
- - name: POSTGRESQL_PASSWORD
- value: "password"
- volumeMounts:
- - mountPath: /var/lib/pgsql/data
- name: postgredb
- serviceAccount: postgres
- serviceAccountName: postgres
- volumes:
- - name: postgredb
- persistentVolumeClaim:
- claimName: postgres-storage
\ No newline at end of file
diff --git a/deploy/k8s/postgres-service.yaml b/deploy/k8s/postgres-service.yaml
deleted file mode 100644
index 898123c43..000000000
--- a/deploy/k8s/postgres-service.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: postgres
- namespace: quay-enterprise
- labels:
- app: postgres
-spec:
- type: NodePort
- ports:
- - port: 5432
- selector:
- app: postgres
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-app-rc.yaml b/deploy/k8s/quay-enterprise-app-rc.yaml
deleted file mode 100644
index f65bf6045..000000000
--- a/deploy/k8s/quay-enterprise-app-rc.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-app
- labels:
- quay-enterprise-component: app
-spec:
- # Only one instance of the quay container is defined here. Adjust replicas based on demand.
- replicas: 1
- selector:
- matchLabels:
- quay-enterprise-component: app
- template:
- metadata:
- namespace: quay-enterprise
- labels:
- quay-enterprise-component: app
- spec:
- volumes:
- - name: configvolume
- secret:
- secretName: quay-enterprise-secret
- containers:
- - name: quay-enterprise-app
- # Pick the Quay version you wish to run
- image: quay.io/redhat/quay:v3.1.0
- ports:
- - containerPort: 8443
- volumeMounts:
- - name: configvolume
- readOnly: false
- mountPath: /conf/stack
- imagePullSecrets:
- - name: redhat-quay-pull-secret
diff --git a/deploy/k8s/quay-enterprise-config-secret.yaml b/deploy/k8s/quay-enterprise-config-secret.yaml
deleted file mode 100644
index 1b74c4fa3..000000000
--- a/deploy/k8s/quay-enterprise-config-secret.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-config-secret
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-config-service-clusterip.yaml b/deploy/k8s/quay-enterprise-config-service-clusterip.yaml
deleted file mode 100644
index 4f6a90b34..000000000
--- a/deploy/k8s/quay-enterprise-config-service-clusterip.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-config
-spec:
- type: ClusterIP
- ports:
- - protocol: TCP
- name: https
- port: 443
- targetPort: 8443
- selector:
- quay-enterprise-component: config-app
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-config.yaml b/deploy/k8s/quay-enterprise-config.yaml
deleted file mode 100644
index 4b6647ec9..000000000
--- a/deploy/k8s/quay-enterprise-config.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-config-app
- labels:
- quay-enterprise-component: config-app
-spec:
- replicas: 1
- selector:
- matchLabels:
- quay-enterprise-component: config-app
- template:
- metadata:
- namespace: quay-enterprise
- labels:
- quay-enterprise-component: config-app
- spec:
- containers:
- - name: quay-enterprise-config-app
- image: quay.io/redhat/quay:v3.1.0
- ports:
- - containerPort: 8443
- command: ["/quay-registry/quay-entrypoint.sh"]
- args: ["config", "secret"]
- imagePullSecrets:
- - name: redhat-quay-pull-secret
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-mirror.yaml b/deploy/k8s/quay-enterprise-mirror.yaml
deleted file mode 100644
index 86b923d3b..000000000
--- a/deploy/k8s/quay-enterprise-mirror.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-mirror
- labels:
- quay-enterprise-component: mirror-app
-spec:
- replicas: 1
- selector:
- matchLabels:
- quay-enterprise-component: mirror-app
- template:
- metadata:
- namespace: quay-enterprise
- labels:
- quay-enterprise-component: mirror-app
- spec:
- containers:
- - name: quay-enterprise-mirror-app
- image: quay.io/redhat/quay:v3.1.0
- ports:
- - containerPort: 8443
- command: ["/quay-registry/quay-entrypoint.sh"]
- args: ["repomirror"]
- imagePullSecrets:
- - name: redhat-quay-pull-secret
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-namespace.yaml b/deploy/k8s/quay-enterprise-namespace.yaml
deleted file mode 100644
index 9c8b90322..000000000
--- a/deploy/k8s/quay-enterprise-namespace.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-apiVersion: v1
-kind: Namespace
-metadata:
- name: quay-enterprise
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-redhat-quay-pull-secret.yaml b/deploy/k8s/quay-enterprise-redhat-quay-pull-secret.yaml
deleted file mode 100644
index 7fb445ac0..000000000
--- a/deploy/k8s/quay-enterprise-redhat-quay-pull-secret.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
- namespace: quay-enterprise
- name: redhat-quay-pull-secret
-data:
- # Change to include the credentials shown at https://access.redhat.com/solutions/3533201
- .dockerconfigjson:
-type: kubernetes.io/dockerconfigjson
\ No newline at end of file
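Note that the `.dockerconfigjson` value in the deleted secret above is intentionally left blank; it has to be filled with the base64-encoded Docker config JSON built from the credentials on the linked solution page. As a rough illustration only (the registry host and credential placeholders below are hypothetical and not values from this repository), the encoded string could be produced like this:

```
import base64
import json

# Hypothetical placeholders; substitute the registry and credentials from the
# Red Hat solution page before generating the secret.
registry, username, password = 'quay.io', '<accountname>', '<token>'

# Docker config JSON stores "username:password" base64-encoded under "auth".
auth = base64.b64encode('{0}:{1}'.format(username, password).encode()).decode()
docker_config = {'auths': {registry: {'auth': auth}}}

# The printed string is what belongs in data[".dockerconfigjson"].
print(base64.b64encode(json.dumps(docker_config).encode()).decode())
```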
diff --git a/deploy/k8s/quay-enterprise-redis.yaml b/deploy/k8s/quay-enterprise-redis.yaml
deleted file mode 100644
index 2a77f41d4..000000000
--- a/deploy/k8s/quay-enterprise-redis.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-redis
- labels:
- quay-enterprise-component: redis
-spec:
- # Only one instance of the redis database is defined here. Adjust replicas based on demand.
- replicas: 1
- selector:
- matchLabels:
- quay-enterprise-component: redis
- template:
- metadata:
- namespace: quay-enterprise
- labels:
- quay-enterprise-component: redis
- spec:
- containers:
- - name: redis-master
- image: registry.access.redhat.com/rhscl/redis-32-rhel7
- imagePullPolicy: "IfNotPresent"
- ports:
- - containerPort: 6379
----
-apiVersion: v1
-kind: Service
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-redis
- labels:
- quay-enterprise-component: redis
-spec:
- ports:
- - port: 6379
- selector:
- quay-enterprise-component: redis
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-service-clusterip.yaml b/deploy/k8s/quay-enterprise-service-clusterip.yaml
deleted file mode 100644
index 7ae0e3d84..000000000
--- a/deploy/k8s/quay-enterprise-service-clusterip.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- namespace: quay-enterprise
- name: quay-enterprise-clusterip
-spec:
- type: ClusterIP
- ports:
- - protocol: TCP
- name: https
- port: 443
- targetPort: 8443
- selector:
- quay-enterprise-component: app
\ No newline at end of file
diff --git a/deploy/k8s/quay-servicetoken-role-binding-k8s1-6.yaml b/deploy/k8s/quay-servicetoken-role-binding-k8s1-6.yaml
deleted file mode 100644
index a882af64b..000000000
--- a/deploy/k8s/quay-servicetoken-role-binding-k8s1-6.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: RoleBinding
-metadata:
- name: quay-enterprise-secret-writer
- namespace: quay-enterprise
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: quay-enterprise-serviceaccount
-subjects:
-- kind: ServiceAccount
- name: default
\ No newline at end of file
diff --git a/deploy/k8s/quay-servicetoken-role-k8s1-6.yaml b/deploy/k8s/quay-servicetoken-role-k8s1-6.yaml
deleted file mode 100644
index 0dc5de79c..000000000
--- a/deploy/k8s/quay-servicetoken-role-k8s1-6.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: Role
-metadata:
- name: quay-enterprise-serviceaccount
- namespace: quay-enterprise
-rules:
-- apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - put
- - patch
- - update
-- apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
-- apiGroups:
- - extensions
- - apps
- resources:
- - deployments
- verbs:
- - get
- - list
- - patch
- - update
- - watch
\ No newline at end of file
diff --git a/deploy/k8s/quay-storageclass.yaml b/deploy/k8s/quay-storageclass.yaml
deleted file mode 100644
index 44cdbaced..000000000
--- a/deploy/k8s/quay-storageclass.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: quay-storageclass
-parameters:
- type: gp2
- # Uncomment the following lines to encrypt the volume (AWS EBS example with xfs shown)
- #encrypted: "true"
- #fsType: xfs
- #kmsKeyId:
-provisioner: kubernetes.io/aws-ebs
-reclaimPolicy: Delete
\ No newline at end of file
diff --git a/deploy/openshift/quay-app.yaml b/deploy/openshift/quay-app.yaml
deleted file mode 100644
index 08780b317..000000000
--- a/deploy/openshift/quay-app.yaml
+++ /dev/null
@@ -1,219 +0,0 @@
----
-apiVersion: v1
-kind: Template
-metadata:
- name: quay
-objects:
-- apiVersion: rbac.authorization.k8s.io/v1beta1
- kind: Role
- metadata:
- name: ${{NAME}}
- rules:
- - apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - patch
- - update
- - apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
- - apiGroups:
- - extensions
- - apps
- resources:
- - deployments
- verbs:
- - get
- - list
- - patch
- - update
- - watch
-- apiVersion: rbac.authorization.k8s.io/v1beta1
- kind: RoleBinding
- metadata:
- name: ${{NAME}}
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: ${{NAME}}
- subjects:
- - kind: ServiceAccount
- name: default
-- apiVersion: v1
- kind: Service
- metadata:
- name: quay-clusterip-service
- spec:
- type: ClusterIP
- ports:
- - protocol: TCP
- name: clusterip
- port: ${{CLUSTERIP_SERVICE_PORT}}
- targetPort: ${{CLUSTERIP_SERVICE_TARGET_PORT}}
- selector:
- ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
-- apiVersion: v1
- kind: Service
- metadata:
- name: quay-loadbalancer-service
- spec:
- ports:
- - name: loadbalancer
- protocol: TCP
- port: ${{LOADBALANCER_SERVICE_PORT}}
- targetPort: ${{LOADBALANCER_SERVICE_TARGET_PORT}}
- loadBalancerIP:
- type: LoadBalancer
- selector:
- ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
-- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: quay-app
- labels:
- ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
- spec:
- replicas: ${{QUAY_APP_DEPLOYMENT_REPLICAS}}
- minReadySeconds: ${{QUAY_APP_DEPLOYMENT_MIN_READY_SECONDS}}
- progressDeadlineSeconds: ${{QUAY_APP_DEPLOYMENT_PROGRESS_DEADLINE_SECONDS}}
- revisionHistoryLimit: ${{QUAY_APP_DEPLOYMENT_REVISION_HISTORY_LIMITS}}
- strategy:
- type: ${{QUAY_APP_DEPLOYMENT_STRATEGY_TYPE}}
- rollingUpdate:
- maxUnavailable: ${{QUAY_APP_DEPLOYMENT_MAX_UNAVAILABLE}}
- maxSurge: ${{QUAY_APP_DEPLOYMENT_MAX_SURGE}}
- selector:
- matchLabels:
- ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
- template:
- metadata:
- labels:
- ${{QUAY_APP_COMPONENT_LABEL_KEY}}: ${{QUAY_APP_COMPONENT_LABEL_VALUE}}
- spec:
- volumes:
- - name: configvolume
- secret:
- secretName: ${{QUAY_APP_CONFIG_SECRET}}
- containers:
- - name: quay-app
- image: ${IMAGE}:${IMAGE_TAG}
- imagePullPolicy: Always
- ports:
- - containerPort: 8443
- volumeMounts:
- - name: configvolume
- readOnly: false
- mountPath: /conf/stack
- livenessProbe:
- httpGet:
- path: /health/instance
- port: 8443
- initialDelaySeconds: ${{QUAY_APP_LIVENESS_PROBE_INITIAL_DELAY_SECONDS}}
- periodSeconds: ${{QUAY_APP_LIVENESS_PROBE_PERIOD_SECONDS}}
- timeoutSeconds: ${{QUAY_APP_LIVENESS_PROBE_TIMEOUT_SECONDS}}
- readinessProbe:
- httpGet:
- path: /health/endtoend
- port: 8443
- initialDelaySeconds: ${{QUAY_APP_READINESS_PROBE_INITIAL_DELAY_SECONDS}}
- periodSeconds: ${{QUAY_APP_READINESS_PROBE_PERIOD_SECONDS}}
- timeoutSeconds: ${{QUAY_APP_READINESS_PROBE_TIMEOUT_SECONDS}}
- resources:
- limits:
- cpu: ${{QUAY_APP_CPU_LIMIT}}
- memory: ${{QUAY_APP_MEMORY_LIMIT}}
- requests:
- cpu: ${{QUAY_APP_CPU_REQUEST}}
- memory: ${{QUAY_APP_MEMORY_REQUEST}}
-parameters:
- - name: NAME
- value: "quay"
- displayName: name
- description: Defaults to quay.
- - name: IMAGE
- value: "quay.io/app-sre/quay"
- displayName: quay image
- description: quay docker image. Defaults to quay.io/app-sre/quay.
- - name: IMAGE_TAG
- value: "latest"
- displayName: quay version
- description: quay version which defaults to latest
- - name: CLUSTERIP_SERVICE_PORT
- value: "443"
- displayName: clusterip service port
- - name: CLUSTERIP_SERVICE_TARGET_PORT
- value: "8443"
- displayName: clusterip service target port
- - name: QUAY_APP_COMPONENT_LABEL_KEY
- value: "quay-component"
- displayName: quay app selector label
- - name: QUAY_APP_COMPONENT_LABEL_VALUE
- value: "app"
- displayName: quay app selector label value
- - name: LOADBALANCER_SERVICE_PORT
- value: "443"
- displayName: loadbalancer service port
- - name: LOADBALANCER_SERVICE_TARGET_PORT
- value: "8443"
- displayName: loadbalancer service target port
- - name: QUAY_APP_CONFIG_SECRET
- value: "quay-config-secret"
- displayName: quay app config secret
- - name: QUAY_APP_DEPLOYMENT_REPLICAS
- value: "1"
- displayName: quay app deployment replicas
- - name: QUAY_APP_MEMORY_REQUEST
- value: "4096Mi"
- displayName: "quay app memory request"
- - name: QUAY_APP_CPU_REQUEST
- value: "1"
- displayName: "quay app CPU request"
- - name: QUAY_APP_MEMORY_LIMIT
- value: "4096Mi"
- displayName: "quay app memory limit"
- - name: QUAY_APP_CPU_LIMIT
- value: "1"
- displayName: "quay app CPU limit"
- - name: QUAY_APP_DEPLOYMENT_MIN_READY_SECONDS
- value: "0"
- displayName: quay app deployment min ready seconds
- - name: QUAY_APP_DEPLOYMENT_PROGRESS_DEADLINE_SECONDS
- value: "600"
- displayName: quay app deployment progress deadline seconds
- - name: QUAY_APP_DEPLOYMENT_REVISION_HISTORY_LIMITS
- value: "10"
- displayName: quay app deployment revision history limits
- - name: QUAY_APP_DEPLOYMENT_STRATEGY_TYPE
- value: "RollingUpdate"
- displayName: quay app deployment strategy
- - name: QUAY_APP_DEPLOYMENT_MAX_SURGE
- value: "1"
- displayName: quay app deployment max surge
- - name: QUAY_APP_DEPLOYMENT_MAX_UNAVAILABLE
- value: "0"
- displayName: quay app deployment max unavailable
- - name: QUAY_APP_LIVENESS_PROBE_INITIAL_DELAY_SECONDS
- value: "15"
- displayName: quay app liveness probe initial delay seconds
- - name: QUAY_APP_LIVENESS_PROBE_PERIOD_SECONDS
- value: "30"
- displayName: quay app liveness probe period seconds
- - name: QUAY_APP_LIVENESS_PROBE_TIMEOUT_SECONDS
- value: "10"
- displayName: quay app liveness probe timeout
- - name: QUAY_APP_READINESS_PROBE_INITIAL_DELAY_SECONDS
- value: "15"
- displayName: quay app readiness probe initial delay seconds
- - name: QUAY_APP_READINESS_PROBE_PERIOD_SECONDS
- value: "30"
- displayName: quay app readiness probe period seconds
- - name: QUAY_APP_READINESS_PROBE_TIMEOUT_SECONDS
- value: "10"
- displayName: quay app readiness probe timeout
-
diff --git a/deploy/openshift/quay-enterprise-app-route.yaml b/deploy/openshift/quay-enterprise-app-route.yaml
deleted file mode 100644
index 0de2dc570..000000000
--- a/deploy/openshift/quay-enterprise-app-route.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-apiVersion: v1
-kind: Route
-metadata:
- name: quay-enterprise
- namespace: quay-enterprise
-spec:
- to:
- kind: Service
- name: quay-enterprise-clusterip
- tls:
- termination: passthrough
\ No newline at end of file
diff --git a/deploy/openshift/quay-enterprise-config-route.yaml b/deploy/openshift/quay-enterprise-config-route.yaml
deleted file mode 100644
index b5ddf7fb9..000000000
--- a/deploy/openshift/quay-enterprise-config-route.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-apiVersion: v1
-kind: Route
-metadata:
- name: quay-enterprise-config
- namespace: quay-enterprise
-spec:
- to:
- kind: Service
- name: quay-enterprise-config
- tls:
- termination: passthrough
\ No newline at end of file
diff --git a/dev.df b/dev.df
index 57db0befa..f99c4cc61 100644
--- a/dev.df
+++ b/dev.df
@@ -1,6 +1,6 @@
# vim:ft=dockerfile
-FROM phusion/baseimage:0.9.18
+FROM phusion/baseimage:0.9.16
ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root
@@ -16,13 +16,6 @@ ADD requirements.txt requirements.txt
RUN virtualenv --distribute venv
RUN venv/bin/pip install -r requirements.txt
-ARG src_subdir
-
-RUN apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D \
- && echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" > /etc/apt/sources.list.d/docker.list \
- && apt-get update \
- && apt-get install -y docker-engine
-
+WORKDIR /src/quay
ENV PYTHONPATH=/
-ENV PATH=/venv/bin:$PATH
-WORKDIR /src/$src_subdir
+ENV PATH=$PATH:/venv/bin
diff --git a/digest/checksums.py b/digest/checksums.py
index dd7c6f2ba..154907823 100644
--- a/digest/checksums.py
+++ b/digest/checksums.py
@@ -1,3 +1,4 @@
+
import hashlib
import logging
import tarfile
@@ -8,91 +9,83 @@ logger = logging.getLogger(__name__)
def sha256_file(fp, data=None):
- h = hashlib.sha256(data or '')
- if not fp:
+ h = hashlib.sha256(data or '')
+ if not fp:
+ return h.hexdigest()
+ while True:
+ buf = fp.read(4096)
+ if not buf:
+ break
+ h.update(buf)
return h.hexdigest()
- while True:
- buf = fp.read(4096)
- if not buf:
- break
- h.update(buf)
- return h.hexdigest()
def sha256_string(s):
- return hashlib.sha256(s).hexdigest()
+ return hashlib.sha256(s).hexdigest()
def compute_tarsum(fp, json_data):
- header_fields = ('name', 'mode', 'uid', 'gid', 'size', 'mtime',
- 'type', 'linkname', 'uname', 'gname', 'devmajor',
- 'devminor')
- tar = None
- hashes = []
- try:
- tar = tarfile.open(mode='r|*', fileobj=fp)
- for member in tar:
- header = ''
- for field in header_fields:
- value = getattr(member, field)
- if field == 'type':
- field = 'typeflag'
- elif field == 'name':
- if member.isdir() and not value.endswith('/'):
- value += '/'
- header += '{0}{1}'.format(field, value)
- h = None
- try:
- if member.size > 0:
- f = tar.extractfile(member)
- h = sha256_file(f, header)
- else:
- h = sha256_string(header)
- except KeyError:
- h = sha256_string(header)
- hashes.append(h)
- hashes.sort()
- except tarfile.ReadError as e:
- if e.message != 'empty file':
- # NOTE(samalba): ignore empty tarfiles but still let the tarsum
- # compute with json data
- raise
- finally:
- if tar:
- tar.close()
- data = json_data + ''.join(hashes)
- tarsum = 'tarsum+sha256:{0}'.format(sha256_string(data))
- logger.debug('checksums.compute_tarsum: return %s', tarsum)
- return tarsum
+ header_fields = ('name', 'mode', 'uid', 'gid', 'size', 'mtime',
+ 'type', 'linkname', 'uname', 'gname', 'devmajor',
+ 'devminor')
+ tar = None
+ hashes = []
+ try:
+ tar = tarfile.open(mode='r|*', fileobj=fp)
+ for member in tar:
+ header = ''
+ for field in header_fields:
+ value = getattr(member, field)
+ if field == 'type':
+ field = 'typeflag'
+ elif field == 'name':
+ if member.isdir() and not value.endswith('/'):
+ value += '/'
+ header += '{0}{1}'.format(field, value)
+ h = None
+ try:
+ if member.size > 0:
+ f = tar.extractfile(member)
+ h = sha256_file(f, header)
+ else:
+ h = sha256_string(header)
+ except KeyError:
+ h = sha256_string(header)
+ hashes.append(h)
+ hashes.sort()
+ except tarfile.ReadError as e:
+ if e.message != 'empty file':
+ # NOTE(samalba): ignore empty tarfiles but still let the tarsum
+ # compute with json data
+ raise
+ finally:
+ if tar:
+ tar.close()
+ data = json_data + ''.join(hashes)
+ tarsum = 'tarsum+sha256:{0}'.format(sha256_string(data))
+ logger.debug('checksums.compute_tarsum: return {0}'.format(tarsum))
+ return tarsum
def simple_checksum_handler(json_data):
- h = hashlib.sha256(json_data.encode('utf8') + '\n')
+ h = hashlib.sha256(json_data + '\n')
- def fn(buf):
- h.update(buf)
- return h, fn
-
-
-def content_checksum_handler():
- h = hashlib.sha256()
-
- def fn(buf):
- h.update(buf)
- return h, fn
+ def fn(buf):
+ h.update(buf)
+ return h, fn
def compute_simple(fp, json_data):
- data = json_data + '\n'
- return 'sha256:{0}'.format(sha256_file(fp, data))
+ data = json_data + '\n'
+ return 'sha256:{0}'.format(sha256_file(fp, data))
if __name__ == '__main__':
- import sys
- if len(sys.argv) < 3:
- print 'Usage: {0} json_file layer_file'.format(sys.argv[0])
- sys.exit(1)
- json_data = file(sys.argv[1]).read()
- fp = open(sys.argv[2])
- print compute_simple(fp, json_data)
- print compute_tarsum(fp, json_data)
+ import sys
+ if len(sys.argv) < 3:
+ print 'Usage: {0} json_file layer_file'.format(sys.argv[0])
+ sys.exit(1)
+ json_data = file(sys.argv[1]).read()
+ fp = open(sys.argv[2])
+ print compute_simple(fp, json_data)
+ print compute_tarsum(fp, json_data)
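The reworked `digest/checksums.py` above keeps the same two entry points: `compute_simple` seeds a SHA-256 hash with the JSON payload plus a trailing newline and then streams the layer file through it in 4 KB chunks, while `compute_tarsum` hashes each tar member's header (and contents) and folds the sorted per-member hashes back into the JSON. A minimal, self-contained sketch of the `compute_simple` path — written Python 3-style with explicit encoding, so the helper names here are illustrative rather than part of the module — looks like this:

```
import hashlib
import io

def simple_digest(json_data, layer_fp, chunk_size=4096):
    # Mirror of compute_simple: seed the hash with the JSON payload plus a
    # trailing newline, then stream the layer file through it in 4 KB chunks.
    h = hashlib.sha256((json_data + '\n').encode('utf-8'))
    while True:
        buf = layer_fp.read(chunk_size)
        if not buf:
            break
        h.update(buf)
    return 'sha256:{0}'.format(h.hexdigest())

# Example: digest an in-memory "layer"; prints a sha256:-prefixed hex digest.
print(simple_digest('{"id": "abc"}', io.BytesIO(b'layer bytes')))
```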
diff --git a/digest/digest_tools.py b/digest/digest_tools.py
index 212088236..efebac831 100644
--- a/digest/digest_tools.py
+++ b/digest/digest_tools.py
@@ -2,57 +2,43 @@ import re
import os.path
import hashlib
+from collections import namedtuple
+
+
+Digest = namedtuple('Digest', ['is_tarsum', 'tarsum_version', 'hash_alg', 'hash_bytes'])
+
+
+DIGEST_PATTERN = r'(tarsum\.(v[\w]+)\+)?([\w]+):([0-9a-f]+)'
+DIGEST_REGEX = re.compile(DIGEST_PATTERN)
-DIGEST_PATTERN = r'([A-Za-z0-9_+.-]+):([A-Fa-f0-9]+)'
-REPLACE_WITH_PATH = re.compile(r'[+.]')
-REPLACE_DOUBLE_SLASHES = re.compile(r'/+')
class InvalidDigestException(RuntimeError):
pass
-class Digest(object):
- DIGEST_REGEX = re.compile(DIGEST_PATTERN)
+def parse_digest(digest):
+ """ Returns the digest parsed out to its components. """
+ match = DIGEST_REGEX.match(digest)
+ if match is None or match.end() != len(digest):
+ raise InvalidDigestException('Not a valid digest: %s', digest)
- def __init__(self, hash_alg, hash_bytes):
- self._hash_alg = hash_alg
- self._hash_bytes = hash_bytes
-
- def __str__(self):
- return '{0}:{1}'.format(self._hash_alg, self._hash_bytes)
-
- def __eq__(self, rhs):
- return isinstance(rhs, Digest) and str(self) == str(rhs)
-
- @staticmethod
- def parse_digest(digest):
- """ Returns the digest parsed out to its components. """
- match = Digest.DIGEST_REGEX.match(digest)
- if match is None or match.end() != len(digest):
- raise InvalidDigestException('Not a valid digest: %s', digest)
-
- return Digest(match.group(1), match.group(2))
-
- @property
- def hash_alg(self):
- return self._hash_alg
-
- @property
- def hash_bytes(self):
- return self._hash_bytes
+ is_tarsum = match.group(1) is not None
+ return Digest(is_tarsum, match.group(2), match.group(3), match.group(4))
def content_path(digest):
""" Returns a relative path to the parsed digest. """
- parsed = Digest.parse_digest(digest)
+ parsed = parse_digest(digest)
components = []
+ if parsed.is_tarsum:
+ components.extend(['tarsum', parsed.tarsum_version])
+
# Generate a prefix which is always two characters, and which will be filled with leading zeros
# if the input does not contain at least two characters. e.g. ABC -> AB, A -> 0A
prefix = parsed.hash_bytes[0:2].zfill(2)
- pathish = REPLACE_WITH_PATH.sub('/', parsed.hash_alg)
- normalized = REPLACE_DOUBLE_SLASHES.sub('/', pathish).lstrip('/')
- components.extend([normalized, prefix, parsed.hash_bytes])
+ components.extend([parsed.hash_alg, prefix, parsed.hash_bytes])
+
return os.path.join(*components)
@@ -72,11 +58,7 @@ def sha256_digest_from_generator(content_generator):
return 'sha256:{0}'.format(digest.hexdigest())
-def sha256_digest_from_hashlib(sha256_hash_obj):
- return 'sha256:{0}'.format(sha256_hash_obj.hexdigest())
-
-
def digests_equal(lhs_digest_string, rhs_digest_string):
""" Parse and compare the two digests, returns True if the digests are equal, False otherwise.
"""
- return Digest.parse_digest(lhs_digest_string) == Digest.parse_digest(rhs_digest_string)
+ return parse_digest(lhs_digest_string) == parse_digest(rhs_digest_string)
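With the class-based `Digest` replaced by a `namedtuple` and a module-level `parse_digest`, callers can now destructure the parsed digest directly, and `digests_equal` compares the parsed tuples rather than `Digest` instances. A short usage sketch against the new API (an illustration, not code added by this change) would be:

```
from digest.digest_tools import parse_digest, digests_equal

# The namedtuple can be unpacked directly at the call site.
is_tarsum, tarsum_version, hash_alg, hash_bytes = parse_digest('tarsum.v1+sha256:deadbeef')
assert (is_tarsum, tarsum_version, hash_alg, hash_bytes) == (True, 'v1', 'sha256', 'deadbeef')

# Plain digests simply leave the tarsum fields empty.
assert parse_digest('sha256:deadbeef').is_tarsum is False
assert digests_equal('sha256:deadbeef', 'sha256:deadbeef')
```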
diff --git a/digest/test/test_digest_tools.py b/digest/test/test_digest_tools.py
deleted file mode 100644
index b04f64c6f..000000000
--- a/digest/test/test_digest_tools.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import pytest
-
-from digest.digest_tools import Digest, content_path, InvalidDigestException
-
-@pytest.mark.parametrize('digest, output_args', [
- ('tarsum.v123123+sha1:123deadbeef', ('tarsum.v123123+sha1', '123deadbeef')),
- ('tarsum.v1+sha256:123123', ('tarsum.v1+sha256', '123123')),
- ('tarsum.v0+md5:abc', ('tarsum.v0+md5', 'abc')),
- ('tarsum+sha1:abc', ('tarsum+sha1', 'abc')),
- ('sha1:123deadbeef', ('sha1', '123deadbeef')),
- ('sha256:123123', ('sha256', '123123')),
- ('md5:abc', ('md5', 'abc')),
-])
-def test_parse_good(digest, output_args):
- assert Digest.parse_digest(digest) == Digest(*output_args)
- assert str(Digest.parse_digest(digest)) == digest
-
-
-@pytest.mark.parametrize('bad_digest', [
- 'tarsum.v+md5:abc:',
- 'sha1:123deadbeefzxczxv',
- 'sha256123123',
- 'tarsum.v1+',
- 'tarsum.v1123+sha1:',
-])
-def test_parse_fail(bad_digest):
- with pytest.raises(InvalidDigestException):
- Digest.parse_digest(bad_digest)
-
-
-@pytest.mark.parametrize('digest, path', [
- ('tarsum.v123123+sha1:123deadbeef', 'tarsum/v123123/sha1/12/123deadbeef'),
- ('tarsum.v1+sha256:123123', 'tarsum/v1/sha256/12/123123'),
- ('tarsum.v0+md5:abc', 'tarsum/v0/md5/ab/abc'),
- ('sha1:123deadbeef', 'sha1/12/123deadbeef'),
- ('sha256:123123', 'sha256/12/123123'),
- ('md5:abc', 'md5/ab/abc'),
- ('md5:1', 'md5/01/1'),
- ('md5.....+++:1', 'md5/01/1'),
- ('.md5.:1', 'md5/01/1'),
-])
-def test_paths(digest, path):
- assert content_path(digest) == path
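The parametrized tests deleted above exercise the old class-based `Digest` API and so no longer apply. If equivalent coverage is wanted for the functional replacement, a pytest sketch along these lines (a suggestion, not part of this change) would hit the same parse, failure, and storage-path cases, including the tarsum prefix and the two-character zero-filled hash prefix:

```
import pytest

from digest.digest_tools import parse_digest, content_path, InvalidDigestException


@pytest.mark.parametrize('digest, expected', [
    ('sha256:123123', (False, None, 'sha256', '123123')),
    ('tarsum.v1+sha256:123123', (True, 'v1', 'sha256', '123123')),
])
def test_parse_digest(digest, expected):
    assert tuple(parse_digest(digest)) == expected


@pytest.mark.parametrize('bad_digest', [
    'sha256123123',           # missing the algorithm separator
    'tarsum.v1+',             # tarsum prefix with no digest
    'sha1:123deadbeefzxczxv', # non-hex characters in the hash bytes
])
def test_parse_digest_invalid(bad_digest):
    with pytest.raises(InvalidDigestException):
        parse_digest(bad_digest)


@pytest.mark.parametrize('digest, path', [
    ('sha256:123123', 'sha256/12/123123'),
    ('tarsum.v1+sha256:123123', 'tarsum/v1/sha256/12/123123'),
    ('md5:1', 'md5/01/1'),
])
def test_content_path(digest, path):
    assert content_path(digest) == path
```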
diff --git a/displayversion.py b/displayversion.py
deleted file mode 100644
index 463271059..000000000
--- a/displayversion.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from _init import __version__, __gitrev__
-
-def displayversion():
- version_string = ('Quay %s (%s)' % (__version__, __gitrev__.strip())).strip()
-
- print '=' * (len(version_string) + 4)
- print '= ' + version_string + ' ='
- print '=' * (len(version_string) + 4)
- print ""
-
-if __name__ == "__main__":
- displayversion()
diff --git a/docs/development-container.md b/docs/development-container.md
deleted file mode 100644
index 20e1831f6..000000000
--- a/docs/development-container.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Quay Development Container
-
-The instructions below describe how to create and set up a container for working with local source changes and testing. It is meant for developers familiar with Python, JavaScript, and the associated development tooling.
-
-## Quay Source
-
-For convenience, the environment variable `QUAY_DEVEL_HOME` will be referenced for locations of the parent directory for source and other directories.
-
-The directory `$QUAY_DEVEL_HOME/quay` is your _development_ git checkout of [quay](https://github.com/quay/quay).
-```
-cd $QUAY_DEVEL_HOME
-git clone https://github.com/quay/quay.git
-```
-
-For local storage, create the directory where images will be written to disk.
-```
-cd $QUAY_DEVEL_HOME
-mkdir quay-storage
-```
-
-The Quay config app should already have been run and the resulting _quay-config.tar.gz_ expanded into place. The file is kept in this directory for convenience only: keeping it available allows it to be uploaded back into the config app for later modifications.
-```
-cd $QUAY_DEVEL_HOME
-mkdir quay-config
-cp ~/Downloads/quay-config.tar.gz $QUAY_DEVEL_HOME/quay-config/quay-config.tar.gz
-tar xvf $QUAY_DEVEL_HOME/quay-config/quay-config.tar.gz -C $QUAY_DEVEL_HOME/quay-config
-```
-
-## Building Development Container
-
-Build an image from a clean git checkout of the master branch. It's important not to do this in your own development directory, as it may contain files that conflict with or break a clean build.
-```
-cd $QUAY_DEVEL_HOME/quay
-docker build -t quay:devel -f Dockerfile .
-```
-
-## Run Development Container
-
-Note: This assumes the config app has been run successfully and the database is available. Both can be done by following the regular user documentation and using the `quay:devel` image built above.
-
-```
-docker run --rm --name quay \
- -v $QUAY_DEVEL_HOME/quay-config:/conf/stack \
- -v $QUAY_DEVEL_HOME/quay-storage:/datastorage \
- -v $QUAY_DEVEL_HOME/quay:$QUAY_DEVEL_HOME/quay \
- -p 8080:8080 \
- -p 8443:8443 \
- -p 9092:9092 \
- -e QUAY_DEVEL_HOME=$QUAY_DEVEL_HOME \
- quay:devel
-```
-
-This starts the Quay container with all services running. The web UI is available on port 8443.
-
-## Switch Services to Development
-
-### Inside the Container
-
-When exec'ing into the development container, it is best to run under the [SCLs](https://www.softwarecollections.org) used in production. This provides the correct paths to Python and other executables.
-
-```
-docker exec -it quay scl enable python27 rh-nginx112 bash
-```
-
-The following sections are perhaps easiest to run in separate `docker exec` sessions, which is how they will be described. Some or all could be run in the background and managed differently than described here.
-
-### Stop Services
-
-When the quay container is running, all regular services are started by default. To run a service locally, first stop its default instance. Below are the steps for developing the web UI and the web backend Python service.
-
-Stop services:
-```
-supervisorctl -c /quay-registry/conf/supervisord.conf stop gunicorn-web
-```
-
-Change the web resources to devel location:
-```
-cd /quay-registry
-mv static static.bak
-ln -s $QUAY_DEVEL_HOME/quay/static
-```
-
-Build web assets:
-```
-cd $QUAY_DEVEL_HOME/quay
-mkdir -p static/webfonts
-mkdir -p static/fonts
-mkdir -p static/ldn
-PYTHONPATH=. python -m external_libraries
-
-yarn build
-```
-
-### Run Migrations
-
-If `$QUAY_DEVEL_HOME/quay`, which presumably contains your local code changes, includes database migrations, take special care when switching between different versions of the code.
-
-To run a migration:
-```
-cd $QUAY_DEVEL_HOME/quay
-PYTHONPATH=. alembic upgrade 5248ddf35167
-```
-
-To revert a migration:
-```
-cd $QUAY_DEVEL_HOME/quay
-PYTHONPATH=. alembic downgrade -1
-```
-
-### Web UI Assets
-
-```
-cd $QUAY_DEVEL_HOME/quay
-yarn build && npm run watch
-```
-
-### Run Web Server
-```
-cd $QUAY_DEVEL_HOME/quay
-PYTHONPATH=. gunicorn -c conf/gunicorn_web.py web:application
-```
diff --git a/docs/talks.md b/docs/talks.md
deleted file mode 100644
index 6d2bd6225..000000000
--- a/docs/talks.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Talks
-
-The following is a chronological list of presentations given by various members of the community.
-
-## 2017
-
-* [Towards a standard Containerized App Registry - Antoine Legrand, Jimmy Zelinskie](https://youtu.be/zGJsXyzE5A8)
-* [Demoing a Kubernetes CI/CD Pipeline using App Registry - Antoine Legrand, Jimmy Zelinskie](https://youtu.be/6Gpuj_cCZII)
-
-## 2016
-
-* [Better Image Distribution to Worldwide Clusters - Joey Schorr](https://youtu.be/dX9-ComoJTs)
-* [Container Management at eBay - Thuc Nguyen](https://youtu.be/h4f7nqYRPK8)
-
-## 2015
-
-* [Continuous Containerization - Jake Moshenko, Joey Schorr](https://youtu.be/anfmeB_JzB0)
diff --git a/emails/base.html b/emails/base.html
index 52004713f..00a89e51d 100644
--- a/emails/base.html
+++ b/emails/base.html
@@ -1,4 +1,3 @@
-{% if with_base_template %}
@@ -12,7 +11,7 @@
{% endif %}
-
-