From 84adf680b99bf8d919cf79d15b05c0149a4f6ce6 Mon Sep 17 00:00:00 2001
From: yackob03
Date: Mon, 30 Sep 2013 19:10:27 -0400
Subject: [PATCH] Switch the registry and index to use real s3 and rds.

---
 config.py              |  39 +++++++-
 data/database.py       |  15 ++--
 requirements-nover.txt |   3 +-
 requirements.txt       |   1 +
 storage/__init__.py    | 200 +++++++++++++++++++++--------------------
 5 files changed, 154 insertions(+), 104 deletions(-)

diff --git a/config.py b/config.py
index 136d11945..b6242b116 100644
--- a/config.py
+++ b/config.py
@@ -1,3 +1,5 @@
+from peewee import MySQLDatabase, SqliteDatabase
+
 class FlaskConfig(object):
   SECRET_KEY = '1cb18882-6d12-440d-a4cc-b7430fb5f884'
 
@@ -13,5 +15,40 @@ class MailConfig(object):
   TESTING = False
 
 
-class ProductionConfig(FlaskConfig, MailConfig):
+class SQLiteDB(object):
+  DB_NAME = 'test.db'
+  DB_CONNECTION_ARGS = {
+    'threadlocals': True
+  }
+  DB_DRIVER = SqliteDatabase
+
+
+class RDSMySQL(object):
+  DB_NAME = 'quay'
+  DB_CONNECTION_ARGS = {
+    'host': 'fluxmonkeylogin.cb0vumcygprn.us-east-1.rds.amazonaws.com',
+    'user': 'fluxmonkey',
+    'passwd': '8eifM#uoZ85xqC^',
+    'threadlocals': True
+  }
+  DB_DRIVER = MySQLDatabase
+
+
+class S3Storage(object):
+  AWS_ACCESS_KEY = 'AKIAJWZWUIS24TWSMWRA'
+  AWS_SECRET_KEY = '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'
+  REGISTRY_S3_BUCKET = 'quay-registry'
+  STORAGE_KIND = 's3'
+
+
+class LocalStorage(object):
+  STORAGE_KIND = 'local'
+  LOCAL_STORAGE_DIR = '/tmp/registry'
+
+
+class DebugConfig(FlaskConfig, MailConfig, LocalStorage, SQLiteDB):
   REGISTRY_SERVER = 'localhost:5000'
+
+
+class ProductionConfig(FlaskConfig, MailConfig, S3Storage, RDSMySQL):
+  REGISTRY_SERVER = 'quay.io'
diff --git a/data/database.py b/data/database.py
index 13791af53..3a377d02c 100644
--- a/data/database.py
+++ b/data/database.py
@@ -1,12 +1,15 @@
 import string
-from random import SystemRandom
 
+from random import SystemRandom
+from datetime import datetime
 from peewee import *
 from peewee import create_model_tables
-from datetime import datetime
 
+from app import app
 
-db = SqliteDatabase('test.db', threadlocals=True)
+
+db = app.config['DB_DRIVER'](app.config['DB_NAME'],
+                             **app.config['DB_CONNECTION_ARGS'])
 
 
 class BaseModel(Model):
@@ -29,7 +32,7 @@ class Repository(BaseModel):
   namespace = CharField()
   name = CharField()
   visibility = ForeignKeyField(Visibility)
-  description = CharField(null=True)
+  description = BlobField(null=True)
 
   class Meta:
     database = db
@@ -86,11 +89,11 @@ class Image(BaseModel):
   image_id = CharField()
   checksum = CharField(null=True)
   created = DateTimeField(null=True)
-  comment = CharField(null=True)
+  comment = BlobField(null=True)
   repository = ForeignKeyField(Repository)
 
   # '/' separated list of ancestory ids, e.g. /1/2/6/7/10/
-  ancestors = CharField(index=True, default='/', max_length=65535)
+  ancestors = CharField(index=True, default='/', max_length=64535)
 
   class Meta:
     database = db
diff --git a/requirements-nover.txt b/requirements-nover.txt
index 7cdb7d4b6..4f7a3d3b2 100644
--- a/requirements-nover.txt
+++ b/requirements-nover.txt
@@ -5,4 +5,5 @@ Flask-Principal
 Flask-Login
 Flask-Mail
 python-dateutil
-boto
\ No newline at end of file
+boto
+MySQL-python
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 2c28e828a..6543f5c24 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,6 +4,7 @@ Flask-Mail==0.9.0
 Flask-Principal==0.4.0
 Jinja2==2.7.1
 MarkupSafe==0.18
+MySQL-python==1.2.4
 Werkzeug==0.9.4
 argparse==1.2.1
 blinker==1.3
diff --git a/storage/__init__.py b/storage/__init__.py
index 2a67815f9..ddcc5948c 100644
--- a/storage/__init__.py
+++ b/storage/__init__.py
@@ -1,119 +1,123 @@
+import logging
 import contextlib
 import tempfile
 
+from app import app
+
 __all__ = ['load']
 
+logger = logging.getLogger(__name__)
 
 class Storage(object):
-    """Storage is organized as follow:
-    $ROOT/images/<image_id>/json
-    $ROOT/images/<image_id>/layer
-    $ROOT/repositories/<namespace>/<repository_name>/<tag_name>
-    """
+  """Storage is organized as follow:
+  $ROOT/images/<image_id>/json
+  $ROOT/images/<image_id>/layer
+  $ROOT/repositories/<namespace>/<repository_name>/<tag_name>
+  """
 
-    # Useful if we want to change those locations later without rewriting
-    # the code which uses Storage
-    repositories = 'repositories'
-    images = 'images'
-    # Set the IO buffer to 64kB
-    buffer_size = 64 * 1024
+  # Useful if we want to change those locations later without rewriting
+  # the code which uses Storage
+  repositories = 'repositories'
+  images = 'images'
+  # Set the IO buffer to 64kB
+  buffer_size = 64 * 1024
 
-    #FIXME(samalba): Move all path resolver in each module (out of the base)
-    def images_list_path(self, namespace, repository):
-        return '{0}/{1}/{2}/_images_list'.format(self.repositories,
-                                                 namespace,
-                                                 repository)
+  #FIXME(samalba): Move all path resolver in each module (out of the base)
+  def images_list_path(self, namespace, repository):
+    return '{0}/{1}/{2}/_images_list'.format(self.repositories,
+                                             namespace,
+                                             repository)
 
-    def image_json_path(self, namespace, repository, image_id):
-        return '{0}/{1}/{2}/{3}/json'.format(self.images, namespace,
-                                             repository, image_id)
+  def image_json_path(self, namespace, repository, image_id):
+    return '{0}/{1}/{2}/{3}/json'.format(self.images, namespace,
+                                         repository, image_id)
 
-    def image_mark_path(self, namespace, repository, image_id):
-        return '{0}/{1}/{2}/{3}/_inprogress'.format(self.images, namespace,
-                                                    repository, image_id)
+  def image_mark_path(self, namespace, repository, image_id):
+    return '{0}/{1}/{2}/{3}/_inprogress'.format(self.images, namespace,
+                                                repository, image_id)
 
-    def image_checksum_path(self, namespace, repository, image_id):
-        return '{0}/{1}/{2}/{3}/_checksum'.format(self.images, namespace,
-                                                  repository, image_id)
+  def image_checksum_path(self, namespace, repository, image_id):
+    return '{0}/{1}/{2}/{3}/_checksum'.format(self.images, namespace,
+                                              repository, image_id)
 
-    def image_layer_path(self, namespace, repository, image_id):
-        return '{0}/{1}/{2}/{3}/layer'.format(self.images, namespace,
-                                              repository, image_id)
+  def image_layer_path(self, namespace, repository, image_id):
+    return '{0}/{1}/{2}/{3}/layer'.format(self.images, namespace,
+                                          repository, image_id)
 
-    def image_ancestry_path(self, namespace, repository, image_id):
-        return '{0}/{1}/{2}/{3}/ancestry'.format(self.images, namespace,
-                                                 repository, image_id)
+  def image_ancestry_path(self, namespace, repository, image_id):
+    return '{0}/{1}/{2}/{3}/ancestry'.format(self.images, namespace,
+                                             repository, image_id)
 
-    def tag_path(self, namespace, repository, tagname=None):
-        if not tagname:
-            return '{0}/{1}/{2}'.format(self.repositories,
-                                        namespace,
-                                        repository)
-        return '{0}/{1}/{2}/tag_{3}'.format(self.repositories,
-                                            namespace,
-                                            repository,
-                                            tagname)
+  def tag_path(self, namespace, repository, tagname=None):
+    if not tagname:
+      return '{0}/{1}/{2}'.format(self.repositories,
+                                  namespace,
+                                  repository)
+    return '{0}/{1}/{2}/tag_{3}'.format(self.repositories,
+                                        namespace,
+                                        repository,
+                                        tagname)
 
-    def index_images_path(self, namespace, repository):
-        return '{0}/{1}/{2}/_index_images'.format(self.repositories,
-                                                  namespace,
-                                                  repository)
+  def index_images_path(self, namespace, repository):
+    return '{0}/{1}/{2}/_index_images'.format(self.repositories,
+                                              namespace,
+                                              repository)
 
-    def get_content(self, path):
-        raise NotImplementedError
+  def get_content(self, path):
+    raise NotImplementedError
 
-    def put_content(self, path, content):
-        raise NotImplementedError
+  def put_content(self, path, content):
+    raise NotImplementedError
 
-    def stream_read(self, path):
-        raise NotImplementedError
+  def stream_read(self, path):
+    raise NotImplementedError
 
-    def stream_write(self, path, fp):
-        raise NotImplementedError
+  def stream_write(self, path, fp):
+    raise NotImplementedError
 
-    def list_directory(self, path=None):
-        raise NotImplementedError
+  def list_directory(self, path=None):
+    raise NotImplementedError
 
-    def exists(self, path):
-        raise NotImplementedError
+  def exists(self, path):
+    raise NotImplementedError
 
-    def remove(self, path):
-        raise NotImplementedError
+  def remove(self, path):
+    raise NotImplementedError
 
-    def get_size(self, path):
-        raise NotImplementedError
+  def get_size(self, path):
+    raise NotImplementedError
 
 
 @contextlib.contextmanager
 def store_stream(stream):
-    """Stores the entire stream to a temporary file."""
-    tmpf = tempfile.TemporaryFile()
-    while True:
-        try:
-            buf = stream.read(4096)
-            if not buf:
-                break
-            tmpf.write(buf)
-        except IOError:
-            break
-    tmpf.seek(0)
-    yield tmpf
-    tmpf.close()
+  """Stores the entire stream to a temporary file."""
+  tmpf = tempfile.TemporaryFile()
+  while True:
+    try:
+      buf = stream.read(4096)
+      if not buf:
+        break
+      tmpf.write(buf)
+    except IOError:
+      break
+  tmpf.seek(0)
+  yield tmpf
+  tmpf.close()
 
 
 def temp_store_handler():
-    tmpf = tempfile.TemporaryFile()
+  tmpf = tempfile.TemporaryFile()
 
-    def fn(buf):
-        try:
-            tmpf.write(buf)
-        except IOError:
-            pass
+  def fn(buf):
+    try:
+      tmpf.write(buf)
+    except IOError:
+      pass
 
-    return tmpf, fn
+  return tmpf, fn
 
 
 from local import LocalStorage
@@ -124,20 +128,24 @@
 _storage = {}
 
 def load(kind=None):
-    """Returns the right storage class according to the configuration."""
-    global _storage
+  """Returns the right storage class according to the configuration."""
+  global _storage
 
-    # TODO hard code to local for now
-    kind = 'local'
-    # if not kind:
-    #     kind = cfg.storage.lower()
-    if kind in _storage:
-        return _storage[kind]
-    if kind == 's3':
-        store = S3Storage('/registry', 'access_key', 'secret_key', 'bucket')
-    elif kind == 'local':
-        store = LocalStorage('/tmp/registry')
-    else:
-        raise ValueError('Not supported storage \'{0}\''.format(kind))
-    _storage[kind] = store
-    return store
+  # TODO hard code to local for now
+  kind = app.config['STORAGE_KIND']
+  # if not kind:
+  #   kind = cfg.storage.lower()
+  if kind in _storage:
+    return _storage[kind]
+  if kind == 's3':
+    logger.debug('Using s3 storage.')
+    store = S3Storage('', app.config['AWS_ACCESS_KEY'],
+                      app.config['AWS_SECRET_KEY'],
+                      app.config['REGISTRY_S3_BUCKET'])
+  elif kind == 'local':
+    logger.debug('Using local storage.')
+    store = LocalStorage(app.config['LOCAL_STORAGE_DIR'])
+  else:
+    raise ValueError('Not supported storage \'{0}\''.format(kind))
+  _storage[kind] = store
+  return store
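
For reference, a minimal usage sketch of what the patch wires together. It is not part of the patch: it assumes the patched tree plus an app.py that exposes a Flask `app` whose config is loaded from config.DebugConfig (or config.ProductionConfig), and it assumes data/ is an importable package; only the DB_*/STORAGE_* keys and the db/load() behaviour come from the diff above.

    # sketch.py -- hypothetical script run from the repository root
    from app import app            # assumed app.py bootstrap, not shown in this diff

    import storage
    from data.database import db   # assumes data/ is a package

    # Under DebugConfig these resolve to SqliteDatabase('test.db', threadlocals=True)
    # and LocalStorage('/tmp/registry'); under ProductionConfig they resolve to a
    # MySQLDatabase against the RDS host and an S3Storage backed by quay-registry.
    print(app.config['DB_DRIVER'].__name__, app.config['STORAGE_KIND'])

    store = storage.load()          # cached per STORAGE_KIND after the first call
    store.put_content('ping', 'pong')
    print(store.get_content('ping'))

    db.connect()                    # peewee otherwise connects lazily on first query
    db.close()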