Merge branch 'master' of https://bitbucket.org/yackob03/quay
commit b7d43e1593
10 changed files with 184 additions and 115 deletions
@@ -35,4 +35,6 @@ container_commands:
 option_settings:
   "aws:elasticbeanstalk:container:python:staticfiles":
-    "/static": "static/"
+    "/static": "static/"
+  "aws:elasticbeanstalk:application:environment":
+    "STACK": "prod"

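The new "aws:elasticbeanstalk:application:environment" block surfaces STACK to the running application as an ordinary environment variable. A minimal sketch of the check an application can then make (it mirrors the test added to app.py in the next diff):

    import os

    # 'STACK' is injected by Elastic Beanstalk from the option_settings above;
    # anything starting with 'prod' selects the production configuration.
    is_prod = os.environ.get('STACK', '').strip().lower().startswith('prod')
    print('production' if is_prod else 'debug')
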
app.py (8 changes)
@@ -1,13 +1,17 @@
 import logging
+import os
 
 from flask import Flask
 from flask.ext.principal import Principal
 from flask.ext.login import LoginManager
 from flask.ext.mail import Mail
-from config import ProductionConfig
+from config import ProductionConfig, DebugConfig
 
 app = Flask(__name__)
-app.config.from_object(ProductionConfig())
+
+is_prod = os.environ.get('STACK', '').strip().lower().startswith('prod')
+config_object = ProductionConfig() if is_prod else DebugConfig()
+app.config.from_object(config_object)
 
 logger = logging.getLogger(__name__)

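Flask's config.from_object() only copies uppercase attributes from the object it is handed, which is why every setting in the config classes further down is an uppercase class attribute. A small self-contained sketch of that behaviour (the class and key names here are illustrative, not from the commit):

    from flask import Flask


    class ExampleConfig(object):
      REGISTRY_SERVER = 'localhost:5000'   # uppercase: copied by from_object()
      ignored_setting = 'not copied'       # lowercase: silently skipped


    app = Flask(__name__)
    app.config.from_object(ExampleConfig())
    assert app.config['REGISTRY_SERVER'] == 'localhost:5000'
    assert 'ignored_setting' not in app.config
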
@@ -11,9 +11,7 @@ import endpoints.registry
 # Remove this for prod config
 application.debug = True
 
-if __name__ == '__main__':
-  FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - ' + \
-    '%(funcName)s - %(message)s'
-  logging.basicConfig(format=FORMAT, level=logging.DEBUG)
+logging.basicConfig(**application.config['LOGGING_CONFIG'])
 
+if __name__ == '__main__':
   application.run(port=5001, debug=True)

config.py (56 changes)
@@ -1,3 +1,13 @@
+import logging
+import sys
+
+from peewee import MySQLDatabase, SqliteDatabase
+
+
+LOG_FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - ' + \
+  '%(funcName)s - %(message)s'
+
+
 class FlaskConfig(object):
   SECRET_KEY = '1cb18882-6d12-440d-a4cc-b7430fb5f884'
 
@@ -13,5 +23,49 @@ class MailConfig(object):
   TESTING = False
 
 
-class ProductionConfig(FlaskConfig, MailConfig):
+class SQLiteDB(object):
+  DB_NAME = 'test.db'
+  DB_CONNECTION_ARGS = {
+    'threadlocals': True
+  }
+  DB_DRIVER = SqliteDatabase
+
+
+class RDSMySQL(object):
+  DB_NAME = 'quay'
+  DB_CONNECTION_ARGS = {
+    'host': 'fluxmonkeylogin.cb0vumcygprn.us-east-1.rds.amazonaws.com',
+    'user': 'fluxmonkey',
+    'passwd': '8eifM#uoZ85xqC^',
+    'threadlocals': True,
+  }
+  DB_DRIVER = MySQLDatabase
+
+
+class S3Storage(object):
+  AWS_ACCESS_KEY = 'AKIAJWZWUIS24TWSMWRA'
+  AWS_SECRET_KEY = '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'
+  REGISTRY_S3_BUCKET = 'quay-registry'
+  STORAGE_KIND = 's3'
+
+
+class LocalStorage(object):
+  STORAGE_KIND = 'local'
+  LOCAL_STORAGE_DIR = '/tmp/registry'
+
+
+class DebugConfig(FlaskConfig, MailConfig, LocalStorage, SQLiteDB):
+  REGISTRY_SERVER = 'localhost:5000'
+  LOGGING_CONFIG = {
+    'level': logging.DEBUG,
+    'format': LOG_FORMAT
+  }
+
+
+class ProductionConfig(FlaskConfig, MailConfig, S3Storage, RDSMySQL):
+  REGISTRY_SERVER = 'quay.io'
+  LOGGING_CONFIG = {
+    'stream': sys.stderr,
+    'level': logging.DEBUG,
+    'format': LOG_FORMAT,
+  }

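Each environment's config is assembled from small mixin classes, so DebugConfig and ProductionConfig differ only in which pieces they inherit. A quick sketch of what the composed objects carry, assuming config.py as shown above is importable:

    from config import DebugConfig, ProductionConfig

    debug = DebugConfig()
    assert debug.STORAGE_KIND == 'local'             # from LocalStorage
    assert debug.DB_NAME == 'test.db'                # from SQLiteDB
    assert debug.REGISTRY_SERVER == 'localhost:5000'

    prod = ProductionConfig()
    assert prod.STORAGE_KIND == 's3'                 # from S3Storage
    assert prod.DB_NAME == 'quay'                    # from RDSMySQL
    assert prod.REGISTRY_SERVER == 'quay.io'
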
@@ -1,12 +1,15 @@
 import string
-from random import SystemRandom
 
+from random import SystemRandom
+from datetime import datetime
 from peewee import *
 from peewee import create_model_tables
 
-from datetime import datetime
+from app import app
 
-db = SqliteDatabase('test.db', threadlocals=True)
+db = app.config['DB_DRIVER'](app.config['DB_NAME'],
+                             **app.config['DB_CONNECTION_ARGS'])
 
 
 class BaseModel(Model):

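Because the driver class, database name, and connection arguments all come from the Flask config, the single db = app.config['DB_DRIVER'](...) line builds a different peewee database per environment. Roughly, with the peewee 2.x API pinned in requirements.txt (a sketch, not code from the commit):

    from peewee import MySQLDatabase, SqliteDatabase

    # Under DebugConfig the line is equivalent to:
    debug_db = SqliteDatabase('test.db', threadlocals=True)

    # Under ProductionConfig it is equivalent to (credentials elided here;
    # they come from RDSMySQL.DB_CONNECTION_ARGS):
    prod_db = MySQLDatabase('quay',
                            host='fluxmonkeylogin.cb0vumcygprn.us-east-1.rds.amazonaws.com',
                            user='fluxmonkey',
                            passwd='...',
                            threadlocals=True)
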
@@ -29,7 +32,7 @@ class Repository(BaseModel):
   namespace = CharField()
   name = CharField()
   visibility = ForeignKeyField(Visibility)
-  description = CharField(null=True)
+  description = TextField(null=True)
 
   class Meta:
     database = db

@@ -86,11 +89,11 @@ class Image(BaseModel):
   image_id = CharField()
   checksum = CharField(null=True)
   created = DateTimeField(null=True)
-  comment = CharField(null=True)
+  comment = TextField(null=True)
   repository = ForeignKeyField(Repository)
 
   # '/' separated list of ancestory ids, e.g. /1/2/6/7/10/
-  ancestors = CharField(index=True, default='/', max_length=65535)
+  ancestors = CharField(index=True, default='/', max_length=64535)
 
   class Meta:
     database = db

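description and comment move from CharField to TextField because peewee's CharField maps to a length-limited VARCHAR (255 characters unless max_length is given), which is easy to overflow on MySQL, while TextField maps to a TEXT column with no per-row length cap. A minimal sketch of the distinction (model and field names here are illustrative):

    from peewee import CharField, Model, TextField


    class Note(Model):
      # CharField -> VARCHAR(255) by default: fine for short identifiers,
      # too small for long repository descriptions or image comments on MySQL.
      title = CharField(null=True)
      # TextField -> TEXT: no practical per-row length limit.
      body = TextField(null=True)
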
@@ -112,9 +112,7 @@ def get_visible_repositories(username=None):
 def get_matching_repositories(repo_term, username=None):
   visible = get_visible_repositories(username)
   search_clauses = (Repository.name ** ('%' + repo_term + '%') |
-                    Repository.namespace ** ('%' + repo_term + '%') |
-                    Repository.description ** ('%' + repo_term + '%'))
+                    Repository.namespace ** ('%' + repo_term + '%'))
 
   final = visible.where(search_clauses).limit(10)
   return list(final)

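In peewee, the ** operator on a field is the case-insensitive LIKE lookup, so each clause above is a case-insensitive substring match; the change drops the description clause (which also just became a TextField) and keeps name and namespace. A self-contained sketch of the same kind of query against a trimmed-down model (names here are stand-ins, backed by an in-memory SQLite database):

    from peewee import CharField, Model, SqliteDatabase

    db = SqliteDatabase(':memory:')


    class Repo(Model):
      namespace = CharField()
      name = CharField()

      class Meta:
        database = db


    db.connect()
    Repo.create_table()
    Repo.create(namespace='devtable', name='ubuntu-base')

    pattern = '%ubuntu%'
    # ** is peewee's case-insensitive LIKE, so 'Ubuntu' and 'UBUNTU' match too.
    matches = (Repo
               .select()
               .where((Repo.name ** pattern) | (Repo.namespace ** pattern))
               .limit(10))
    print([r.name for r in matches])
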
@@ -5,4 +5,5 @@ Flask-Principal
 Flask-Login
 Flask-Mail
 python-dateutil
-boto
+boto
+pymysql

@@ -4,11 +4,11 @@ Flask-Mail==0.9.0
 Flask-Principal==0.4.0
 Jinja2==2.7.1
 MarkupSafe==0.18
+PyMySQL==0.5
 Werkzeug==0.9.4
 argparse==1.2.1
 blinker==1.3
 boto==2.13.3
 distribute==0.6.34
 itsdangerous==0.23
 peewee==2.1.4
 py-bcrypt==0.4

@@ -1,119 +1,123 @@
+import logging
+
 import contextlib
 import tempfile
 
+from app import app
 
 
 __all__ = ['load']
+logger = logging.getLogger(__name__)
 
 
 class Storage(object):
 
   """Storage is organized as follow:
   $ROOT/images/<image_id>/json
   $ROOT/images/<image_id>/layer
   $ROOT/repositories/<namespace>/<repository_name>/<tag_name>
   """
 
   # Useful if we want to change those locations later without rewriting
   # the code which uses Storage
   repositories = 'repositories'
   images = 'images'
   # Set the IO buffer to 64kB
   buffer_size = 64 * 1024
 
   #FIXME(samalba): Move all path resolver in each module (out of the base)
   def images_list_path(self, namespace, repository):
     return '{0}/{1}/{2}/_images_list'.format(self.repositories,
                                              namespace,
                                              repository)
 
   def image_json_path(self, namespace, repository, image_id):
     return '{0}/{1}/{2}/{3}/json'.format(self.images, namespace,
                                          repository, image_id)
 
   def image_mark_path(self, namespace, repository, image_id):
     return '{0}/{1}/{2}/{3}/_inprogress'.format(self.images, namespace,
                                                 repository, image_id)
 
   def image_checksum_path(self, namespace, repository, image_id):
     return '{0}/{1}/{2}/{3}/_checksum'.format(self.images, namespace,
                                               repository, image_id)
 
   def image_layer_path(self, namespace, repository, image_id):
     return '{0}/{1}/{2}/{3}/layer'.format(self.images, namespace,
                                           repository, image_id)
 
   def image_ancestry_path(self, namespace, repository, image_id):
     return '{0}/{1}/{2}/{3}/ancestry'.format(self.images, namespace,
                                              repository, image_id)
 
   def tag_path(self, namespace, repository, tagname=None):
     if not tagname:
       return '{0}/{1}/{2}'.format(self.repositories,
                                   namespace,
                                   repository)
     return '{0}/{1}/{2}/tag_{3}'.format(self.repositories,
                                         namespace,
                                         repository,
                                         tagname)
 
   def index_images_path(self, namespace, repository):
     return '{0}/{1}/{2}/_index_images'.format(self.repositories,
                                               namespace,
                                               repository)
 
   def get_content(self, path):
     raise NotImplementedError
 
   def put_content(self, path, content):
     raise NotImplementedError
 
   def stream_read(self, path):
     raise NotImplementedError
 
   def stream_write(self, path, fp):
     raise NotImplementedError
 
   def list_directory(self, path=None):
     raise NotImplementedError
 
   def exists(self, path):
     raise NotImplementedError
 
   def remove(self, path):
     raise NotImplementedError
 
   def get_size(self, path):
     raise NotImplementedError
 
 
 @contextlib.contextmanager
 def store_stream(stream):
   """Stores the entire stream to a temporary file."""
   tmpf = tempfile.TemporaryFile()
   while True:
     try:
       buf = stream.read(4096)
       if not buf:
         break
       tmpf.write(buf)
     except IOError:
       break
   tmpf.seek(0)
   yield tmpf
   tmpf.close()
 
 
 def temp_store_handler():
   tmpf = tempfile.TemporaryFile()
 
   def fn(buf):
     try:
       tmpf.write(buf)
     except IOError:
       pass
 
   return tmpf, fn
 
 
 from local import LocalStorage

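store_stream() above buffers an incoming stream into a temporary file and yields it as a context manager, so callers can seek, re-read, or measure a request body before handing it to a backend. A minimal usage sketch (the import path is an assumption; the diff does not name the module):

    import io

    from storage import store_stream  # module path assumed, not shown in the diff

    incoming = io.BytesIO(b'layer data from the client')

    with store_stream(incoming) as tmpf:
      data = tmpf.read()   # the whole stream, now in a seekable temporary file
      tmpf.seek(0)         # can be read again, e.g. checksum first, then upload
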
@@ -124,20 +128,24 @@ _storage = {}
 
 
 def load(kind=None):
   """Returns the right storage class according to the configuration."""
   global _storage
 
   # TODO hard code to local for now
-  kind = 'local'
+  kind = app.config['STORAGE_KIND']
   # if not kind:
   #   kind = cfg.storage.lower()
   if kind in _storage:
     return _storage[kind]
   if kind == 's3':
-    store = S3Storage('/registry', 'access_key', 'secret_key', 'bucket')
+    logger.debug('Using s3 storage.')
+    store = S3Storage('', app.config['AWS_ACCESS_KEY'],
+                      app.config['AWS_SECRET_KEY'],
+                      app.config['REGISTRY_S3_BUCKET'])
   elif kind == 'local':
-    store = LocalStorage('/tmp/registry')
+    logger.debug('Using local storage.')
+    store = LocalStorage(app.config['LOCAL_STORAGE_DIR'])
   else:
     raise ValueError('Not supported storage \'{0}\''.format(kind))
   _storage[kind] = store
   return store

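load() is now driven entirely by the Flask config: STORAGE_KIND selects the backend, the remaining keys supply its credentials or root directory, and instances are cached in _storage. A short usage sketch (the package name and the example arguments are assumptions; put_content/get_content are implemented by the concrete backends, which are not part of this diff):

    import storage  # package name assumed

    store = storage.load()   # LocalStorage or S3Storage, depending on STORAGE_KIND

    path = store.image_json_path('devtable', 'testrepo', 'abc123')  # example values
    store.put_content(path, '{"id": "abc123"}')
    print(store.get_content(path))

    assert storage.load() is store   # repeated calls return the cached instance
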
@@ -3,6 +3,7 @@ LoadModule wsgi_module modules/mod_wsgi.so
 WSGIPythonHome /opt/python/run/venv
 WSGISocketPrefix run/wsgi
 WSGIRestrictEmbedded On
+WSGIPassAuthorization On
 
 <VirtualHost *:80>

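WSGIPassAuthorization On matters here because mod_wsgi drops the HTTP Authorization header from the WSGI environ by default, so without it the application never sees the credentials the docker client sends. A minimal Flask-side sketch of what becomes visible once the header is passed through (the route is illustrative, not taken from this commit):

    from flask import Flask, request

    app = Flask(__name__)


    @app.route('/v1/ping')
    def ping():
      # request.authorization is parsed from the Authorization header; it stays
      # None unless mod_wsgi forwards the header (WSGIPassAuthorization On).
      auth = request.authorization
      if auth is None:
        return 'no credentials', 401
      return 'authenticated as ' + auth.username
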