Merge remote-tracking branch 'upstream/master' into python-registry-v2

Jake Moshenko 2015-09-04 16:32:01 -04:00
commit 210ed7cf02
148 changed files with 1829 additions and 445 deletions

View file

@@ -0,0 +1,3 @@
def get_app_url(config):
""" Returns the application's URL, based on the given config. """
return '%s://%s' % (config['PREFERRED_URL_SCHEME'], config['SERVER_HOSTNAME'])
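A quick sanity check of the new helper (a minimal sketch; the config values are invented):

    config = {'PREFERRED_URL_SCHEME': 'https', 'SERVER_HOSTNAME': 'quay.example.com'}
    assert get_app_url(config) == 'https://quay.example.com'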

View file

@@ -63,7 +63,16 @@ class ParsedDockerfile(object):
def strip_comments(contents):
-  lines = [line for line in contents.split('\n') if not line.startswith(COMMENT_CHARACTER)]
+  lines = []
+  for line in contents.split('\n'):
+    index = line.find(COMMENT_CHARACTER)
+    if index < 0:
+      lines.append(line)
+      continue
+    line = line[:index]
+    lines.append(line)
return '\n'.join(lines)
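Note the behavior change: the old one-liner only dropped lines that started with the comment character, while the rewrite also truncates trailing comments. A rough before/after, assuming COMMENT_CHARACTER is '#':

    contents = 'FROM ubuntu # base image\n# whole-line comment\nRUN echo hi'
    strip_comments(contents)
    # old: 'FROM ubuntu # base image\nRUN echo hi'
    # new: 'FROM ubuntu \n\nRUN echo hi'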
@@ -72,15 +81,22 @@ def join_continued_lines(contents):
def parse_dockerfile(contents):
# If we receive ASCII, translate into unicode.
try:
contents = contents.decode('utf-8')
except ValueError:
# Already unicode or unable to convert.
pass
contents = join_continued_lines(strip_comments(contents))
-  lines = [line for line in contents.split('\n') if len(line) > 0]
+  lines = [line.strip() for line in contents.split('\n') if len(line) > 0]
commands = []
for line in lines:
match_command = COMMAND_REGEX.match(line)
if match_command:
-      command = match_command.group(1).decode('utf-8').upper()
-      parameters = match_command.group(2).decode('utf-8')
+      command = match_command.group(1).upper()
+      parameters = match_command.group(2)
commands.append({
'command': command,
@@ -88,9 +104,3 @@ def parse_dockerfile(contents):
})
return ParsedDockerfile(commands)
-def serialize_dockerfile(parsed_dockerfile):
-  return '\n'.join([' '.join([command['command'].encode('utf-8'),
-                              command['parameters'].encode('utf-8')])
-                    for command in parsed_dockerfile.commands])
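End to end, the parser can be exercised roughly like this (a sketch; that ParsedDockerfile exposes a commands list is inferred from the serializer removed above):

    parsed = parse_dockerfile('FROM ubuntu:14.04\nRUN apt-get update # refresh\n')
    # parsed.commands == [{'command': 'FROM', 'parameters': 'ubuntu:14.04'},
    #                     {'command': 'RUN', 'parameters': 'apt-get update'}]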

View file

@@ -13,10 +13,11 @@ def run_slackwebhook_migration():
encountered = set()
while True:
-    found = list(RepositoryNotification.select().where(
-        RepositoryNotification.method == slack_method,
-        RepositoryNotification.config_json ** "%subdomain%",
-        ~(RepositoryNotification.config_json ** "%url%")))
+    found = list(RepositoryNotification.select(RepositoryNotification.uuid,
+                                               RepositoryNotification.config_json)
+                 .where(RepositoryNotification.method == slack_method,
+                        RepositoryNotification.config_json ** "%subdomain%",
+                        ~(RepositoryNotification.config_json ** "%url%")))
found = [f for f in found if not f.uuid in encountered]

View file

@@ -10,10 +10,16 @@ class GeneratorFile(object):
self._generator = generator
self._closed = False
self._buf = ''
self._position = 0
def __iter__(self):
return self
def tell(self):
"""Return the file's current position, like stdio's ftell()."""
_complain_ifclosed(self._closed)
return self._position
def next(self):
"""A file object is its own iterator, for example iter(f) returns f
(unless f is closed). When a file is used as an iterator, typically
@@ -62,8 +68,11 @@ class GeneratorFile(object):
self._buf = ''
returned = buf
self._position = self._position + len(returned)
return returned
def seek(self):
raise NotImplementedError
def close(self):
self._closed = True
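A usage sketch for the new position tracking (assuming the constructor takes the generator directly and read() exists, as the tail of this diff suggests; the generator is invented):

    def chunks():
      yield 'hello '
      yield 'world'

    f = GeneratorFile(chunks())
    f.read(6)   # should return 'hello '
    f.tell()    # 6, now that _position is tracked
    f.seek()    # still raises NotImplementedError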

View file

@@ -0,0 +1,113 @@
import zlib
import string
BLOCK_SIZE = 16384
"""Read block size"""
WINDOW_BUFFER_SIZE = 16 + zlib.MAX_WBITS
"""zlib window buffer size, set to gzip's format"""
class GzipInputStream(object):
"""
Simple class that allows streaming reads from GZip files.

Python 2.x's gzip.GzipFile relies on .seek() and .tell(), so it
cannot read from a non-seekable stream (@see: http://bo4.me/YKWSsL).
Adapted from: https://gist.github.com/beaufour/4205533
"""
def __init__(self, fileobj):
"""
Initialize with the given file-like object.
@param fileobj: file-like object,
"""
self._file = fileobj
self._zip = zlib.decompressobj(WINDOW_BUFFER_SIZE)
self._offset = 0 # position in unzipped stream
self._data = ""
def __fill(self, num_bytes):
"""
Fill the internal buffer with 'num_bytes' of data.
@param num_bytes: int, number of bytes to read in (0 = everything)
"""
if not self._zip:
return
while not num_bytes or len(self._data) < num_bytes:
data = self._file.read(BLOCK_SIZE)
if not data:
self._data = self._data + self._zip.flush()
self._zip = None # no more data
break
self._data = self._data + self._zip.decompress(data)
def __iter__(self):
return self
def seek(self, offset, whence=0):
if whence == 0:
position = offset
elif whence == 1:
position = self._offset + offset
else:
raise IOError("Illegal argument")
if position < self._offset:
raise IOError("Cannot seek backwards")
# skip forward, in blocks
while position > self._offset:
if not self.read(min(position - self._offset, BLOCK_SIZE)):
break
def tell(self):
return self._offset
def read(self, size=0):
self.__fill(size)
if size:
data = self._data[:size]
self._data = self._data[size:]
else:
data = self._data
self._data = ""
self._offset = self._offset + len(data)
return data
def next(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readline(self):
# make sure we have an entire line
while self._zip and "\n" not in self._data:
self.__fill(len(self._data) + 512)
pos = string.find(self._data, "\n") + 1
if pos <= 0:
return self.read()
return self.read(pos)
def readlines(self):
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def close(self):
self._file.close()
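A self-contained sketch of streaming a gzip payload without seeking (Python 2, matching the Queue-era code elsewhere in this commit):

    import gzip
    from StringIO import StringIO

    # Build a small gzip payload in memory.
    buf = StringIO()
    writer = gzip.GzipFile(fileobj=buf, mode='wb')
    writer.write('line one\nline two\n')
    writer.close()
    buf.seek(0)

    stream = GzipInputStream(buf)
    print stream.readline()  # 'line one\n'
    print stream.read()      # 'line two\n'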

View file

@@ -1,35 +1,46 @@
import logging
import boto
import time
from Queue import Queue
from Queue import Empty
from threading import Thread
logger = logging.getLogger(__name__)
-def get_queue(app):
-  """
-  Returns a queue to a new CloudWatchSender.
-  """
-  access_key = app.config['CLOUDWATCH_AWS_ACCESS_KEY']
-  secret_key = app.config['CLOUDWATCH_AWS_SECRET_KEY']
-  queue = Queue()
-  sender = CloudWatchSender(queue, access_key, secret_key)
+MAX_BATCH_METRICS = 20
+
+# Sleep for this much time between failed send requests.
+# This prevents hammering cloudwatch when it's not available.
+FAILED_SEND_SLEEP_SECS = 5
+
+def start_cloudwatch_sender(metrics, app):
+  """
+  Starts sending from metrics to a new CloudWatchSender.
+  """
+  access_key = app.config.get('CLOUDWATCH_AWS_ACCESS_KEY')
+  secret_key = app.config.get('CLOUDWATCH_AWS_SECRET_KEY')
+  namespace = app.config.get('CLOUDWATCH_NAMESPACE')
+
+  if not namespace:
+    logger.debug('CloudWatch not configured')
+    return
+
+  sender = CloudWatchSender(metrics, access_key, secret_key, namespace)
   sender.start()
-  return queue
class CloudWatchSender(Thread):
"""
CloudWatchSender loops indefinitely, pulling metrics off of a queue and sending them to CloudWatch.
"""
-  def __init__(self, request_queue, aws_access_key, aws_secret_key):
+  def __init__(self, metrics, aws_access_key, aws_secret_key, namespace):
Thread.__init__(self)
self.daemon = True
self._aws_access_key = aws_access_key
self._aws_secret_key = aws_secret_key
-    self._put_metrics_queue = request_queue
+    self._metrics = metrics
+    self._namespace = namespace
def run(self):
try:
@@ -37,11 +48,46 @@ class CloudWatchSender(Thread):
connection = boto.connect_cloudwatch(self._aws_access_key, self._aws_secret_key)
except:
logger.exception('Failed to connect to CloudWatch.')
self._metrics.enable()
while True:
-      put_metric_args, kwargs = self._put_metrics_queue.get()
-      logger.debug('Got queued put metrics request.')
+      metrics = {
+        'name': [],
+        'value': [],
+        'unit': [],
+        'timestamp': [],
+        'dimensions': [],
+      }
+
+      metric = self._metrics.get()
+      append_metric(metrics, metric)
+
+      while len(metrics['name']) < MAX_BATCH_METRICS:
+        try:
+          metric = self._metrics.get_nowait()
+          append_metric(metrics, metric)
+        except Empty:
+          break
try:
-        connection.put_metric_data(*put_metric_args, **kwargs)
+        connection.put_metric_data(self._namespace, **metrics)
+        logger.debug('Sent %d CloudWatch metrics', len(metrics['name']))
except:
-        logger.exception('Failed to write to CloudWatch')
+        for i in range(len(metrics['name'])):
+          self._metrics.put(metrics['name'][i], metrics['value'][i],
+                            unit=metrics['unit'][i],
+                            dimensions=metrics['dimensions'][i],
+                            timestamp=metrics['timestamp'][i],
+                           )
+
+        logger.exception('Failed to write to CloudWatch: %s', metrics)
+        logger.debug('Attempted to requeue %d metrics.', len(metrics['name']))
+
+        time.sleep(FAILED_SEND_SLEEP_SECS)
def append_metric(metrics, m):
name, value, kwargs = m
metrics['name'].append(name)
metrics['value'].append(value)
metrics['unit'].append(kwargs.get('unit'))
metrics['dimensions'].append(kwargs.get('dimensions'))
metrics['timestamp'].append(kwargs.get('timestamp'))
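append_metric transposes one (name, value, kwargs) tuple, the same shape MetricQueue.put enqueues, into the column-oriented dict passed to put_metric_data. For instance:

    metrics = {'name': [], 'value': [], 'unit': [], 'timestamp': [], 'dimensions': []}
    append_metric(metrics, ('ResponseTime', 0.42, {'unit': 'Seconds'}))
    # metrics['name'] == ['ResponseTime'], metrics['unit'] == ['Seconds'],
    # metrics['timestamp'] == [None]  (absent kwargs fall back to None)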

util/saas/metricqueue.py Normal file
View file

@@ -0,0 +1,72 @@
import datetime
import logging
import time
from functools import wraps
from Queue import Queue, Full
from flask import g, request
logger = logging.getLogger(__name__)
class MetricQueue(object):
def __init__(self):
self._queue = None
def enable(self, maxsize=10000):
self._queue = Queue(maxsize)
def put(self, name, value, **kwargs):
if self._queue is None:
logger.debug('No metric queue %s %s %s', name, value, kwargs)
return
try:
kwargs.setdefault('timestamp', datetime.datetime.now())
kwargs.setdefault('dimensions', {})
self._queue.put_nowait((name, value, kwargs))
except Full:
logger.error('Metric queue full')
def get(self):
return self._queue.get()
def get_nowait(self):
return self._queue.get_nowait()
def time_blueprint(bp, metric_queue):
bp.before_request(time_before_request)
bp.after_request(time_after_request(bp.name, metric_queue))
def time_before_request():
g._request_start_time = time.time()
def time_after_request(name, metric_queue):
def f(r):
start = getattr(g, '_request_start_time', None)
if start is None:
return r
dur = time.time() - start
dims = {'endpoint': request.endpoint}
metric_queue.put('ResponseTime', dur, dimensions=dims, unit='Seconds')
metric_queue.put('ResponseCode', r.status_code, dimensions=dims)
if r.status_code < 200 or r.status_code >= 300:
metric_queue.put('Non200Response', 1, dimensions={'name': name})
return r
return f
def time_decorator(name, metric_queue):
after = time_after_request(name, metric_queue)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
time_before_request()
rv = func(*args, **kwargs)
after(rv)
return rv
return wrapper
return decorator
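A wiring sketch (the blueprint here is invented for illustration). Until enable() is called, put() just logs and drops the metric, so this instrumentation is safe to leave in place when CloudWatch isn't configured:

    from flask import Blueprint

    metric_queue = MetricQueue()
    metric_queue.enable()  # without this, put() is a logged no-op

    api = Blueprint('api', __name__)
    time_blueprint(api, metric_queue)  # emits ResponseTime/ResponseCode per request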

View file

@@ -1,56 +0,0 @@
import logging
from util.saas.cloudwatch import get_queue
logger = logging.getLogger(__name__)
class NullReporter(object):
def report(self, *args):
pass
class CloudWatchReporter(object):
""" CloudWatchReporter reports work queue metrics to CloudWatch """
def __init__(self, request_queue, namespace, need_capacity_name, build_percent_name):
self._namespace = namespace
self._need_capacity_name = need_capacity_name
self._build_percent_name = build_percent_name
self._put_metrics_queue = request_queue
def _send_to_queue(self, *args, **kwargs):
self._put_metrics_queue.put((args, kwargs))
def report(self, currently_processing, running_count, total_count):
logger.debug('Worker indicated %s running count and %s total count', running_count,
total_count)
need_capacity_count = total_count - running_count
self._send_to_queue(self._namespace, self._need_capacity_name, need_capacity_count,
unit='Count')
building_percent = 100 if currently_processing else 0
self._send_to_queue(self._namespace, self._build_percent_name, building_percent,
unit='Percent')
class QueueMetrics(object):
"""
QueueMetrics initializes a reporter for recording metrics of work queues.
"""
def __init__(self, app=None):
self._app = app
self._reporter = NullReporter()
if app is not None:
reporter_type = app.config.get('QUEUE_METRICS_TYPE', 'Null')
if reporter_type == 'CloudWatch':
namespace = app.config['QUEUE_METRICS_NAMESPACE']
req_capacity_name = app.config['QUEUE_METRICS_CAPACITY_SHORTAGE_NAME']
build_percent_name = app.config['QUEUE_METRICS_BUILD_PERCENT_NAME']
request_queue = get_queue(app)
self._reporter = CloudWatchReporter(request_queue, namespace, req_capacity_name,
build_percent_name)
def __getattr__(self, name):
return getattr(self._reporter, name, None)

View file

@@ -0,0 +1,21 @@
from jwt import PyJWT
from jwt.exceptions import (
InvalidTokenError, DecodeError, InvalidAudienceError, ExpiredSignatureError,
ImmatureSignatureError, InvalidIssuedAtError, InvalidIssuerError, MissingRequiredClaimError
)
class StrictJWT(PyJWT):
@staticmethod
def _get_default_options():
# Weird syntax to call super on a staticmethod
defaults = super(StrictJWT, StrictJWT)._get_default_options()
defaults.update({
'require_exp': True,
'require_iat': True,
'require_nbf': True,
})
return defaults
decode = StrictJWT().decode
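With these defaults, decode() rejects tokens that omit any of exp, iat, or nbf, raising MissingRequiredClaimError. A sketch against the PyJWT 1.x-era API (key and claims invented):

    import time
    import jwt  # PyJWT, which this module wraps

    now = int(time.time())
    token = jwt.encode({'exp': now + 60, 'iat': now, 'nbf': now}, 'secret')

    decode(token, 'secret')  # accepted
    decode(jwt.encode({'sub': 'x'}, 'secret'), 'secret')  # MissingRequiredClaimError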