Merge pull request #885 from jakedt/python-registry-v2

Python registry v2 mega merge
Jake Moshenko 2015-11-16 16:15:40 -05:00
commit 7205bf5e7f
54 changed files with 3256 additions and 837 deletions

@@ -67,7 +67,7 @@ def backfill_sizes_from_data():
       decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
       uncompressed_size = 0
-      with store.stream_read_file(with_locs.locations, store.image_layer_path(uuid)) as stream:
+      with store.stream_read_file(with_locs.locations, store.v1_image_layer_path(uuid)) as stream:
         while True:
           current_data = stream.read(CHUNK_SIZE)
           if len(current_data) == 0:
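For context on this hunk: the backfill computes each V1 layer's uncompressed size by streaming the stored gzip blob through a zlib decompressor rather than loading it whole. A minimal standalone sketch of that pattern, not part of the diff (ZLIB_GZIP_WINDOW is assumed to be zlib.MAX_WBITS | 32, which lets zlib auto-detect the gzip header, and the CHUNK_SIZE value is an assumption):

import zlib

ZLIB_GZIP_WINDOW = zlib.MAX_WBITS | 32  # assumed: auto-detect the gzip header
CHUNK_SIZE = 5 * 1024 * 1024  # assumed chunk size

def compute_uncompressed_size(stream):
  # Stream the compressed blob through zlib, counting decompressed bytes
  # without ever holding the whole layer in memory.
  decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
  uncompressed_size = 0
  while True:
    current_data = stream.read(CHUNK_SIZE)
    if len(current_data) == 0:
      break
    uncompressed_size += len(decompressor.decompress(current_data))
  return uncompressed_size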

util/registry/filelike.py (new file, +160 lines)

@@ -0,0 +1,160 @@
WHENCE_ABSOLUTE = 0
WHENCE_RELATIVE = 1
WHENCE_RELATIVE_END = 2

READ_UNTIL_END = -1


class BaseStreamFilelike(object):
  def __init__(self, fileobj):
    self._fileobj = fileobj
    self._cursor_position = 0

  def close(self):
    self._fileobj.close()

  def read(self, size=READ_UNTIL_END):
    buf = self._fileobj.read(size)
    if buf is None:
      return None
    self._cursor_position += len(buf)
    return buf

  def tell(self):
    return self._cursor_position

  def seek(self, index, whence=WHENCE_ABSOLUTE):
    num_bytes_to_ff = 0
    if whence == WHENCE_ABSOLUTE:
      if index < self._cursor_position:
        raise IOError('Cannot seek backwards')
      num_bytes_to_ff = index - self._cursor_position

    elif whence == WHENCE_RELATIVE:
      if index < 0:
        raise IOError('Cannot seek backwards')
      num_bytes_to_ff = index

    elif whence == WHENCE_RELATIVE_END:
      raise IOError('Stream does not have a known end point')

    bytes_forward = num_bytes_to_ff
    while num_bytes_to_ff > 0:
      buf = self._fileobj.read(num_bytes_to_ff)
      if not buf:
        raise IOError('Seek past end of file')
      num_bytes_to_ff -= len(buf)

    self._cursor_position += bytes_forward
    return bytes_forward


class SocketReader(BaseStreamFilelike):
  def __init__(self, fileobj):
    super(SocketReader, self).__init__(fileobj)
    self.handlers = []

  def add_handler(self, handler):
    self.handlers.append(handler)

  def read(self, size=READ_UNTIL_END):
    buf = super(SocketReader, self).read(size)
    for handler in self.handlers:
      handler(buf)
    return buf


def wrap_with_handler(in_fp, handler):
  wrapper = SocketReader(in_fp)
  wrapper.add_handler(handler)
  return wrapper


class FilelikeStreamConcat(object):
  """ A file-like object which concatenates all the file-like objects in the specified generator
      into a single stream.
  """
  def __init__(self, file_generator):
    self._file_generator = file_generator
    self._current_file = file_generator.next()
    self._current_position = 0
    self._closed = False

  def tell(self):
    return self._current_position

  def close(self):
    self._closed = True

  def read(self, size=READ_UNTIL_END):
    buf = ''
    current_size = size

    while size == READ_UNTIL_END or len(buf) < size:
      current_buf = self._current_file.read(current_size)
      if current_buf:
        buf += current_buf
        self._current_position += len(current_buf)
        if size != READ_UNTIL_END:
          current_size -= len(current_buf)
      else:
        # That file was out of data; prime a new one.
        self._current_file.close()
        try:
          self._current_file = self._file_generator.next()
        except StopIteration:
          return buf

    return buf


class StreamSlice(BaseStreamFilelike):
  """ A file-like object which exposes a slice of the data in the specified file object as if the
      slice were its own file.
  """
  def __init__(self, fileobj, start_offset=0, end_offset_exclusive=READ_UNTIL_END):
    super(StreamSlice, self).__init__(fileobj)
    self._end_offset_exclusive = end_offset_exclusive
    self._start_offset = start_offset

    if start_offset > 0:
      self.seek(start_offset)

  def read(self, size=READ_UNTIL_END):
    if self._end_offset_exclusive == READ_UNTIL_END:
      # We weren't asked to limit the end of the stream.
      return super(StreamSlice, self).read(size)

    # Compute the max bytes to read until the end or until we reach the user requested max.
    max_bytes_to_read = self._end_offset_exclusive - super(StreamSlice, self).tell()
    if size != READ_UNTIL_END:
      max_bytes_to_read = min(max_bytes_to_read, size)

    return super(StreamSlice, self).read(max_bytes_to_read)

  def _file_min(self, first, second):
    if first == READ_UNTIL_END:
      return second
    if second == READ_UNTIL_END:
      return first
    return min(first, second)

  def tell(self):
    return super(StreamSlice, self).tell() - self._start_offset

  def seek(self, index, whence=WHENCE_ABSOLUTE):
    index = self._file_min(self._end_offset_exclusive, index)
    super(StreamSlice, self).seek(index, whence)


class LimitingStream(StreamSlice):
  """ A file-like object which mimics the specified file stream being limited to the given number
      of bytes. All reads past that limit (if specified) act as if the file has no additional
      data.
  """
  def __init__(self, fileobj, read_limit=READ_UNTIL_END):
    super(LimitingStream, self).__init__(fileobj, 0, read_limit)
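To make the new primitives concrete, a hedged usage sketch follows (not part of the diff; Python 2 era code, matching the file's use of generator.next(), with StringIO standing in for the blob streams the registry actually wraps):

import hashlib
from StringIO import StringIO

# FilelikeStreamConcat: stitch a generator of streams into one file-like object.
parts = (StringIO(data) for data in ['hello ', 'world', '!'])
assert FilelikeStreamConcat(parts).read() == 'hello world!'

# StreamSlice: expose bytes [6, 11) of the underlying stream as its own file.
sliced = StreamSlice(StringIO('hello world!'), 6, 11)
assert sliced.read() == 'world'
assert sliced.tell() == 5

# LimitingStream: a StreamSlice anchored at offset 0.
assert LimitingStream(StringIO('hello world!'), 5).read() == 'hello'

# wrap_with_handler: observe bytes as they are read, e.g. for hashing on the fly.
digest = hashlib.sha256()
reader = wrap_with_handler(StringIO('some payload'), digest.update)
reader.read()
assert digest.hexdigest() == hashlib.sha256('some payload').hexdigest()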

@@ -11,6 +11,9 @@ class GzipWrap(object):
     self.is_done = False

   def read(self, size=-1):
+    if size is None or size < 0:
+      raise Exception('Call to GzipWrap with unbound size will result in poor performance')
+
     # If the buffer already has enough bytes, then simply pop them off of
     # the beginning and return them.
     if len(self.buffer) >= size or self.is_done:
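Why the new guard matters, briefly: GzipWrap produces compressed bytes as it is read, so an unbounded read() would buffer the entire compressed stream at once; the new exception forces callers to consume it in chunks. A hedged sketch of that consumption pattern (gzip_wrap stands in for a constructed GzipWrap instance, whose constructor is not shown in this diff; CHUNK_SIZE is an assumed value):

CHUNK_SIZE = 64 * 1024  # assumed chunk size

def copy_gzipped(gzip_wrap, out_fp):
  # Always pass an explicit size; unbound reads now raise an exception.
  while True:
    chunk = gzip_wrap.read(CHUNK_SIZE)
    if not chunk:
      break
    out_fp.write(chunk)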

@@ -1,3 +1,4 @@
+from datetime import datetime, timedelta
 from jwt import PyJWT
 from jwt.exceptions import (
     InvalidTokenError, DecodeError, InvalidAudienceError, ExpiredSignatureError,
@@ -14,8 +15,41 @@ class StrictJWT(PyJWT):
       'require_exp': True,
       'require_iat': True,
       'require_nbf': True,
+      'exp_max_s': None,
     })
     return defaults

+  def _validate_claims(self, payload, options, audience=None, issuer=None, leeway=0, **kwargs):
+    if options.get('exp_max_s') is not None:
+      if 'verify_expiration' in kwargs and not kwargs.get('verify_expiration'):
+        raise ValueError('exp_max_s option implies verify_expiration')
+      options['verify_exp'] = True
+
+    # Do all of the other checks.
+    super(StrictJWT, self)._validate_claims(payload, options, audience, issuer, leeway, **kwargs)
+
+    if 'exp' in payload and options.get('exp_max_s') is not None:
+      # Validate that the expiration was not more than exp_max_s seconds after the issue time,
+      # or, in the absence of an issue time, more than exp_max_s in the future from now.
+      # This works because the parent method already checked the type of exp.
+      expiration = datetime.utcfromtimestamp(int(payload['exp']))
+      max_signed_s = options.get('exp_max_s')
+
+      start_time = datetime.utcnow()
+      if 'iat' in payload:
+        start_time = datetime.utcfromtimestamp(int(payload['iat']))
+
+      if expiration > start_time + timedelta(seconds=max_signed_s):
+        raise InvalidTokenError('Token was signed for more than %s seconds from %s' %
+                                (max_signed_s, start_time))
+
+
+def exp_max_s_option(max_exp_s):
+  return {
+    'exp_max_s': max_exp_s,
+  }
+
+
+decode = StrictJWT().decode
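As a usage sketch (not part of the diff; the key, claims, and 600-second cap are illustrative, and PyJWT's encode is assumed available to mint a test token), the new exp_max_s option bounds a token's validity window on top of StrictJWT's required exp/iat/nbf claims:

import time
import jwt  # PyJWT

now = int(time.time())
claims = {'aud': 'registry', 'iat': now, 'nbf': now, 'exp': now + 300}
token = jwt.encode(claims, 'secret', algorithm='HS256')

# Accepted: valid for 300 seconds, under the 600-second cap.
payload = decode(token, 'secret', algorithms=['HS256'], audience='registry',
                 options=exp_max_s_option(600))

# Rejected: signed for a full hour, exceeding the cap; raises InvalidTokenError.
claims['exp'] = now + 3600
long_token = jwt.encode(claims, 'secret', algorithm='HS256')
decode(long_token, 'secret', algorithms=['HS256'], audience='registry',
       options=exp_max_s_option(600))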