- Update the migrations tool to verify migrations work up and down for both MySQL and PostgreSQL.
- Add migrations for the squashed image tables and for backfilling the uncompressed sizes
- Make sure the gzip stream uses a max length when determining the uncompressed size
This commit is contained in:
parent
f38ce51943
commit
f4daa5e97b
10 changed files with 152 additions and 43 deletions
|
@ -9,6 +9,8 @@ import zlib
|
|||
# http://stackoverflow.com/questions/3122145/zlib-error-error-3-while-decompressing-incorrect-header-check/22310760#22310760
|
||||
ZLIB_GZIP_WINDOW = zlib.MAX_WBITS | 32
|
||||
|
||||
CHUNK_SIZE = 5 * 1024 * 1024
|
||||
|
||||
class SizeInfo(object):
|
||||
def __init__(self):
|
||||
self.size = 0
|
||||
|
@ -23,6 +25,11 @@ def calculate_size_handler():
|
|||
decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
|
||||
|
||||
def fn(buf):
|
||||
size_info.size += len(decompressor.decompress(buf))
|
||||
# Note: We set a maximum CHUNK_SIZE to prevent the decompress from taking too much
|
||||
# memory. As a result, we have to loop until the unconsumed tail is empty.
|
||||
current_data = buf
|
||||
while len(current_data) > 0:
|
||||
size_info.size += len(decompressor.decompress(current_data, CHUNK_SIZE))
|
||||
current_data = decompressor.unconsumed_tail
|
||||
|
||||
return size_info, fn
|
||||
|
|
Reference in a new issue