From 55460cdcbaeb03f5a46eaf749fdc026c5bc86d87 Mon Sep 17 00:00:00 2001
From: Jake Moshenko
Date: Tue, 7 Oct 2014 10:22:02 -0400
Subject: [PATCH] Better error handling in the uncompressedsize script.

---
 tools/uncompressedsize.py | 61 ++++++++++++++++++++-------------------
 1 file changed, 32 insertions(+), 29 deletions(-)

diff --git a/tools/uncompressedsize.py b/tools/uncompressedsize.py
index 1182f0838..0eeaddaae 100644
--- a/tools/uncompressedsize.py
+++ b/tools/uncompressedsize.py
@@ -30,41 +30,44 @@ def backfill_sizes_from_data():
   for record in batch_ids:
     uuid = record.uuid
 
-    with_locations = model.get_storage_by_uuid(uuid)
-    if with_locations.uncompressed_size is not None:
-      logger.debug('Somebody else already filled this in for us: %s', uuid)
-      continue
-
-    # Read the layer from backing storage and calculate the uncompressed size.
-    logger.debug('Loading data: %s (%s bytes)', uuid, with_locations.image_size)
-    decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
-
-    uncompressed_size = 0
-    with store.stream_read_file(with_locations.locations, store.image_layer_path(uuid)) as stream:
-      while True:
-        current_data = stream.read(CHUNK_SIZE)
-        if len(current_data) == 0:
-          break
-
-        uncompressed_size += len(decompressor.decompress(current_data))
-
-    # Write the size to the image storage. We do so under a transaction AFTER checking to
-    # make sure the image storage still exists and has not changed.
-    logger.debug('Writing entry: %s. Size: %s', uuid, uncompressed_size)
-    with app.config['DB_TRANSACTION_FACTORY'](db):
-      try:
-        current_record = model.get_storage_by_uuid(uuid)
-      except model.InvalidImageException:
-        logger.warning('Storage with uuid no longer exists: %s', uuid)
+    try:
+      with_locs = model.get_storage_by_uuid(uuid)
+      if with_locs.uncompressed_size is not None:
+        logger.debug('Somebody else already filled this in for us: %s', uuid)
         continue
 
-      if not current_record.uploading and current_record.uncompressed_size == None:
-        current_record.uncompressed_size = uncompressed_size
-        current_record.save()
+      # Read the layer from backing storage and calculate the uncompressed size.
+      logger.debug('Loading data: %s (%s bytes)', uuid, with_locs.image_size)
+      decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
+      uncompressed_size = 0
+      with store.stream_read_file(with_locs.locations, store.image_layer_path(uuid)) as stream:
+        while True:
+          current_data = stream.read(CHUNK_SIZE)
+          if len(current_data) == 0:
+            break
+
+          uncompressed_size += len(decompressor.decompress(current_data))
+
+      # Write the size to the image storage. We do so under a transaction AFTER checking to
+      # make sure the image storage still exists and has not changed.
+      logger.debug('Writing entry: %s. Size: %s', uuid, uncompressed_size)
+      with app.config['DB_TRANSACTION_FACTORY'](db):
+        current_record = model.get_storage_by_uuid(uuid)
+
+        if not current_record.uploading and current_record.uncompressed_size == None:
+          current_record.uncompressed_size = uncompressed_size
+          current_record.save()
+        else:
+          logger.debug('Somebody else already filled this in for us, after we did the work: %s',
+                       uuid)
+
+    except model.InvalidImageException:
+      logger.warning('Storage with uuid no longer exists: %s', uuid)
 
 
 if __name__ == "__main__":
   logging.basicConfig(level=logging.DEBUG)
   logging.getLogger('boto').setLevel(logging.CRITICAL)
+  logging.getLogger('peewee').setLevel(logging.CRITICAL)
 
   backfill_sizes_from_data()