diff --git a/data/database.py b/data/database.py
index 771cc57d7..765a4948e 100644
--- a/data/database.py
+++ b/data/database.py
@@ -487,12 +487,8 @@ class EmailConfirmation(BaseModel):
 
 
 class ImageStorage(BaseModel):
   uuid = CharField(default=uuid_generator, index=True, unique=True)
   checksum = CharField(null=True)
-  created = DateTimeField(null=True)
-  comment = TextField(null=True)
-  command = TextField(null=True)
   image_size = BigIntegerField(null=True)
   uncompressed_size = BigIntegerField(null=True)
-  aggregate_size = BigIntegerField(null=True)
   uploading = BooleanField(default=True, null=True)
diff --git a/data/model/image.py b/data/model/image.py
index 56583751e..c73198e60 100644
--- a/data/model/image.py
+++ b/data/model/image.py
@@ -285,24 +285,15 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
   # We cleanup any old checksum in case it's a retry after a fail
   fetched.storage.checksum = None
 
-  now = datetime.now()
-  # TODO stop writing to storage when all readers are removed
-  fetched.storage.created = now
-  fetched.created = now
+  fetched.created = datetime.now()
 
   if created_date_str is not None:
     try:
-      # TODO stop writing to storage fields when all readers are removed
-      parsed_created_time = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
-      fetched.created = parsed_created_time
-      fetched.storage.created = parsed_created_time
+      fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
     except:
       # parse raises different exceptions, so we cannot use a specific kind of handler here.
       pass
 
-  # TODO stop writing to storage fields when all readers are removed
-  fetched.storage.comment = comment
-  fetched.storage.command = command
   fetched.comment = comment
   fetched.command = command
   fetched.v1_json_metadata = v1_json_metadata
@@ -346,17 +337,11 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
                        .where(Image.id << ancestors)
                        .scalar())
 
-      # TODO stop writing to storage when all readers are removed
       if ancestor_size is not None:
-        # total_size = image_size + parent_image.storage.aggregate_size
-        total_size = ancestor_size + image_size
-        image.storage.aggregate_size = total_size
-        image.aggregate_size = total_size
+        image.aggregate_size = ancestor_size + image_size
     except Image.DoesNotExist:
       pass
   else:
-    # TODO stop writing to storage when all readers are removed
-    image.storage.aggregate_size = image_size
     image.aggregate_size = image_size
 
   image.storage.save()
diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py
index 636da6f30..3d049c757 100644
--- a/endpoints/v1/registry.py
+++ b/endpoints/v1/registry.py
@@ -521,16 +521,11 @@ def put_image_json(namespace, repository, image_id):
   model.image.set_image_metadata(image_id, namespace, repository, data.get('created'),
                                  data.get('comment'), command, v1_metadata, parent_image)
 
-  logger.debug('Putting json path')
-  uuid = repo_image.storage.uuid
-  json_path = store.image_json_path(uuid)
-  store.put_content(repo_image.storage.locations, json_path, request.data)
-
   logger.debug('Generating image ancestry')
 
   try:
-    generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid,
-                      parent_locations)
+    generate_ancestry(image_id, repo_image.storage.uuid, repo_image.storage.locations, parent_id,
+                      parent_uuid, parent_locations)
   except IOError as ioe:
     logger.debug('Error when generating ancestry: %s', ioe.message)
     abort(404)
diff --git a/initdb.py b/initdb.py
index 29b151601..33b8e2b5a 100644
--- a/initdb.py
+++ b/initdb.py
@@ -88,8 +88,7 @@ def __create_subtree(repo, structure, creator_username, parent, tag_map):
   # Write some data for the storage.
   if os.environ.get('WRITE_STORAGE_FILES'):
     storage_paths = StoragePaths()
-    paths = [storage_paths.image_json_path,
-             storage_paths.image_ancestry_path,
+    paths = [storage_paths.image_ancestry_path,
             storage_paths.image_layer_path]
 
     for path_builder in paths:
diff --git a/storage/basestorage.py b/storage/basestorage.py
index 9406fffec..0b822b054 100644
--- a/storage/basestorage.py
+++ b/storage/basestorage.py
@@ -19,10 +19,6 @@ class StoragePaths(object):
   def image_path(self, storage_uuid):
     return '{0}/{1}/'.format(self.shared_images, storage_uuid)
 
-  def image_json_path(self, storage_uuid):
-    base_path = self.image_path(storage_uuid)
-    return '{0}json'.format(base_path)
-
   def image_layer_path(self, storage_uuid):
     base_path = self.image_path(storage_uuid)
     return '{0}layer'.format(base_path)
diff --git a/test/data/test.db b/test/data/test.db
index 762ad4378..5e5246bba 100644
Binary files a/test/data/test.db and b/test/data/test.db differ
diff --git a/tools/migrateimage.py b/tools/migrateimage.py
index 950f171a4..7cc3fbe22 100644
--- a/tools/migrateimage.py
+++ b/tools/migrateimage.py
@@ -10,7 +10,6 @@ logger = logging.getLogger(__name__)
 
 
 PATHSPECS = [
-  (storage.image_json_path, True),
   (storage.image_layer_path, True),
   (storage.image_ancestry_path, True),
   (storage.image_file_trie_path, False),
diff --git a/util/migrate/backfill_v1_metadata.py b/util/migrate/backfill_v1_metadata.py
index be7a37c93..f0455758c 100644
--- a/util/migrate/backfill_v1_metadata.py
+++ b/util/migrate/backfill_v1_metadata.py
@@ -11,6 +11,11 @@ from data import model
 
 logger = logging.getLogger(__name__)
 
+def image_json_path(storage_uuid):
+  base_path = storage.image_path(storage_uuid)
+  return '{0}json'.format(base_path)
+
+
 def backfill_v1_metadata():
   """ Copies metadata from image storages to their images. """
   logger.debug('Image v1 metadata backfill: Began execution')
@@ -48,7 +53,7 @@ def backfill_v1_metadata():
     repo_image = repo_image_list[0]
 
     uuid = repo_image.storage.uuid
-    json_path = storage.image_json_path(uuid)
+    json_path = image_json_path(uuid)
 
     logger.debug('Updating image: %s from: %s', repo_image.id, json_path)
     try:
diff --git a/workers/storagereplication.py b/workers/storagereplication.py
index d888a8926..7b0dd4562 100644
--- a/workers/storagereplication.py
+++ b/workers/storagereplication.py
@@ -49,8 +49,7 @@ class StorageReplicationWorker(QueueWorker):
     logger.debug('Copying image storage %s to location %s', partial_storage.uuid, location)
 
     # Copy the various paths.
-    paths = [storage_paths.image_json_path,
-             storage_paths.image_ancestry_path,
+    paths = [storage_paths.image_ancestry_path,
             storage_paths.image_layer_path]
 
     try: