Stop writing to deprecated columns for image data.

Jake Moshenko 2015-09-17 15:23:33 -04:00
parent ad53bf5671
commit ce94931540
9 changed files with 13 additions and 39 deletions


@@ -487,12 +487,8 @@ class EmailConfirmation(BaseModel):
 class ImageStorage(BaseModel):
   uuid = CharField(default=uuid_generator, index=True, unique=True)
   checksum = CharField(null=True)
-  created = DateTimeField(null=True)
-  comment = TextField(null=True)
-  command = TextField(null=True)
   image_size = BigIntegerField(null=True)
   uncompressed_size = BigIntegerField(null=True)
-  aggregate_size = BigIntegerField(null=True)
   uploading = BooleanField(default=True, null=True)
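
For orientation, a minimal sketch (not part of this diff) of the Image model fields that the writes in this commit now target. The field names are taken from the assignments in the hunks below; the actual model definition may differ:

class Image(BaseModel):
  # Sketch only: fields inferred from the writes elsewhere in this commit.
  docker_image_id = CharField(index=True)      # assumed identifier column
  created = DateTimeField(null=True)           # written by set_image_metadata
  comment = TextField(null=True)               # written by set_image_metadata
  command = TextField(null=True)               # written by set_image_metadata
  aggregate_size = BigIntegerField(null=True)  # written by set_image_size
  v1_json_metadata = TextField(null=True)      # written by set_image_metadata
  storage = ForeignKeyField(ImageStorage)      # blob-level fields stay on ImageStorage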


@@ -285,24 +285,15 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
   # We cleanup any old checksum in case it's a retry after a fail
   fetched.storage.checksum = None
-  now = datetime.now()
-
-  # TODO stop writing to storage when all readers are removed
-  fetched.storage.created = now
-  fetched.created = now
+  fetched.created = datetime.now()
 
   if created_date_str is not None:
     try:
-      # TODO stop writing to storage fields when all readers are removed
-      parsed_created_time = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
-      fetched.created = parsed_created_time
-      fetched.storage.created = parsed_created_time
+      fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
     except:
       # parse raises different exceptions, so we cannot use a specific kind of handler here.
       pass
 
-  # TODO stop writing to storage fields when all readers are removed
-  fetched.storage.comment = comment
-  fetched.storage.command = command
   fetched.comment = comment
   fetched.command = command
   fetched.v1_json_metadata = v1_json_metadata
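
The timestamp handling above normalizes every parsed value to a naive datetime so it stays comparable with datetime.now(). A small standalone illustration using python-dateutil, as the code above does (the sample timestamp string is arbitrary):

from datetime import datetime

import dateutil.parser

# Parse an arbitrary timestamp string and strip the timezone, mirroring
# the normalization in set_image_metadata above.
created = dateutil.parser.parse('2015-09-17T15:23:33-04:00').replace(tzinfo=None)
assert created.tzinfo is None
assert isinstance(created, datetime)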
@@ -346,17 +337,11 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
                              .where(Image.id << ancestors)
                              .scalar())
 
-      # TODO stop writing to storage when all readers are removed
       if ancestor_size is not None:
-        # total_size = image_size + parent_image.storage.aggregate_size
-        total_size = ancestor_size + image_size
-        image.storage.aggregate_size = total_size
-        image.aggregate_size = total_size
+        image.aggregate_size = ancestor_size + image_size
     except Image.DoesNotExist:
       pass
   else:
-    # TODO stop writing to storage when all readers are removed
-    image.storage.aggregate_size = image_size
     image.aggregate_size = image_size
 
   image.storage.save()
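
The select clause that produces ancestor_size sits above this hunk; a hedged reconstruction of the full computation, assuming peewee's fn.Sum over the stored layer sizes (the join and summed column are assumptions; the .where(Image.id << ancestors).scalar() tail is visible in the hunk):

from peewee import fn

# Sketch: total the stored sizes of every ancestor layer, then add this
# layer's own size. `ancestors` holds the ancestor Image ids; `<<` is
# peewee's IN operator.
ancestor_size = (Image
                 .select(fn.Sum(ImageStorage.image_size))
                 .join(ImageStorage)
                 .where(Image.id << ancestors)
                 .scalar())

if ancestor_size is not None:
  image.aggregate_size = ancestor_size + image_size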


@@ -521,16 +521,11 @@ def put_image_json(namespace, repository, image_id):
   model.image.set_image_metadata(image_id, namespace, repository, data.get('created'),
                                  data.get('comment'), command, v1_metadata, parent_image)
 
-  logger.debug('Putting json path')
-  uuid = repo_image.storage.uuid
-  json_path = store.image_json_path(uuid)
-  store.put_content(repo_image.storage.locations, json_path, request.data)
-
   logger.debug('Generating image ancestry')
 
   try:
-    generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid,
-                      parent_locations)
+    generate_ancestry(image_id, repo_image.storage.uuid, repo_image.storage.locations, parent_id,
+                      parent_uuid, parent_locations)
   except IOError as ioe:
     logger.debug('Error when generating ancestry: %s', ioe.message)
     abort(404)


@@ -88,8 +88,7 @@ def __create_subtree(repo, structure, creator_username, parent, tag_map):
   # Write some data for the storage.
   if os.environ.get('WRITE_STORAGE_FILES'):
     storage_paths = StoragePaths()
-    paths = [storage_paths.image_json_path,
-             storage_paths.image_ancestry_path,
+    paths = [storage_paths.image_ancestry_path,
              storage_paths.image_layer_path]
 
     for path_builder in paths:


@@ -19,10 +19,6 @@ class StoragePaths(object):
   def image_path(self, storage_uuid):
     return '{0}/{1}/'.format(self.shared_images, storage_uuid)
 
-  def image_json_path(self, storage_uuid):
-    base_path = self.image_path(storage_uuid)
-    return '{0}json'.format(base_path)
-
   def image_layer_path(self, storage_uuid):
     base_path = self.image_path(storage_uuid)
     return '{0}layer'.format(base_path)
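
Every per-image file hangs off the prefix built by image_path, so the removed helper only appended a filename. A runnable illustration of the scheme (the shared_images value here is illustrative, not the real constant):

class StoragePaths(object):
  shared_images = 'sharedimages'  # illustrative value only

  def image_path(self, storage_uuid):
    return '{0}/{1}/'.format(self.shared_images, storage_uuid)

  def image_layer_path(self, storage_uuid):
    return '{0}layer'.format(self.image_path(storage_uuid))

paths = StoragePaths()
print(paths.image_layer_path('abc'))  # sharedimages/abc/layer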

Binary file not shown.


@@ -10,7 +10,6 @@ logger = logging.getLogger(__name__)
 
 PATHSPECS = [
-  (storage.image_json_path, True),
   (storage.image_layer_path, True),
   (storage.image_ancestry_path, True),
   (storage.image_file_trie_path, False),


@@ -11,6 +11,11 @@ from data import model
 
 logger = logging.getLogger(__name__)
 
+def image_json_path(storage_uuid):
+  base_path = storage.image_path(storage_uuid)
+  return '{0}json'.format(base_path)
+
+
 def backfill_v1_metadata():
   """ Copies metadata from image storages to their images. """
   logger.debug('Image v1 metadata backfill: Began execution')
@@ -48,7 +53,7 @@ def backfill_v1_metadata():
     repo_image = repo_image_list[0]
     uuid = repo_image.storage.uuid
-    json_path = storage.image_json_path(uuid)
+    json_path = image_json_path(uuid)
 
     logger.debug('Updating image: %s from: %s', repo_image.id, json_path)
     try:
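
Since the storage engine no longer knows about the json path, the backfill keeps a private copy of the helper and reads the legacy blob itself. A hedged sketch of the read-and-copy step that follows (get_content and its locations argument are assumptions about the storage interface, not shown in this diff):

# Sketch: pull the legacy v1 JSON blob out of blob storage and persist it
# on the Image row, so readers stop depending on the removed path helper.
data = storage.get_content(repo_image.storage.locations, json_path)
repo_image.v1_json_metadata = data
repo_image.save()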


@@ -49,8 +49,7 @@ class StorageReplicationWorker(QueueWorker):
     logger.debug('Copying image storage %s to location %s', partial_storage.uuid, location)
 
     # Copy the various paths.
-    paths = [storage_paths.image_json_path,
-             storage_paths.image_ancestry_path,
+    paths = [storage_paths.image_ancestry_path,
              storage_paths.image_layer_path]
 
     try: