Stop writing to deprecated columns for image data.
parent: ad53bf5671
commit: ce94931540
9 changed files with 13 additions and 39 deletions
@@ -487,12 +487,8 @@ class EmailConfirmation(BaseModel):

 class ImageStorage(BaseModel):
   uuid = CharField(default=uuid_generator, index=True, unique=True)
   checksum = CharField(null=True)
-  created = DateTimeField(null=True)
-  comment = TextField(null=True)
-  command = TextField(null=True)
   image_size = BigIntegerField(null=True)
   uncompressed_size = BigIntegerField(null=True)
-  aggregate_size = BigIntegerField(null=True)
   uploading = BooleanField(default=True, null=True)
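Removing these fields from the Peewee model does not change the database schema on its own; once all readers are gone, a follow-up migration can drop the columns. A hypothetical Alembic sketch, assuming Peewee's default imagestorage table name (no such migration is part of this commit):

import sqlalchemy as sa
from alembic import op

def upgrade():
  # Drop the deprecated columns this commit stops writing to.
  op.drop_column('imagestorage', 'created')
  op.drop_column('imagestorage', 'comment')
  op.drop_column('imagestorage', 'command')
  op.drop_column('imagestorage', 'aggregate_size')

def downgrade():
  # Restore the columns as nullable, mirroring the old field definitions.
  op.add_column('imagestorage', sa.Column('created', sa.DateTime, nullable=True))
  op.add_column('imagestorage', sa.Column('comment', sa.Text, nullable=True))
  op.add_column('imagestorage', sa.Column('command', sa.Text, nullable=True))
  op.add_column('imagestorage', sa.Column('aggregate_size', sa.BigInteger, nullable=True))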
@@ -285,24 +285,15 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created

   # We cleanup any old checksum in case it's a retry after a fail
   fetched.storage.checksum = None
-  now = datetime.now()
-  # TODO stop writing to storage when all readers are removed
-  fetched.storage.created = now
-  fetched.created = now
+  fetched.created = datetime.now()

   if created_date_str is not None:
     try:
-      # TODO stop writing to storage fields when all readers are removed
-      parsed_created_time = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
-      fetched.created = parsed_created_time
-      fetched.storage.created = parsed_created_time
+      fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
     except:
       # parse raises different exceptions, so we cannot use a specific kind of handler here.
       pass

-  # TODO stop writing to storage fields when all readers are removed
-  fetched.storage.comment = comment
-  fetched.storage.command = command
   fetched.comment = comment
   fetched.command = command
   fetched.v1_json_metadata = v1_json_metadata
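The bare except around the dateutil call is kept because dateutil.parser.parse raises several exception types depending on the input. A narrower handler is possible; a sketch, where parse_created is a hypothetical helper name, not from the codebase:

import dateutil.parser

def parse_created(created_date_str):
  try:
    # Strip the timezone to match the naive datetimes used elsewhere.
    return dateutil.parser.parse(created_date_str).replace(tzinfo=None)
  except (ValueError, OverflowError, TypeError):
    # Unparseable string, out-of-range date, or non-string input.
    return None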
@@ -346,17 +337,11 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
                       .where(Image.id << ancestors)
                       .scalar())

-      # TODO stop writing to storage when all readers are removed
       if ancestor_size is not None:
-        # total_size = image_size + parent_image.storage.aggregate_size
-        total_size = ancestor_size + image_size
-        image.storage.aggregate_size = total_size
-        image.aggregate_size = total_size
+        image.aggregate_size = ancestor_size + image_size
     except Image.DoesNotExist:
       pass
   else:
-    # TODO stop writing to storage when all readers are removed
-    image.storage.aggregate_size = image_size
     image.aggregate_size = image_size

   image.storage.save()
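For context, ancestor_size above is the summed stored size of the image's ancestor chain, and the new code records the total only on Image.aggregate_size. A sketch of the implied query, assuming Peewee and the Image/ImageStorage models from this diff (compute_aggregate_size is a hypothetical name):

from peewee import fn

def compute_aggregate_size(ancestors, image_size):
  # Sum the compressed sizes of all ancestor images, then add this
  # image's own size to get the cumulative chain size.
  ancestor_size = (ImageStorage
                   .select(fn.Sum(ImageStorage.image_size))
                   .join(Image)
                   .where(Image.id << ancestors)
                   .scalar())
  if ancestor_size is None:
    return None
  return ancestor_size + image_size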
@@ -521,16 +521,11 @@ def put_image_json(namespace, repository, image_id):
   model.image.set_image_metadata(image_id, namespace, repository, data.get('created'),
                                  data.get('comment'), command, v1_metadata, parent_image)

-  logger.debug('Putting json path')
-  uuid = repo_image.storage.uuid
-  json_path = store.image_json_path(uuid)
-  store.put_content(repo_image.storage.locations, json_path, request.data)
-
   logger.debug('Generating image ancestry')

   try:
-    generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid,
-                      parent_locations)
+    generate_ancestry(image_id, repo_image.storage.uuid, repo_image.storage.locations, parent_id,
+                      parent_uuid, parent_locations)
   except IOError as ioe:
     logger.debug('Error when generating ancestry: %s', ioe.message)
     abort(404)
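With the store.put_content call removed, the v1 JSON is no longer written out as a json file in object storage; readers are expected to use the v1_json_metadata column that set_image_metadata now populates. A hedged sketch of the corresponding read path (the query shape and repository filter are assumptions; field names come from this diff):

def get_v1_metadata(docker_image_id, repository):
  # The metadata now lives on the Image row rather than in storage.
  image = (Image
           .select()
           .where(Image.docker_image_id == docker_image_id,
                  Image.repository == repository)
           .get())
  return image.v1_json_metadata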
@@ -88,8 +88,7 @@ def __create_subtree(repo, structure, creator_username, parent, tag_map):
     # Write some data for the storage.
     if os.environ.get('WRITE_STORAGE_FILES'):
       storage_paths = StoragePaths()
-      paths = [storage_paths.image_json_path,
-               storage_paths.image_ancestry_path,
+      paths = [storage_paths.image_ancestry_path,
                storage_paths.image_layer_path]

       for path_builder in paths:
@@ -19,10 +19,6 @@ class StoragePaths(object):
   def image_path(self, storage_uuid):
     return '{0}/{1}/'.format(self.shared_images, storage_uuid)

-  def image_json_path(self, storage_uuid):
-    base_path = self.image_path(storage_uuid)
-    return '{0}json'.format(base_path)
-
   def image_layer_path(self, storage_uuid):
     base_path = self.image_path(storage_uuid)
     return '{0}layer'.format(base_path)
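After this removal, StoragePaths keeps only builders for objects that are still written to storage. A minimal self-contained sketch of the remaining shape (the shared_images value and the ancestry suffix are assumptions extrapolated from the pattern above):

class StoragePaths(object):
  shared_images = 'sharedimages'  # assumed base directory name

  def image_path(self, storage_uuid):
    return '{0}/{1}/'.format(self.shared_images, storage_uuid)

  def image_ancestry_path(self, storage_uuid):
    return '{0}ancestry'.format(self.image_path(storage_uuid))

  def image_layer_path(self, storage_uuid):
    return '{0}layer'.format(self.image_path(storage_uuid))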
Binary file not shown.
@@ -10,7 +10,6 @@ logger = logging.getLogger(__name__)


 PATHSPECS = [
-  (storage.image_json_path, True),
   (storage.image_layer_path, True),
   (storage.image_ancestry_path, True),
   (storage.image_file_trie_path, False),
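Each PATHSPECS entry pairs a path builder with a flag that appears to mark whether the object must exist; dropping image_json_path keeps this pass from expecting json files that are no longer written. A sketch of how such a table might be consumed (candidate_paths is a hypothetical helper; the flag semantics are inferred):

def candidate_paths(storage_uuid):
  # Yield (path, is_required) for every object a storage record may own.
  for path_builder, is_required in PATHSPECS:
    yield path_builder(storage_uuid), is_required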
@@ -11,6 +11,11 @@ from data import model

 logger = logging.getLogger(__name__)

+
+def image_json_path(storage_uuid):
+  base_path = storage.image_path(storage_uuid)
+  return '{0}json'.format(base_path)
+
 def backfill_v1_metadata():
   """ Copies metadata from image storages to their images. """
   logger.debug('Image v1 metadata backfill: Began execution')
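Because the shared helper is deleted from StoragePaths, the backfill keeps a private copy of image_json_path so it can still locate the legacy json files it migrates from. For example, assuming the path scheme sketched earlier:

path = image_json_path('some-storage-uuid')
# -> 'sharedimages/some-storage-uuid/json', depending on storage.image_path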
@@ -48,7 +53,7 @@ def backfill_v1_metadata():

     repo_image = repo_image_list[0]
     uuid = repo_image.storage.uuid
-    json_path = storage.image_json_path(uuid)
+    json_path = image_json_path(uuid)

     logger.debug('Updating image: %s from: %s', repo_image.id, json_path)
     try:
@@ -49,8 +49,7 @@ class StorageReplicationWorker(QueueWorker):
       logger.debug('Copying image storage %s to location %s', partial_storage.uuid, location)

       # Copy the various paths.
-      paths = [storage_paths.image_json_path,
-               storage_paths.image_ancestry_path,
+      paths = [storage_paths.image_ancestry_path,
                storage_paths.image_layer_path]

       try:
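The code that consumes this paths list is outside the hunk; a hedged sketch of the copy loop, where get_content and put_content mirror the storage calls seen earlier in this diff and replicate_paths is a hypothetical name:

def replicate_paths(store, paths, storage_uuid, source_locations, target_location):
  for path_builder in paths:
    path = path_builder(storage_uuid)
    # Read the object from any existing location, then write it to the
    # location being replicated to.
    contents = store.get_content(source_locations, path)
    store.put_content([target_location], path, contents)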