Optimize lookup of shared global blobs
Currently there is only one such shared blob (the shared empty layer), but this change should make blob lookups for repositories significantly faster, as we won't need to perform the massive join.
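For illustration, a minimal sketch of the lookup path this enables; the caller and the lookup_repo_blob_by_digest fallback are hypothetical names, not part of this change, and only get_shared_blob comes from the diff below:

def lookup_blob(namespace, repo_name, digest):
  # Fast path: shared, globally-accessible blobs (currently just the empty
  # gzipped tar layer) live in a single ImageStorage row, so one indexed
  # query on content_checksum answers the lookup with no join at all.
  shared = get_shared_blob(digest)
  if shared is not None:
    return shared

  # Slow path: non-shared blobs still go through the repository-scoped
  # lookup, which joins across the repository tables to enforce visibility.
  return lookup_repo_blob_by_digest(namespace, repo_name, digest)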
parent 7beac643ec
commit f75f315037

6 changed files with 78 additions and 24 deletions
@@ -164,6 +164,18 @@ def initiate_upload(namespace, repo_name, uuid, location_name, storage_metadata)
                          storage_metadata=storage_metadata)
 
 
+def get_shared_blob(digest):
+  """ Returns the ImageStorage blob with the given digest or, if not present,
+      returns None. This method is *only* to be used for shared blobs that are
+      globally accessible, such as the special empty gzipped tar layer that Docker
+      no longer pushes to us.
+  """
+  try:
+    return ImageStorage.get(content_checksum=digest, uploading=False)
+  except ImageStorage.DoesNotExist:
+    return None
+
+
+def get_or_create_shared_blob(digest, byte_data, storage):
+  """ Returns the ImageStorage blob with the given digest or, if not present,
+      adds a row and writes the given byte data to the storage engine.
+  """
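A hedged usage sketch of the second helper; EMPTY_LAYER_BYTES, EMPTY_LAYER_DIGEST, ensure_shared_empty_layer, and the storage engine instance are illustrative assumptions, with only get_or_create_shared_blob coming from this diff:

import hashlib

# Hypothetical constant for the well-known empty gzipped tar layer that
# Docker clients reference but no longer push. Bytes elided here.
EMPTY_LAYER_BYTES = b'...'
EMPTY_LAYER_DIGEST = 'sha256:' + hashlib.sha256(EMPTY_LAYER_BYTES).hexdigest()

def ensure_shared_empty_layer(storage):
  # Idempotent: returns the existing shared ImageStorage row, or writes the
  # bytes to the storage engine once and creates the row on first use.
  return get_or_create_shared_blob(EMPTY_LAYER_DIGEST, EMPTY_LAYER_BYTES, storage)

Because the shared row is keyed only on content_checksum (with uploading=False), every repository can reference the same row, which is what lets get_shared_blob answer without the per-repository join.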