v2: refactor blob.py to use data.types
This commit is contained in:
parent
21cbe0bd07
commit
3de6000428
3 changed files with 387 additions and 207 deletions
|
@ -1,8 +1,10 @@
|
|||
from data.types import (
    Blob,
    BlobUpload,
    DockerV1Metadata,
    ManifestJSON,
    Repository,
    Tag,
)
|
||||
|
||||
def get_repository(namespace_name, repo_name):
|
||||
|
@ -108,3 +110,73 @@ def repository_tags(namespace_name, repo_name, limit, offset):
|
|||
|
||||
def get_visible_repositories(username, limit, offset):
    """Stub: return a single placeholder Repository record.

    NOTE(review): `username`, `limit` and `offset` are currently ignored;
    presumably a real paginated lookup replaces this later.
    """
    placeholder = Repository()
    return [placeholder]
|
||||
|
||||
|
||||
def create_blob_upload(namespace_name, repo_name, upload_uuid, location_name, storage_metadata):
    """
    Creates a blob upload.

    Returns True on success, False if the upload's repository does not exist.
    """
    try:
        # BUG FIX: the original passed the undefined names `new_upload_uuid`
        # and `upload_metadata`; the intended values are the parameters
        # `upload_uuid` and `storage_metadata`.
        model.blob.initiate_upload(namespace_name, repo_name, upload_uuid, location_name,
                                   storage_metadata)
        return True
    except database.Repository.DoesNotExist:
        return False
|
||||
|
||||
|
||||
def blob_upload_by_uuid(uuid):
    """
    Looks up a blob upload by UUID and returns it as a BlobUpload tuple.

    Raises BlobUploadUnknown if no matching upload exists.
    """
    try:
        # BUG FIX: the original referenced undefined names (namespace_name,
        # repo_name, upload_uuid); only `uuid` is in scope here.
        # NOTE(review): model.blob.get_blob_upload's signature isn't visible
        # from this diff -- confirm it can resolve an upload by uuid alone.
        found = model.blob.get_blob_upload(uuid)
    except model.InvalidBlobUpload:
        raise BlobUploadUnknown()

    # BUG FIX: BlobUpload is a 9-field namedtuple (see data.types); the
    # original omitted sha_state, piece_sha_state and piece_hashes, which
    # would raise TypeError at construction time.
    # NOTE(review): assumes the ORM row exposes these three attributes --
    # verify against the blob-upload model.
    return BlobUpload(
        uuid=uuid,
        byte_count=found.byte_count,
        uncompressed_byte_count=found.uncompressed_byte_count,
        chunk_count=found.chunk_count,
        sha_state=found.sha_state,
        location_name=found.location.name,
        storage_metadata=found.storage_metadata,
        piece_sha_state=found.piece_sha_state,
        piece_hashes=found.piece_hashes,
    )
|
||||
|
||||
|
||||
def update_blob_upload(blob_upload):
    """Persist changes to an in-progress blob upload.

    Not yet implemented in the new data-model layer; the previous
    implementation called ``blob_upload.save()`` on the ORM object.
    """
    pass
|
||||
|
||||
|
||||
def delete_blob_upload(uuid):
    """
    Deletes the blob upload with the given UUID.

    Raises BlobUploadUnknown if no matching upload exists.
    """
    try:
        # BUG FIX: the original referenced undefined names (namespace_name,
        # repo_name, upload_uuid); only `uuid` is in scope here.
        # NOTE(review): confirm model.blob.get_blob_upload can resolve an
        # upload by uuid alone.
        found = model.blob.get_blob_upload(uuid)
    except model.InvalidBlobUpload:
        raise BlobUploadUnknown()

    found.delete_instance()
|
||||
|
||||
def create_blob_and_temp_tag(namespace_name, repo_name, expected_digest, upload_obj):
    """Store the uploaded blob record and attach a temporary tag to it.

    The temporary tag's lifetime comes from the PUSH_TEMP_TAG_EXPIRATION_SEC
    app config value.
    """
    expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
    return model.blob.store_blob_record_and_temp_link(
        namespace_name,
        repo_name,
        expected_digest,
        upload_obj.location,
        upload_obj.byte_count,
        expiration_sec,
        upload_obj.uncompressed_byte_count,
    )
|
||||
|
||||
|
||||
def blob_by_digest(namespace_name, repo_name, digest):
    """Return the repository blob matching `digest`, or None when absent."""
    result = None
    try:
        result = model.blob.get_repo_blob_by_digest(namespace_name, repo_name, digest)
    except model.BlobDoesNotExist:
        # Missing blob is an expected condition, signalled by the None return.
        pass
    return result
|
||||
|
||||
|
||||
def create_bittorrent_pieces(blob_storage, piece_size, piece_bytes):
    """
    Saves the BitTorrent piece hashes for the given blob storage.

    BUG FIX: the original `def` line was missing its trailing colon, which is
    a syntax error.
    """
    model.storage.save_torrent_info(blob_storage.id, piece_size, piece_bytes)
|
||||
|
||||
|
||||
def get_blob_path(blob):
    """
    Returns the storage path for the given blob.

    BUG FIX: the original computed the path but never returned it, so every
    call yielded None.
    """
    # Once everything is moved over, this could be in util.registry and not even
    # touch the database.
    return model.storage.get_layer_path(blob)
|
||||
|
|
|
@ -344,3 +344,15 @@ DockerV1Metadata = namedtuple('DockerV1Metadata', ['namespace_name',
|
|||
'command',
|
||||
'parent_image_id',
|
||||
'compat_json'])
|
||||
|
||||
# In-progress blob upload: resumable hash state, chunk accounting, and the
# storage location/metadata needed to continue or commit the upload.
BlobUpload = namedtuple('BlobUpload', [
    'uuid',
    'byte_count',
    'uncompressed_byte_count',
    'chunk_count',
    'sha_state',
    'location_name',
    'storage_metadata',
    'piece_sha_state',
    'piece_hashes',
])
|
||||
|
||||
# A stored blob: its content digest, byte size, and the storage locations
# holding a copy.
Blob = namedtuple('Blob', [
    'digest',
    'size',
    'locations',
])
|
||||
|
|
Reference in a new issue