Another huge batch of registry v2 changes

Add patch support and resumable sha
Implement all actual registry methods
Add a simple database generation option

parent 5ba3521e67
commit e1b3e9e6ae
29 changed files with 1095 additions and 430 deletions
storage/basestorage.py

@@ -1,5 +1,7 @@
 import tempfile
 
+from digest.digest_tools import content_path
+
 
 class StoragePaths(object):
   shared_images = 'sharedimages'
@@ -23,13 +25,12 @@ class StoragePaths(object):
     base_path = self.image_path(storage_uuid)
     return '{0}json'.format(base_path)
 
-  def image_layer_path(self, storage_uuid):
+  def v1_image_layer_path(self, storage_uuid):
     base_path = self.image_path(storage_uuid)
     return '{0}layer'.format(base_path)
 
-  def image_ancestry_path(self, storage_uuid):
-    base_path = self.image_path(storage_uuid)
-    return '{0}ancestry'.format(base_path)
+  def blob_path(self, digest_str):
+    return content_path(digest_str)
 
   def image_file_trie_path(self, storage_uuid):
     base_path = self.image_path(storage_uuid)
@@ -99,26 +100,30 @@ class BaseStorage(StoragePaths):
     raise NotImplementedError
 
 
-class DigestInvalidException(RuntimeError):
+class InvalidChunkException(RuntimeError):
   pass
 
 
 class BaseStorageV2(BaseStorage):
-  def initiate_chunked_upload(self):
-    """ Start a new chunked upload, and return a handle with which the upload can be referenced.
+  def initiate_chunked_upload(self, upload_uuid):
+    """ Start a new chunked upload
     """
     raise NotImplementedError
 
-  def stream_upload_chunk(self, uuid, offset, length, in_fp):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, hash_obj):
     """ Upload the specified amount of data from the given file pointer to the chunked destination
-        specified, starting at the given offset. Returns the number of bytes written.
+        specified, starting at the given offset. Raises InvalidChunkException if the offset or
+        length can not be accepted.
     """
     raise NotImplementedError
 
-  def complete_chunked_upload(self, uuid, final_path, digest_to_verify):
+  def complete_chunked_upload(self, uuid, final_path):
     """ Complete the chunked upload and store the final results in the path indicated.
     """
     raise NotImplementedError
 
+  def cancel_chunked_upload(self, uuid):
+    """ Cancel the chunked upload and clean up any outstanding partially uploaded data.
+    """
+    raise NotImplementedError
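Taken together, the revised interface expects the caller to mint the upload handle up front and to carry one running hash object across chunks, so the sha256 accumulates as data arrives instead of being recomputed at completion. A minimal sketch of a driver loop under those assumptions (upload_blob, CHUNK_SIZE, and the use of hashlib.sha256() as the hash_obj are illustrative, not part of this commit; it also assumes stream_upload_chunk updates hash_obj with the bytes it writes):

import hashlib
from uuid import uuid4

CHUNK_SIZE = 1024 * 1024  # illustrative chunk size, not from this commit


def upload_blob(storage, in_fp, total_length, final_path):
  # The caller now generates the handle, per initiate_chunked_upload(upload_uuid).
  upload_id = str(uuid4())
  storage.initiate_chunked_upload(upload_id)

  # Thread one hash object through every chunk so the sha256 is resumable.
  hash_obj = hashlib.sha256()
  offset = 0
  while offset < total_length:
    length = min(CHUNK_SIZE, total_length - offset)
    storage.stream_upload_chunk(upload_id, offset, length, in_fp, hash_obj)
    offset += length

  storage.complete_chunked_upload(upload_id, final_path)
  return 'sha256:' + hash_obj.hexdigest()

Note that a plain hashlib object only survives within one process; resuming an upload across requests would additionally require serializable hash state, which this sketch does not model.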
storage/distributedstorage.py

@@ -45,3 +45,4 @@ class DistributedStorage(StoragePaths):
   initiate_chunked_upload = _location_aware(BaseStorageV2.initiate_chunked_upload)
   stream_upload_chunk = _location_aware(BaseStorageV2.stream_upload_chunk)
   complete_chunked_upload = _location_aware(BaseStorageV2.complete_chunked_upload)
+  cancel_chunked_upload = _location_aware(BaseStorageV2.cancel_chunked_upload)
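The _location_aware helper itself is outside this hunk. As a rough sketch of the delegation pattern it implies (the selection policy and the _storages mapping are assumptions, not the actual implementation):

def _location_aware(unbound_func):
  # Proxy a BaseStorageV2 method: pick a concrete storage engine for one of
  # the given locations, then delegate the call to it.
  def wrapper(self, locations, *args, **kwargs):
    location = next(iter(locations))      # assumed selection policy
    storage = self._storages[location]    # assumed name for the engine map
    return unbound_func(storage, *args, **kwargs)
  return wrapper

Defining the proxies as class attributes keeps DistributedStorage a thin router: every new BaseStorageV2 method, like cancel_chunked_upload here, only needs one wrapping line.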
storage/local.py

@@ -8,7 +8,6 @@ import psutil
 from uuid import uuid4
 
 from storage.basestorage import BaseStorageV2
-from digest import digest_tools
 
 
 logger = logging.getLogger(__name__)
@@ -64,8 +63,9 @@ class LocalStorage(BaseStorageV2):
     bytes_copied = 0
     bytes_remaining = num_bytes
     while bytes_remaining > 0 or num_bytes < 0:
+      size_to_read = min(bytes_remaining, self.buffer_size)
       try:
-        buf = in_fp.read(self.buffer_size)
+        buf = in_fp.read(size_to_read)
         if not buf:
           break
         out_fp.write(buf)
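The one-line fix caps each read at the bytes still owed, so a bounded copy can no longer consume input past the requested length, which matters once chunk offsets and lengths must be honored exactly. A self-contained sketch of the same loop (function name and default buffer size are illustrative):

def copy_bounded(in_fp, out_fp, num_bytes, buffer_size=64 * 1024):
  # Copy at most num_bytes from in_fp to out_fp; num_bytes < 0 means until EOF.
  bytes_copied = 0
  bytes_remaining = num_bytes
  while bytes_remaining > 0 or num_bytes < 0:
    # Never ask for more than the caller still owes us.
    size_to_read = buffer_size if num_bytes < 0 else min(bytes_remaining, buffer_size)
    buf = in_fp.read(size_to_read)
    if not buf:
      break
    out_fp.write(buf)
    bytes_copied += len(buf)
    bytes_remaining -= len(buf)
  return bytes_copied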
@@ -112,11 +112,9 @@ class LocalStorage(BaseStorageV2):
       sha_hash.update(buf)
     return sha_hash.hexdigest()[:7]
 
-
   def _rel_upload_path(self, uuid):
     return 'uploads/{0}'.format(uuid)
 
-
   def initiate_chunked_upload(self):
     new_uuid = str(uuid4())
@@ -131,14 +129,8 @@ class LocalStorage(BaseStorageV2):
     upload_storage.seek(offset)
     return self._stream_write_to_fp(in_fp, upload_storage, length)
 
-  def complete_chunked_upload(self, uuid, final_path, digest_to_verify):
+  def complete_chunked_upload(self, uuid, final_path):
     content_path = self._rel_upload_path(uuid)
-    content_digest = digest_tools.sha256_digest_from_generator(self.stream_read(content_path))
-
-    if not digest_tools.digests_equal(content_digest, digest_to_verify):
-      msg = 'Given: {0} Computed: {1}'.format(digest_to_verify, content_digest)
-      raise digest_tools.InvalidDigestException(msg)
-
     final_path_abs = self._init_path(final_path, create=True)
     if not self.exists(final_path_abs):
       logger.debug('Moving content into place at path: %s', final_path_abs)
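With verification removed from complete_chunked_upload, checking the client-supplied digest presumably moves to the registry code that owns the hash_obj, sparing the full re-read of the blob that the deleted lines performed. A hedged sketch of that check, reusing the digest_tools helpers the removed code called (the function name and call site are assumptions):

from digest import digest_tools


def verify_blob_digest(hash_obj, expected_digest):
  # Compare the digest accumulated across chunks to the one the client claimed.
  computed = 'sha256:' + hash_obj.hexdigest()
  if not digest_tools.digests_equal(computed, expected_digest):
    msg = 'Given: {0} Computed: {1}'.format(expected_digest, computed)
    raise digest_tools.InvalidDigestException(msg)
  return computed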
@@ -146,6 +138,10 @@ class LocalStorage(BaseStorageV2):
     else:
       logger.debug('Content already exists at path: %s', final_path_abs)
 
+  def cancel_chunked_upload(self, uuid):
+    content_path = self._init_path(self._rel_upload_path(uuid))
+    os.remove(content_path)
+
   def validate(self):
     # Load the set of disk mounts.
     try: