bea8b9ac53
Implement the minimal changes to the local filesystem storage driver and feed them through the distributed storage driver. Create a digest package which contains digest_tools and checksums. Fix the tests to use the new v1 endpoint locations. Fix repository.delete_instance to properly filter the generated queries to avoid most subquery deletes, but still generate them when not explicitly filtered.
120 lines
3.1 KiB
Python
import tempfile
|
|
|
|
|
|
class StoragePaths(object):
    """Mixin that derives the storage paths for an image's artifacts.

    Every file belonging to an image lives inside a single directory keyed
    by the storage record's UUID, rooted under the shared images prefix.
    """

    # Prefix under which all image-addressed storage directories live.
    shared_images = 'sharedimages'

    @staticmethod
    def temp_store_handler():
        """Return a ``(file, writer)`` pair backed by an anonymous temp file.

        The writer appends each buffer to the temp file, silently dropping
        data on IOError so it can be used as a best-effort tee while
        streaming. The caller owns (and must close) the returned file.
        """
        temp_file = tempfile.TemporaryFile()

        def write_chunk(chunk):
            try:
                temp_file.write(chunk)
            except IOError:
                # Best-effort copy: keep streaming even if the write fails.
                pass

        return temp_file, write_chunk

    def image_path(self, storage_uuid):
        """Directory (with trailing slash) holding the image's files."""
        return '{0}/{1}/'.format(self.shared_images, storage_uuid)

    def image_json_path(self, storage_uuid):
        """Path of the image's JSON metadata document."""
        return self.image_path(storage_uuid) + 'json'

    def image_layer_path(self, storage_uuid):
        """Path of the image's layer blob."""
        return self.image_path(storage_uuid) + 'layer'

    def image_ancestry_path(self, storage_uuid):
        """Path of the image's ancestry listing."""
        return self.image_path(storage_uuid) + 'ancestry'

    def image_file_trie_path(self, storage_uuid):
        """Path of the image's file-listing trie."""
        return self.image_path(storage_uuid) + 'files.trie'

    def image_file_diffs_path(self, storage_uuid):
        """Path of the image's file-diffs JSON document."""
        return self.image_path(storage_uuid) + 'diffs.json'
class BaseStorage(StoragePaths):
    """Abstract interface for a blob storage backend.

    Storage is organized as follows:
      $ROOT/images/<image_id>/json
      $ROOT/images/<image_id>/layer
      $ROOT/repositories/<namespace>/<repository_name>/<tag_name>

    Concrete drivers override the I/O methods below; the default
    implementations either raise NotImplementedError or report that an
    optional capability is unavailable.
    """

    # Named so these locations can be changed later without rewriting
    # the code which uses Storage.
    repositories = 'repositories'
    images = 'images'

    # I/O buffer size: 64kB.
    buffer_size = 64 * 1024

    def setup(self):
        """Perform any one-time storage system setup. No-op by default."""
        pass

    def get_direct_download_url(self, path, expires_in=60, requires_cors=False):
        """Return a direct-download URL for ``path``, or None if unsupported."""
        return None

    def get_direct_upload_url(self, path, mime_type, requires_cors=True):
        """Return a direct-upload URL for ``path``, or None if unsupported."""
        return None

    def get_supports_resumable_downloads(self):
        """Whether this backend supports resumable downloads (default: no)."""
        return False

    def get_content(self, path):
        """Return the full contents stored at ``path``."""
        raise NotImplementedError

    def put_content(self, path, content):
        """Store ``content`` at ``path``, replacing any existing data."""
        raise NotImplementedError

    def stream_read(self, path):
        """Yield the contents of ``path`` in chunks."""
        raise NotImplementedError

    def stream_read_file(self, path):
        """Return a file-like object for reading ``path``."""
        raise NotImplementedError

    def stream_write(self, path, fp, content_type=None, content_encoding=None):
        """Write the stream ``fp`` to ``path``."""
        raise NotImplementedError

    def list_directory(self, path=None):
        """List the entries under ``path``."""
        raise NotImplementedError

    def exists(self, path):
        """Whether anything is stored at ``path``."""
        raise NotImplementedError

    def remove(self, path):
        """Delete the data stored at ``path``."""
        raise NotImplementedError

    def get_checksum(self, path):
        """Return a checksum for the data stored at ``path``."""
        raise NotImplementedError
class DigestInvalidException(RuntimeError):
    """Raised when a content digest fails verification or cannot be parsed."""
class BaseStorageV2(BaseStorage):
    """Extends the base storage interface with chunked-upload operations."""

    def initiate_chunked_upload(self):
        """Start a new chunked upload.

        Returns a handle with which the upload can be referenced.
        """
        raise NotImplementedError

    def stream_upload_chunk(self, uuid, offset, length, in_fp):
        """Upload a chunk to the upload identified by ``uuid``.

        Reads up to ``length`` bytes from ``in_fp`` and writes them at
        ``offset``. Returns the number of bytes written.
        """
        raise NotImplementedError

    def complete_chunked_upload(self, uuid, final_path, digest_to_verify):
        """Finish the chunked upload and store the result at ``final_path``."""
        raise NotImplementedError