Merge remote-tracking branch 'upstream/master' into python-registry-v2

Jake Moshenko 2015-09-04 16:32:01 -04:00
commit 210ed7cf02
148 changed files with 1829 additions and 445 deletions


@@ -39,7 +39,8 @@ class Storage(object):
     if not preference:
       preference = storages.keys()
-    d_storage = DistributedStorage(storages, preference)
+    default_locations = app.config.get('DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS') or []
+    d_storage = DistributedStorage(storages, preference, default_locations)
 
     # register extension with app
     app.extensions = getattr(app, 'extensions', {})
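For context on the two added lines: the default write locations now come from app config. A minimal sketch of what that configuration might look like; only the DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS key appears in this diff, so the other keys and the 'local_us' name are illustrative assumptions:

    # Hypothetical config sketch; everything except
    # DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS is an illustrative assumption.
    DISTRIBUTED_STORAGE_CONFIG = {
      'local_us': ['LocalStorage', {'storage_path': '/some/path'}],
    }
    DISTRIBUTED_STORAGE_PREFERENCE = ['local_us']
    DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ['local_us']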


@@ -111,6 +111,9 @@ class BaseStorage(StoragePaths):
     return bytes_copied
 
+  def copy_to(self, destination, path):
+    raise NotImplementedError
+
 
 class InvalidChunkException(RuntimeError):
   pass
@@ -141,4 +144,3 @@ class BaseStorageV2(BaseStorage):
         Returns nothing.
     """
     raise NotImplementedError
-
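The copy_to stub added to BaseStorage defines the interface that the concrete backends in this commit override. A minimal sketch of a conforming subclass, using the same streamed pattern LocalStorage adopts later in this diff (the class name is hypothetical):

    # Hypothetical backend; the streaming body mirrors LocalStorage.copy_to.
    class ExampleStorage(BaseStorage):
      def copy_to(self, destination, path):
        with self.stream_read_file(path) as fp:
          destination.stream_write(path, fp)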


@@ -14,8 +14,8 @@ from uuid import uuid4
 from collections import namedtuple
 
 from util.registry import filelike
 from storage.basestorage import BaseStorageV2, InvalidChunkException
+import app
 
 logger = logging.getLogger(__name__)
@@ -161,6 +161,7 @@ class _CloudStorage(BaseStorageV2):
     if content_encoding is not None:
       metadata['Content-Encoding'] = content_encoding
 
+    app.metric_queue.put('MultipartUploadStart', 1)
     return self._cloud_bucket.initiate_multipart_upload(path, metadata=metadata,
                                                         **self._upload_params)
@@ -194,11 +195,13 @@ class _CloudStorage(BaseStorageV2):
         total_bytes_written += bytes_staged
         num_part += 1
       except IOError:
+        app.metric_queue.put('MultipartUploadFailure', 1)
         if cancel_on_error:
           mp.cancel_upload()
           return 0
 
     if total_bytes_written > 0:
+      app.metric_queue.put('MultipartUploadSuccess', 1)
       mp.complete_upload()
 
     return total_bytes_written
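Together these hooks emit three metrics over a multipart upload's lifetime: MultipartUploadStart when the upload is initiated, MultipartUploadFailure when an IOError interrupts chunk staging, and MultipartUploadSuccess when at least one byte was written and the upload completes.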
@@ -253,6 +256,28 @@ class _CloudStorage(BaseStorageV2):
       return k.etag[1:-1][:7]
 
+  def copy_to(self, destination, path):
+    # First try to copy directly via boto, but only if the storages are the
+    # same type, with the same access information.
+    if (self.__class__ == destination.__class__ and
+        self._access_key == destination._access_key and
+        self._secret_key == destination._secret_key):
+      logger.debug('Copying file from %s to %s via a direct boto copy', self._cloud_bucket,
+                   destination._cloud_bucket)
+
+      source_path = self._init_path(path)
+      source_key = self._key_class(self._cloud_bucket, source_path)
+
+      dest_path = destination._init_path(path)
+      source_key.copy(destination._cloud_bucket, dest_path)
+      return
+
+    # Fallback to a slower, default copy.
+    logger.debug('Copying file from %s to %s via a streamed copy', self._cloud_bucket,
+                 destination)
+    with self.stream_read_file(path) as fp:
+      destination.stream_write(path, fp)
+
   def _rel_upload_path(self, uuid):
     return 'uploads/{0}'.format(uuid)
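The design choice in copy_to: when both storages are the same class with identical access keys, the copy is delegated to boto's server-side key.copy and no file bytes flow through the registry host; otherwise the data is streamed through the registry process as a slower fallback.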
@@ -371,7 +396,6 @@ class S3Storage(_CloudStorage):
 </CORSRule>
 </CORSConfiguration>""")
 
-
 class GoogleCloudStorage(_CloudStorage):
   def __init__(self, storage_path, access_key, secret_key, bucket_name):
     upload_params = {}


@@ -26,9 +26,15 @@ def _location_aware(unbound_func):
 
 class DistributedStorage(StoragePaths):
-  def __init__(self, storages, preferred_locations=[]):
+  def __init__(self, storages, preferred_locations=[], default_locations=[]):
     self._storages = dict(storages)
     self.preferred_locations = list(preferred_locations)
+    self.default_locations = list(default_locations)
+
+  @property
+  def locations(self):
+    """ Returns the names of the locations supported. """
+    return list(self._storages.keys())
 
   get_direct_download_url = _location_aware(BaseStorage.get_direct_download_url)
   get_direct_upload_url = _location_aware(BaseStorage.get_direct_upload_url)
@@ -42,7 +48,14 @@ class DistributedStorage(StoragePaths):
   remove = _location_aware(BaseStorage.remove)
   get_checksum = _location_aware(BaseStorage.get_checksum)
   get_supports_resumable_downloads = _location_aware(BaseStorage.get_supports_resumable_downloads)
   initiate_chunked_upload = _location_aware(BaseStorageV2.initiate_chunked_upload)
   stream_upload_chunk = _location_aware(BaseStorageV2.stream_upload_chunk)
   complete_chunked_upload = _location_aware(BaseStorageV2.complete_chunked_upload)
   cancel_chunked_upload = _location_aware(BaseStorageV2.cancel_chunked_upload)
+
+  def copy_between(self, path, source_location, destination_location):
+    """ Copies a file between the source location and the destination location. """
+    source_storage = self._storages[source_location]
+    destination_storage = self._storages[destination_location]
+    source_storage.copy_to(destination_storage, path)
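A usage sketch for the new copy_between; the location names and path here are made up for illustration:

    # Hypothetical call; 'local_us', 'local_eu', and the path are illustrative.
    storage.copy_between('some/blob/path', 'local_us', 'local_eu')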


@@ -32,6 +32,9 @@ class FakeStorage(BaseStorageV2):
         break
       yield buf
 
+  def stream_read_file(self, path):
+    return StringIO(_FAKE_STORAGE_MAP[path])
+
   def stream_write(self, path, fp, content_type=None, content_encoding=None):
     out_fp = _FAKE_STORAGE_MAP[path]
     out_fp.seek(0)


@@ -139,3 +139,7 @@ class LocalStorage(BaseStorageV2):
       raise Exception('Storage path %s is not under a mounted volume.\n\n'
                       'Registry data must be stored under a mounted volume '
                       'to prevent data loss' % self._root_path)
+
+  def copy_to(self, destination, path):
+    with self.stream_read_file(path) as fp:
+      destination.stream_write(path, fp)
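LocalStorage's copy_to is exactly the generic streamed copy, i.e. the same fallback path _CloudStorage takes when a direct boto copy is not possible.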


@@ -24,7 +24,11 @@ class SwiftStorage(BaseStorage):
     self._swift_user = swift_user
     self._swift_password = swift_password
-    self._auth_version = auth_version or 2
+
+    try:
+      self._auth_version = int(auth_version or '2')
+    except ValueError:
+      self._auth_version = 2
 
     self._os_options = os_options or {}
     self._initialized = False
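The try/except makes auth_version tolerant of string-valued config. A quick behavior sketch (values illustrative):

    # int() accepts numeric strings but raises ValueError otherwise,
    # so a malformed value falls back to auth version 2.
    int('3')   # -> 3
    int('v2')  # ValueError -> _auth_version defaults to 2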