Use the secure s3 endpoint and store our files encrypted.
parent
65aad1a2d9
commit
b96f678df8
2 changed files with 127 additions and 129 deletions
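(Context, not part of the commit: boto's connect_s3 and S3Connection default to is_secure=True, so deleting the explicit is_secure=False switches the API traffic to HTTPS, and passing encrypt_key=True asks S3 for server-side encryption at rest, i.e. the x-amz-server-side-encryption: AES256 header. A minimal sketch of the two settings together, with placeholder credentials and bucket name:

import boto

# HTTPS endpoint by default once is_secure=False is gone.
conn = boto.connect_s3('ACCESS_KEY', 'SECRET_KEY')
bucket = conn.get_bucket('example-bucket')

# encrypt_key=True makes S3 encrypt the stored object (SSE-S3).
key = bucket.new_key('example.txt')
key.set_contents_from_string('hello', encrypt_key=True)
)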
@@ -20,8 +20,7 @@ class S3FileWriteException(Exception):
 
 class UserRequestFiles(object):
   def __init__(self, s3_access_key, s3_secret_key, bucket_name):
-    self._s3_conn = boto.connect_s3(s3_access_key, s3_secret_key,
-                                    is_secure=False)
+    self._s3_conn = boto.connect_s3(s3_access_key, s3_secret_key)
     self._bucket_name = bucket_name
     self._bucket = self._s3_conn.get_bucket(bucket_name)
     self._access_key = s3_access_key
@@ -34,7 +33,8 @@ class UserRequestFiles(object):
     file_id = str(uuid4())
     full_key = os.path.join(self._prefix, file_id)
     k = Key(self._bucket, full_key)
-    url = k.generate_url(300, 'PUT', headers={'Content-Type': mime_type})
+    url = k.generate_url(300, 'PUT', headers={'Content-Type': mime_type},
+                         encrypt_key=True)
     return (url, file_id)
 
   def store_file(self, flask_file):
@@ -43,7 +43,7 @@ class UserRequestFiles(object):
     k = Key(self._bucket, full_key)
     logger.debug('Setting s3 content type to: %s' % flask_file.content_type)
     k.set_metadata('Content-Type', flask_file.content_type)
-    bytes_written = k.set_contents_from_file(flask_file)
+    bytes_written = k.set_contents_from_file(flask_file, encrypt_key=True)
 
     if bytes_written == 0:
       raise S3FileWriteException('Unable to write file to S3')
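(A hypothetical client-side sketch, not part of the commit: because the URL above is signed with encrypt_key=True, boto folds the x-amz-server-side-encryption header into the signature, so whoever performs the PUT has to send that header along with the signed Content-Type. The requests library and function name here are illustrative:

import requests

def upload_to_presigned_url(url, data, mime_type):
  # The headers must match what was signed into the URL.
  headers = {'Content-Type': mime_type,
             'x-amz-server-side-encryption': 'AES256'}
  resp = requests.put(url, data=data, headers=headers)
  resp.raise_for_status()
  return resp
)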
248  storage/s3.py
@@ -12,148 +12,146 @@ logger = logging.getLogger(__name__)
 
 
 class StreamReadKeyAsFile(object):
   def __init__(self, key):
     self._key = key
     self._finished = False
 
   def __enter__(self):
     return self
 
   def __exit__(self, type, value, tb):
     self._key.close(fast=True)
 
   def read(self, amt=None):
     if self._finished:
       return None
 
     resp = self._key.read(amt)
     if not resp:
       self._finished = True
     return resp
 
 
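(A hypothetical usage sketch for the StreamReadKeyAsFile wrapper above, not part of the commit; it assumes an existing boto bucket and copies a key's contents into a local file object:

import boto.s3.key

def copy_key_to_file(bucket, key_name, out_fp, chunk_size=64 * 1024):
  key = boto.s3.key.Key(bucket, key_name)
  key.open_read()  # start the GET so key.read() streams the body
  with StreamReadKeyAsFile(key) as stream:
    while True:
      buf = stream.read(chunk_size)
      if not buf:  # read() returns None once the key is exhausted
        break
      out_fp.write(buf)
)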
 class S3Storage(Storage):
 
   def __init__(self, storage_path, s3_access_key, s3_secret_key, s3_bucket):
-    self._s3_conn = \
-        boto.s3.connection.S3Connection(s3_access_key,
-                                        s3_secret_key,
-                                        is_secure=False)
+    self._s3_conn = \
+        boto.s3.connection.S3Connection(s3_access_key, s3_secret_key)
     self._s3_bucket = self._s3_conn.get_bucket(s3_bucket)
     self._root_path = storage_path
 
   def _debug_key(self, key):
     """Used for debugging only."""
     orig_meth = key.bucket.connection.make_request
 
     def new_meth(*args, **kwargs):
       print '#' * 16
       print args
       print kwargs
       print '#' * 16
       return orig_meth(*args, **kwargs)
     key.bucket.connection.make_request = new_meth
 
   def _init_path(self, path=None):
     path = os.path.join(self._root_path, path) if path else self._root_path
     if path and path[0] == '/':
       return path[1:]
     return path
 
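(Note, not part of the commit: _init_path strips a leading '/' because S3 key names are not rooted; with storage_path '/registry', for example, _init_path('images/abc') yields 'registry/images/abc'.)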
   def get_content(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if not key.exists():
       raise IOError('No such key: \'{0}\''.format(path))
     return key.get_contents_as_string()
 
   def put_content(self, path, content):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
-    key.set_contents_from_string(content)
+    key.set_contents_from_string(content, encrypt_key=True)
     return path
 
   def stream_read(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if not key.exists():
       raise IOError('No such key: \'{0}\''.format(path))
     while True:
       buf = key.read(self.buffer_size)
       if not buf:
         break
       yield buf
 
   def stream_read_file(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if not key.exists():
       raise IOError('No such key: \'{0}\''.format(path))
     return StreamReadKeyAsFile(key)
 
   def stream_write(self, path, fp):
     # Minimum size of upload part size on S3 is 5MB
     buffer_size = 5 * 1024 * 1024
     if self.buffer_size > buffer_size:
       buffer_size = self.buffer_size
     path = self._init_path(path)
-    mp = self._s3_bucket.initiate_multipart_upload(path)
+    mp = self._s3_bucket.initiate_multipart_upload(path, encrypt_key=True)
     num_part = 1
     while True:
       try:
         buf = fp.read(buffer_size)
         if not buf:
           break
         io = StringIO.StringIO(buf)
         mp.upload_part_from_file(io, num_part)
         num_part += 1
         io.close()
       except IOError:
         break
     mp.complete_upload()
 
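(Worth noting, not part of the commit: for multipart uploads the encryption flag belongs on initiate_multipart_upload, not on the individual upload_part_from_file calls, so a single encrypt_key=True at initiation covers every part of the object.)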
   def list_directory(self, path=None):
     path = self._init_path(path)
     if not path.endswith('/'):
       path += '/'
     ln = 0
     if self._root_path != '/':
       ln = len(self._root_path)
     exists = False
     for key in self._s3_bucket.list(prefix=path, delimiter='/'):
       exists = True
       name = key.name
       if name.endswith('/'):
         yield name[ln:-1]
       else:
         yield name[ln:]
     if exists is False:
       # In order to be compliant with the LocalStorage API. Even though
       # S3 does not have a concept of folders.
       raise OSError('No such directory: \'{0}\''.format(path))
 
   def exists(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     return key.exists()
 
   def remove(self, path):
     path = self._init_path(path)
     key = boto.s3.key.Key(self._s3_bucket, path)
     if key.exists():
       # It's a file
       key.delete()
       return
     # We assume it's a directory
     if not path.endswith('/'):
       path += '/'
     for key in self._s3_bucket.list(prefix=path):
       key.delete()
 
   def get_size(self, path):
     path = self._init_path(path)
     # Lookup does a HEAD HTTP Request on the object
     key = self._s3_bucket.lookup(path)
     if not key:
       raise OSError('No such key: \'{0}\''.format(path))
     return key.size
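(A hypothetical spot-check, not part of the commit: boto fills in Key.encrypted from the x-amz-server-side-encryption response header, so a HEAD via bucket.lookup() can confirm an object was stored encrypted:

def is_encrypted(bucket, path):
  # lookup() issues a HEAD request; it returns None for missing keys.
  key = bucket.lookup(path)
  if key is None:
    raise OSError('No such key: \'{0}\''.format(path))
  return key.encrypted == 'AES256'
)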