import logging

from peewee import SQL, IntegrityError
from cachetools import lru_cache
from collections import namedtuple

from data.model import (config, db_transaction, InvalidImageException, TorrentInfoDoesNotExist,
                        DataModelException, _basequery)
from data.database import (ImageStorage, Image, ImageStoragePlacement, ImageStorageLocation,
                           ImageStorageTransformation, ImageStorageSignature,
                           ImageStorageSignatureKind, Repository, Namespace, TorrentInfo, ApprBlob,
                           ensure_under_transaction, ManifestBlob)


logger = logging.getLogger(__name__)

_Location = namedtuple('location', ['id', 'name'])


@lru_cache(maxsize=1)
def get_image_locations():
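  """ Returns a map of all ImageStorageLocation rows, keyed by both ID and name. The result is
      cached for the lifetime of the process. """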
  location_map = {}
  for location in ImageStorageLocation.select():
    location_tuple = _Location(location.id, location.name)
    location_map[location.id] = location_tuple
    location_map[location.name] = location_tuple

  return location_map


def get_image_location_for_name(location_name):
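  """ Returns the cached location tuple (id, name) for the location with the given name. """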
  locations = get_image_locations()
  return locations[location_name]


def get_image_location_for_id(location_id):
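  """ Returns the cached location tuple (id, name) for the location with the given ID. """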
  locations = get_image_locations()
  return locations[location_id]


def add_storage_placement(storage, location_name):
  """ Adds a storage placement for the given storage at the given location. """
  location = get_image_location_for_name(location_name)
  try:
    ImageStoragePlacement.create(location=location.id, storage=storage)
  except IntegrityError:
    # Placement already exists. Nothing to do.
    pass


def _orphaned_storage_query(candidate_ids):
  """ Returns the subset of the candidate ImageStorage IDs representing storages that are no
      longer referenced by images.
  """
  # Issue a union query to find all candidate storages that are still referenced by an image.
  # This is much faster than the group_by and having call we used to use here.
  nonorphaned_queries = []
  for counter, candidate_id in enumerate(candidate_ids):
    query_alias = 'q{0}'.format(counter)
    storage_subq = (ImageStorage
                    .select(ImageStorage.id)
                    .join(Image)
                    .where(ImageStorage.id == candidate_id)
                    .limit(1)
                    .alias(query_alias))

    nonorphaned_queries.append(ImageStorage
                               .select(SQL('*'))
                               .from_(storage_subq))

  # Build the set of candidate storages missing from the results. These storages are orphaned.
  nonorphaned_storage_ids = {storage.id for storage
                             in _basequery.reduce_as_tree(nonorphaned_queries)}
  return list(candidate_ids - nonorphaned_storage_ids)


def garbage_collect_storage(storage_id_whitelist):
  """ Performs GC on a possible subset of the storages with the IDs found in the
      whitelist. The storages in the whitelist will be checked, and any orphaned ones will
      be removed, with those IDs being returned.
  """
  if len(storage_id_whitelist) == 0:
    return []

  def placements_to_filtered_paths_set(placements_list):
    """ Returns the set of paths to remove from storage, filtered from the given placements
        list by removing any CAS paths that are still referenced by storage(s) in the database.
    """
    with ensure_under_transaction():
      if not placements_list:
        return set()

      # Find the content checksums not referenced by other storages. Any that are, we cannot
      # remove.
      content_checksums = set([placement.storage.content_checksum for placement in placements_list
                               if placement.storage.cas_path])

      unreferenced_checksums = set()
      if content_checksums:
        # Check the current image storage.
        query = (ImageStorage
                 .select(ImageStorage.content_checksum)
                 .where(ImageStorage.content_checksum << list(content_checksums)))
        is_referenced_checksums = set([image_storage.content_checksum for image_storage in query])
        if is_referenced_checksums:
          logger.warning('GC attempted to remove CAS checksums %s, which are still IS referenced',
                         is_referenced_checksums)

        # Check the ApprBlob table as well.
        query = ApprBlob.select(ApprBlob.digest).where(ApprBlob.digest << list(content_checksums))
        appr_blob_referenced_checksums = set([blob.digest for blob in query])
        if appr_blob_referenced_checksums:
          logger.warning('GC attempted to remove CAS checksums %s, which are ApprBlob referenced',
                         appr_blob_referenced_checksums)

        unreferenced_checksums = (content_checksums - appr_blob_referenced_checksums -
                                  is_referenced_checksums)

      # Return the path for every placement whose storage is not at a CAS path, or whose content
      # checksum is no longer referenced.
      return {(get_image_location_for_id(placement.location_id).name,
               get_layer_path(placement.storage))
              for placement in placements_list
              if not placement.storage.cas_path or
                 placement.storage.content_checksum in unreferenced_checksums}

  # Note: Both of these deletes must occur in the same transaction (unfortunately) because a
  # storage without any placement is invalid, and a placement cannot exist without a storage.
  # TODO(jake): We might want to allow for null storages on placements, which would allow us to
  # delete the storages, then delete the placements in a non-transaction.
  logger.debug('Garbage collecting storages from candidates: %s', storage_id_whitelist)
  with db_transaction():
    orphaned_storage_ids = _orphaned_storage_query(storage_id_whitelist)
    if len(orphaned_storage_ids) == 0:
      # Nothing to GC.
      return []

    placements_to_remove = list(ImageStoragePlacement
                                .select(ImageStoragePlacement, ImageStorage)
                                .join(ImageStorage)
                                .where(ImageStorage.id << orphaned_storage_ids))

    # Remove the placements for orphaned storages
    if len(placements_to_remove) > 0:
      placement_ids_to_remove = [placement.id for placement in placements_to_remove]
      placements_removed = (ImageStoragePlacement
                            .delete()
                            .where(ImageStoragePlacement.id << placement_ids_to_remove)
                            .execute())
      logger.debug('Removed %s image storage placements', placements_removed)

    # Remove all orphaned storages
    torrents_removed = (TorrentInfo
                        .delete()
                        .where(TorrentInfo.storage << orphaned_storage_ids)
                        .execute())
    logger.debug('Removed %s torrent info records', torrents_removed)

    signatures_removed = (ImageStorageSignature
                          .delete()
                          .where(ImageStorageSignature.storage << orphaned_storage_ids)
                          .execute())
    logger.debug('Removed %s image storage signatures', signatures_removed)

    blob_refs_removed = (ManifestBlob
                         .delete()
                         .where(ManifestBlob.blob << orphaned_storage_ids)
                         .execute())
    logger.debug('Removed %s blob references', blob_refs_removed)

    storages_removed = (ImageStorage
                        .delete()
                        .where(ImageStorage.id << orphaned_storage_ids)
                        .execute())
    logger.debug('Removed %s image storage records', storages_removed)

    # Determine the paths to remove. We cannot simply remove all paths matching storages, as CAS
    # can share the same path. We further filter these paths by checking for any storages still in
    # the database with the same content checksum.
    paths_to_remove = placements_to_filtered_paths_set(placements_to_remove)

  # We are going to make the conscious decision to not delete image storage blobs inside
  # transactions.
  # This may end up producing garbage in s3, trading off for higher availability in the database.
  for location_name, image_path in paths_to_remove:
    logger.debug('Removing %s from %s', image_path, location_name)
    config.store.remove({location_name}, image_path)

  return orphaned_storage_ids


def create_v1_storage(location_name):
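  """ Creates a new, non-CAS ImageStorage with a placement at the given location and returns it. """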
  storage = ImageStorage.create(cas_path=False)
  location = get_image_location_for_name(location_name)
  ImageStoragePlacement.create(location=location.id, storage=storage)
  storage.locations = {location_name}
  return storage


def find_or_create_storage_signature(storage, signature_kind_name):
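  """ Returns the signature of the given kind for the given storage, creating it if it does not
      already exist. """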
  found = lookup_storage_signature(storage, signature_kind_name)
  if found is None:
    kind = ImageStorageSignatureKind.get(name=signature_kind_name)
    found = ImageStorageSignature.create(storage=storage, kind=kind)

  return found


def lookup_storage_signature(storage, signature_kind_name):
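  """ Returns the signature of the given kind for the given storage, or None if none exists. """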
  kind = ImageStorageSignatureKind.get(name=signature_kind_name)
  try:
    return (ImageStorageSignature
            .select()
            .where(ImageStorageSignature.storage == storage, ImageStorageSignature.kind == kind)
            .get())
  except ImageStorageSignature.DoesNotExist:
    return None


def _get_storage(query_modifier):
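  """ Returns the single ImageStorage matched by applying the given modifier to the placements
      query, with its locations set populated. Raises InvalidImageException if none is found. """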
  query = (ImageStoragePlacement
           .select(ImageStoragePlacement, ImageStorage)
           .switch(ImageStoragePlacement)
           .join(ImageStorage))

  placements = list(query_modifier(query))
  if not placements:
    raise InvalidImageException()

  found = placements[0].storage
  found.locations = {get_image_location_for_id(placement.location_id).name
                     for placement in placements}
  return found


def get_storage_by_uuid(storage_uuid):
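  """ Returns the ImageStorage with the given UUID, with its locations set populated, or raises
      InvalidImageException if no such storage exists. """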
  def filter_to_uuid(query):
    return query.where(ImageStorage.uuid == storage_uuid)

  try:
    return _get_storage(filter_to_uuid)
  except InvalidImageException:
    raise InvalidImageException('No storage found with uuid: %s' % storage_uuid)


def get_layer_path(storage_record):
  """ Returns the path in the storage engine to the layer data referenced by the storage row. """
  assert storage_record.cas_path is not None
  return get_layer_path_for_storage(storage_record.uuid, storage_record.cas_path,
                                    storage_record.content_checksum)


def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum):
  """ Returns the path in the storage engine to the layer data referenced by the storage
      information. """
  store = config.store
  if not cas_path:
    logger.debug('Serving layer from legacy v1 path for storage %s', storage_uuid)
    return store.v1_image_layer_path(storage_uuid)

  return store.blob_path(content_checksum)


def lookup_repo_storages_by_content_checksum(repo, checksums, by_manifest=False):
  """ Looks up repository storages (without placements) matching the given repository
      and checksums. """
  # There may be many duplicates of the checksums, so for performance reasons we are going
  # to use a union to select just one storage with each checksum.
  queries = []

  for counter, checksum in enumerate(set(checksums)):
    query_alias = 'q{0}'.format(counter)

    # TODO(jschorr): Remove once we have a new-style model for tracking temp uploaded blobs and
    # all legacy tables have been removed.
    if by_manifest:
      candidate_subq = (ImageStorage
                        .select(ImageStorage.id, ImageStorage.content_checksum,
                                ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
                                ImageStorage.uncompressed_size, ImageStorage.uploading)
                        .join(ManifestBlob)
                        .where(ManifestBlob.repository == repo,
                               ImageStorage.content_checksum == checksum)
                        .limit(1)
                        .alias(query_alias))
    else:
      candidate_subq = (ImageStorage
                        .select(ImageStorage.id, ImageStorage.content_checksum,
                                ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
                                ImageStorage.uncompressed_size, ImageStorage.uploading)
                        .join(Image)
                        .where(Image.repository == repo, ImageStorage.content_checksum == checksum)
                        .limit(1)
                        .alias(query_alias))

    queries.append(ImageStorage
                   .select(SQL('*'))
                   .from_(candidate_subq))

  return _basequery.reduce_as_tree(queries)


def set_image_storage_metadata(docker_image_id, namespace_name, repository_name, image_size,
                               uncompressed_size):
  """ Sets metadata that is specific to the binary storage of the data, irrespective of how it
      is used in the layer tree.
  """
  if image_size is None:
    raise DataModelException('Empty image size field')

  try:
    image = (Image
             .select(Image, ImageStorage)
             .join(Repository)
             .join(Namespace, on=(Repository.namespace_user == Namespace.id))
             .switch(Image)
             .join(ImageStorage)
             .where(Repository.name == repository_name, Namespace.username == namespace_name,
                    Image.docker_image_id == docker_image_id)
             .get())
  except Image.DoesNotExist:
    raise InvalidImageException('No image with specified id and repository')

  # We MUST do this here, it can't be done in the corresponding image call because the storage
  # has not yet been pushed
  image.aggregate_size = _basequery.calculate_image_aggregate_size(image.ancestors, image_size,
                                                                   image.parent)
  image.save()

  image.storage.image_size = image_size
  image.storage.uncompressed_size = uncompressed_size
  image.storage.save()
  return image.storage


def get_storage_locations(uuid):
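  """ Returns the names of all locations at which the storage with the given UUID is placed. """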
  query = (ImageStoragePlacement
           .select()
           .join(ImageStorage)
           .where(ImageStorage.uuid == uuid))

  return [get_image_location_for_id(placement.location_id).name for placement in query]


def save_torrent_info(storage_object, piece_length, pieces):
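  """ Saves a TorrentInfo row for the given storage and piece length, returning the existing row
      if one is already present. """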
  try:
    return TorrentInfo.get(storage=storage_object, piece_length=piece_length)
  except TorrentInfo.DoesNotExist:
    try:
      return TorrentInfo.create(storage=storage_object, piece_length=piece_length, pieces=pieces)
    except IntegrityError:
      # TorrentInfo already exists for this storage.
      return TorrentInfo.get(storage=storage_object, piece_length=piece_length)


def get_torrent_info(blob):
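  """ Returns the TorrentInfo row for the given blob storage, or raises TorrentInfoDoesNotExist. """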
  try:
    return (TorrentInfo
            .select()
            .where(TorrentInfo.storage == blob)
            .get())
  except TorrentInfo.DoesNotExist:
    raise TorrentInfoDoesNotExist