Another huge batch of registry v2 changes
Add patch support and resumable SHA. Implement all actual registry methods. Add a simple database generation option.
parent 5ba3521e67
commit e1b3e9e6ae
29 changed files with 1095 additions and 430 deletions
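The commit message mentions resumable SHA support for chunked (PATCH) blob uploads. As context only, here is a minimal sketch of the idea, not taken from this commit: keep a running SHA-256 digest per upload session and feed each uploaded chunk into it, so the final content digest can be produced without re-reading the whole blob. The function names and the in-memory session table are assumptions for illustration; a real registry would persist the hash state between requests.

import hashlib

# Hypothetical in-memory table of per-upload hashers (illustration only).
_upload_hashers = {}

def start_upload(upload_id):
  # Begin tracking a digest for a new chunked upload session.
  _upload_hashers[upload_id] = hashlib.sha256()

def append_chunk(upload_id, chunk):
  # Feed each PATCHed chunk into the running digest as it arrives.
  _upload_hashers[upload_id].update(chunk)

def finish_upload(upload_id):
  # Produce the final digest in the registry's "sha256:<hex>" form.
  return 'sha256:' + _upload_hashers.pop(upload_id).hexdigest()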
@@ -1,104 +0,0 @@
import logging
import json

from data.database import Image, ImageStorage, Repository, User, configure
from data import model
from app import app, storage as store


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

configure(app.config)

# Turn off debug logging for boto
logging.getLogger('boto').setLevel(logging.CRITICAL)


query = (Image
         .select(Image, ImageStorage, Repository, User)
         .join(ImageStorage)
         .switch(Image)
         .join(Repository)
         .join(User)
         .where(ImageStorage.uploading == False))

bad_count = 0
good_count = 0


def resolve_or_create(repo, docker_image_id, new_ancestry):
  existing = model.image.get_repo_image_extended(repo.namespace_user.username, repo.name,
                                                 docker_image_id)
  if existing:
    logger.debug('Found existing image: %s, %s', existing.id, docker_image_id)
    return existing
  else:
    # we need to find some storage to link it to
    try:
      to_link = (ImageStorage
                 .select()
                 .join(Image)
                 .where(Image.docker_image_id == docker_image_id)
                 .get())
      logger.debug('Linking to storage: %s' % to_link.uuid)
      created = Image.create(docker_image_id=docker_image_id, repository=repo,
                             storage=to_link, ancestors=new_ancestry)
      logger.debug('Created image: %s' % created)
      return created
    except ImageStorage.DoesNotExist:
      msg = 'No image available anywhere for storage: %s in namespace: %s'
      logger.error(msg, docker_image_id, repo.namespace_user.username)
      raise RuntimeError()


def all_ancestors_exist(ancestors):
  if not ancestors:
    return True

  found_count = len(list(Image
                         .select()
                         .where(Image.id << ancestors)))
  return found_count == len(ancestors)


cant_fix = []
for img in query:
  try:
    with_locations = model.image.get_repo_image_extended(img.repository.namespace_user.username,
                                                         img.repository.name, img.docker_image_id)
    ancestry_storage = store.image_ancestry_path(img.storage.uuid)
    if store.exists(with_locations.storage.locations, ancestry_storage):
      full_ancestry = json.loads(store.get_content(with_locations.storage.locations,
                                                   ancestry_storage))[1:]
      full_ancestry.reverse()

      ancestor_dbids = [int(anc_id) for anc_id in img.ancestors.split('/')[1:-1]]

      if len(full_ancestry) != len(ancestor_dbids) or not all_ancestors_exist(ancestor_dbids):
        logger.error('Image has incomplete ancestry: %s, %s, %s, %s', img.id, img.docker_image_id,
                     full_ancestry, ancestor_dbids)

        fixed_ancestry = '/'
        for ancestor in full_ancestry:
          ancestor_img = resolve_or_create(img.repository, ancestor,
                                           fixed_ancestry)
          fixed_ancestry += str(ancestor_img.id) + '/'

        img.ancestors = fixed_ancestry
        img.save()

        bad_count += 1
      else:
        good_count += 1
    else:
      bad_count += 1

  except RuntimeError:
    cant_fix.append(img)

logger.debug('Bad: %s Good: %s Can\'t Fix: %s', bad_count, good_count,
             len(cant_fix))

for cant in cant_fix:
  logger.error('Unable to fix %s in repo %s/%s', cant.id, cant.repository.namespace_user.username,
               cant.repository.name)
@ -1,67 +0,0 @@
|
|||
import argparse
|
||||
import logging
|
||||
|
||||
from data import model
|
||||
from data.database import ImageStoragePlacement, ImageStorageLocation
|
||||
from app import storage
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
PATHSPECS = [
|
||||
(storage.image_json_path, True),
|
||||
(storage.image_layer_path, True),
|
||||
(storage.image_ancestry_path, True),
|
||||
(storage.image_file_trie_path, False),
|
||||
(storage.image_file_diffs_path, False),
|
||||
]
|
||||
|
||||
|
||||
def migrate_image(image, destination_location):
|
||||
logger.debug('Migrating image: %s -> %s', image.docker_image_id, destination_location.name)
|
||||
destination_location_set = {destination_location.name}
|
||||
|
||||
for path_func, required in PATHSPECS:
|
||||
path = path_func(image.storage.uuid)
|
||||
|
||||
if storage.exists(image.storage.locations, path):
|
||||
if not storage.exists(destination_location_set, path):
|
||||
logger.debug('Migrating path: %s', path)
|
||||
|
||||
with storage.stream_read_file(image.storage.locations, path) as file_to_migrate:
|
||||
storage.stream_write(destination_location_set, path, file_to_migrate)
|
||||
else:
|
||||
logger.debug('File already present in destination: %s', path)
|
||||
elif required:
|
||||
raise RuntimeError('Required file not present in image to migrate: %s', path)
|
||||
|
||||
# Successfully migrated, now write the placement
|
||||
ImageStoragePlacement.create(location=destination_location, storage=image.storage)
|
||||
|
||||
parser = argparse.ArgumentParser(description='Replicate an image storage.')
|
||||
parser.add_argument('--namespace', type=str, required=True,
|
||||
help='Namespace for the repository containing the image to be replicated')
|
||||
parser.add_argument('--repository', type=str, required=True,
|
||||
help='Name for the repository containing the image to be replicated')
|
||||
parser.add_argument('--imageid', type=str, default=None,
|
||||
help='Specific image to migrate, entire repo will be migrated if omitted')
|
||||
parser.add_argument('--to', type=str, required=True,
|
||||
help='Storage region to which the data should be replicated')
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
location = ImageStorageLocation.get(name=args.to)
|
||||
|
||||
images = []
|
||||
if args.imageid is not None:
|
||||
images = [model.image.get_image_by_id(args.namespace, args.repository, args.imageid)]
|
||||
else:
|
||||
images = model.image.get_repository_images(args.namespace, args.repository)
|
||||
|
||||
for img in images:
|
||||
migrate_image(img, location)
|
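For reference, the replication script above is driven entirely by its argparse flags, so a run would have looked roughly like the following; the script path and argument values are placeholders, not taken from the commit:

python <path/to/replication_script.py> --namespace <namespace> --repository <repository> --to <location-name>

Adding --imageid restricts the run to a single image; when it is omitted, the else branch of the `if args.imageid is not None` check migrates every image in the repository.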