Get the base image stuff working. Checkpoint before fixing the tests.

jakedt 2014-02-16 17:38:47 -05:00
parent 5742e6ea4e
commit b619356907
120 changed files with 305 additions and 261 deletions

View file

@ -172,6 +172,15 @@ class EmailConfirmation(BaseModel):
created = DateTimeField(default=datetime.now)
class ImageStorage(BaseModel):
uuid = CharField(default=uuid_generator)
checksum = CharField(null=True)
created = DateTimeField(null=True)
comment = TextField(null=True)
command = TextField(null=True)
image_size = BigIntegerField(null=True)
class Image(BaseModel):
# This class is intentionally denormalized. Even though images are supposed
# to be globally unique we can't treat them as such for permissions and
@ -198,18 +207,6 @@ class Image(BaseModel):
)
class ImageStorage(BaseModel):
storage_uuid = CharField(default=uuid_generator)
checksum = CharField(null=True)
created = DateTimeField(null=True)
comment = TextField(null=True)
command = TextField(null=True)
image_size = BigIntegerField(null=True)
# '/' separated list of ancestor ids, e.g. /1/2/6/7/10/
ancestors = CharField(index=True, default='/', max_length=64535)
class RepositoryTag(BaseModel):
name = CharField()
image = ForeignKeyField(Image)
@ -262,4 +259,4 @@ all_models = [User, Repository, Image, AccessToken, Role,
RepositoryPermission, Visibility, RepositoryTag,
EmailConfirmation, FederatedLogin, LoginService, QueueItem,
RepositoryBuild, Team, TeamMember, TeamRole, Webhook,
LogEntryKind, LogEntry, PermissionPrototype]
LogEntryKind, LogEntry, PermissionPrototype, ImageStorage]
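
For orientation, here is a minimal sketch of the shape this model change introduces (plain Python dataclasses with hypothetical names, not the peewee models above): many per-repository Image rows can share one ImageStorage row, which owns the content-level fields (checksum, created, comment, command, image_size), while permissions and the ancestors chain stay denormalized on Image.

from dataclasses import dataclass, field
from typing import Optional
from uuid import uuid4

@dataclass
class StorageSketch:
    # One record per unique image blob; shared across repositories.
    uuid: str = field(default_factory=lambda: str(uuid4()))
    checksum: Optional[str] = None
    image_size: Optional[int] = None

@dataclass
class ImageSketch:
    # One record per (repository, docker image); ancestry stays here.
    docker_image_id: str
    repository: str
    ancestors: str = '/'
    storage: Optional[StorageSketch] = None

# Two repositories pushing the same image end up linked to the same storage.
shared = StorageSketch(checksum='sha256:abc', image_size=1024)
a = ImageSketch('deadbeef', 'user1/repo', storage=shared)
b = ImageSketch('deadbeef', 'user2/repo', storage=shared)
assert a.storage is b.storage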

View file

@ -537,26 +537,30 @@ def get_user_teams_within_org(username, organization):
def get_visible_repository_count(username=None, include_public=True,
sort=False, namespace=None):
return get_visible_repository_internal(username=username,
include_public=include_public,
sort=sort, namespace=namespace,
get_count=True)
namespace=None):
query = _visible_repository_query(username=username,
include_public=include_public,
namespace=namespace)
return query.count()
def get_visible_repositories(username=None, include_public=True, page=None,
limit=None, sort=False, namespace=None):
return get_visible_repository_internal(username=username,
include_public=include_public,
page=page, limit=limit, sort=sort,
namespace=namespace, get_count=False)
query = _visible_repository_query(username=username,
include_public=include_public, page=page,
limit=limit, namespace=namespace)
if sort:
query = query.order_by(Repository.description.desc())
if limit:
query = query.limit(limit)
return query
def get_visible_repository_internal(username=None, include_public=True,
limit=None, page=None, sort=False,
namespace=None, get_count=False):
if not username and not include_public:
return []
def _visible_repository_query(username=None, include_public=True, limit=None,
page=None, namespace=None):
query = (Repository
.select() # Note: We need to leave this blank for the get_count case. Otherwise, MySQL/RDS complains.
.distinct()
@ -564,8 +568,19 @@ def get_visible_repository_internal(username=None, include_public=True,
.switch(Repository)
.join(RepositoryPermission, JOIN_LEFT_OUTER))
query = _filter_to_repos_for_user(query, username, namespace, include_public)
if page:
query = query.paginate(page, limit)
elif limit:
query = query.limit(limit)
return query
def _filter_to_repos_for_user(query, username=None, namespace=None,
include_public=True):
where_clause = None
admin_query = None
if username:
UserThroughTeam = User.alias()
Org = User.alias()
@ -574,6 +589,7 @@ def get_visible_repository_internal(username=None, include_public=True,
AdminUser = User.alias()
query = (query
.switch(RepositoryPermission)
.join(User, JOIN_LEFT_OUTER)
.switch(RepositoryPermission)
.join(Team, JOIN_LEFT_OUTER)
@ -606,19 +622,7 @@ def get_visible_repository_internal(username=None, include_public=True,
else:
where_clause = new_clause
if sort:
query = query.order_by(Repository.description.desc())
if page:
query = query.paginate(page, limit)
elif limit:
query = query.limit(limit)
where = query.where(where_clause)
if get_count:
return where.count()
else:
return where
return query.where(where_clause)
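
The refactor above replaces the get_count flag on get_visible_repository_internal with a shared query builder (_visible_repository_query) plus two thin consumers: one calls .count(), the other applies sort and limit and returns rows. A hedged, ORM-free sketch of the same split, using made-up dict fields instead of the real models:

def _visible(rows, username=None, include_public=True):
    # Shared filter: repositories the user owns, plus public ones if requested.
    if not username and not include_public:
        return []
    return [r for r in rows
            if r['owner'] == username or (include_public and r['public'])]

def visible_count(rows, **kwargs):
    return len(_visible(rows, **kwargs))

def visible_list(rows, sort=False, limit=None, **kwargs):
    result = _visible(rows, **kwargs)
    if sort:
        result = sorted(result, key=lambda r: r['description'], reverse=True)
    return result[:limit] if limit else result

repos = [{'owner': 'jakedt', 'public': False, 'description': 'private repo'},
         {'owner': 'devtable', 'public': True, 'description': 'public repo'}]
assert visible_count(repos, username='jakedt') == 2
assert [r['description'] for r in visible_list(repos, sort=True, limit=1)] == ['public repo']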
def get_matching_repositories(repo_term, username=None):
@ -779,14 +783,13 @@ def get_repo_image(namespace_name, repository_name, image_id):
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == image_id)
.limit(1))
result = list(query)
if not result:
Image.docker_image_id == image_id))
try:
return query.get()
except Image.DoesNotExist:
return None
return result[0]
def repository_is_public(namespace_name, repository_name):
joined = Repository.select().join(Visibility)
@ -868,10 +871,31 @@ def create_repository(namespace, name, creating_user, visibility='private'):
return repo
def create_image(docker_image_id, repository):
new_image = Image.create(docker_image_id=docker_image_id,
repository=repository)
return new_image
def create_or_link_image(docker_image_id, repository, username, create=True):
with db.transaction():
query = (ImageStorage
.select()
.distinct()
.join(Image)
.join(Repository)
.join(Visibility)
.switch(Repository)
.join(RepositoryPermission, JOIN_LEFT_OUTER))
query = (_filter_to_repos_for_user(query, username)
.where(Image.docker_image_id == docker_image_id))
try:
storage = query.get()
msg = 'Linking image to existing storage with docker id: %s and uuid: %s'
logger.debug(msg, docker_image_id, storage.uuid)
except ImageStorage.DoesNotExist:
logger.debug('Creating new storage for docker id: %s', docker_image_id)
storage = ImageStorage.create()
new_image = Image.create(docker_image_id=docker_image_id,
repository=repository, storage=storage)
return new_image
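
create_or_link_image is the core of the change: before creating a fresh storage record it searches for an existing ImageStorage reachable through any repository the pushing user can see, and links to it on a hit. A small in-memory sketch of that decision (plain dicts, hypothetical helper name):

def create_or_link_storage(docker_image_id, visible_images, storages):
    """visible_images: (docker_image_id, storage_uuid) pairs the user may read.
    storages: dict of storage_uuid -> storage record."""
    for existing_id, storage_uuid in visible_images:
        if existing_id == docker_image_id:
            # Re-use the existing blob instead of storing a duplicate.
            return storages[storage_uuid]
    # No visible copy of this image: create a brand-new storage record.
    new_uuid = 'storage-%d' % (len(storages) + 1)
    storages[new_uuid] = {'uuid': new_uuid}
    return storages[new_uuid]

storages = {'s1': {'uuid': 's1'}}
visible = [('deadbeef', 's1')]
assert create_or_link_storage('deadbeef', visible, storages)['uuid'] == 's1'
assert create_or_link_storage('cafebabe', visible, storages)['uuid'] == 'storage-2'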
def set_image_checksum(docker_image_id, repository, checksum):
@ -884,46 +908,67 @@ def set_image_checksum(docker_image_id, repository, checksum):
def set_image_size(docker_image_id, namespace_name, repository_name,
image_size):
joined = Image.select().join(Repository)
image_list = list(joined.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == docker_image_id))
try:
image = (Image
.select(Image, ImageStorage)
.join(Repository)
.switch(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == docker_image_id)
.get())
if not image_list:
except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository')
fetched = image_list[0]
fetched.image_size = image_size
fetched.save()
return fetched
if image.storage:
image.storage.image_size = image_size
image.storage.save()
else:
image.image_size = image_size
image.save()
return image
def set_image_metadata(docker_image_id, namespace_name, repository_name,
created_date_str, comment, command, parent=None):
joined = Image.select().join(Repository)
image_list = list(joined.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == docker_image_id))
with db.transaction():
query = (Image
.select(Image, ImageStorage)
.join(Repository)
.switch(Image)
.join(ImageStorage)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == docker_image_id))
if not image_list:
raise DataModelException('No image with specified id and repository')
try:
fetched = query.get()
except Image.DoesNotExist:
raise DataModelException('No image with specified id and repository')
fetched = image_list[0]
fetched.created = dateutil.parser.parse(created_date_str)
fetched.comment = comment
fetched.command = command
fetched.storage.created = dateutil.parser.parse(created_date_str)
fetched.storage.comment = comment
fetched.storage.command = command
if parent:
fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
if parent:
fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
fetched.save()
return fetched
fetched.save()
fetched.storage.save()
return fetched
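
Note that set_image_metadata now updates two rows: created/comment/command land on the storage record while ancestors stays on the image, and both saves sit inside db.transaction() so a failure cannot leave them half-applied. A toy sqlite3 illustration of that atomic dual update (table and column names are invented):

import sqlite3

con = sqlite3.connect(':memory:')
con.executescript('''
    CREATE TABLE image (id INTEGER PRIMARY KEY, ancestors TEXT, storage_id INTEGER);
    CREATE TABLE imagestorage (id INTEGER PRIMARY KEY, comment TEXT, command TEXT);
    INSERT INTO imagestorage (id, comment, command) VALUES (1, NULL, NULL);
    INSERT INTO image (id, ancestors, storage_id) VALUES (10, '/', 1);
''')
with con:  # both UPDATEs commit together, or neither does
    con.execute("UPDATE imagestorage SET comment = ?, command = ? WHERE id = 1",
                ('base image', '["bash"]'))
    con.execute("UPDATE image SET ancestors = ? WHERE id = 10", ('/3/',))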
def get_repository_images(namespace_name, repository_name):
joined = Image.select().join(Repository)
return joined.where(Repository.name == repository_name,
Repository.namespace == namespace_name)
return (Image
.select(Image, ImageStorage)
.join(Repository)
.switch(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name))
def list_repository_tags(namespace_name, repository_name):
@ -933,111 +978,97 @@ def list_repository_tags(namespace_name, repository_name):
return with_image.where(Repository.name == repository_name,
Repository.namespace == namespace_name)
def delete_tag_and_images(namespace_name, repository_name, tag_name):
all_images = get_repository_images(namespace_name, repository_name)
all_tags = list_repository_tags(namespace_name, repository_name)
# Find the tag's information.
found_tag = None
for tag in all_tags:
if tag.name == tag_name:
found_tag = tag
break
if not found_tag:
return
# Build the set of database IDs corresponding to the tag's ancestor images,
# as well as the tag's image itself.
tag_image_ids = set(found_tag.image.ancestors.split('/'))
tag_image_ids.add(str(found_tag.image.id))
# Filter out any images that belong to any other tags.
for tag in all_tags:
if tag.name != tag_name:
# Remove all ancestors of the tag.
tag_image_ids = tag_image_ids - set(tag.image.ancestors.split('/'))
# Remove the current image ID.
tag_image_ids.discard(str(tag.image.id))
# Find all the images that belong to the tag.
tag_images = [image for image in all_images
if str(image.id) in tag_image_ids]
# Delete the tag found.
found_tag.delete_instance()
# Delete the images found.
for image in tag_images:
image.delete_instance()
repository_path = store.image_path(namespace_name, repository_name,
image.docker_image_id)
logger.debug('Recursively deleting image path: %s' % repository_path)
store.remove(repository_path)
def garbage_collect_repository(namespace_name, repository_name):
# Get a list of all images used by tags in the repository
tag_query = (RepositoryTag
.select(RepositoryTag, Image)
.join(Image)
.switch(RepositoryTag)
.join(Repository)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name))
with db.transaction():
# Get a list of all images used by tags in the repository
tag_query = (RepositoryTag
.select(RepositoryTag, Image, ImageStorage)
.join(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.switch(RepositoryTag)
.join(Repository)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name))
referenced_anscestors = set()
for tag in tag_query:
# The ancestor list is in the format '/1/2/3/'; extract just the ids
anscestor_id_strings = tag.image.ancestors.split('/')[1:-1]
ancestor_list = [int(img_id_str) for img_id_str in anscestor_id_strings]
referenced_anscestors = referenced_anscestors.union(set(ancestor_list))
referenced_anscestors.add(tag.image.id)
referenced_anscestors = set()
for tag in tag_query:
# The ancestor list is in the format '/1/2/3/'; extract just the ids
anscestor_id_strings = tag.image.ancestors.split('/')[1:-1]
ancestor_list = [int(img_id_str) for img_id_str in anscestor_id_strings]
referenced_anscestors = referenced_anscestors.union(set(ancestor_list))
referenced_anscestors.add(tag.image.id)
all_repo_images = get_repository_images(namespace_name, repository_name)
all_images = {int(img.id):img for img in all_repo_images}
to_remove = set(all_images.keys()).difference(referenced_anscestors)
all_repo_images = get_repository_images(namespace_name, repository_name)
all_images = {int(img.id): img for img in all_repo_images}
to_remove = set(all_images.keys()).difference(referenced_anscestors)
logger.info('Cleaning up unreferenced images: %s', to_remove)
logger.info('Cleaning up unreferenced images: %s', to_remove)
for image_id_to_remove in to_remove:
image_to_remove = all_images[image_id_to_remove]
image_path = store.image_path(namespace_name, repository_name,
image_to_remove.docker_image_id)
image_to_remove.delete_instance()
logger.debug('Deleting image storage: %s' % image_path)
store.remove(image_path)
uuids_to_check_for_gc = set()
for image_id_to_remove in to_remove:
image_to_remove = all_images[image_id_to_remove]
image_to_remove.delete_instance()
if not image_to_remove.storage:
image_path = store.image_path(namespace_name, repository_name,
image_to_remove.docker_image_id, None)
logger.debug('Deleting image storage: %s', image_path)
else:
uuids_to_check_for_gc.add(image_to_remove.storage.uuid)
storage_to_remove = (ImageStorage
.select()
.join(Image, JOIN_LEFT_OUTER)
.group_by(ImageStorage)
.where(ImageStorage.uuid << list(uuids_to_check_for_gc))
.having(fn.Count(Image.id) == 0))
for storage in storage_to_remove:
logger.debug('Garbage collecting image storage: %s', storage.uuid)
storage.delete_instance()
image_path = store.image_path(namespace_name, repository_name,
image_to_remove.docker_image_id,
storage.uuid)
store.remove(image_path)
return len(to_remove)
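
The garbage collector keeps every image reachable from a tag (the tagged image plus everything in its '/1/2/3/' ancestor chain) and removes the rest; the follow-up ImageStorage query with having(fn.Count(Image.id) == 0) then drops storage rows that no surviving image references. The set arithmetic, sketched without the ORM:

def images_to_remove(tags, all_image_ids):
    """tags: (tagged_image_id, ancestors_string) for every tag in the repo."""
    referenced = set()
    for image_id, ancestors in tags:
        # '/1/2/' -> {1, 2}, plus the tagged image itself.
        referenced.update(int(p) for p in ancestors.split('/')[1:-1] if p)
        referenced.add(image_id)
    return set(all_image_ids) - referenced

tags = [(7, '/1/2/'), (9, '/1/2/7/')]
assert images_to_remove(tags, [1, 2, 3, 7, 9]) == {3}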
def get_tag_image(namespace_name, repository_name, tag_name):
joined = Image.select().join(RepositoryTag).join(Repository)
fetched = list(joined.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
RepositoryTag.name == tag_name))
query = (Image
.select(Image, ImageStorage)
.join(RepositoryTag)
.join(Repository)
.switch(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
RepositoryTag.name == tag_name))
if not fetched:
try:
return query.get()
except Image.DoesNotExist:
raise DataModelException('Unable to find image for tag.')
return fetched[0]
def get_image_by_id(namespace_name, repository_name, docker_image_id):
joined = Image.select().join(Repository)
fetched = list(joined.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == docker_image_id))
query = (Image
.select(Image, ImageStorage)
.join(Repository)
.switch(Image)
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Repository.name == repository_name,
Repository.namespace == namespace_name,
Image.docker_image_id == docker_image_id))
if not fetched:
try:
return query.get()
except Image.DoesNotExist:
raise DataModelException('Unable to find image \'%s\' for repo \'%s/%s\'' %
(docker_image_id, namespace_name,
repository_name))
return fetched[0]
def get_parent_images(image_obj):
""" Returns a list of parent Image objects in chronilogical order. """
@ -1047,8 +1078,11 @@ def get_parent_images(image_obj):
if parent_db_ids == ['']:
return []
or_clauses = [(Image.id == db_id) for db_id in parent_db_ids]
parent_images = Image.select().where(reduce(operator.or_, or_clauses))
parent_images = (Image
.select(Image, ImageStorage)
.join(ImageStorage, JOIN_LEFT_OUTER)
.where(Image.id << parent_db_ids))
id_to_image = {unicode(image.id): image for image in parent_images}
return [id_to_image[parent_id] for parent_id in parent_db_ids]
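
get_parent_images fetches all parents in one query and then restores the chain order recorded in the ancestors string, since the database gives no ordering guarantee. The parsing and reordering, sketched in plain Python:

def parent_ids_in_order(ancestors):
    # '/1/2/6/' -> ['1', '2', '6'], root first; a bare '/' means no parents.
    return [p for p in ancestors.split('/')[1:-1] if p]

def order_parents(ancestors, fetched_by_id):
    # fetched_by_id maps str(image.id) -> image row, in arbitrary fetch order.
    return [fetched_by_id[pid] for pid in parent_ids_in_order(ancestors)]

fetched = {'1': 'img1', '2': 'img2', '6': 'img6'}
assert order_parents('/1/2/6/', fetched) == ['img1', 'img2', 'img6']
assert order_parents('/', fetched) == []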
@ -1206,15 +1240,17 @@ def set_team_repo_permission(team_name, namespace_name, repository_name,
def purge_repository(namespace_name, repository_name):
# Delete all tags to allow gc to reclaim storage
delete_all_repository_tags(namespace_name, repository_name)
# Gc to remove the images and storage
garbage_collect_repository(namespace_name, repository_name)
# Delete the rest of the repository metadata
fetched = Repository.get(Repository.name == repository_name,
Repository.namespace == namespace_name)
fetched.delete_instance(recursive=True)
repository_path = store.repository_namespace_path(namespace_name,
repository_name)
logger.debug('Recursively deleting path: %s' % repository_path)
store.remove(repository_path)
def get_private_repo_count(username):
joined = Repository.select().join(Visibility)

View file

@ -1012,7 +1012,6 @@ def list_repos():
if include_count:
repo_count = model.get_visible_repository_count(username,
include_public=include_public,
sort=sort,
namespace=namespace_filter)
repo_query = model.get_visible_repositories(username, limit=limit, page=page,
@ -1089,14 +1088,16 @@ def delete_repository(namespace, repository):
def image_view(image):
extended_props = image.storage or image
command = extended_props.command
return {
'id': image.docker_image_id,
'created': image.created,
'comment': image.comment,
'command': json.loads(image.command) if image.command else None,
'created': extended_props.created,
'comment': extended_props.comment,
'command': json.loads(command) if command else None,
'ancestors': image.ancestors,
'dbid': image.id,
'size': image.image_size,
'size': extended_props.image_size,
}
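
While the backfill is incomplete, some Image rows have no linked storage, so the view reads the moved fields from storage when it exists and falls back to the legacy columns on the image row; that is all image.storage or image does. A tiny sketch of that fallback:

from types import SimpleNamespace as NS

def effective_props(image):
    # Prefer the shared storage record; fall back to the legacy image columns.
    return image.storage or image

legacy = NS(docker_image_id='deadbeef', image_size=42, command='["bash"]',
            created=None, comment=None, storage=None)
migrated = NS(docker_image_id='cafebabe', image_size=None, command=None,
              created=None, comment=None,
              storage=NS(image_size=7, command='["sh"]', created=None, comment=None))

assert effective_props(legacy).image_size == 42
assert effective_props(migrated).image_size == 7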
@ -1399,7 +1400,14 @@ def get_image(namespace, repository, image_id):
def get_image_changes(namespace, repository, image_id):
permission = ReadRepositoryPermission(namespace, repository)
if permission.can() or model.repository_is_public(namespace, repository):
diffs_path = store.image_file_diffs_path(namespace, repository, image_id)
image = model.get_repo_image(namespace, repository, image_id)
if not image:
abort(404)
uuid = image.storage and image.storage.uuid
diffs_path = store.image_file_diffs_path(namespace, repository, image_id,
uuid)
try:
response_json = store.get_content(diffs_path)
@ -1416,7 +1424,8 @@ def get_image_changes(namespace, repository, image_id):
def delete_full_tag(namespace, repository, tag):
permission = AdministerRepositoryPermission(namespace, repository)
if permission.can():
model.delete_tag_and_images(namespace, repository, tag)
model.delete_tag(namespace, repository, tag)
model.garbage_collect_repository(namespace, repository)
username = current_user.db_user().username
log_action('delete_tag', namespace,

View file

@ -33,6 +33,11 @@ def generate_headers(role='read'):
session['namespace'] = namespace
session['repository'] = repository
if get_authenticated_user():
session['username'] = get_authenticated_user().username
else:
session.pop('username', None)
# We run our index and registry on the same hosts for now
registry_server = urlparse.urlparse(request.url).netloc
response.headers['X-Docker-Endpoints'] = registry_server
@ -179,8 +184,9 @@ def create_repository(namespace, repository):
if existing.docker_image_id in new_repo_images:
added_images.pop(existing.docker_image_id)
username = get_authenticated_user() and get_authenticated_user().username
for image_description in added_images.values():
model.create_image(image_description['id'], repo)
model.create_or_link_image(image_description['id'], repo, username)
response = make_response('Created', 201)

View file

@ -44,8 +44,12 @@ def require_completion(f):
"""This make sure that the image push correctly finished."""
@wraps(f)
def wrapper(namespace, repository, *args, **kwargs):
if store.exists(store.image_mark_path(namespace, repository,
kwargs['image_id'])):
image_id = kwargs['image_id']
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
if store.exists(store.image_mark_path(namespace, repository, image_id,
uuid)):
abort(400, 'Image %(image_id)s is being uploaded, retry later',
issue='upload-in-progress', image_id=kwargs['image_id'])
@ -85,14 +89,18 @@ def set_cache_headers(f):
def get_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository)
if permission.can() or model.repository_is_public(namespace, repository):
path = store.image_layer_path(namespace, repository, image_id)
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
path = store.image_layer_path(namespace, repository, image_id, uuid)
direct_download_url = store.get_direct_download_url(path)
if direct_download_url:
return redirect(direct_download_url)
try:
return Response(store.stream_read(path), headers=headers)
except IOError:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id)
abort(403)
@ -105,14 +113,17 @@ def put_image_layer(namespace, repository, image_id):
if not permission.can():
abort(403)
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
try:
json_data = store.get_content(store.image_json_path(namespace, repository,
image_id))
image_id, uuid))
except IOError:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id)
layer_path = store.image_layer_path(namespace, repository, image_id)
mark_path = store.image_mark_path(namespace, repository, image_id)
layer_path = store.image_layer_path(namespace, repository, image_id, uuid)
mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
if store.exists(layer_path) and not store.exists(mark_path):
abort(409, 'Image already exists', issue='image-exists', image_id=image_id)
@ -149,7 +160,7 @@ def put_image_layer(namespace, repository, image_id):
try:
checksum = store.get_content(store.image_checksum_path(namespace,
repository,
image_id))
image_id, uuid))
except IOError:
# We don't have a checksum stored yet; that's fine, skip the check.
# Not removing the mark though; the image is not downloadable yet.
@ -193,15 +204,18 @@ def put_image_checksum(namespace, repository, image_id):
abort(400, 'Checksum not found in Cookie for image %(image_id)s',
issue='missing-checksum-cookie', image_id=image_id)
if not store.exists(store.image_json_path(namespace, repository, image_id)):
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
if not store.exists(store.image_json_path(namespace, repository, image_id,
uuid)):
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
mark_path = store.image_mark_path(namespace, repository, image_id)
mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
if not store.exists(mark_path):
abort(409, 'Cannot set checksum for image %(image_id)s',
issue='image-write-error', image_id=image_id)
err = store_checksum(namespace, repository, image_id, checksum)
err = store_checksum(namespace, repository, image_id, uuid, checksum)
if err:
abort(400, err)
@ -238,20 +252,24 @@ def get_image_json(namespace, repository, image_id, headers):
repository):
abort(403)
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
try:
data = store.get_content(store.image_json_path(namespace, repository,
image_id))
image_id, uuid))
except IOError:
flask_abort(404)
try:
size = store.get_size(store.image_layer_path(namespace, repository,
image_id))
image_id, uuid))
headers['X-Docker-Size'] = str(size)
except OSError:
pass
checksum_path = store.image_checksum_path(namespace, repository, image_id)
checksum_path = store.image_checksum_path(namespace, repository, image_id,
uuid)
if store.exists(checksum_path):
headers['X-Docker-Checksum'] = store.get_content(checksum_path)
@ -271,39 +289,45 @@ def get_image_ancestry(namespace, repository, image_id, headers):
repository):
abort(403)
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
try:
data = store.get_content(store.image_ancestry_path(namespace, repository,
image_id))
image_id, uuid))
except IOError:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
abort(404, 'Image %(image_id)s not found', issue='unknown-image',
image_id=image_id)
response = make_response(json.dumps(json.loads(data)), 200)
response.headers.extend(headers)
return response
def generate_ancestry(namespace, repository, image_id, parent_id=None):
def generate_ancestry(namespace, repository, image_id, uuid, parent_id=None,
parent_uuid=None):
if not parent_id:
store.put_content(store.image_ancestry_path(namespace, repository,
image_id),
image_id, uuid),
json.dumps([image_id]))
return
data = store.get_content(store.image_ancestry_path(namespace, repository,
parent_id))
parent_id, parent_uuid))
data = json.loads(data)
data.insert(0, image_id)
store.put_content(store.image_ancestry_path(namespace, repository,
image_id),
image_id, uuid),
json.dumps(data))
def store_checksum(namespace, repository, image_id, checksum):
def store_checksum(namespace, repository, image_id, uuid, checksum):
checksum_parts = checksum.split(':')
if len(checksum_parts) != 2:
return 'Invalid checksum format'
# We store the checksum
checksum_path = store.image_checksum_path(namespace, repository, image_id)
checksum_path = store.image_checksum_path(namespace, repository, image_id,
uuid)
store.put_content(checksum_path, checksum)
@ -327,58 +351,69 @@ def put_image_json(namespace, repository, image_id):
abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
issue='invalid-request', image_id=image_id)
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
# Read the checksum
checksum = request.headers.get('X-Docker-Checksum')
if checksum:
# Storing the checksum is optional at this stage
err = store_checksum(namespace, repository, image_id, checksum)
err = store_checksum(namespace, repository, image_id, uuid, checksum)
if err:
abort(400, err, issue='write-error')
else:
# We clean up any old checksum in case it's a retry after a failure
store.remove(store.image_checksum_path(namespace, repository, image_id))
store.remove(store.image_checksum_path(namespace, repository, image_id,
uuid))
if image_id != data['id']:
abort(400, 'JSON data contains invalid id for image: %(image_id)s',
issue='invalid-request', image_id=image_id)
parent_id = data.get('parent')
parent_image = None
if parent_id:
parent_image = model.get_repo_image(namespace, repository, parent_id)
parent_uuid = (parent_image and parent_image.storage and
parent_image.storage.uuid)
if (parent_id and not
store.exists(store.image_json_path(namespace, repository, parent_id))):
store.exists(store.image_json_path(namespace, repository, parent_id,
parent_uuid))):
abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
issue='invalid-request', image_id=image_id, parent_id=parent_id)
json_path = store.image_json_path(namespace, repository, image_id)
mark_path = store.image_mark_path(namespace, repository, image_id)
json_path = store.image_json_path(namespace, repository, image_id, uuid)
mark_path = store.image_mark_path(namespace, repository, image_id, uuid)
if store.exists(json_path) and not store.exists(mark_path):
abort(409, 'Image already exists', issue='image-exists', image_id=image_id)
# If we reach that point, it means that this is a new image or a retry
# on a failed push
# save the metadata
if parent_id:
parent_obj = model.get_image_by_id(namespace, repository, parent_id)
else:
parent_obj = None
command_list = data.get('container_config', {}).get('Cmd', None)
command = json.dumps(command_list) if command_list else None
model.set_image_metadata(image_id, namespace, repository,
data.get('created'), data.get('comment'), command,
parent_obj)
parent_image)
store.put_content(mark_path, 'true')
store.put_content(json_path, request.data)
generate_ancestry(namespace, repository, image_id, parent_id)
generate_ancestry(namespace, repository, image_id, uuid, parent_id,
parent_uuid)
return make_response('true', 200)
def process_image_changes(namespace, repository, image_id):
logger.debug('Generating diffs for image: %s' % image_id)
repo_image = model.get_repo_image(namespace, repository, image_id)
uuid = repo_image and repo_image.storage and repo_image.storage.uuid
image_diffs_path = store.image_file_diffs_path(namespace, repository,
image_id)
image_id, uuid)
image_trie_path = store.image_file_trie_path(namespace, repository,
image_id)
image_id, uuid)
if store.exists(image_diffs_path):
logger.debug('Diffs already exist for image: %s' % image_id)
@ -400,7 +435,7 @@ def process_image_changes(namespace, repository, image_id):
parent_trie.frombytes(parent_trie_bytes)
# Read in the file entries from the layer tar file
layer_path = store.image_layer_path(namespace, repository, image_id)
layer_path = store.image_layer_path(namespace, repository, image_id, uuid)
with store.stream_read_file(layer_path) as layer_tar_stream:
removed_files = set()
layer_files = changes.files_and_dirs_from_tar(layer_tar_stream,

View file

@ -43,7 +43,7 @@ def __gen_image_id(repo, image_num):
global_image_num = [0]
def __create_subtree(repo, structure, parent):
def __create_subtree(repo, structure, creator_username, parent):
num_nodes, subtrees, last_node_tags = structure
# create the nodes
@ -54,7 +54,7 @@ def __create_subtree(repo, structure, parent):
logger.debug('new docker id: %s' % docker_image_id)
checksum = __gen_checksum(docker_image_id)
new_image = model.create_image(docker_image_id, repo)
new_image = model.create_or_link_image(docker_image_id, repo, None)
model.set_image_checksum(docker_image_id, repo, checksum)
creation_time = REFERENCE_DATE + timedelta(days=image_num)
@ -69,7 +69,8 @@ def __create_subtree(repo, structure, parent):
# Populate the diff file
diff_path = store.image_file_diffs_path(repo.namespace, repo.name,
docker_image_id)
docker_image_id,
new_image.storage.uuid)
source_diff = SAMPLE_DIFFS[image_num % len(SAMPLE_DIFFS)]
with open(source_diff, 'r') as source_file:
@ -86,7 +87,7 @@ def __create_subtree(repo, structure, parent):
new_image.docker_image_id)
for subtree in subtrees:
__create_subtree(repo, subtree, new_image)
__create_subtree(repo, subtree, creator_username, new_image)
def __generate_repository(user, name, description, is_public, permissions,
@ -106,9 +107,9 @@ def __generate_repository(user, name, description, is_public, permissions,
if isinstance(structure, list):
for s in structure:
__create_subtree(repo, s, None)
__create_subtree(repo, s, user.username, None)
else:
__create_subtree(repo, structure, None)
__create_subtree(repo, structure, user.username, None)
return repo

View file

@ -56,9 +56,6 @@ class Storage(object):
base_path = self.image_path(namespace, repository, image_id, storage_uuid)
return '{0}/ancestry'.format(base_path)
def repository_namespace_path(self, namespace, repository):
return '{0}/{1}/{2}/'.format(self.images, namespace, repository)
def image_file_trie_path(self, namespace, repository, image_id,
storage_uuid):
base_path = self.image_path(namespace, repository, image_id, storage_uuid)
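
Throughout the registry endpoints the storage uuid is now threaded into every store.*_path helper, with None passed for images that haven't been linked to a storage row yet. The real path layout isn't shown in this diff; the following is a purely hypothetical sketch of what such a helper could look like, just to illustrate the uuid-or-legacy split:

def image_path_sketch(images_root, namespace, repository, image_id, storage_uuid):
    # Hypothetical layout: uuid-addressed paths for linked storage, with the
    # old per-repository layout kept as a fallback when storage_uuid is None.
    if storage_uuid:
        return '{0}/sharedimages/{1}/'.format(images_root, storage_uuid)
    return '{0}/{1}/{2}/{3}/'.format(images_root, namespace, repository, image_id)

print(image_path_sketch('images', 'devtable', 'simple', 'deadbeef', None))
print(image_path_sketch('images', 'devtable', 'simple', 'deadbeef', 'abc-123'))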

Some files were not shown because too many files have changed in this diff.