Implement V2 interfaces and remaining V1 interfaces

Also adds some V1 tests to the registry test suite.
Note: All *registry* tests currently pass, but as verbs are not yet converted, the verb tests in registry_tests.py currently fail.
Joseph Schorr 2016-08-16 15:23:00 -04:00 committed by Jimmy Zelinskie
parent d67991987b
commit db60df827d
21 changed files with 588 additions and 338 deletions

data/interfaces/common.py (new file)

@@ -0,0 +1,12 @@
from image import Repository
from data import model
def repository_for_repo(repo):
""" Returns a Repository object representing the repo data model instance given. """
return Repository(
id=repo.id,
name=repo.name,
namespace_name=repo.namespace_user.username,
description=repo.description,
is_public=model.repository.is_repository_public(repo)
)
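
For illustration, a minimal sketch of how a caller might consume the tuple this helper
returns instead of touching the peewee model directly (describe() is hypothetical, not
part of the commit):

  from data.interfaces.common import repository_for_repo

  def describe(repo_model):
    # Render a one-line summary from the interface-level namedtuple.
    repo = repository_for_repo(repo_model)
    visibility = 'public' if repo.is_public else 'private'
    return '%s/%s (%s)' % (repo.namespace_name, repo.name, visibility)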

@@ -2,6 +2,7 @@ from app import app, storage as store
from data import model
from data.model import db_transaction
from util.morecollections import AttrDict
from data.interfaces.common import repository_for_repo
def placement_locations_docker_v1(namespace_name, repo_name, image_id):
""" Returns all the placements for the image with the given V1 Docker ID, found under the
@@ -198,52 +199,65 @@ def delete_tag(namespace_name, repo_name, tag_name):
model.tag.delete_tag(namespace_name, repo_name, tag_name)
def load_token(password):
def load_token(token):
""" Loads the data associated with the given (deprecated) access token, and, if found
returns True.
"""
try:
model.token.load_token_data(password)
model.token.load_token_data(token)
return True
except model.InvalidTokenException:
return False
def verify_robot(username, password):
def verify_robot(username, token):
""" Returns True if the given robot username and token match an existing robot
account.
"""
try:
model.user.verify_robot(username, password)
return True
return bool(model.user.verify_robot(username, token))
except model.InvalidRobotException:
return False
def change_user_password(user, new_password):
""" Changes the password associated with the given user. """
model.user.change_password(user, new_password)
def change_user_email(user, new_email_address):
""" Changes the email address associated with the given user. """
model.user.update_email(user, new_email_address)
def get_repository(namespace_name, repo_name):
#repo = model.repository.get_repository(namespace_name, repo_name)
return Repository()
""" Returns the repository with the given name under the given namespace or None if none. """
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return None
return repository_for_repo(repo)
def create_repository(namespace_name, repo_name, user):
#repo = model.repository.create_repository(namespace_name, repo_name, user)
pass
def create_repository(namespace_name, repo_name, user=None):
""" Creates a new repository under the given namespace with the given name, for the given user.
"""
model.repository.create_repository(namespace_name, repo_name, user)
def repository_is_public(namespace_name, repo_name):
# return model.repository.repository_is_public(namespace_name, repo_name)
pass
""" Returns whether the repository with the given name under the given namespace is public.
If no matching repository was found, returns False.
"""
return model.repository.repository_is_public(namespace_name, repo_name)
def validate_oauth_token(password):
if model.oauth_access_token(password):
return True
return False
def validate_oauth_token(token):
""" Returns whether the given OAuth token validates. """
return bool(model.oauth.validate_access_token(token))
def get_sorted_matching_repositories(search_term, only_public, can_read, limit):
matching_repos = model.repository.get_sorted_matching_repositories(query, only_public, can_read,
limit=5)
return [Repository()]
""" Returns a sorted list of repositories matching the given search term. can_read is a callback
that will be invoked for each repository found, to filter results to only those visible to
the current user (if any).
"""
repos = model.repository.get_sorted_matching_repositories(search_term, only_public, can_read,
limit=limit)
return [repository_for_repo(repo) for repo in repos]
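
A sketch of how an endpoint might drive this search; the can_read callback filters each
candidate against the caller's permissions (_can_read here is a hypothetical stand-in,
not code from the commit):

  from auth.permissions import ReadRepositoryPermission
  from data.interfaces import v1

  def _can_read(repo):
    # Invoked once per candidate repository to drop invisible repos.
    return ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()

  results = v1.get_sorted_matching_repositories('ubuntu', only_public=False,
                                                can_read=_can_read, limit=10)
  names = ['%s/%s' % (r.namespace_name, r.name) for r in results]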

data/interfaces/v2.py (new file)

@@ -0,0 +1,361 @@
from peewee import IntegrityError
from data import model, database
from data.model import DataModelException
from image import Blob, BlobUpload, ManifestJSON, RepositoryReference, Tag
from image.docker.v1 import DockerV1Metadata
from data.interfaces.common import repository_for_repo
_MEDIA_TYPE = "application/vnd.docker.distribution.manifest.v1+prettyjws"
def create_repository(namespace_name, repo_name, creating_user=None):
""" Creates a new repository under the specified namespace with the given name. The user supplied
is the user creating the repository, if any.
"""
return model.repository.create_repository(namespace_name, repo_name, creating_user)
def repository_is_public(namespace_name, repo_name):
""" Returns true if the repository with the given name under the given namespace has public
visibility.
"""
return model.repository.repository_is_public(namespace_name, repo_name)
def get_repository(namespace_name, repo_name):
""" Returns a repository tuple for the repository with the given name under the given namespace.
Returns None if no such repository was found.
"""
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return None
return repository_for_repo(repo)
def has_active_tag(namespace_name, repo_name, tag_name):
""" Returns whether there is an active tag for the tag with the given name under the matching
repository, if any, or None if none.
"""
try:
model.tag.get_active_tag(namespace_name, repo_name, tag_name)
return True
except database.RepositoryTag.DoesNotExist:
return False
def get_manifest_by_tag(namespace_name, repo_name, tag_name):
""" Returns the current manifest for the tag with the given name under the matching repository,
if any, or None if none.
"""
try:
manifest = model.tag.load_tag_manifest(namespace_name, repo_name, tag_name)
return ManifestJSON(digest=manifest.digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
except model.InvalidManifestException:
return None
def get_manifest_by_digest(namespace_name, repo_name, digest):
""" Returns the manifest matching the given digest under the matching repository, if any,
or None if none.
"""
try:
manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
return ManifestJSON(digest=digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
except model.InvalidManifestException:
return None
def delete_manifest_by_digest(namespace_name, repo_name, digest):
""" Deletes the manifest with the associated digest (if any) and returns all removed tags
that pointed to that manifest. If the manifest was not found, returns an empty list.
"""
tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, digest)
def _tag_view(tag):
return Tag(
name=tag.name,
repository=RepositoryReference(
id=tag.repository_id,
name=repo_name,
namespace_name=namespace_name,
)
)
return [_tag_view(tag) for tag in tags]
def _docker_v1_metadata(namespace_name, repo_name, repo_image):
""" Returns a DockerV1Metadata object for the given image under the repository with the given
namespace and name. Note that the namespace and name are passed here as an optimization,
and are *not checked* against the image.
"""
return DockerV1Metadata(
namespace_name=namespace_name,
repo_name=repo_name,
image_id=repo_image.docker_image_id,
checksum=repo_image.v1_checksum,
content_checksum=repo_image.storage.content_checksum,
compat_json=repo_image.v1_json_metadata,
created=repo_image.created,
comment=repo_image.comment,
command=repo_image.command,
parent_image_id=None, # TODO: make sure this isn't needed anywhere, as it is expensive to look up
)
def get_docker_v1_metadata_by_tag(namespace_name, repo_name, tag_name):
""" Returns the Docker V1 metadata associated with the tag with the given name under the
matching repository, if any. If none, returns None.
"""
try:
repo_image = model.tag.get_tag_image(namespace_name, repo_name, tag_name, include_storage=True)
return _docker_v1_metadata(namespace_name, repo_name, repo_image)
except DataModelException:
return None
def get_docker_v1_metadata_by_image_id(namespace_name, repo_name, docker_image_ids):
""" Returns a map of Docker V1 metadata for each given image ID, matched under the repository
with the given namespace and name. Returns an empty map if the matching repository was not
found.
"""
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return {}
images_query = model.image.lookup_repository_images(repo, docker_image_ids)
return {image.docker_image_id: _docker_v1_metadata(namespace_name, repo_name, image)
for image in images_query}
def get_parents_docker_v1_metadata(namespace_name, repo_name, docker_image_id):
""" Returns an ordered list containing the Docker V1 metadata for each parent of the image
with the given docker ID under the matching repository. Returns an empty list if the image
was not found.
"""
repo_image = model.image.get_repo_image(namespace_name, repo_name, docker_image_id)
if repo_image is None:
return []
parents = model.image.get_parent_images(namespace_name, repo_name, repo_image)
return [_docker_v1_metadata(namespace_name, repo_name, image) for image in parents]
def create_manifest_and_update_tag(namespace_name, repo_name, tag_name, manifest_digest,
manifest_bytes):
""" Creates a new manifest with the given digest and byte data, and assigns the tag with the
given name under the matching repository to it.
"""
try:
model.tag.associate_generated_tag_manifest(namespace_name, repo_name, tag_name,
manifest_digest, manifest_bytes)
except IntegrityError:
# It's already there!
pass
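
Swallowing the IntegrityError makes this call idempotent: a retried push of the same
manifest hits the unique constraint on the second attempt and is treated as a no-op.
A sketch of the intended behavior (digest and payload values are placeholders):

  from data.interfaces import v2

  digest = 'sha256:' + '0' * 64            # placeholder digest
  manifest_bytes = '{"schemaVersion": 1}'  # placeholder payload

  v2.create_manifest_and_update_tag('devtable', 'simple', 'latest', digest, manifest_bytes)
  # A duplicate call with identical arguments returns quietly instead of raising.
  v2.create_manifest_and_update_tag('devtable', 'simple', 'latest', digest, manifest_bytes)
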
def synthesize_v1_image(repository, storage, image_id, created, comment, command, compat_json,
parent_image_id):
""" Synthesizes a V1 image under the specified repository, pointing to the given storage
and returns the V1 metadata for the synthesized image.
"""
repo = model.repository.get_repository(repository.namespace_name, repository.name)
if repo is None:
raise DataModelException('Unknown repository: %s/%s' % (repository.namespace_name,
repository.name))
parent_image = None
if parent_image_id is not None:
parent_image = model.image.get_image(repo, parent_image_id)
if parent_image is None:
raise DataModelException('Unknown parent image: %s' % parent_image_id)
storage_obj = model.storage.get_storage_by_uuid(storage.uuid)
if storage_obj is None:
raise DataModelException('Unknown storage: %s' % storage.uuid)
repo_image = model.image.synthesize_v1_image(repo, storage_obj, image_id, created, comment,
command, compat_json, parent_image)
return _docker_v1_metadata(repo.namespace_user.username, repo.name, repo_image)
def save_manifest(namespace_name, repo_name, tag_name, leaf_layer_docker_id, manifest_digest,
manifest_bytes):
""" Saves a manifest pointing to the given leaf image, with the given manifest, under the matching
repository as a tag with the given name.
"""
model.tag.store_tag_manifest(namespace_name, repo_name, tag_name, leaf_layer_docker_id,
manifest_digest, manifest_bytes)
def repository_tags(namespace_name, repo_name, limit, offset):
""" Returns the active tags under the repository with the given name and namespace. """
tags_query = model.tag.list_repository_tags(namespace_name, repo_name)
tags_query = tags_query.limit(limit).offset(offset)
def _tag_view(tag):
return Tag(
name=tag.name,
repository=RepositoryReference(
id=tag.repository_id,
name=repo_name,
namespace_name=namespace_name,
)
)
return [_tag_view(tag) for tag in tags_query]
def get_visible_repositories(username, limit, offset):
""" Returns the repositories visible to the user with the given username, if any. """
query = model.repository.get_visible_repositories(username, include_public=(username is None))
query = query.limit(limit).offset(offset)
return [repository_for_repo(repo) for repo in query]
def create_blob_upload(namespace_name, repo_name, upload_uuid, location_name, storage_metadata):
""" Creates a blob upload under the matching repository with the given UUID and metadata.
Returns whether the matching repository exists.
"""
try:
model.blob.initiate_upload(namespace_name, repo_name, upload_uuid, location_name,
storage_metadata)
return True
except database.Repository.DoesNotExist:
return False
def blob_upload_by_uuid(namespace_name, repo_name, upload_uuid):
""" Searches for a blob upload with the given UUID under the given repository and returns it
or None if none.
"""
try:
found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
except model.InvalidBlobUpload:
return None
return BlobUpload(
repo_namespace_name=namespace_name,
repo_name=repo_name,
uuid=upload_uuid,
byte_count=found.byte_count,
uncompressed_byte_count=found.uncompressed_byte_count,
chunk_count=found.chunk_count,
sha_state=found.sha_state,
piece_sha_state=found.piece_sha_state,
piece_hashes=found.piece_hashes,
location_name=found.location.name,
storage_metadata=found.storage_metadata,
)
def update_blob_upload(blob_upload):
""" Saves any changes to the blob upload object given to the backing data store.
Fields that can change:
- uncompressed_byte_count
- piece_hashes
- piece_sha_state
- storage_metadata
- byte_count
- chunk_count
- sha_state
"""
# Lookup the blob upload object.
try:
blob_upload_record = model.blob.get_blob_upload(blob_upload.repo_namespace_name,
blob_upload.repo_name, blob_upload.uuid)
except model.InvalidBlobUpload:
return
blob_upload_record.uncompressed_byte_count = blob_upload.uncompressed_byte_count
blob_upload_record.piece_hashes = blob_upload.piece_hashes
blob_upload_record.piece_sha_state = blob_upload.piece_sha_state
blob_upload_record.storage_metadata = blob_upload.storage_metadata
blob_upload_record.byte_count = blob_upload.byte_count
blob_upload_record.chunk_count = blob_upload.chunk_count
blob_upload_record.sha_state = blob_upload.sha_state
blob_upload_record.save()
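
Because BlobUpload is now a mutable namedlist (see the image module changes below), the
intended pattern is read-modify-write; a sketch, with the uuid and chunk size as
placeholders:

  from data.interfaces import v2

  upload_uuid = 'some-upload-uuid'  # placeholder
  upload = v2.blob_upload_by_uuid('devtable', 'simple', upload_uuid)
  if upload is not None:
    # Mutate the in-memory record after streaming a chunk...
    upload.byte_count += 1024
    upload.chunk_count += 1
    # ...then persist the whitelisted fields back to the database.
    v2.update_blob_upload(upload)
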
def delete_blob_upload(namespace_name, repo_name, uuid):
""" Deletes the blob upload with the given uuid under the matching repository. If none, does
nothing.
"""
try:
found = model.blob.get_blob_upload(namespace_name, repo_name, uuid)
except model.InvalidBlobUpload:
return
found.delete_instance()
def create_blob_and_temp_tag(namespace_name, repo_name, blob_digest, blob_upload, expiration_sec):
""" Crates a blob and links a temporary tag with the specified expiration to it under the
matching repository.
"""
location_obj = model.storage.get_image_location_for_name(blob_upload.location_name)
blob_record = model.blob.store_blob_record_and_temp_link(namespace_name, repo_name,
blob_digest,
location_obj.id,
blob_upload.byte_count,
expiration_sec,
blob_upload.uncompressed_byte_count)
return Blob(
uuid=blob_record.uuid,
digest=blob_digest,
size=blob_upload.byte_count,
locations=[blob_upload.location_name],
)
def lookup_blobs_by_digest(namespace_name, repo_name, digests):
""" Returns all the blobs with matching digests found under the matching repository. If the
repository doesn't exist, returns {}.
"""
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return {}
def _blob_view(blob_record):
return Blob(
uuid=blob_record.uuid,
digest=blob_record.content_checksum,
size=blob_record.image_size,
locations=None, # Note: Locations is None in this case.
)
query = model.storage.lookup_repo_storages_by_content_checksum(repo, digests)
return {storage.content_checksum: _blob_view(storage) for storage in query}
def get_blob_by_digest(namespace_name, repo_name, digest):
""" Returns the blob with the given digest under the matching repository or None if none. """
try:
blob_record = model.blob.get_repo_blob_by_digest(namespace_name, repo_name, digest)
return Blob(
uuid=blob_record.uuid,
digest=digest,
size=blob_record.image_size,
locations=blob_record.locations,
)
except model.BlobDoesNotExist:
return None
def save_bittorrent_pieces(blob, piece_size, piece_bytes):
""" Saves the BitTorrent piece hashes for the given blob. """
blob_record = model.storage.get_storage_by_uuid(blob.uuid)
model.storage.save_torrent_info(blob_record, piece_size, piece_bytes)
def get_blob_path(blob):
# Once everything is moved over, this could be in util.registry and not even
# touch the database.
blob_record = model.storage.get_storage_by_uuid(blob.uuid)
return model.storage.get_layer_path(blob_record)
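
Taken together, these functions cover the whole V2 blob upload lifecycle. A hedged
sketch of the happy path for a monolithic push (uuid, metadata, digest, and the
'local_us' location are placeholders; the real orchestration lives in the blob
endpoints):

  from data.interfaces import v2

  upload_uuid, metadata = 'some-upload-uuid', {}  # placeholders
  digest = 'sha256:' + '0' * 64                   # placeholder

  v2.create_blob_upload('devtable', 'simple', upload_uuid, 'local_us', metadata)
  upload = v2.blob_upload_by_uuid('devtable', 'simple', upload_uuid)
  # ... stream the bytes, update counters via v2.update_blob_upload(upload) ...
  blob = v2.create_blob_and_temp_tag('devtable', 'simple', digest, upload, 3600)
  v2.delete_blob_upload('devtable', 'simple', upload_uuid)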

@@ -3,7 +3,7 @@ from uuid import uuid4
from data.model import (tag, _basequery, BlobDoesNotExist, InvalidBlobUpload, db_transaction,
storage as storage_model, InvalidImageException)
from data.database import (Repository, Namespace, ImageStorage, Image, ImageStoragePlacement,
BlobUpload)
BlobUpload, ImageStorageLocation)
def get_repo_blob_by_digest(namespace, repo_name, blob_digest):
@@ -63,7 +63,9 @@ def get_blob_upload(namespace, repo_name, upload_uuid):
"""
try:
return (BlobUpload
.select()
.select(BlobUpload, ImageStorageLocation)
.join(ImageStorageLocation)
.switch(BlobUpload)
.join(Repository)
.join(Namespace, on=(Namespace.id == Repository.namespace_user))
.where(Repository.name == repo_name, Namespace.username == namespace,

@@ -333,6 +333,16 @@ def load_tag_manifest(namespace, repo_name, tag_name):
raise InvalidManifestException(msg)
def delete_manifest_by_digest(namespace, repo_name, digest):
tag_manifests = list(_load_repo_manifests(namespace, repo_name)
.where(TagManifest.digest == digest))
for tag_manifest in tag_manifests:
delete_tag(namespace, repo_name, tag_manifest.tag.name)
return [tag_manifest.tag for tag_manifest in tag_manifests]
def load_manifest_by_digest(namespace, repo_name, digest):
try:
return (_load_repo_manifests(namespace, repo_name)

@@ -1,183 +0,0 @@
from image import Blob, BlobUpload, ManifestJSON, Repository, Tag
from image.docker.v1 import DockerV1Metadata
def create_repository(namespace_name, repo_name, user):
model.repository.create_repository(namespace, reponame, user)
def repository_is_public(namespace_name, repo_name):
model.repository.repository_is_public(namespace, reponame)):
def get_repository(namespace_name, repo_name):
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return None
return Repository(
id=repo.id,
name=repo.name,
namespace_name=repo.namespace_user.username,
)
def get_active_tag(namespace_name, repo_name, tag_name):
try:
return model.tag.get_active_tag(namespace_name, repo_name, tag_name)
except RepositoryTag.DoesNotExist:
return None
def get_manifest_by_tag(namespace_name, repo_name, tag_name):
try:
manifest = model.tag.load_tag_manifest(namespace_name, repo_name, manifest_ref)
return ManifestJSON(digest=digest, json=manifest.json_data)
except model.InvalidManifestException:
return None
def get_manifest_by_digest(namespace_name, repo_name, digest):
try:
manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, manifest_ref)
return ManifestJSON(digest=digest, json=manifest.json_data)
except model.InvalidManifestException:
return None
def get_tag_by_manifest_digest(namespace_name, repo_name, digest):
return Tag()
def delete_tag(namespace_name, repo_name, tag_name):
model.tag.delete_tag(namespace_name, repo_name, tag.name)
return True
def get_docker_v1_metadata_by_tag(namespace_name, repo_name, tag_name):
if not repo_image:
return None
return DockerV1Metadata(
namespace_name=namespace_name,
repo_name=repo_name,
image_id=image_id,
checksum=repo_image.v1_checksum,
content_checksum=repo_image.content_checksum,
compat_json=repo_image.v1_json_metadata,
)
def get_docker_v1_metadata_by_image_id(namespace_name, repo_name, image_ids):
images_query = model.image.lookup_repository_images(repo, all_image_ids)
return {image.docker_image_id: DockerV1Metadata(namespace_name=namespace_name,
repo_name=repo_name,
image_id=image.docker_image_id,
checksum=image.v1_checksum,
content_checksum=image.content_checksum,
compat_json=image.v1_json_metadata)
for image in images_query}
def get_parents_docker_v1_metadata(namespace_name, repo_name, image_id):
# Old implementation:
# parents = model.image.get_parent_images(namespace_name, repo_name, image)
# desired:
# return a list of the AttrDict in docker_v1_metadata
return []
def create_manifest_and_update_tag(namespace_name, repo_name, tag_name, manifest_digest, manifest_bytes):
try:
model.tag.associate_generated_tag_manifest(namespace_name, repo_name, tag_name,
manifest.digest, manifest.bytes)
except IntegrityError:
# It's already there!
pass
def synthesize_v1_image(repo, storage, image_id, created, comment, command, compat_json, parent_image_id):
model.image.synthesize_v1_image(repo, storage, image_id, created, comment, command, compat_json, parent_image_id)
def save_manifest(namespace_name, repo_name, tag_name, leaf_layer_id, manifest_digest, manifest_bytes):
model.tag.store_tag_manifest(namespace_name, repo_name, tag_name, leaf_layer_id, manifest_digest,
manifest_bytes)
def repository_tags(namespace_name, repo_name, limit, offset):
return [Tag()]
def get_visible_repositories(username, limit, offset):
return [Repository()]
def create_blob_upload(namespace_name, repo_name, upload_uuid, location_name, storage_metadata):
"""
Creates a blob upload.
Returns False if the upload's repository does not exist.
"""
try:
model.blob.initiate_upload(namespace_name, repo_name, new_upload_uuid, location_name,
upload_metadata)
return True
except database.Repository.DoesNotExist:
return False
def blob_upload_by_uuid(uuid):
try:
found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
except model.InvalidBlobUpload:
raise BlobUploadUnknown()
return BlobUpload(
uuid=uuid,
byte_count=found.byte_count,
uncompressed_byte_count=found.uncompressed_byte_count,
chunk_count=found.chunk_count,
location_name=found.location.name,
storage_metadata=found.storage_metadata,
)
def update_blob_upload(blob_upload):
# old implementation:
# blob_upload.save()
pass
def delete_blob_upload(uuid):
try:
found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
except model.InvalidBlobUpload:
raise BlobUploadUnknown()
found.delete_instance()
def create_blob_and_temp_tag(namespace_name, repo_name, expected_digest, upload_obj):
return model.blob.store_blob_record_and_temp_link(namespace_name, repo_name, expected_digest,
upload_obj.location,
upload_obj.byte_count,
app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],
upload_obj.uncompressed_byte_count)
def get_blob_by_digest(namespace_name, repo_name, digest):
try:
return model.blob.get_repo_blob_by_digest(namespace_name, repo_name, digest)
except model.BlobDoesNotExist:
return None
def create_bittorrent_pieces(blob_storage, piece_size, piece_bytes)
model.storage.save_torrent_info(blob_storage.id, piece_size, piece_bytes)
def get_blob_path(blob):
# Once everything is moved over, this could be in util.registry and not even
# touch the database.
model.storage.get_layer_path(blob)

@@ -5,7 +5,7 @@ from auth.auth_context import get_authenticated_user, get_validated_oauth_token
import json
def build_event_data(repo, extra_data={}, subpage=None):
repo_string = '%s/%s' % (repo.namespace_user.username, repo.name)
repo_string = '%s/%s' % (repo.namespace_name, repo.name)
homepage = '%s://%s/repository/%s' % (app.config['PREFERRED_URL_SCHEME'],
app.config['SERVER_HOSTNAME'],
repo_string)
@@ -18,11 +18,10 @@ def build_event_data(repo, extra_data={}, subpage=None):
event_data = {
'repository': repo_string,
'namespace': repo.namespace_user.username,
'namespace': repo.namespace_name,
'name': repo.name,
'docker_url': '%s/%s' % (app.config['SERVER_HOSTNAME'], repo_string),
'homepage': homepage,
'visibility': repo.visibility.name
}
event_data.update(extra_data)
@@ -54,10 +53,10 @@ def spawn_notification(repo, event_name, extra_data={}, subpage=None, pathargs=[
performer_data=None):
event_data = build_event_data(repo, extra_data=extra_data, subpage=subpage)
notifications = model.notification.list_repo_notifications(repo.namespace_user.username,
notifications = model.notification.list_repo_notifications(repo.namespace_name,
repo.name,
event_name=event_name)
for notification in list(notifications):
notification_data = build_notification_data(notification, event_data, performer_data)
path = [repo.namespace_user.username, repo.name, event_name] + pathargs
path = [repo.namespace_name, repo.name, event_name] + pathargs
notification_queue.put(path, json.dumps(notification_data))

@@ -6,8 +6,9 @@ from functools import wraps
from flask import request, make_response, jsonify, session
from data.model import v1
from data.interfaces import v1
from app import authentication, userevents, metric_queue
from app import authentication, userevents
from auth.auth import process_auth, generate_signed_token
from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token
from auth.permissions import (ModifyRepositoryPermission, UserAdminPermission,
@@ -148,10 +149,6 @@ def update_user(username):
logger.debug('Updating user password')
v1.change_user_password(get_authenticated_user(), update_request['password'])
if 'email' in update_request:
logger.debug('Updating user email')
v1.change_user_email(get_authenticated_user(), update_request['email'])
return jsonify({
'username': get_authenticated_user().username,
'email': get_authenticated_user().email

@@ -14,7 +14,7 @@ from auth.permissions import (ReadRepositoryPermission,
ModifyRepositoryPermission)
from auth.registry_jwt_auth import get_granted_username
from data import model, database
from data.model import v1
from data.interfaces import v1
from digest import checksums
from endpoints.v1 import v1_bp
from endpoints.decorators import anon_protect

@@ -9,7 +9,7 @@ from auth.auth import process_auth
from auth.permissions import (ReadRepositoryPermission,
ModifyRepositoryPermission)
from data import model
from data.model import v1
from data.interfaces import v1
from endpoints.common import parse_repository_name
from endpoints.decorators import anon_protect
from endpoints.v1 import v1_bp

@@ -65,7 +65,7 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
kwargs[limit_kwarg_name] = limit
kwargs[offset_kwarg_name] = offset
kwargs[callback_kwarg_name] = callback
func(*args, **kwargs)
return func(*args, **kwargs)
return wrapped
return wrapper
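
The missing return meant the decorated view built a Response and then threw it away,
leaving Flask with None. The bug shape in isolation:

  def broken(func):
    def wrapped(*args, **kwargs):
      func(*args, **kwargs)           # result silently dropped
    return wrapped

  def fixed(func):
    def wrapped(*args, **kwargs):
      return func(*args, **kwargs)    # propagate the view's Response
    return wrapped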

@@ -8,6 +8,7 @@ import resumablehashlib
from app import storage, app
from auth.registry_jwt_auth import process_registry_jwt_auth
from data import database
from data.interfaces import v2
from digest import digest_tools
from endpoints.common import parse_repository_name
from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
@@ -134,7 +135,7 @@ def start_blob_upload(namespace_name, repo_name):
# The user plans to send us the entire body right now.
# Find the upload.
blob_upload = v2.blob_upload_by_uuid(new_upload_uuid)
blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
if blob_upload is None:
raise BlobUploadUnknown()
@@ -142,7 +143,7 @@ def start_blob_upload(namespace_name, repo_name):
# the upload state.
updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
if updated_blob_upload is None:
_abort_range_not_satisfiable(updated_blob_upload.byte_count, new_upload_uuid)
_abort_range_not_satisfiable(blob_upload.byte_count, new_upload_uuid)
# Save the upload state to the database.
v2.update_blob_upload(updated_blob_upload)
@@ -195,7 +196,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
# the upload state.
updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
if updated_blob_upload is None:
_abort_range_not_satisfiable(updated_blob_upload.byte_count, upload_uuid)
_abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)
# Save the upload state to the database.
v2.update_blob_upload(updated_blob_upload)
@@ -231,7 +232,7 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
# the upload state.
updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
if updated_blob_upload is None:
_abort_range_not_satisfiable(updated_blob_upload.byte_count, upload_uuid)
_abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)
# Finalize the upload process in the database and storage.
_finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
@@ -253,14 +254,15 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
@require_repo_write
@anon_protect
def cancel_upload(namespace_name, repo_name, upload_uuid):
upload = v2.blob_upload_by_uuid(upload_uuid)
if upload is None:
blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
if blob_upload is None:
raise BlobUploadUnknown()
# We delete the record for the upload first, since if the partial upload in
# storage fails to delete, it doesn't break anything
v2.delete_blob_upload(upload_uuid)
storage.cancel_chunked_upload({upload.location_name}, upload.uuid, upload.storage_metadata)
# storage fails to delete, it doesn't break anything.
v2.delete_blob_upload(namespace_name, repo_name, upload_uuid)
storage.cancel_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
blob_upload.storage_metadata)
return Response(status=204)
@@ -342,7 +344,7 @@ def _upload_chunk(blob_upload, range_header):
"""
# Get the offset and length of the current chunk.
start_offset, length = _start_offset_and_length(range_header)
if None in {blob_upload, start_offset, length}:
if blob_upload is None or None in {start_offset, length}:
logger.error('Invalid arguments provided to _upload_chunk')
return None
@@ -393,7 +395,7 @@ def _upload_chunk(blob_upload, range_header):
size_info, fn = calculate_size_handler()
input_fp = wrap_with_handler(input_fp, fn)
length_written, new_metadata, error = storage.stream_upload_chunk(
length_written, new_metadata, upload_error = storage.stream_upload_chunk(
location_set,
blob_upload.uuid,
start_offset,
@@ -402,8 +404,9 @@ def _upload_chunk(blob_upload, range_header):
blob_upload.storage_metadata,
content_type=BLOB_CONTENT_TYPE,
)
if error is not None:
logger.error('storage.stream_upload_chunk returned error %s', error)
if upload_error is not None:
logger.error('storage.stream_upload_chunk returned error %s', upload_error)
return None
# If we determined an uncompressed size and this is the first chunk, add it to the blob.
@@ -418,6 +421,7 @@ def _upload_chunk(blob_upload, range_header):
if piece_hasher is not None:
blob_upload.piece_hashes = piece_hasher.piece_hashes
blob_upload.piece_sha_state = piece_hasher.hash_fragment
blob_upload.storage_metadata = new_metadata
blob_upload.byte_count += length_written
blob_upload.chunk_count += 1
@@ -471,19 +475,17 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
namespace_name,
repo_name,
digest,
blob_upload.location_name,
blob_upload.byte_count,
blob_upload.uncompressed_byte_count,
blob_upload,
app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],
)
# If it doesn't already exist, create the BitTorrent pieces for the blob.
if blob_upload.piece_sha_state is not None and not already_existed:
piece_bytes = blob_upload.piece_hashes + blob_upload.piece_sha_state.digest()
v2.create_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
v2.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
# Delete the blob upload.
v2.delete_upload(blob_upload.uuid)
v2.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)
def _finish_upload(namespace_name, repo_name, blob_upload, digest):

@@ -3,6 +3,7 @@ from flask import jsonify
from auth.registry_jwt_auth import process_registry_jwt_auth, get_granted_entity
from endpoints.decorators import anon_protect
from endpoints.v2 import v2_bp, paginate
from data.interfaces import v2
@v2_bp.route('/_catalog', methods=['GET'])
@process_registry_jwt_auth()
@@ -14,10 +15,10 @@ def catalog_search(limit, offset, pagination_callback):
if entity:
username = entity.user.username
visible_repositories = v2.get_visible_repositories(username, limit, offset)
visible_repositories = v2.get_visible_repositories(username, limit+1, offset)
response = jsonify({
'repositories': ['%s/%s' % (repo.namespace_name, repo.name)
for repo in visible_repositories],
for repo in visible_repositories][0:limit],
})
pagination_callback(len(visible_repositories), response)
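
Fetching limit + 1 rows is the usual trick for detecting whether another page exists
without issuing a COUNT; only the first limit rows are returned to the client. The
pattern in isolation (fetch_fn stands in for the repository query):

  def page(fetch_fn, limit, offset):
    rows = fetch_fn(limit + 1, offset)   # one extra row as a "has more" probe
    return rows[0:limit], len(rows) > limit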

@@ -9,6 +9,7 @@ import features
from app import docker_v2_signing_key, app, metric_queue
from auth.registry_jwt_auth import process_registry_jwt_auth
from data import model
from data.interfaces import v2
from digest import digest_tools
from endpoints.common import parse_repository_name
from endpoints.decorators import anon_protect
@@ -35,14 +36,14 @@ MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN)
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_protect
def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
manifest = v2.get_manifest_by_tag(namespace_name, repo_name, tag_name)
def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
manifest = v2.get_manifest_by_tag(namespace_name, repo_name, manifest_ref)
if manifest is None:
tag = v2.get_active_tag(namespace_name, repo_name, tag_name)
if tag is None:
has_tag = v2.has_active_tag(namespace_name, repo_name, manifest_ref)
if not has_tag:
raise ManifestUnknown()
manifest = _generate_and_store_manifest(namespace_name, repo_name, tag_name)
manifest = _generate_and_store_manifest(namespace_name, repo_name, manifest_ref)
if manifest is None:
raise ManifestUnknown()
@@ -52,9 +53,9 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])
return Response(
manifest.bytes,
manifest.json,
status=200,
headers={'Content-Type': manifest.content_type, 'Docker-Content-Digest': manifest.digest},
headers={'Content-Type': manifest.media_type, 'Docker-Content-Digest': manifest.digest},
)
@@ -64,7 +65,7 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
@require_repo_read
@anon_protect
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, manifest_ref)
manifest = v2.get_manifest_by_digest(namespace_name, repo_name, manifest_ref)
if manifest is None:
# Without a tag name to reference, we can't make an attempt to generate the manifest
raise ManifestUnknown()
@@ -74,7 +75,7 @@ def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
track_and_log('pull_repo', repo)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])
return Response(manifest.json, status=200, headers={'Content-Type': manifest.content_type,
return Response(manifest.json, status=200, headers={'Content-Type': manifest.media_type,
'Docker-Content-Digest': manifest.digest})
@@ -94,13 +95,13 @@ def _reject_manifest2_schema2(func):
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
def write_manifest_by_tagname(namespace_name, repo_name, tag_name):
def write_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
try:
manifest = DockerSchema1Manifest(request.data)
except ManifestException as me:
raise ManifestInvalid(detail={'message': me.message})
if manifest.tag != tag_name:
if manifest.tag != manifest_ref:
raise TagInvalid()
return _write_manifest(namespace_name, repo_name, manifest)
@@ -144,8 +145,7 @@ def _write_manifest(namespace_name, repo_name, manifest):
raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})
# Ensure all the blobs in the manifest exist.
storage_query = model.storage.lookup_repo_storages_by_content_checksum(repo, manifest.checksums)
storage_map = {storage.content_checksum: storage for storage in storage_query}
storage_map = v2.lookup_blobs_by_digest(namespace_name, repo_name, manifest.checksums)
for layer in manifest.layers:
digest_str = str(layer.digest)
if digest_str not in storage_map:
@@ -153,14 +153,14 @@ def _write_manifest(namespace_name, repo_name, manifest):
# Lookup all the images and their parent images (if any) inside the manifest.
# This will let us know which v1 images we need to synthesize and which ones are invalid.
all_image_ids = list(manifest.docker_image_ids | manifest.parent_image_ids)
all_image_ids = list(manifest.parent_image_ids | manifest.image_ids)
images_map = v2.get_docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)
# Rewrite any v1 image IDs that do not match the checksum in the database.
try:
rewritten_images = manifest.rewrite_invalid_image_ids(images_map)
rewritten_images = list(manifest.rewrite_invalid_image_ids(images_map))
for rewritten_image in rewritten_images:
image = v2.synthesize_v1_image(
v1_metadata = v2.synthesize_v1_image(
repo,
storage_map[rewritten_image.content_checksum],
rewritten_image.image_id,
@@ -170,13 +170,13 @@ def _write_manifest(namespace_name, repo_name, manifest):
rewritten_image.compat_json,
rewritten_image.parent_image_id,
)
images_map[image.image_id] = image
except ManifestException as me:
raise ManifestInvalid(detail={'message': me.message})
# Store the manifest pointing to the tag.
leaf_layer_id = images_map[manifest.leaf_layer.v1_metadata.image_id].image_id
v2.save_manifest(namespace_name, repo_name, tag_name, leaf_layer_id, manifest.digest, manifest.bytes)
leaf_layer_id = rewritten_images[-1].image_id
v2.save_manifest(namespace_name, repo_name, manifest.tag, leaf_layer_id, manifest.digest,
manifest.bytes)
# Queue all blob manifests for replication.
# TODO(jschorr): Find a way to optimize this insertion.
@@ -206,25 +206,19 @@ def _write_manifest(namespace_name, repo_name, manifest):
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
def delete_manifest_by_digest(namespace_name, repo_name, digest):
def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
"""
Delete the manifest specified by the digest.
Note: there is no equivalent method for deleting by tag name because it is
forbidden by the spec.
"""
tag = v2.get_tag_by_manifest_digest(namespace_name, repo_name, digest)
if tag is None:
# TODO(jzelinskie): disambiguate between no manifest and no tag
tags = v2.delete_manifest_by_digest(namespace_name, repo_name, manifest_ref)
if not tags:
raise ManifestUnknown()
# Mark the tag as no longer alive.
deleted = v2.delete_tag(namespace_name, repo_name, tag.name)
if not deleted:
# Tag was not alive.
raise ManifestUnknown()
track_and_log('delete_tag', tag.repository, tag=tag.name, digest=digest)
for tag in tags:
track_and_log('delete_tag', tag.repository, tag=tag.name, digest=manifest_ref)
return Response(status=202)

@@ -5,6 +5,7 @@ from endpoints.common import parse_repository_name
from endpoints.v2 import v2_bp, require_repo_read, paginate
from endpoints.v2.errors import NameUnknown
from endpoints.decorators import anon_protect
from data.interfaces import v2
@v2_bp.route('/<repopath:repository>/tags/list', methods=['GET'])
@parse_repository_name()

@@ -11,6 +11,7 @@ from auth.permissions import (ModifyRepositoryPermission, ReadRepositoryPermissi
CreateRepositoryPermission)
from endpoints.v2 import v2_bp
from endpoints.decorators import anon_protect
from data.interfaces import v2
from util.cache import no_cache
from util.names import parse_namespace_repository, REPOSITORY_NAME_REGEX
from util.security.registry_jwt import generate_bearer_token, build_context_and_subject

@@ -1,6 +1,7 @@
import tarfile
from collections import namedtuple
from namedlist import namedlist
from util.registry.gzipwrap import GzipWrap
@@ -10,6 +11,11 @@ class ManifestJSON(namedtuple('ManifestJSON', ['digest', 'json', 'media_type']))
ManifestJSON represents a Manifest of any format.
"""
class RepositoryReference(namedtuple('RepositoryReference', ['id', 'name', 'namespace_name'])):
"""
RepositoryReference represents a reference to a Repository, without its full metadata.
"""
class Repository(namedtuple('Repository', ['id', 'name', 'namespace_name', 'description',
'is_public'])):
@@ -24,15 +30,16 @@ class Tag(namedtuple('Tag', ['name', 'repository'])):
"""
class BlobUpload(namedtuple('BlobUpload', ['uuid', 'byte_count', 'uncompressed_byte_count',
'chunk_count', 'sha_state', 'location_name',
'storage_metadata', 'piece_sha_state', 'piece_hashes'])):
class BlobUpload(namedlist('BlobUpload', ['uuid', 'byte_count', 'uncompressed_byte_count',
'chunk_count', 'sha_state', 'location_name',
'storage_metadata', 'piece_sha_state', 'piece_hashes',
'repo_namespace_name', 'repo_name'])):
"""
BlobUpload represents the current state of a Blob being uploaded.
"""
class Blob(namedtuple('Blob', ['digest', 'size', 'locations'])):
class Blob(namedtuple('Blob', ['uuid', 'digest', 'size', 'locations'])):
"""
Blob represents an opaque binary blob saved to the storage system.
"""

@@ -121,6 +121,10 @@ class DockerSchema1Manifest(object):
def content_type(self):
return DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
@property
def media_type(self):
return DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
@property
def signatures(self):
return self._signatures
@@ -137,6 +141,10 @@
def tag(self):
return self._tag
@property
def json(self):
return self._bytes
@property
def bytes(self):
return self._bytes
@@ -216,11 +224,12 @@
content, but the checksums don't match, then we need to rewrite the image ID
to something new in order to ensure consistency.
"""
# used to synthesize a new "content addressable" image id
digest_history = hashlib.sha256()
# Used to synthesize a new "content addressable" image id
digest_history = hashlib.sha256()
has_rewritten_ids = False
updated_id_map = {}
for layer in self.layers:
digest_str = str(layer.digest)
extracted_v1_metadata = layer.v1_metadata
@@ -247,25 +256,33 @@
# Lookup the parent image for the layer, if any.
parent_image_id = None
if extracted_v1_metadata.parent_image_id is not None:
parent_image_id = images_map.get(extracted_v1_metadata.parent_image_id, None)
if parent_image_id is None:
parent_image = images_map.get(extracted_v1_metadata.parent_image_id, None)
if parent_image is None:
raise MalformedSchema1Manifest('parent not found with image ID: %s' %
extracted_v1_metadata.parent_image_id)
parent_image_id = updated_id_map.get(parent_image.image_id, parent_image.image_id)
# Synthesize and store the v1 metadata in the db.
v1_metadata_json = layer.raw_v1_metadata
if has_rewritten_ids:
v1_metadata_json = _updated_v1_metadata(v1_metadata_json, updated_id_map)
yield DockerV1Metadata(
updated_image = DockerV1Metadata(
namespace_name=self.namespace,
repo_name=self.repo_name,
image_id=working_image_id,
created=extracted_v1_metadata.created,
comment=extracted_v1_metadata.comment,
command=extracted_v1_metadata.command,
compat_json=v1_metadata_json,
parent_image_id=parent_image_id,
checksum=None, # TODO: Check if we need this.
content_checksum=digest_str,
)
images_map[updated_image.image_id] = updated_image
yield updated_image
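
The updated_id_map lookup is what keeps a child layer pointing at its parent after the
parent's ID has been rewritten to a content-addressable one; the remap reduces to this
sketch:

  # updated_id_map: original image ID -> rewritten image ID
  def remap_parent(parent_image_id, updated_id_map):
    # Fall back to the original ID when the parent was not rewritten.
    return updated_id_map.get(parent_image_id, parent_image_id)
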
class DockerSchema1ManifestBuilder(object):
"""

@@ -1,65 +1,66 @@
autobahn==0.9.3-3
aiowsgi
trollius
flask
py-bcrypt
Flask-Principal
Flask-Login
Flask-Mail
python-dateutil
boto
pymysql==0.6.7 # Remove version when baseimage has Python 2.7.9+
stripe
gunicorn<19.0
gevent
mixpanel
beautifulsoup4
marisa-trie
APScheduler==3.0.5
xhtml2pdf
redis
hiredis
flask-restful==0.2.12
jsonschema
-e git+https://github.com/NateFerrero/oauth2lib.git#egg=oauth2lib
alembic
sqlalchemy
python-magic
reportlab==2.7
raven
peewee
python-ldap
pycryptodome
psycopg2
pyyaml
PyGithub
-e git+https://github.com/DevTable/aniso8601-fake.git#egg=aniso8610
-e git+https://github.com/DevTable/anunidecode.git#egg=anunidecode
-e git+https://github.com/DevTable/container-cloud-config.git#egg=container-cloud-config
-e git+https://github.com/DevTable/python-etcd.git@sslfix#egg=python-etcd
-e git+https://github.com/NateFerrero/oauth2lib.git#egg=oauth2lib
-e git+https://github.com/coreos/mockldap.git@v0.1.x#egg=mockldap
-e git+https://github.com/coreos/py-bitbucket.git#egg=py-bitbucket
-e git+https://github.com/coreos/pyapi-gitlab.git@timeout#egg=pyapi-gitlab
-e git+https://github.com/coreos/mockldap.git@v0.1.x#egg=mockldap
-e git+https://github.com/coreos/resumablehashlib.git#egg=resumablehashlib
-e git+https://github.com/DevTable/python-etcd.git@sslfix#egg=python-etcd
gipc
pyOpenSSL
pygpgme
cachetools
mock
psutil
stringscore
python-swiftclient
python-keystoneclient
APScheduler==3.0.5
Flask-Login
Flask-Mail
Flask-Principal
Flask-Testing
pyjwt
toposort
pyjwkest
jsonpath-rw
bintrees
redlock
semantic-version
PyGithub
aiowsgi
alembic
autobahn==0.9.3-3
beautifulsoup4
bencode
bintrees
boto
cachetools
cryptography
flask
flask-restful==0.2.12
gevent
gipc
gunicorn<19.0
hiredis
httmock
jsonpath-rw
jsonschema
marisa-trie
mixpanel
mock
moto
namedlist
peewee
psutil
psycopg2
py-bcrypt
pyOpenSSL
pycryptodome
pygpgme
pyjwkest
pyjwt
pymysql==0.6.7 # Remove version when baseimage has Python 2.7.9+
python-dateutil
python-keystoneclient
python-ldap
python-magic
python-swiftclient
pyyaml
raven
redis
redlock
reportlab==2.7
semantic-version
sqlalchemy
stringscore
stripe
toposort
trollius
tzlocal
xhtml2pdf

@@ -21,8 +21,9 @@ from data import model
from endpoints.v1 import v1_bp
from endpoints.v2 import v2_bp
from endpoints.verbs import verbs
from endpoints.v2.manifest import SignedManifestBuilder
from endpoints.api import api_bp
from image.docker.schema1 import DockerSchema1ManifestBuilder
from initdb import wipe_database, initialize_database, populate_database
from endpoints.csrf import generate_csrf_token
from tempfile import NamedTemporaryFile
@@ -425,7 +426,6 @@ class V1RegistryPullMixin(V1RegistryMixin):
# Ensure we do (or do not) have a matching image ID.
tag_image_id = tags_result['latest']
known_ids = [item['id'] for item in images]
self.assertEquals(not munge_shas, tag_image_id in known_ids)
# Retrieve the ancestry of the tag image.
@@ -545,7 +545,7 @@ class V2RegistryPushMixin(V2RegistryMixin):
# Build a fake manifest.
tag_name = tag_name or 'latest'
builder = SignedManifestBuilder(namespace, repository, tag_name)
builder = DockerSchema1ManifestBuilder(namespace, repository, tag_name)
full_contents = {}
for image_data in reversed(images):
@@ -1090,6 +1090,20 @@ class RegistryTestsMixin(object):
class V1RegistryTests(V1RegistryPullMixin, V1RegistryPushMixin, RegistryTestsMixin,
RegistryTestCaseMixin, LiveServerTestCase):
""" Tests for V1 registry. """
def test_users(self):
# Not logged in, should 404.
self.conduct('GET', '/v1/users', expected_code=404)
# Try some logins.
self.conduct('POST', '/v1/users', json_data={'username': 'freshuser'}, expected_code=400)
resp = self.conduct('POST', '/v1/users',
json_data={'username': 'devtable', 'password': 'password'},
expected_code=400)
# Because Docker
self.assertEquals('"Username or email already exists"', resp.text)
def test_push_reponame_with_slashes(self):
# Attempt to add a repository name with slashes. This should fail as we do not support it.
images = [{
@@ -1190,7 +1204,7 @@ class V2RegistryTests(V2RegistryPullMixin, V2RegistryPushMixin, RegistryTestsMix
self.do_auth('devtable', 'password', namespace, repository, scopes=['push', 'pull'])
# Build a fake manifest.
builder = SignedManifestBuilder(namespace, repository, tag_name)
builder = DockerSchema1ManifestBuilder(namespace, repository, tag_name)
builder.add_layer('sha256:' + hashlib.sha256('invalid').hexdigest(), json.dumps({'id': 'foo'}))
manifest = builder.build(_JWK)
@@ -1210,7 +1224,7 @@ class V2RegistryTests(V2RegistryPullMixin, V2RegistryPushMixin, RegistryTestsMix
self.do_auth('devtable', 'password', namespace, repository, scopes=['push', 'pull'])
# Build a fake manifest.
builder = SignedManifestBuilder(namespace, repository, tag_name)
builder = DockerSchema1ManifestBuilder(namespace, repository, tag_name)
builder.add_layer('sha256:' + hashlib.sha256('invalid').hexdigest(), json.dumps({'id': 'foo'}))
manifest = builder.build(_JWK)