initial v1 refactor to use model methods
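The v1 endpoint handlers now go through a thin facade in data/model/v1 and receive plain values (an AttrDict of metadata, storage locations, sizes) instead of ORM rows. A minimal sketch of the intended call pattern, assuming the package is importable; head_image_json below is a hypothetical handler used only for illustration:

    from data.model.v1 import docker_v1_metadata, is_image_uploading

    def head_image_json(namespace_name, repo_name, image_id):
      # Ask the facade for plain metadata instead of loading a repo_image row.
      v1_metadata = docker_v1_metadata(namespace_name, repo_name, image_id)
      if v1_metadata is None:
        return None  # a real handler would abort(404) here
      # Upload state is also answered by the facade, not by storage rows.
      if is_image_uploading(namespace_name, repo_name, image_id):
        return None  # a real handler would abort(400) here
      return v1_metadata.compat_json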
parent 9cfd6ec452
commit c14437e54a
2 changed files with 201 additions and 133 deletions
data/model/v1/__init__.py (Normal file, 126 additions)
@@ -0,0 +1,126 @@
from app import app, storage as store
from data import model
from util.morecollections import AttrDict


# TODO(jzelinskie): implement all of these methods using both legacy and new models.


def blob_placement_locations_docker_v1(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if repo_image is None:
    return None
  return repo_image.storage.locations


def blob_placement_locations_and_path_docker_v1(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if not repo_image:
    return None, None
  return model.storage.get_layer_path(repo_image.storage), repo_image.storage.locations


def docker_v1_metadata(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if not repo_image:
    return None

  return AttrDict({
    'namespace_name': namespace_name,
    'repo_name': repo_name,
    'image_id': image_id,
    'checksum': repo_image.v1_checksum,
    'compat_json': repo_image.v1_json_metadata,
  })


def update_docker_v1_metadata(namespace_name, repo_name, image_id, created_date_str, comment,
                              command, compat_json, parent_image_id=None):
  # Old implementation:
  # parent_image = model.image.get_repo_image_extended(namespace_name, repo_name, parent_image_id)
  # model.image.set_image_metadata(image_id, namespace_name, repo_name, created_date_str, comment,
  #                                command, compat_json, parent_image)
  pass


def storage_exists(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  try:
    layer_path = store.v1_image_layer_path(repo_image.storage.uuid)
  except AttributeError:
    return False

  if (store.exists(repo_image.storage.locations, layer_path) and not
      repo_image.storage.uploading):
    return True
  return False


def store_docker_v1_checksum(namespace_name, repo_name, image_id, checksum, content_checksum):
  ## Old implementation:
  # UPDATE repo_image.storage.content_checksum = content_checksum
  # UPDATE repo_image.v1_checksum = checksum
  pass


def is_image_uploading(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if repo_image is None:
    return False
  return repo_image.storage.uploading


def update_image_uploading(namespace_name, repo_name, image_id, is_uploading):
  ## Old implementation:
  # UPDATE repo_image.storage.uploading = is_uploading
  pass


def update_image_size(namespace_name, repo_name, image_id, size, uncompressed_size):
  model.storage.set_image_storage_metadata(
    image_id,
    namespace_name,
    repo_name,
    size,
    uncompressed_size,
  )


def image_size(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  return repo_image.storage.image_size


def create_bittorrent_pieces(namespace_name, repo_name, image_id, pieces_bytes):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  try:
    model.storage.save_torrent_info(
      repo_image.storage,
      app.config['BITTORRENT_PIECE_SIZE'],
      pieces_bytes
    )
  except AttributeError:
    pass


def image_ancestry(namespace_name, repo_name, image_id):
  try:
    image = model.image.get_image_by_id(namespace_name, repo_name, image_id)
  except model.InvalidImageException:
    return None

  parents = model.image.get_parent_images(namespace_name, repo_name, image)
  ancestry_docker_ids = [image.docker_image_id]
  ancestry_docker_ids.extend([parent.docker_image_id for parent in parents])
  return ancestry_docker_ids


def repository_exists(namespace_name, repo_name):
  repo = model.repository.get_repository(namespace_name, repo_name)
  return repo is not None


def create_or_link_image(username, repo_name, image_id, storage_location):
  pass


def create_temp_hidden_tag(namespace_name, repo_name, expiration):
  # was this code:
  # model.tag.create_temporary_hidden_tag(repo, repo_image,
  #                                       app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
  pass

@@ -27,23 +27,12 @@ from util.registry.torrent import PieceHasher
 logger = logging.getLogger(__name__)


-def image_is_uploading(repo_image):
-  if repo_image is None:
-    return False
-
-  return repo_image.storage.uploading
-
-
-def set_uploading_flag(repo_image, is_image_uploading):
-  repo_image.storage.uploading = is_image_uploading
-  repo_image.storage.save()
-
-
-def _finish_image(namespace, repo_image):
+def _finish_image(namespace, repository, image_id):
   # Checksum is ok, we remove the marker
-  set_uploading_flag(repo_image, False)
+  update_image_uploading(namespace, repository, image_id, False)

   # Send a job to the work queue to replicate the image layer.
   # TODO(jzelinskie): make this not use imagestorage
   queue_storage_replication(namespace, repo_image.storage)
@@ -52,11 +41,9 @@ def require_completion(f):
   @wraps(f)
   def wrapper(namespace, repository, *args, **kwargs):
     image_id = kwargs['image_id']
-    repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
-    if image_is_uploading(repo_image):
+    if is_image_uploading(namespace, repository, image_id):
       abort(400, 'Image %(image_id)s is being uploaded, retry later',
-            issue='upload-in-progress', image_id=kwargs['image_id'])
+            issue='upload-in-progress', image_id=image_id)

     return f(namespace, repository, *args, **kwargs)
   return wrapper
@@ -96,18 +83,17 @@ def head_image_layer(namespace, repository, image_id, headers):

   logger.debug('Checking repo permissions')
   if permission.can() or model.repository.repository_is_public(namespace, repository):
-    logger.debug('Looking up repo image')
-    repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
-    if not repo_image:
-      logger.debug('Image not found')
+    logger.debug('Looking up blob placement locations')
+    locations = blob_placement_locations_docker_v1(namespace, repository, image_id)
+    if locations is None:
+      logger.debug('Could not find any blob placement locations')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)

-    extra_headers = {}
-
     # Add the Accept-Ranges header if the storage engine supports resumable
     # downloads.
-    if store.get_supports_resumable_downloads(repo_image.storage.locations):
+    extra_headers = {}
+    if store.get_supports_resumable_downloads(locations):
       logger.debug('Storage supports resumable downloads')
       extra_headers['Accept-Ranges'] = 'bytes'
@@ -130,29 +116,23 @@ def get_image_layer(namespace, repository, image_id, headers):

   logger.debug('Checking repo permissions')
   if permission.can() or model.repository.repository_is_public(namespace, repository):
-    logger.debug('Looking up repo image')
-    repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
-    if not repo_image:
-      logger.debug('Image not found')
+    logger.debug('Looking up blob placement locations and path')
+    locations, path = blob_placement_locations_and_path_docker_v1(namespace, repository, image_id)
+    if not locations or not path:
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
             image_id=image_id)

     try:
-      path = model.storage.get_layer_path(repo_image.storage)
       logger.debug('Looking up the direct download URL for path: %s', path)
-      direct_download_url = store.get_direct_download_url(repo_image.storage.locations, path)
+      direct_download_url = store.get_direct_download_url(locations, path)

       if direct_download_url:
         logger.debug('Returning direct download URL')
         resp = redirect(direct_download_url)
         return resp

-      logger.debug('Streaming layer data')
-
-      # Close the database handle here for this process before we send the long download.
-      database.close_db_filter(None)
-
-      return Response(store.stream_read(repo_image.storage.locations, path), headers=headers)
+      logger.debug('Streaming layer data')
+      return Response(store.stream_read(locations, path), headers=headers)
     except (IOError, AttributeError):
       logger.exception('Image layer data not found')
       abort(404, 'Image %(image_id)s not found', issue='unknown-image',
@@ -172,21 +152,7 @@ def put_image_layer(namespace, repository, image_id):
     abort(403)

-  logger.debug('Retrieving image')
-  repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
-  try:
-    logger.debug('Retrieving image data')
-    uuid = repo_image.storage.uuid
-    json_data = repo_image.v1_json_metadata
-  except AttributeError:
-    logger.exception('Exception when retrieving image data')
-    abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
-
-  uuid = repo_image.storage.uuid
-  layer_path = store.v1_image_layer_path(uuid)
-  logger.info('Storing layer at v1 path: %s', layer_path)
-
-  if (store.exists(repo_image.storage.locations, layer_path) and not
-      image_is_uploading(repo_image)):
+  if storage_exists(namespace, repository, image_id):
     exact_abort(409, 'Image already exists')

   logger.debug('Storing layer data')
@@ -216,7 +182,8 @@ def put_image_layer(namespace, repository, image_id):
   sr.add_handler(piece_hasher.update)

   # Add a handler which computes the checksum.
-  h, sum_hndlr = checksums.simple_checksum_handler(json_data)
+  v1_metadata = docker_v1_metadata(namespace, repository, image_id)
+  h, sum_hndlr = checksums.simple_checksum_handler(v1_metadata.compat_json)
   sr.add_handler(sum_hndlr)

   # Add a handler which computes the content checksum only
@@ -224,20 +191,19 @@ def put_image_layer(namespace, repository, image_id):
   sr.add_handler(content_sum_hndlr)

   # Stream write the data to storage.
+  locations, path = blob_placement_locations_and_path_docker_v1(namespace, repository, image_id)
   with database.CloseForLongOperation(app.config):
     try:
-      store.stream_write(repo_image.storage.locations, layer_path, sr)
+      store.stream_write(locations, path, sr)
     except IOError:
       logger.exception('Exception when writing image data')
       abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)

   # Save the size of the image.
-  updated_storage = model.storage.set_image_storage_metadata(image_id, namespace, repository,
-                                                              size_info.compressed_size,
-                                                              size_info.uncompressed_size)
-  pieces_bytes = piece_hasher.final_piece_hashes()
-  model.storage.save_torrent_info(updated_storage, app.config['BITTORRENT_PIECE_SIZE'],
-                                  pieces_bytes)
+  update_image_size(namespace, repository, image_id, size_info.compressed_size, size_info.uncompressed_size)
+
+  # Save the BitTorrent pieces.
+  create_bittorrent_pieces(namespace, repository, image_id, piece_hasher.final_piece_hashes())

   # Append the computed checksum.
   csums = []
|
@ -246,29 +212,27 @@ def put_image_layer(namespace, repository, image_id):
|
|||
try:
|
||||
if requires_tarsum:
|
||||
tmp.seek(0)
|
||||
csums.append(checksums.compute_tarsum(tmp, json_data))
|
||||
csums.append(checksums.compute_tarsum(tmp, v1_metadata.compat_json))
|
||||
tmp.close()
|
||||
|
||||
except (IOError, checksums.TarError) as exc:
|
||||
logger.debug('put_image_layer: Error when computing tarsum %s', exc)
|
||||
|
||||
if repo_image.v1_checksum is None:
|
||||
v1_metadata = docker_v1_metadata(namespace, repository, image_id)
|
||||
if v1_metadata.checksum is None:
|
||||
# We don't have a checksum stored yet, that's fine skipping the check.
|
||||
# Not removing the mark though, image is not downloadable yet.
|
||||
session['checksum'] = csums
|
||||
session['content_checksum'] = 'sha256:{0}'.format(ch.hexdigest())
|
||||
return make_response('true', 200)
|
||||
|
||||
checksum = repo_image.v1_checksum
|
||||
|
||||
# We check if the checksums provided matches one the one we computed
|
||||
if checksum not in csums:
|
||||
if v1_metadata.checksum not in csums:
|
||||
logger.warning('put_image_layer: Wrong checksum')
|
||||
abort(400, 'Checksum mismatch; ignoring the layer for image %(image_id)s',
|
||||
issue='checksum-mismatch', image_id=image_id)
|
||||
|
||||
# Mark the image as uploaded.
|
||||
_finish_image(namespace, repo_image)
|
||||
_finish_image(namespace, repository, image_id)
|
||||
|
||||
return make_response('true', 200)
|
||||
|
||||
|
@ -305,24 +269,27 @@ def put_image_checksum(namespace, repository, image_id):
|
|||
issue='missing-checksum-cookie', image_id=image_id)
|
||||
|
||||
logger.debug('Looking up repo image')
|
||||
repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
|
||||
if not repo_image or not repo_image.storage:
|
||||
v1_metadata = docker_v1_metadata(namespace_name, repo_name, image_id)
|
||||
if not v1_metadata:
|
||||
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
|
||||
|
||||
logger.debug('Looking up repo layer data')
|
||||
if not repo_image.v1_json_metadata:
|
||||
if not v1_metadata.compat_json:
|
||||
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
|
||||
|
||||
logger.debug('Marking image path')
|
||||
if not image_is_uploading(repo_image):
|
||||
if not is_image_uploading(namespace, repository, image_id):
|
||||
abort(409, 'Cannot set checksum for image %(image_id)s',
|
||||
issue='image-write-error', image_id=image_id)
|
||||
|
||||
logger.debug('Storing image and content checksums')
|
||||
|
||||
content_checksum = session.get('content_checksum', None)
|
||||
err = store_checksum(repo_image, checksum, content_checksum)
|
||||
if err:
|
||||
abort(400, err)
|
||||
checksum_parts = checksum.split(':')
|
||||
if len(checksum_parts) != 2:
|
||||
abort(400, 'Invalid checksum format')
|
||||
|
||||
store_docker_v1_checksum(namespace, repository, image_id, checksum, content_checksum)
|
||||
|
||||
if checksum not in session.get('checksum', []):
|
||||
logger.debug('session checksums: %s', session.get('checksum', []))
|
||||
|
@ -332,7 +299,7 @@ def put_image_checksum(namespace, repository, image_id):
|
|||
issue='checksum-mismatch', image_id=image_id)
|
||||
|
||||
# Mark the image as uploaded.
|
||||
_finish_image(namespace, repo_image)
|
||||
_finish_image(namespace, repository, image_id)
|
||||
|
||||
return make_response('true', 200)
|
||||
|
||||
|
@@ -350,18 +317,18 @@ def get_image_json(namespace, repository, image_id, headers):
     abort(403)

   logger.debug('Looking up repo image')
-  repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
-  if repo_image is None:
+  v1_metadata = docker_v1_metadata(namespace, repository, image_id)
+  if v1_metadata is None:
     flask_abort(404)

   logger.debug('Looking up repo layer size')
-  size = repo_image.storage.image_size
+  size = image_size(namespace, repository, image_id)
   if size is not None:
     # Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
     # so handle this case rather than failing.
     headers['X-Docker-Size'] = str(size)

-  response = make_response(repo_image.v1_json_metadata, 200)
+  response = make_response(v1_metadata.compat_json, 200)
   response.headers.extend(headers)
   return response
|
@ -378,35 +345,16 @@ def get_image_ancestry(namespace, repository, image_id, headers):
|
|||
if not permission.can() and not model.repository.repository_is_public(namespace, repository):
|
||||
abort(403)
|
||||
|
||||
try:
|
||||
image = model.image.get_image_by_id(namespace, repository, image_id)
|
||||
except model.InvalidImageException:
|
||||
ancestry_docker_ids = image_ancestry(namespace, repository, image_id)
|
||||
if ancestry_docker_ids is None:
|
||||
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
|
||||
|
||||
parents = model.image.get_parent_images(namespace, repository, image)
|
||||
|
||||
ancestry_docker_ids = [image.docker_image_id]
|
||||
ancestry_docker_ids.extend([parent.docker_image_id for parent in parents])
|
||||
|
||||
# We can not use jsonify here because we are returning a list not an object
|
||||
response = make_response(json.dumps(ancestry_docker_ids), 200)
|
||||
response.headers.extend(headers)
|
||||
return response
|
||||
|
||||
|
||||
def store_checksum(image_with_storage, checksum, content_checksum):
|
||||
checksum_parts = checksum.split(':')
|
||||
if len(checksum_parts) != 2:
|
||||
return 'Invalid checksum format'
|
||||
|
||||
# We store the checksum
|
||||
image_with_storage.storage.content_checksum = content_checksum
|
||||
image_with_storage.storage.save()
|
||||
|
||||
image_with_storage.v1_checksum = checksum
|
||||
image_with_storage.save()
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/json', methods=['PUT'])
|
||||
@process_auth
|
||||
@extract_namespace_repo_from_session
|
||||
|
@@ -419,8 +367,8 @@ def put_image_json(namespace, repository, image_id):

   logger.debug('Parsing image JSON')
   try:
-    v1_metadata = request.data
-    data = json.loads(v1_metadata.decode('utf8'))
+    uploaded_metadata = request.data
+    data = json.loads(uploaded_metadata.decode('utf8'))
   except ValueError:
     pass
@@ -432,48 +380,42 @@ def put_image_json(namespace, repository, image_id):
     abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
           issue='invalid-request', image_id=image_id)

-  logger.debug('Looking up repo image')
-
-  repo = model.repository.get_repository(namespace, repository)
-  if repo is None:
-    abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
-          namespace=namespace, repository=repository)
-
-  repo_image = model.image.get_repo_image_extended(namespace, repository, image_id)
-  if not repo_image:
-    username = get_authenticated_user() and get_authenticated_user().username
-    if not username:
-      username = get_granted_username()
-
-    logger.debug('Image not found, creating image with initiating user context: %s', username)
-    repo_image = model.image.find_create_or_link_image(image_id, repo, username, {},
-                                                       store.preferred_locations[0])
-
-  # Create a temporary tag to prevent this image from getting garbage collected while the push
-  # is in progress.
-  model.tag.create_temporary_hidden_tag(repo, repo_image,
-                                        app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
-
   if image_id != data['id']:
     abort(400, 'JSON data contains invalid id for image: %(image_id)s',
           issue='invalid-request', image_id=image_id)

-  parent_id = data.get('parent', None)
+  logger.debug('Looking up repo image')

-  parent_image = None
+  if not repository_exists(namespace, repository):
+    abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
+          namespace=namespace, repository=repository)
+
+  v1_metadata = docker_v1_metadata(namespace, repository, image_id)
+  if v1_metadata is None:
+    username = get_authenticated_user() and get_authenticated_user().username
+    if not username:
+      username = get_granted_username()
+
+    logger.debug('Image not found, creating or linking image with initiating user context: %s', username)
+    create_or_link_image(username, repository, image_id, store.preferred_locations[0])
+    v1_metadata = docker_v1_metadata(namespace, repository, image_id)
+
+  # Create a temporary tag to prevent this image from getting garbage collected while the push
+  # is in progress.
+  create_temp_hidden_tag(namespace, repository, app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
+
+  parent_id = data.get('parent', None)
   if parent_id:
     logger.debug('Looking up parent image')
-    parent_image = model.image.get_repo_image_extended(namespace, repository, parent_id)
-
-    if not parent_image or parent_image.storage.uploading:
+    if docker_v1_metadata(namespace, repository, parent_id) is None:
       abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
             issue='invalid-request', image_id=image_id, parent_id=parent_id)

   logger.debug('Checking if image already exists')
-  if repo_image.v1_json_metadata and not image_is_uploading(repo_image):
+  if v1_metadata and not is_image_uploading(namespace, repository, image_id):
     exact_abort(409, 'Image already exists')

-  set_uploading_flag(repo_image, True)
+  update_image_uploading(namespace, repository, image_id, True)

   # If we reach that point, it means that this is a new image or a retry
   # on a failed push, save the metadata
@@ -481,8 +423,8 @@ def put_image_json(namespace, repository, image_id):
   command = json.dumps(command_list) if command_list else None

   logger.debug('Setting image metadata')
-  model.image.set_image_metadata(image_id, namespace, repository, data.get('created'),
-                                 data.get('comment'), command, v1_metadata, parent_image)
+  update_docker_v1_metadata(namespace, repository, image_id, data.get('created'),
+                            data.get('comment'), command, uploaded_metadata, parent_id)

   return make_response('true', 200)