endpoints.v2: yapf format

Jimmy Zelinskie 2017-06-26 18:16:15 -04:00
parent 0e26a03f7e
commit b1434b0380
9 changed files with 152 additions and 216 deletions


@@ -14,18 +14,16 @@ from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.decorators import anon_protect
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
-from endpoints.v2.errors import (BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported,
-                                 NameUnknown, LayerTooLarge)
+from endpoints.v2.errors import (
+  BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported, NameUnknown, LayerTooLarge)
 from endpoints.v2.models_pre_oci import data_model as model
 from util.cache import cache_control
 from util.registry.filelike import wrap_with_handler, StreamSlice
 from util.registry.gzipstream import calculate_size_handler
 from util.registry.torrent import PieceHasher

 logger = logging.getLogger(__name__)

 BASE_BLOB_ROUTE = '/<repopath:repository>/blobs/<regex("{0}"):digest>'
 BLOB_DIGEST_ROUTE = BASE_BLOB_ROUTE.format(digest_tools.DIGEST_PATTERN)

 RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$')
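For reference, `RANGE_HEADER_REGEX` matches HTTP Range values of the form `bytes=<start>-<end>`. A minimal sketch of parsing with it (the `parse_range` helper is illustrative, not part of this diff):

```python
import re

RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$')

def parse_range(header_value):
  # Return (start, end) as ints, or None if the header doesn't match.
  match = RANGE_HEADER_REGEX.match(header_value)
  if match is None:
    return None
  return int(match.group(1)), int(match.group(2))

assert parse_range('bytes=0-1023') == (0, 1023)
assert parse_range('bytes=0-') is None  # open-ended ranges are rejected
```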
@@ -52,8 +50,7 @@ def check_blob_exists(namespace_name, repo_name, digest):
   headers = {
     'Docker-Content-Digest': digest,
     'Content-Length': blob.size,
-    'Content-Type': BLOB_CONTENT_TYPE,
-  }
+    'Content-Type': BLOB_CONTENT_TYPE,}

   # If our storage supports range requests, let the client know.
   if storage.get_supports_resumable_downloads(blob.locations):
@@ -102,10 +99,7 @@ def download_blob(namespace_name, repo_name, digest):
     storage.stream_read(blob.locations, path),
     headers=headers.update({
       'Content-Length': blob.size,
-      'Content-Type': BLOB_CONTENT_TYPE,
-    }),
-  )
+      'Content-Type': BLOB_CONTENT_TYPE,}),)


@v2_bp.route('/<repopath:repository>/blobs/uploads/', methods=['POST'])
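Worth noting (pre-existing behavior, untouched by this reformat): `dict.update` returns `None` in Python, so `headers=headers.update({...})` above passes `None` as the `headers` argument. A quick demonstration of the semantics:

```python
headers = {'Docker-Content-Digest': 'sha256:abcd'}
result = headers.update({'Content-Length': 1024})
assert result is None                     # update() mutates in place and returns None
assert headers['Content-Length'] == 1024  # the original dict did receive the key
```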
@@ -128,13 +122,13 @@ def start_blob_upload(namespace_name, repo_name):
     return Response(
       status=202,
       headers={
-        'Docker-Upload-UUID': new_upload_uuid,
-        'Range': _render_range(0),
-        'Location': get_app_url() + url_for('v2.upload_chunk',
-                                            repository='%s/%s' % (namespace_name, repo_name),
-                                            upload_uuid=new_upload_uuid)
-      },
-    )
+        'Docker-Upload-UUID':
+          new_upload_uuid,
+        'Range':
+          _render_range(0),
+        'Location':
+          get_app_url() + url_for('v2.upload_chunk', repository='%s/%s' %
+                                  (namespace_name, repo_name), upload_uuid=new_upload_uuid)},)

   # The user plans to send us the entire body right now.
   # Find the upload.
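The 202 response above follows the Docker Registry HTTP API V2: a POST to `/v2/<name>/blobs/uploads/` opens an upload session and returns a `Location` to send chunks to. A hedged client-side sketch (the registry host and repository name are placeholders):

```python
import requests

resp = requests.post('https://registry.example.com/v2/myorg/myrepo/blobs/uploads/')
assert resp.status_code == 202

upload_url = resp.headers['Location']             # where to PATCH chunks
upload_uuid = resp.headers['Docker-Upload-UUID']  # identifies the upload session
```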
@@ -158,12 +152,11 @@ def start_blob_upload(namespace_name, repo_name):
     return Response(
       status=201,
       headers={
-        'Docker-Content-Digest': digest,
-        'Location': get_app_url() + url_for('v2.download_blob',
-                                            repository='%s/%s' % (namespace_name, repo_name),
-                                            digest=digest),
-      },
-    )
+        'Docker-Content-Digest':
+          digest,
+        'Location':
+          get_app_url() + url_for('v2.download_blob', repository='%s/%s' %
+                                  (namespace_name, repo_name), digest=digest),},)


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['GET'])
@@ -180,9 +173,8 @@ def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
     status=204,
     headers={
       'Docker-Upload-UUID': upload_uuid,
-      'Range': _render_range(blob_upload.byte_count+1), # byte ranges are exclusive
-    },
-  )
+      'Range': _render_range(blob_upload.byte_count + 1),  # byte ranges are exclusive
+    },)


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['PATCH'])
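`_render_range` itself is not shown in this diff; from its call sites it appears to render the count of bytes received so far as an HTTP Range value, optionally without the `bytes=` prefix. A plausible shape, offered only as an inference:

```python
def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
  # Inferred from call sites: render '0-<n>' for the bytes received so far,
  # with an optional 'bytes=' prefix for use in Range headers.
  prefix = 'bytes=' if with_bytes_prefix else ''
  return '%s0-%d' % (prefix, num_uploaded_bytes)
```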
@@ -211,9 +203,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
     headers={
       'Location': _current_request_url(),
       'Range': _render_range(updated_blob_upload.byte_count, with_bytes_prefix=False),
-      'Docker-Upload-UUID': upload_uuid,
-    },
-  )
+      'Docker-Upload-UUID': upload_uuid,},)


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['PUT'])
@@ -242,15 +232,12 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
   _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)

   # Write the response to the client.
-  return Response(
-    status=201,
-    headers={
-      'Docker-Content-Digest': digest,
-      'Location': get_app_url() + url_for('v2.download_blob',
-                                          repository='%s/%s' % (namespace_name, repo_name),
-                                          digest=digest),
-    }
-  )
+  return Response(status=201, headers={
+    'Docker-Content-Digest':
+      digest,
+    'Location':
+      get_app_url() + url_for('v2.download_blob', repository='%s/%s' %
+                              (namespace_name, repo_name), digest=digest),})


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['DELETE'])
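Completing the upload is a PUT to the upload's `Location` with the expected digest as a query parameter; on success the registry answers 201 with `Docker-Content-Digest`. A hedged client-side sketch (`upload_url` stands in for the `Location` returned when the upload was started):

```python
import hashlib
import requests

blob = b'example layer bytes'
digest = 'sha256:' + hashlib.sha256(blob).hexdigest()

upload_url = 'https://registry.example.com/v2/myorg/myrepo/blobs/uploads/<uuid>'
resp = requests.put(upload_url, params={'digest': digest}, data=blob)
assert resp.status_code == 201
assert resp.headers['Docker-Content-Digest'] == digest
```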
@@ -300,9 +287,11 @@ def _abort_range_not_satisfiable(valid_end, upload_uuid):
   TODO(jzelinskie): Unify this with the V2RegistryException class.
   """
-  flask_abort(Response(status=416, headers={'Location': _current_request_url(),
-                                            'Range': '0-{0}'.format(valid_end),
-                                            'Docker-Upload-UUID': upload_uuid}))
+  flask_abort(
+    Response(status=416, headers={
+      'Location': _current_request_url(),
+      'Range': '0-{0}'.format(valid_end),
+      'Docker-Upload-UUID': upload_uuid}))


def _parse_range_header(range_header_text):
@@ -415,16 +404,15 @@ def _upload_chunk(blob_upload, range_header):
       length,
       input_fp,
       blob_upload.storage_metadata,
-      content_type=BLOB_CONTENT_TYPE,
-    )
+      content_type=BLOB_CONTENT_TYPE,)

   if upload_error is not None:
     logger.error('storage.stream_upload_chunk returned error %s', upload_error)
     return None

   # Update the chunk upload time metric.
-  metric_queue.chunk_upload_time.Observe(time.time() - start_time,
-                                         labelvalues=[length_written, list(location_set)[0]])
+  metric_queue.chunk_upload_time.Observe(time.time() - start_time, labelvalues=[
+    length_written, list(location_set)[0]])

   # If we determined an uncompressed size and this is the first chunk, add it to the blob.
   # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks.
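`metric_queue.chunk_upload_time.Observe` records a timing sample with label values for chunk length and storage location. With the plain `prometheus_client` library, the equivalent would look roughly like this (metric and label names are illustrative):

```python
import time
from prometheus_client import Histogram

chunk_upload_time = Histogram('chunk_upload_time_seconds', 'Time to upload a blob chunk',
                              ['length', 'location'])

start_time = time.time()
# ... stream the chunk to storage ...
chunk_upload_time.labels(length='1024', location='local_us').observe(time.time() - start_time)
```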
@@ -499,8 +487,7 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
     repo_name,
     digest,
     blob_upload,
-    app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],
-  )
+    app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],)

   # If it doesn't already exist, create the BitTorrent pieces for the blob.
   if blob_upload.piece_sha_state is not None and not already_existed:
@@ -521,5 +508,4 @@ def _finish_upload(namespace_name, repo_name, blob_upload, digest):
     repo_name,
     blob_upload,
     digest,
-    _finalize_blob_storage(blob_upload, digest),
-  )
+    _finalize_blob_storage(blob_upload, digest),)