From 419779b9c51dc5c258204f3e3045bcae25425802 Mon Sep 17 00:00:00 2001
From: Jimmy Zelinskie
Date: Tue, 9 Aug 2016 16:06:12 -0400
Subject: [PATCH] v2/blob: remove references to docker client

---
 endpoints/v2/blob.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py
index 38ae12140..2a18bb979 100644
--- a/endpoints/v2/blob.py
+++ b/endpoints/v2/blob.py
@@ -52,11 +52,11 @@ def check_blob_exists(namespace_name, repo_name, digest):
     'Content-Type': BLOB_CONTENT_TYPE,
   }
 
-  # If our storage supports range requests, let the Docker client know.
+  # If our storage supports range requests, let the client know.
   if storage.get_supports_resumable_downloads(blob.locations):
     headers['Accept-Ranges'] = 'bytes'
 
-  # Write the response to the Docker client.
+  # Write the response to the client.
   return Response(headers=headers)
 
 
@@ -75,7 +75,7 @@ def download_blob(namespace_name, repo_name, digest):
   # Build the response headers.
   headers = {'Docker-Content-Digest': digest}
 
-  # If our storage supports range requests, let the Docker client know.
+  # If our storage supports range requests, let the client know.
   if storage.get_supports_resumable_downloads(blob.locations):
     headers['Accept-Ranges'] = 'bytes'
 
@@ -94,7 +94,7 @@ def download_blob(namespace_name, repo_name, digest):
   # Close the database connection before we stream the download.
   logger.debug('Closing database connection before streaming layer data')
   with database.CloseForLongOperation(app.config):
-    # Stream the response to the Docker client.
+    # Stream the response to the client.
     return Response(
       storage.stream_read(blob.locations, path),
       headers=headers.update({
@@ -150,7 +150,7 @@ def start_blob_upload(namespace_name, repo_name):
     # Finalize the upload process in the database and storage.
     _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
 
-    # Write the response to the docker client.
+    # Write the response to the client.
     return Response(
       status=201,
       headers={
@@ -175,7 +175,7 @@ def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
     status=204,
     headers={
       'Docker-Upload-UUID': upload_uuid,
-      'Range': _render_range(blob_upload.byte_count+1), # Docker byte ranges are exclusive
+      'Range': _render_range(blob_upload.byte_count+1), # byte ranges are exclusive
     },
   )
 
@@ -200,7 +200,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
   # Save the upload state to the database.
   v2.update_blob_upload(updated_blob_upload)
 
-  # Write the response to the Docker client.
+  # Write the response to the client.
   return Response(
     status=204,
     headers={
@@ -236,7 +236,7 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
   # Finalize the upload process in the database and storage.
   _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
 
-  # Write the response to the Docker client.
+  # Write the response to the client.
   return Response(
     status=201,
     headers={
@@ -387,7 +387,7 @@ def _upload_chunk(blob_upload, range_header):
 
   # If this is the first chunk and we're starting at the 0 offset, add a handler to gunzip the
   # stream so we can determine the uncompressed size. We'll throw out this data if another chunk
-  # comes in, but in the common case Docker only sends one chunk.
+  # comes in, but in the common case the docker client only sends one chunk.
   size_info = None
   if start_offset == 0 and blob_upload.chunk_count == 0:
     size_info, fn = calculate_size_handler()
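
Note on the Accept-Ranges header these endpoints advertise: when the backing
storage supports resumable downloads, a client can recover an interrupted blob
fetch with a ranged GET instead of restarting from byte zero. A minimal
client-side sketch of that flow, assuming the 'requests' library, a placeholder
registry URL, and that the blob GET honors Range (none of which is part of this
patch):

    import requests

    # Placeholder blob URL; host, repository, and digest are hypothetical.
    url = 'https://registry.example.com/v2/foo/bar/blobs/sha256:abc123'

    received = 1024  # bytes already downloaded before the connection dropped

    # HEAD hits the blob-exists endpoint; Accept-Ranges signals resume support.
    head = requests.head(url)
    if head.headers.get('Accept-Ranges') == 'bytes':
        # Ask only for the bytes we are missing.
        resp = requests.get(url, headers={'Range': 'bytes=%d-' % received})
        # 206 Partial Content is expected if the server honors the Range header.
        print(resp.status_code, len(resp.content))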
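Separately, one hazard visible in the context lines of the @@ -94 hunk: Python's
dict.update() mutates in place and returns None, so 'headers=headers.update({...})'
as shown there passes None to Response. A quick demonstration of the language
behavior (plain Python, independent of this codebase):

    headers = {'Docker-Content-Digest': 'sha256:abc'}
    result = headers.update({'Content-Length': '10'})

    assert result is None                        # update() returns None, not the dict
    assert headers['Content-Length'] == '10'     # the mutation itself does happen

    # The safe pattern is to mutate first, then pass the dict itself:
    headers.update({'Content-Type': 'application/octet-stream'})
    # Response(..., headers=headers)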