From 9f743fd6cdf9761f43fa713642a546eab3f56ef0 Mon Sep 17 00:00:00 2001
From: Jimmy Zelinskie <jimmy.zelinskie+git@gmail.com>
Date: Tue, 9 Aug 2016 15:11:35 -0400
Subject: [PATCH] address PR comments

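Document the paginate() decorator, log each failure path in
_upload_chunk, rely on the error value returned by
storage.stream_upload_chunk instead of catching InvalidChunkException,
rename DockerException to DockerFormatException, and reword a comment
in squashed.py.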
---
 endpoints/v2/__init__.py |  3 +++
 endpoints/v2/blob.py     | 26 +++++++++++++-------------
 image/docker/__init__.py |  4 ++--
 image/docker/squashed.py |  2 +-
 4 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/endpoints/v2/__init__.py b/endpoints/v2/__init__.py
index 97ecc40e6..1e01f6416 100644
--- a/endpoints/v2/__init__.py
+++ b/endpoints/v2/__init__.py
@@ -33,6 +33,9 @@ _MAX_RESULTS_PER_PAGE = 50
 
 def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
              callback_kwarg_name='pagination_callback'):
+  """
+  Decorates a handler adding a parsed pagination token and a callback to encode a response token.
+  """
   def wrapper(func):
     @wraps(func)
     def wrapped(*args, **kwargs):
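As a usage sketch only (not part of this patch): a handler decorated with
paginate() receives the parsed values and the callback under the default
kwarg names. The blueprint, model call, response shape, and callback
signature below are illustrative assumptions.

    from flask import jsonify

    @v2_bp.route('/_catalog', methods=['GET'])  # v2_bp assumed from this module
    @paginate()
    def catalog_search(limit, offset, pagination_callback):
      # 'limit' and 'offset' arrive parsed from the request's pagination token.
      visible = model.get_visible_repositories(limit=limit, offset=offset)
      response = jsonify({'repositories': [repo.name for repo in visible]})

      # Encode a token for the next page onto the response (assumed signature).
      pagination_callback(len(visible), response)
      return response
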
diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py
index b463816db..f04e767da 100644
--- a/endpoints/v2/blob.py
+++ b/endpoints/v2/blob.py
@@ -343,9 +343,11 @@ def _upload_chunk(blob_upload, start_offset, length):
   """
   # Check for invalid arguments.
   if None in {blob_upload, start_offset, length}:
+    logger.error('None provided as argument to _upload_chunk')
     return None
 
   if start_offset > 0 and start_offset > blob_upload.byte_count:
+    logger.error('start_offset provided to _upload_chunk greater than blob_upload.byte_count')
     return None
 
   location_set = {blob_upload.location_name}
@@ -391,19 +393,17 @@ def _upload_chunk(blob_upload, start_offset, length):
       size_info, fn = calculate_size_handler()
       input_fp = wrap_with_handler(input_fp, fn)
 
-    try:
-      length_written, new_metadata, error = storage.stream_upload_chunk(
-        location_set,
-        blob_upload.uuid,
-        start_offset,
-        length,
-        input_fp,
-        blob_upload.storage_metadata,
-        content_type=BLOB_CONTENT_TYPE,
-      )
-      if error is not None:
-        return None
-    except InvalidChunkException:
+    length_written, new_metadata, error = storage.stream_upload_chunk(
+      location_set,
+      blob_upload.uuid,
+      start_offset,
+      length,
+      input_fp,
+      blob_upload.storage_metadata,
+      content_type=BLOB_CONTENT_TYPE,
+    )
+    if error is not None:
+      logger.error('storage.stream_upload_chunk returned error %s', error)
       return None
 
   # If we determined an uncompressed size and this is the first chunk, add it to the blob.
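
The dropped try/except reflects storage.stream_upload_chunk reporting
failures through its returned error value rather than by raising
InvalidChunkException, so _upload_chunk now logs and returns None on every
failure path. A minimal sketch of a call site under that contract, assuming
_upload_chunk returns the updated blob_upload on success; the surrounding
handler and helpers are hypothetical:

    updated_upload = _upload_chunk(blob_upload, start_offset, length)
    if updated_upload is None:
      # The cause was already logged inside _upload_chunk.
      raise BlobUploadInvalid()  # hypothetical v2 error response
    save_blob_upload(updated_upload)  # hypothetical persistence helper
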
diff --git a/image/docker/__init__.py b/image/docker/__init__.py
index 74ceba2d7..f694dcb12 100644
--- a/image/docker/__init__.py
+++ b/image/docker/__init__.py
@@ -2,9 +2,9 @@
 docker implements pure data transformations according to the many Docker specifications.
 """
 
-class DockerException(Exception):
+class DockerFormatException(Exception):
   pass
 
 
-class ManifestException(DockerException):
+class ManifestException(DockerFormatException):
   pass
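
Since ManifestException still subclasses the renamed base, call sites can
catch the whole family via DockerFormatException. A brief sketch; the parser
function and its input are hypothetical:

    import logging

    from image.docker import DockerFormatException

    logger = logging.getLogger(__name__)

    try:
      manifest = parse_manifest(manifest_bytes)  # hypothetical parser
    except DockerFormatException:
      # Catches ManifestException and any other DockerFormatException subclass.
      logger.exception('Rejected data that failed Docker format validation')
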
diff --git a/image/docker/squashed.py b/image/docker/squashed.py
index 0f6628952..d3c886185 100644
--- a/image/docker/squashed.py
+++ b/image/docker/squashed.py
@@ -40,7 +40,7 @@ class SquashedDockerImageFormatter(TarImageFormatter):
     #  repositories - JSON file containing a repo -> tag -> image map
     #  {image ID folder}:
     #     json - The layer JSON
-    #     layer.tar - The tared contents of the layer
+    #     layer.tar - The tarred contents of the layer
     #     VERSION - The docker import version: '1.0'
     layer_merger = StreamLayerMerger(get_layer_iterator)
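
For reference, the layout documented in the comment above could be
materialized roughly as follows; the image ID, repository, and tag are
placeholder values:

    import io
    import json
    import tarfile

    IMAGE_ID = 'deadbeef' * 8  # placeholder 64-character image ID

    def _add_file(tar, name, data):
      info = tarfile.TarInfo(name=name)
      info.size = len(data)
      tar.addfile(info, io.BytesIO(data))

    with tarfile.open('squashed.tar', 'w') as tar:
      _add_file(tar, 'repositories',
                json.dumps({'myorg/myrepo': {'latest': IMAGE_ID}}).encode())
      _add_file(tar, '%s/json' % IMAGE_ID, json.dumps({'id': IMAGE_ID}).encode())
      _add_file(tar, '%s/VERSION' % IMAGE_ID, b'1.0')
      _add_file(tar, '%s/layer.tar' % IMAGE_ID, b'')  # merged layer contents go here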