diff --git a/data/model/tag.py b/data/model/tag.py
index 14abc026d..24926174b 100644
--- a/data/model/tag.py
+++ b/data/model/tag.py
@@ -88,7 +88,7 @@ def get_matching_tags_for_images(image_pairs, filter_images=None, filter_tags=No
   images = []
 
   while image_pairs:
-    image_pairs_slice = image_pairs[0:_MAX_IMAGE_LOOKUP_COUNT]
+    image_pairs_slice = image_pairs[:_MAX_IMAGE_LOOKUP_COUNT]
 
     ids = [pair[0] for pair in image_pairs_slice]
     uuids = [pair[1] for pair in image_pairs_slice]
@@ -109,7 +109,11 @@ def get_matching_tags_for_images(image_pairs, filter_images=None, filter_tags=No
   individual_image_queries = []
 
   for img in images:
-    # Make sure the actual image was requested.
+    # Make sure the image found is in the set of those requested, and that we haven't already
+    # processed it. We need this check because the query above matches images by ID OR storage
+    # UUID, rather than by the expected ID+UUID pair. Matching that way is more efficient, and
+    # while it is highly unlikely we'll find an image with a mismatch, this check makes
+    # absolutely sure we only keep the images that were actually requested.
     pair = (img.docker_image_id, img.storage.uuid)
     if pair not in image_pairs_set:
       continue
@@ -131,7 +135,7 @@ def get_matching_tags_for_images(image_pairs, filter_images=None, filter_tags=No
   # size, as well as to prevent the DB from locking up on a massive query.
   sharded_queries = []
   while individual_image_queries:
-    shard = individual_image_queries[0:_MAX_SUB_QUERIES]
+    shard = individual_image_queries[:_MAX_SUB_QUERIES]
     sharded_queries.append(_basequery.reduce_as_tree(shard))
     individual_image_queries = individual_image_queries[_MAX_SUB_QUERIES:]
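Note for reviewers: both hunks above touch the same chunking pattern: look the images up in batches, re-check the exact (docker_image_id, storage uuid) pair on each result, and later OR the per-image clauses together in shards. The following is a minimal, self-contained sketch of that pattern, assuming a hypothetical lookup_images_by_id_or_uuid(ids, uuids) helper and flat tuple records in place of the real Peewee models and _basequery.reduce_as_tree; the batch-size constants here are placeholders, not the repository's real values.

    from collections import namedtuple

    _MAX_IMAGE_LOOKUP_COUNT = 500   # batch size for the ID/UUID lookup (placeholder value)
    _MAX_SUB_QUERIES = 100          # shard size for the OR-reduction (placeholder value)

    # Hypothetical flat stand-in for the ORM row; the real code reads img.storage.uuid.
    FoundImage = namedtuple("FoundImage", ["docker_image_id", "storage_uuid"])

    def match_requested_images(image_pairs, lookup_images_by_id_or_uuid):
      """Return only images whose (docker_image_id, storage_uuid) pair was requested.

      lookup_images_by_id_or_uuid(ids, uuids) is a hypothetical stand-in for the broad
      query that matches on image ID OR storage UUID rather than the exact pair.
      """
      image_pairs_set = set(image_pairs)
      found = []

      # Batch the lookup so no single query carries an unbounded parameter list.
      remaining = list(image_pairs)
      while remaining:
        pairs_slice = remaining[:_MAX_IMAGE_LOOKUP_COUNT]
        ids = [pair[0] for pair in pairs_slice]
        uuids = [pair[1] for pair in pairs_slice]
        found.extend(lookup_images_by_id_or_uuid(ids, uuids))
        remaining = remaining[_MAX_IMAGE_LOOKUP_COUNT:]

      # The lookup matched on ID *or* UUID, so re-check the exact pair and skip
      # anything already processed.
      matched = []
      for img in found:
        pair = (img.docker_image_id, img.storage_uuid)
        if pair not in image_pairs_set:
          continue
        image_pairs_set.remove(pair)
        matched.append(img)
      return matched

    def shard_clauses(clauses, reduce_as_tree):
      """OR together per-image clauses in shards, mirroring the second hunk."""
      shards = []
      while clauses:
        shard = clauses[:_MAX_SUB_QUERIES]
        shards.append(reduce_as_tree(shard))
        clauses = clauses[_MAX_SUB_QUERIES:]
      return shards

Batching the lookup keeps each query's parameter count bounded, and sharding the OR-reduction keeps any single query small, which is the query-size and DB-locking concern the original comments describe.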