Change the security notification system to use get_matching_tags_for_images
This should vastly reduce the number of database calls we make: instead of making 2-3 calls per image, we'll make two calls per ~100 images.
parent 74dd0ef8e8
commit 98fcae753b
1 changed file with 22 additions and 15 deletions
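As a standalone sketch of the batching idea described in the commit message (illustrative code only, not part of the change: the chunked helper, the resolve_tags function, and the batched_lookup callable standing in for get_matching_tags_for_images are all assumptions):

from itertools import islice

def chunked(iterable, size=100):
  # Yield successive lists of at most `size` items from `iterable`.
  iterator = iter(iterable)
  while True:
    chunk = list(islice(iterator, size))
    if not chunk:
      return
    yield chunk

def resolve_tags(layer_ids, batched_lookup):
  # `layer_ids` are strings of the form '<docker_image_id>.<storage_uuid>'.
  # `batched_lookup` stands in for get_matching_tags_for_images: it accepts a
  # list of (docker_image_id, storage_uuid) pairs and returns the matching tags.
  tags = []
  for chunk in chunked(layer_ids, 100):
    pairs = [tuple(layer_id.split('.', 2)) for layer_id in chunk]
    tags.extend(batched_lookup(pairs))  # one bulk query per chunk of ~100 layers
  return tags

Under the old per-image approach, the loop body would have issued two to three database calls per image; here the number of queries scales with the number of chunks instead.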
@@ -5,7 +5,7 @@ from collections import defaultdict
from enum import Enum

from app import secscan_api
from data.model.tag import filter_tags_have_repository_event, get_matching_tags
from data.model.tag import filter_tags_have_repository_event, get_matching_tags_for_images
from data.database import (Image, ImageStorage, ExternalNotificationEvent, Repository,
                           RepositoryTag)
from endpoints.notificationhelper import notification_batch

@@ -33,10 +33,10 @@ class SecurityNotificationHandler(object):
    self.tag_map = defaultdict(set)
    self.repository_map = {}
    self.check_map = {}
    self.layer_ids = set()

    self.stream_tracker = None
    self.results_per_stream = results_per_stream
    self.reporting_failed = False
    self.vulnerability_info = None

    self.event = ExternalNotificationEvent.get(name='vulnerability_found')

@@ -133,10 +133,6 @@ class SecurityNotificationHandler(object):
    self.stream_tracker.push_new(new_layer_ids)
    self.stream_tracker.push_old(old_layer_ids)

    # If the reporting failed at any point, nothing more we can do.
    if self.reporting_failed:
      return ProcessNotificationPageResult.FAILED

    # Check to see if there are any additional pages to process.
    if 'NextPage' not in notification_page_data:
      return self._done()

@@ -145,21 +141,30 @@ class SecurityNotificationHandler(object):

  def _done(self):
    if self.stream_tracker is not None:
      # Mark the tracker as done, so that it finishes reporting any outstanding layers.
      self.stream_tracker.done()

    if self.reporting_failed:
      return ProcessNotificationPageResult.FAILED
    # Process all the layers.
    if self.vulnerability_info is not None:
      if not self._process_layers():
        return ProcessNotificationPageResult.FAILED

    return ProcessNotificationPageResult.FINISHED_PROCESSING

  def _report(self, new_layer_id):
    # Split the layer ID into its Docker Image ID and storage ID.
    (docker_image_id, storage_uuid) = new_layer_id.split('.', 2)
    self.layer_ids.add(new_layer_id)

  def _process_layers(self):
    # Builds the pairs of layer ID and storage uuid.
    pairs = [tuple(layer_id.split('.', 2)) for layer_id in self.layer_ids]

    def filter_notifying_repos(query):
      query = query.join(ImageStorage)
      return filter_tags_have_repository_event(query, self.event)

    # Find the matching tags.
    matching = get_matching_tags(docker_image_id, storage_uuid, RepositoryTag, Repository,
                                 Image, ImageStorage)
    tags = list(filter_tags_have_repository_event(matching, self.event))
    tags = get_matching_tags_for_images(pairs, selections=[RepositoryTag, Image, ImageStorage],
                                        filter_query=filter_notifying_repos)

    cve_id = self.vulnerability_info['Name']
    for tag in tags:

@@ -170,12 +175,14 @@ class SecurityNotificationHandler(object):
      try:
        self.check_map[tag_layer_id] = secscan_api.check_layer_vulnerable(tag_layer_id, cve_id)
      except APIRequestFailure:
        self.reporting_failed = True
        return
        return False

      logger.debug('Result of layer %s is vulnerable to %s check: %s', tag_layer_id, cve_id,
                   self.check_map[tag_layer_id])

      if self.check_map[tag_layer_id]:
        # Add the vulnerable tag to the list.
        self.tag_map[tag.repository_id].add(tag.name)
        self.repository_map[tag.repository_id] = tag.repository

    return True