util/metrics: remove metricqueue abstraction

This change replaces the metricqueue library with a native Prometheus
client implementation, with the intention of aggregating results with
the Prometheus PushGateway.
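
As background, the push model this enables looks roughly like the following
with the prometheus_client API; the gateway address and job name below are
placeholder values, not taken from this commit:

    from prometheus_client import CollectorRegistry, Counter, push_to_gateway

    # A dedicated registry for this example; module-level metrics normally
    # attach to the library's default REGISTRY instead.
    registry = CollectorRegistry()

    example_events = Counter('quay_example_events_total',
                             'hypothetical counter for illustration',
                             registry=registry)
    example_events.inc()

    # Push the current values to a PushGateway, which Prometheus then
    # scrapes; 'localhost:9091' and the job name are assumed values.
    push_to_gateway('localhost:9091', job='quay', registry=registry)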

This change also adds instrumentation for greenlet context switches.
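The greenlet hook itself is not in the hunks shown below; one plausible
shape for it, using greenlet.settrace and a module-level counter (names
illustrative, not taken from the commit):

    import greenlet
    from prometheus_client import Counter

    greenlet_switch = Counter('greenlet_switch_total',
                              'number of greenlet context switches')

    def _trace_greenlets(event, args):
      # greenlet invokes the trace function on 'switch' and 'throw' events.
      if event in ('switch', 'throw'):
        greenlet_switch.inc()

    greenlet.settrace(_trace_greenlets)
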
Jimmy Zelinskie 2019-11-13 14:50:33 -05:00
parent 23c5120790
commit 4bf4ce33c9
57 changed files with 526 additions and 690 deletions


@@ -22,42 +22,41 @@ STORAGE_DRIVER_CLASSES = {
 }
 
-def get_storage_driver(location, metric_queue, chunk_cleanup_queue, config_provider, ip_resolver,
+def get_storage_driver(location, chunk_cleanup_queue, config_provider, ip_resolver,
                        storage_params):
   """ Returns a storage driver class for the given storage configuration
       (a pair of string name and a dict of parameters). """
   driver = storage_params[0]
   parameters = storage_params[1]
   driver_class = STORAGE_DRIVER_CLASSES.get(driver, FakeStorage)
-  context = StorageContext(location, metric_queue, chunk_cleanup_queue, config_provider,
+  context = StorageContext(location, chunk_cleanup_queue, config_provider,
                            ip_resolver)
   return driver_class(context, **parameters)
 
 
 class StorageContext(object):
-  def __init__(self, location, metric_queue, chunk_cleanup_queue, config_provider, ip_resolver):
+  def __init__(self, location, chunk_cleanup_queue, config_provider, ip_resolver):
     self.location = location
-    self.metric_queue = metric_queue
     self.chunk_cleanup_queue = chunk_cleanup_queue
     self.config_provider = config_provider
     self.ip_resolver = ip_resolver or NoopIPResolver()
 
 
 class Storage(object):
-  def __init__(self, app=None, metric_queue=None, chunk_cleanup_queue=None, instance_keys=None,
+  def __init__(self, app=None, chunk_cleanup_queue=None, instance_keys=None,
                config_provider=None, ip_resolver=None):
     self.app = app
     if app is not None:
-      self.state = self.init_app(app, metric_queue, chunk_cleanup_queue, instance_keys,
+      self.state = self.init_app(app, chunk_cleanup_queue, instance_keys,
                                  config_provider, ip_resolver)
     else:
       self.state = None
 
-  def init_app(self, app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider,
+  def init_app(self, app, chunk_cleanup_queue, instance_keys, config_provider,
                ip_resolver):
     storages = {}
     for location, storage_params in app.config.get('DISTRIBUTED_STORAGE_CONFIG').items():
-      storages[location] = get_storage_driver(location, metric_queue, chunk_cleanup_queue,
+      storages[location] = get_storage_driver(location, chunk_cleanup_queue,
                                               config_provider, ip_resolver, storage_params)
 
     preference = app.config.get('DISTRIBUTED_STORAGE_PREFERENCE', None)
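
Note the pattern that makes the signature changes above possible:
prometheus_client metrics are module-level objects registered with a
process-wide default registry at import time, so nothing needs to carry a
metric_queue reference anymore. A minimal sketch, with illustrative names:

    from prometheus_client import Counter

    # Registered with the default REGISTRY at import time; no instance
    # wiring is required.
    things_processed = Counter('example_things_processed_total',
                               'hypothetical counter for illustration')

    class Worker(object):
      def process(self):
        things_processed.inc()  # replaces metric_queue-style injection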


@@ -3,27 +3,26 @@ import os
 import logging
 import copy
 
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives import serialization
-from cryptography.hazmat.primitives.asymmetric import padding
-from cachetools.func import lru_cache
-from itertools import chain
+from collections import namedtuple
 from datetime import datetime, timedelta
-from botocore.signers import CloudFrontSigner
-from boto.exception import S3ResponseError
+from io import BufferedIOBase
+from itertools import chain
+from uuid import uuid4
 
 import boto.s3.connection
 import boto.s3.multipart
 import boto.gs.connection
 import boto.s3.key
 import boto.gs.key
 
-from io import BufferedIOBase
-from uuid import uuid4
-from collections import namedtuple
+from boto.exception import S3ResponseError
+from botocore.signers import CloudFrontSigner
+from cachetools.func import lru_cache
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import padding
+from prometheus_client import Counter
 
 from util.registry import filelike
 
 from storage.basestorage import BaseStorageV2
@@ -31,6 +30,13 @@ from storage.basestorage import BaseStorageV2
 
 logger = logging.getLogger(__name__)
 
+multipart_uploads_started = Counter('quay_multipart_uploads_started_total',
+                                    'number of multipart uploads to Quay storage that started')
+multipart_uploads_completed = Counter('quay_multipart_uploads_completed_total',
+                                      'number of multipart uploads to Quay storage that completed')
+
 _PartUploadMetadata = namedtuple('_PartUploadMetadata', ['path', 'offset', 'length'])
 _CHUNKS_KEY = 'chunks'
@@ -181,8 +187,7 @@ class _CloudStorage(BaseStorageV2):
     if content_encoding is not None:
       metadata['Content-Encoding'] = content_encoding
 
-    if self._context.metric_queue is not None:
-      self._context.metric_queue.multipart_upload_start.Inc()
+    multipart_uploads_started.inc()
 
     return self._cloud_bucket.initiate_multipart_upload(path, metadata=metadata,
                                                         **self._upload_params)
@@ -237,8 +242,7 @@ class _CloudStorage(BaseStorageV2):
         logger.warn('Error when writing to stream in stream_write_internal at path %s: %s', path, e)
         write_error = e
 
-        if self._context.metric_queue is not None:
-          self._context.metric_queue.multipart_upload_end.Inc(labelvalues=['failure'])
+        multipart_uploads_completed.inc()
 
         if cancel_on_error:
           try:
@@ -251,8 +255,7 @@ class _CloudStorage(BaseStorageV2):
           break
 
     if total_bytes_written > 0:
-      if self._context.metric_queue is not None:
-        self._context.metric_queue.multipart_upload_end.Inc(labelvalues=['success'])
+      multipart_uploads_completed.inc()
 
       self._perform_action_with_retry(mp.complete_upload)
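
One detail worth flagging in this hunk: the old metric recorded success and
failure as separate labelvalues, while the new multipart_uploads_completed
counter increments identically on both paths. If the distinction were still
wanted, a labeled counter would keep it; a sketch of that variant (not what
this commit does):

    from prometheus_client import Counter

    # Hypothetical labeled alternative; the commit's counter is unlabeled.
    uploads_completed = Counter('quay_multipart_uploads_completed_total',
                                'number of multipart uploads to Quay storage that completed',
                                labelnames=['status'])

    uploads_completed.labels('success').inc()
    uploads_completed.labels('failure').inc()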


@@ -18,7 +18,7 @@ _TEST_BUCKET = 'some_bucket'
 _TEST_USER = 'someuser'
 _TEST_PASSWORD = 'somepassword'
 _TEST_PATH = 'some/cool/path'
-_TEST_CONTEXT = StorageContext('nyc', None, None, None, None)
+_TEST_CONTEXT = StorageContext('nyc', None, None, None)
 
 
 @pytest.fixture(scope='function')


@@ -44,7 +44,7 @@ def test_direct_download(test_aws_ip, test_empty_ip_range_cache, test_ip_range_c
   if ipranges_populated:
     ipresolver.sync_token = test_ip_range_cache['sync_token'] if ipranges_populated else test_empty_ip_range_cache['sync_token']
     ipresolver.amazon_ranges = test_ip_range_cache['all_amazon'] if ipranges_populated else test_empty_ip_range_cache['all_amazon']
-  context = StorageContext('nyc', None, None, config_provider, ipresolver)
+  context = StorageContext('nyc', None, config_provider, ipresolver)
 
   # Create a test bucket and put some test content.
   boto.connect_s3().create_bucket(_TEST_BUCKET)
@@ -68,7 +68,7 @@ def test_direct_download(test_aws_ip, test_empty_ip_range_cache, test_ip_range_c
 @mock_s3
 def test_direct_download_no_ip(test_aws_ip, aws_ip_range_data, ipranges_populated, app):
   ipresolver = IPResolver(app)
-  context = StorageContext('nyc', None, None, config_provider, ipresolver)
+  context = StorageContext('nyc', None, config_provider, ipresolver)
 
   # Create a test bucket and put some test content.
   boto.connect_s3().create_bucket(_TEST_BUCKET)


@@ -11,7 +11,7 @@ from storage.swift import SwiftStorage, _EMPTY_SEGMENTS_KEY
 from swiftclient.client import ClientException
 
 base_args = {
-  'context': StorageContext('nyc', None, None, None, None),
+  'context': StorageContext('nyc', None, None, None),
   'swift_container': 'container-name',
   'storage_path': '/basepath',
   'auth_url': 'https://auth.com',
@@ -265,7 +265,7 @@ def test_cancel_chunked_upload():
   chunk_cleanup_queue = FakeQueue()
 
   args = dict(base_args)
-  args['context'] = StorageContext('nyc', None, chunk_cleanup_queue, None, None)
+  args['context'] = StorageContext('nyc', chunk_cleanup_queue, None, None)
 
   swift = FakeSwiftStorage(**args)
   uuid, metadata = swift.initiate_chunked_upload()
@@ -288,7 +288,7 @@ def test_cancel_chunked_upload():
 def test_empty_chunks_queued_for_deletion():
   chunk_cleanup_queue = FakeQueue()
 
   args = dict(base_args)
-  args['context'] = StorageContext('nyc', None, chunk_cleanup_queue, None, None)
+  args['context'] = StorageContext('nyc', chunk_cleanup_queue, None, None)
 
   swift = FakeSwiftStorage(**args)
   uuid, metadata = swift.initiate_chunked_upload()
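
For call sites being updated against this change: dropping metric_queue
shifts every positional StorageContext argument left by one, as these tests
show. The new order is location, chunk_cleanup_queue, config_provider,
ip_resolver:

    from storage import StorageContext

    # Four-argument form after this commit; None placeholders as in the tests.
    context = StorageContext('nyc', None, None, None)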