Use prometheus as a metric backend

This entails writing a metric aggregation program, since under gunicorn each
Python worker process has its own memory and therefore its own metrics. The
Python client is a simple wrapper that makes web requests to the aggregator.
This commit is contained in:
Matt Jibson 2015-11-20 15:32:17 -05:00 committed by Joseph Schorr
parent 781f2eec72
commit 3d9acf2fff
10 changed files with 502 additions and 0 deletions

View file

@@ -15,10 +15,13 @@ from collections import namedtuple
from util.registry import filelike
from storage.basestorage import BaseStorageV2, InvalidChunkException
from util.prometheus import Counter
# Module-level logger for this storage module.
logger = logging.getLogger(__name__)

# Prometheus counters tracking multipart-upload lifecycle events.
# NOTE(review): Counter is the util.prometheus wrapper (web-request client),
# not the official prometheus_client — confirm its API before extending.
# Fixed typo in the help string: 'startse' -> 'starts.' (matches the
# punctuation of the companion counter below).
multipart_upload_start = Counter('multipart_upload_start', 'Multipart upload starts.')
multipart_upload_end = Counter('multipart_upload_end', 'Multipart upload ends.', labelnames=['type'])

# Metadata describing one uploaded part: storage path, byte offset, length.
_PartUploadMetadata = namedtuple('_PartUploadMetadata', ['path', 'offset', 'length'])
# Key under which per-upload chunk metadata is stored.
_CHUNKS_KEY = 'chunks'
@@ -166,6 +169,7 @@ class _CloudStorage(BaseStorageV2):
if self._metric_queue is not None:
self._metric_queue.put('MultipartUploadStart', 1)
multipart_upload_start.Inc()
return self._cloud_bucket.initiate_multipart_upload(path, metadata=metadata,
**self._upload_params)
@@ -206,6 +210,8 @@ class _CloudStorage(BaseStorageV2):
if self._metric_queue is not None:
self._metric_queue.put('MultipartUploadFailure', 1)
multipart_upload_end.Inc(labelvalues=['failure'])
if cancel_on_error:
mp.cancel_upload()
return 0, error
@@ -216,7 +222,9 @@ class _CloudStorage(BaseStorageV2):
if self._metric_queue is not None:
self._metric_queue.put('MultipartUploadSuccess', 1)
multipart_upload_end.Inc(labelvalues=['success'])
mp.complete_upload()
return total_bytes_written, error
def exists(self, path):