From a2df2787b32e0846205f7151dfad88ceab592beb Mon Sep 17 00:00:00 2001
From: Daniel Bevenius
Date: Fri, 31 Jan 2025 06:04:53 +0100
Subject: [PATCH] server : update help metrics processing/deferred (#11512)

This commit updates the help text for the metrics `requests_processing`
and `requests_deferred` to be more grammatically correct.

Currently the returned metrics look like this:

```console
# HELP llamacpp:requests_processing Number of request processing.
# TYPE llamacpp:requests_processing gauge
llamacpp:requests_processing 0
# HELP llamacpp:requests_deferred Number of request deferred.
# TYPE llamacpp:requests_deferred gauge
llamacpp:requests_deferred 0
```

With this commit, the metrics will look like this:

```console
# HELP llamacpp:requests_processing Number of requests processing.
# TYPE llamacpp:requests_processing gauge
llamacpp:requests_processing 0
# HELP llamacpp:requests_deferred Number of requests deferred.
# TYPE llamacpp:requests_deferred gauge
llamacpp:requests_deferred 0
```

This is also consistent with the description of the metrics in the server
examples [README.md](https://github.com/ggerganov/llama.cpp/tree/master/examples/server#get-metrics-prometheus-compatible-metrics-exporter).
---
 examples/server/server.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index d1ea343dd..1ebcb5085 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -3633,11 +3633,11 @@ int main(int argc, char ** argv) {
                     {"value", (uint64_t) res_metrics->kv_cache_tokens_count}
             },{
                     {"name", "requests_processing"},
-                    {"help", "Number of request processing."},
+                    {"help", "Number of requests processing."},
                     {"value", (uint64_t) res_metrics->n_processing_slots}
             },{
                     {"name", "requests_deferred"},
-                    {"help", "Number of request deferred."},
+                    {"help", "Number of requests deferred."},
                     {"value", (uint64_t) res_metrics->n_tasks_deferred}
             }}}
         };
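For context on the output shown above: the `/metrics` endpoint renders each metric as a `# HELP` line, a `# TYPE` line, and a sample line in Prometheus exposition format, which is why the help strings in the patch appear verbatim in the scraped output. The following is a minimal illustrative sketch of that rendering, not the actual server.cpp code; the `metric_entry` struct and `to_prometheus` function are hypothetical names, and only the `llamacpp` prefix, the gauge type, and the help strings are taken from the patch.

```cpp
// Sketch: turn {name, help, value} entries into Prometheus exposition text.
// The struct and function names here are illustrative, not from llama.cpp.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct metric_entry {
    std::string name;   // e.g. "requests_processing"
    std::string help;   // e.g. "Number of requests processing."
    uint64_t    value;  // current gauge value
};

static std::string to_prometheus(const std::vector<metric_entry> & metrics) {
    std::ostringstream out;
    for (const auto & m : metrics) {
        // One HELP line, one TYPE line, then the sample itself.
        out << "# HELP llamacpp:" << m.name << " " << m.help << "\n";
        out << "# TYPE llamacpp:" << m.name << " gauge\n";
        out << "llamacpp:" << m.name << " " << m.value << "\n";
    }
    return out.str();
}

int main() {
    const std::vector<metric_entry> metrics = {
        {"requests_processing", "Number of requests processing.", 0},
        {"requests_deferred",   "Number of requests deferred.",   0},
    };
    // Prints the same shape of output as the "after" console block above.
    std::cout << to_prometheus(metrics);
    return 0;
}
```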