From 59dc4bbb997770e9f149d7c9c5362bf493447a60 Mon Sep 17 00:00:00 2001
From: Pierrick HYMBERT
Date: Fri, 5 Apr 2024 00:53:08 +0200
Subject: [PATCH] ci: bench: fix case when there is no token generated

---
 examples/server/bench/script.js | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/examples/server/bench/script.js b/examples/server/bench/script.js
index 5ef27db69..9a6f5437f 100644
--- a/examples/server/bench/script.js
+++ b/examples/server/bench/script.js
@@ -96,13 +96,13 @@ export default function () {
     const params = {method: 'POST', body: JSON.stringify(payload)};
 
     const startTime = new Date()
-    let promptEvalTime = null
+    let promptEvalEndTime = null
     let prompt_tokens = 0
     let completions_tokens = 0
     const res = sse.open(`${server_url}/chat/completions`, params, function (client) {
         client.on('event', function (event) {
-            if (promptEvalTime == null) {
-                promptEvalTime = new Date()
+            if (promptEvalEndTime == null) {
+                promptEvalEndTime = new Date()
             }
 
             let chunk = JSON.parse(event.data)
@@ -131,8 +131,15 @@ export default function () {
 
     const endTime = new Date()
 
-    llamacpp_tokens_second.add(completions_tokens / (endTime - promptEvalTime) * 1.e3)
-    llamacpp_prompt_processing_second.add(prompt_tokens / (promptEvalTime - startTime) * 1.e3)
+    const promptEvalTime = promptEvalEndTime - startTime
+    if (promptEvalTime > 0) {
+        llamacpp_prompt_processing_second.add(prompt_tokens / (promptEvalEndTime - startTime) * 1.e3)
+    }
+
+    const completion_time = endTime - promptEvalEndTime
+    if (completions_tokens > 0 && completion_time > 0) {
+        llamacpp_tokens_second.add(completions_tokens / completion_time * 1.e3)
+    }
 
     sleep(0.3)
 }
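
For context, a minimal sketch of the guard pattern the second hunk introduces,
stated outside the k6 harness. `addRate` is a hypothetical helper, not part of
this patch; the Trend-style metric and the 1.e3 ms-to-seconds factor are taken
from the script above.

    // Record a tokens-per-second rate only when it is well defined:
    // elapsedMs <= 0 would divide by zero (or produce a negative rate),
    // and tokens == 0 means there is nothing meaningful to report,
    // e.g. the case where no token was generated at all.
    function addRate(trend, tokens, elapsedMs) {
        if (tokens > 0 && elapsedMs > 0) {
            trend.add(tokens / elapsedMs * 1.e3) // ms elapsed -> tokens/second
        }
    }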