From af1ea58b60f91a4612d9f5d65fe9ddb06099872e Mon Sep 17 00:00:00 2001
From: Jhen
Date: Mon, 21 Aug 2023 13:42:06 +0800
Subject: [PATCH] fix content of format_final_response

---
 examples/server/server.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c8ee52206..5bf5a0abb 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1031,7 +1031,7 @@ static json format_final_response(llama_server_context &llama, const std::string
 {
 
     json res = json{
-        {"content", ""},
+        {"content", content},
         {"stop", true},
         {"model", llama.params.model_alias},
         {"tokens_predicted", llama.num_tokens_predicted},
@@ -1332,10 +1332,10 @@ int main(int argc, char **argv)
                         return false;
                     }
                 }
-
+
                 if (!llama.has_next_token) {
                     // Generation is done, send extra information.
-                    const json data = format_final_response(llama, to_send, llama.generated_token_probs);
+                    const json data = format_final_response(llama, "", llama.generated_token_probs);
 
                     const std::string str =
                         "data: " +
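
Note (not part of the patch): the sketch below illustrates the call contract this patch restores. format_final_response should echo the content argument it receives; the streaming call site passes an empty string because the text was already delivered in partial "data:" events, while a non-streaming caller presumably passes the accumulated completion text. This is a minimal stand-alone sketch, not the real server code: it assumes nlohmann::json and uses a simplified stand-in struct in place of llama_server_context, keeping only the fields visible in the diff above.

// Sketch only: assumes nlohmann::json; server_context is a simplified
// stand-in for llama_server_context with just the fields visible in the diff.
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

struct server_context {
    std::string model_alias;        // stands in for llama.params.model_alias
    int num_tokens_predicted = 0;   // stands in for llama.num_tokens_predicted
    std::string generated_text;     // accumulated completion text
};

// Mirrors the fixed behaviour: "content" reflects the argument instead of
// always being the empty string.
static json format_final_response(const server_context &ctx, const std::string &content) {
    return json{
        {"content", content},
        {"stop", true},
        {"model", ctx.model_alias},
        {"tokens_predicted", ctx.num_tokens_predicted},
    };
}

int main() {
    server_context ctx;
    ctx.model_alias = "example-model";
    ctx.num_tokens_predicted = 4;
    ctx.generated_text = "Hello, world!";

    // Non-streaming: the final response carries the full generated text.
    std::cout << format_final_response(ctx, ctx.generated_text).dump() << "\n";

    // Streaming: partial "data:" events already carried the text, so the final
    // event passes "" -- matching the patched call in main().
    std::cout << "data: " << format_final_response(ctx, "").dump() << "\n\n";
    return 0;
}

Before the patch, format_final_response ignored its content argument and always emitted an empty string, so a caller that passed the generated text would presumably get an empty "content" field back; the streaming call site is updated to pass "" explicitly, since its final event never needs to repeat text that was already streamed.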