fix content of format_final_response

Jhen 2023-08-21 13:42:06 +08:00
parent 1bef2dcf87
commit af1ea58b60


@@ -1031,7 +1031,7 @@ static json format_final_response(llama_server_context &llama, const std::string
 {
     json res = json{
-        {"content", ""},
+        {"content", content},
         {"stop", true},
         {"model", llama.params.model_alias},
         {"tokens_predicted", llama.num_tokens_predicted},
@@ -1335,7 +1335,7 @@ int main(int argc, char **argv)
                     if (!llama.has_next_token) {
                         // Generation is done, send extra information.
-                        const json data = format_final_response(llama, to_send, llama.generated_token_probs);
+                        const json data = format_final_response(llama, "", llama.generated_token_probs);
                         const std::string str =
                             "data: " +