From 6c257f4709b1848a8d7bd73daf95aec763e7a4f5 Mon Sep 17 00:00:00 2001
From: Pierrick HYMBERT
Date: Fri, 19 Apr 2024 21:01:44 +0200
Subject: [PATCH] server: include prompt tokens in the EOS limit

---
 examples/server/server.cpp | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 9caea8852..c8dddec2f 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1210,18 +1210,19 @@ struct server_context {
 
         auto n_ctx_train = llama_n_ctx_train(model);
         if (slot.params.n_predict < 1 && slot.ga_n == 1
-                && (int32_t) slot.prompt_tokens.size() + slot.n_decoded >= n_ctx_train) {
+                && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
             LOG_WARNING("n_predict is not set and self-context extend is disabled."
                         " Limiting generated tokens to n_ctx_train to avoid EOS-less generation infinite loop", {
-                { "id_slot",          slot.id },
-                { "params.n_predict", slot.params.n_predict },
-                { "slot.n_predict",   slot.n_predict },
-                { "slot.n_decoded",   slot.n_decoded },
-                { "n_slots",          params.n_parallel },
-                { "slot.n_ctx",       slot.n_ctx },
-                { "n_ctx",            n_ctx },
-                { "n_ctx_train",      n_ctx_train },
-                { "ga_n",             slot.ga_n },
+                { "id_slot",              slot.id },
+                { "params.n_predict",     slot.params.n_predict },
+                { "slot.n_prompt_tokens", slot.n_prompt_tokens },
+                { "slot.n_decoded",       slot.n_decoded },
+                { "slot.n_predict",       slot.n_predict },
+                { "n_slots",              params.n_parallel },
+                { "slot.n_ctx",           slot.n_ctx },
+                { "n_ctx",                n_ctx },
+                { "n_ctx_train",          n_ctx_train },
+                { "ga_n",                 slot.ga_n },
             });
             slot.truncated      = true;
             slot.stopped_limit  = true;
@@ -2162,7 +2163,7 @@ struct server_context {
         });
 
         // process the created batch of tokens
-        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
+        for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
 
            for (auto & slot : slots) {
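
Note: the stop condition this patch changes can be read in isolation as follows. This is a minimal sketch, not repository code; hit_eos_limit and its free-standing parameters are hypothetical stand-ins for the corresponding slot fields in server.cpp. The rule it illustrates is the one in the hunk above: when no explicit n_predict is set and self-context extend is disabled (ga_n == 1), generation stops once prompt tokens plus generated tokens reach the model's training context.

    #include <cstdint>

    // Sketch of the limit rule after this patch: prompt tokens now count
    // against n_ctx_train, so a long prompt shrinks the remaining generation
    // budget instead of letting an EOS-less model loop past the trained context.
    static bool hit_eos_limit(int32_t n_predict, int32_t ga_n,
                              int32_t n_prompt_tokens, int32_t n_decoded,
                              int32_t n_ctx_train) {
        return n_predict < 1                               // no explicit generation cap
            && ga_n == 1                                   // self-context extend disabled
            && n_prompt_tokens + n_decoded >= n_ctx_train; // prompt tokens now included
    }

Before the patch, the comparison used only slot.n_decoded against n_ctx_train via slot.prompt_tokens.size() cast to int32_t at the call site; switching to the precomputed slot.n_prompt_tokens keeps the same intent while avoiding the cast and making the prompt contribution explicit in both the condition and the warning log.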