server: include prompt tokens in the EOS limit

Pierrick HYMBERT 2024-04-19 21:01:44 +02:00
parent 0d3eca6920
commit 6c257f4709

@@ -1210,13 +1210,14 @@ struct server_context {
     auto n_ctx_train = llama_n_ctx_train(model);
     if (slot.params.n_predict < 1 && slot.ga_n == 1
-            && (int32_t) slot.prompt_tokens.size() + slot.n_decoded >= n_ctx_train) {
+            && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
         LOG_WARNING("n_predict is not set and self-context extend is disabled."
                     " Limiting generated tokens to n_ctx_train to avoid EOS-less generation infinite loop", {
                 { "id_slot",              slot.id },
                 { "params.n_predict",     slot.params.n_predict },
-                { "slot.n_predict",       slot.n_predict },
+                { "slot.n_prompt_tokens", slot.n_prompt_tokens },
                 { "slot.n_decoded",       slot.n_decoded },
+                { "slot.n_predict",       slot.n_predict },
                 { "n_slots",              params.n_parallel },
                 { "slot.n_ctx",           slot.n_ctx },
                 { "n_ctx",                n_ctx },
@@ -2162,7 +2163,7 @@ struct server_context {
             });
         // process the created batch of tokens
-        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
+        for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
             const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
             for (auto & slot : slots) {