server: fix crash in multimodal models with add_bos_token = false

This commit is contained in:
Aidan Thornton 2024-01-12 23:38:56 +00:00
parent de473f5f8e
commit 3b36f2068e

View file

@@ -1835,7 +1835,7 @@ struct llama_server_context
slot.cache_tokens = prompt_tokens;
-        if (slot.n_past == slot.num_prompt_tokens)
+        if (slot.n_past == slot.num_prompt_tokens && slot.n_past > 0)
{
// we have to evaluate at least 1 token to generate logits.
LOG_TEE("slot %d : we have to evaluate at least 1 token to generate logits\n", slot.id);