Fixed segfault when prompt was too long

This commit is contained in:
Christopher Oezbek 2023-10-18 09:12:20 +02:00
parent cb33f43a2a
commit ef9f76e389

View file

@@ -439,28 +439,32 @@ struct llama_server_context
// if input prompt is too big, truncate like normal
if (num_prompt_tokens >= (size_t)n_ctx)
{
const int n_left = (n_ctx - params.n_keep) / 2;
const int n_left = n_ctx - params.n_keep;
const int n_block_size = n_left / 2;
// Keep n_keep tokens at start of prompt (at most n_ctx - 4)
std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
std::copy(prompt_tokens.end() - n_ctx, prompt_tokens.end(), last_n_tokens.begin());
const int erased_blocks = (num_prompt_tokens - params.n_keep - n_block_size) / n_block_size;
new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_block_size, prompt_tokens.end());
LOG_VERBOSE("input truncated", {
{"n_ctx", n_ctx},
{"n_keep", params.n_keep},
{"n_left", n_left},
{"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
{"num_prompt_tokens", new_tokens.size()}
});
truncated = true;
prompt_tokens = new_tokens;
num_prompt_tokens = prompt_tokens.size();
GGML_ASSERT(num_prompt_tokens < (size_t)n_ctx);
}
else
{
// Initialize last_n_tokens
const size_t ps = num_prompt_tokens;
std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
}
// compare the evaluated prompt with the new prompt
n_past = common_part(embd, prompt_tokens);