llama : fix handling of "future" tokens when loading sessions

Author: Georgi Gerganov
Date:   2023-10-03 18:29:22 +03:00
Commit: 337120cc0d (parent 0f332a9104)
6 changed files with 41 additions and 40 deletions

@@ -332,7 +332,7 @@ int main(int argc, char ** argv) {
         }
 
         // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-        llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, n_ctx);
+        llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, -1);
 
         const auto t_main_end = ggml_time_us();
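For context, the fourth argument of llama_kv_cache_seq_rm is the (exclusive) end of the position range to remove, and a negative value is treated as "through the end of the sequence". Below is a minimal sketch of the pattern, assuming the llama.cpp C API of this period; the helper name and variables are illustrative and not part of the commit:

    #include "llama.h"

    // Illustrative helper: drop every cached token of `seq_id` at positions >= p0,
    // keeping the shared prefix (e.g. the system prompt) intact.
    static void kv_trim_tail(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0) {
        // p1 = -1 removes cells up to the end of the sequence, no matter how many
        // tokens are cached. Passing n_ctx instead assumes no cell ever holds a
        // position >= n_ctx, which breaks when a loaded session contains "future"
        // tokens, i.e. positions beyond the current one.
        llama_kv_cache_seq_rm(ctx, seq_id, p0, -1);
    }

Using -1 rather than n_ctx makes the removal independent of the context size, so restoring a session cannot leave stale cells behind at positions past the current sampling point.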