llama : fix handling of "future" tokens when loading sessions
parent 0f332a9104
commit 337120cc0d
6 changed files with 41 additions and 40 deletions
@@ -332,7 +332,7 @@ int main(int argc, char ** argv) {
             }

             // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-            llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, n_ctx);
+            llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, -1);

             const auto t_main_end = ggml_time_us();
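The hunk changes the final argument of llama_kv_cache_seq_rm from n_ctx to -1. A negative end position is treated as "up to the end of the sequence", so the removal also covers cells at positions at or beyond n_ctx, which can exist after restoring a saved session (the "future" tokens in the commit title); with n_ctx as the bound, those cells would be left behind. A minimal sketch of the intended call pattern, assuming the names from the surrounding parallel example (reset_client_sequence is a hypothetical helper, not part of the patch):

#include "llama.h"

// Minimal sketch, not from the patch: clear one client's generated tokens from the
// KV cache while keeping the shared system prompt. `n_tokens_system` is the length
// of the system prompt; `seq_id` is the client's sequence id (client.id in the example).
static void reset_client_sequence(llama_context * ctx, llama_seq_id seq_id, int32_t n_tokens_system) {
    // p1 = -1 means "up to the end of the sequence", so cells at positions >= n_ctx
    // (possible after a session has been loaded) are removed as well.
    llama_kv_cache_seq_rm(ctx, seq_id, n_tokens_system, -1);
}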