fixes : speculative KV cache + llama worst-case graph
parent 466b513851
commit 897caccdf4
3 changed files with 12 additions and 19 deletions
@@ -172,6 +172,7 @@ int main(int argc, char ** argv) {
                 LOG("out of drafted tokens\n");
             }

+            llama_kv_cache_rm_seq(ctx_dft, 0, n_past_dft, n_ctx);
             llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0), params.n_threads);
             ++n_past_dft;

@@ -256,6 +257,7 @@ int main(int argc, char ** argv) {
             }

             // evaluate the drafted token on the draft model
+            llama_kv_cache_rm_seq(ctx_dft, 0, n_past_cur, n_ctx);
             llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0), params.n_threads);
             ++n_past_cur;

@@ -265,6 +267,7 @@ int main(int argc, char ** argv) {
         }

         // evaluate the target model on the drafted tokens
+        llama_kv_cache_rm_seq(ctx_tgt, 0, n_past_tgt, n_ctx);
         llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0), params.n_threads);
         ++n_past_tgt;

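All three hunks apply the same fix inside the example's main() (per the hunk context): before decoding at a rolled-back position, the cells that the previous speculation round left in sequence 0 of the KV cache at positions [n_past, n_ctx) are evicted with llama_kv_cache_rm_seq, so stale drafted tokens cannot sit alongside the newly decoded ones. Below is a minimal sketch of that pattern, using the API exactly as it appears in the diff (llama_kv_cache_rm_seq / llama_batch_get_one / llama_decode with an explicit thread count); the helper name speculate_round and every variable not named in the diff are illustrative, not part of the commit.

#include "llama.h"

#include <vector>

// one speculation round for sequence 0 (sketch only, not the full example)
static void speculate_round(
        llama_context * ctx_dft, llama_context * ctx_tgt,
        llama_token id,                     // last token accepted by the target model
        int & n_past_dft, int & n_past_tgt,
        int n_draft, int n_ctx, int n_threads) {
    // 1) re-sync the draft model on the accepted token: drop whatever the
    //    previous (partially rejected) draft left at positions >= n_past_dft
    llama_kv_cache_rm_seq(ctx_dft, 0, n_past_dft, n_ctx);
    llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0), n_threads);
    ++n_past_dft;

    // 2) draft up to n_draft tokens with the small model
    std::vector<llama_token> drafted;
    int n_past_cur = n_past_dft;
    for (int i = 0; i < n_draft; ++i) {
        const llama_token tok = 0; // placeholder: sample from the draft model's logits

        drafted.push_back(tok);

        // evaluate the drafted token on the draft model,
        // again clearing the cache at and past the write position first
        llama_kv_cache_rm_seq(ctx_dft, 0, n_past_cur, n_ctx);
        llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0), n_threads);
        ++n_past_cur;
    }

    // 3) evaluate the target model on the drafted tokens in one batch;
    //    its cache may also still hold rejected tokens from the previous round
    llama_kv_cache_rm_seq(ctx_tgt, 0, n_past_tgt, n_ctx);
    llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0), n_threads);
    ++n_past_tgt;

    // acceptance/rejection of the drafted tokens follows, rolling n_past_dft
    // and n_past_tgt back to the last accepted position for the next round
}

Without the rm_seq calls, decoding at a rolled-back n_past would add new cache cells for positions that the rejected draft still occupies, which appears to be the "speculative KV cache" issue the commit title refers to.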