llama : update llama_kv_self API
ggml-ci
This commit is contained in:
parent fd05ab87aa
commit 17b363afd3

30 changed files with 387 additions and 205 deletions
@@ -134,7 +134,6 @@ int main(int argc, char ** argv) {
     llama_model * model = llama_init.model.get();
     llama_context * ctx = llama_init.context.get();
-    llama_kv_cache * kv = llama_get_kv_cache(ctx);
 
     const llama_vocab * vocab = llama_model_get_vocab(model);
 
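The hunk above drops the explicit cache handle: with the updated API, KV-cache operations are called on the llama_context directly, so the intermediate llama_kv_cache pointer obtained via llama_get_kv_cache() is no longer needed. A minimal sketch of the new call pattern, assuming an already-initialized context (share_seq0 is a hypothetical helper name, not part of this patch):

#include "llama.h"

// copy all cached cells of sequence 0 into sequence 1;
// p0 = p1 = -1 selects the entire position range
static void share_seq0(llama_context * ctx) {
    llama_kv_self_seq_cp(ctx, 0, 1, -1, -1);
}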
@@ -202,7 +201,7 @@ int main(int argc, char ** argv) {
     // assign the system KV cache to all parallel sequences
     for (int32_t i = 1; i <= n_clients; ++i) {
-        llama_kv_cache_seq_cp(kv, 0, i, -1, -1);
+        llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
     }
 
     LOG_INF("\n");
 
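Here the system prompt, already decoded into sequence 0, is shared with every client sequence. A sketch of the same fan-out as a standalone helper (assign_system_kv and n_clients are assumptions for illustration); in the unified KV cache this copy is expected to tag the existing cells with the extra sequence id rather than duplicate them, so the system prompt is evaluated only once:

#include "llama.h"

static void assign_system_kv(llama_context * ctx, int32_t n_clients) {
    for (int32_t i = 1; i <= n_clients; ++i) {
        // sequence i now references the system prompt's cells as well
        llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
    }
}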
@@ -234,9 +233,9 @@ int main(int argc, char ** argv) {
         if (batch.n_tokens == 0) {
             // all sequences have ended - clear the entire KV cache
             for (int i = 1; i <= n_clients; ++i) {
-                llama_kv_cache_seq_rm(kv, i, -1, -1);
+                llama_kv_self_seq_rm(ctx, i, -1, -1);
                 // but keep the system prompt
-                llama_kv_cache_seq_cp(kv, 0, i, -1, -1);
+                llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
             }
 
             LOG_INF("%s: clearing the KV cache\n", __func__);
 
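The rm/cp pair first removes every cell tagged with sequence i and then re-attaches the shared system prompt from sequence 0. Under the assumption that the first n_sys positions of each client sequence are exactly the shared system prompt, the same effect could be sketched as a single ranged removal (reset_keep_system and n_sys are hypothetical names, not part of this patch):

#include "llama.h"

static void reset_keep_system(llama_context * ctx, llama_seq_id seq_id, llama_pos n_sys) {
    // remove only positions [n_sys, end) of this sequence; p1 = -1 means
    // "to the end", so the shared prompt cells keep their seq_id tag
    llama_kv_self_seq_rm(ctx, seq_id, n_sys, -1);
}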
@@ -372,8 +371,8 @@ int main(int argc, char ** argv) {
                 }
 
                 // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-                llama_kv_cache_seq_rm(kv, client.id + 1, -1, -1);
-                llama_kv_cache_seq_cp(kv, 0, client.id + 1, -1, -1);
+                llama_kv_self_seq_rm(ctx, client.id + 1, -1, -1);
+                llama_kv_self_seq_cp(ctx, 0, client.id + 1, -1, -1);
 
                 const auto t_main_end = ggml_time_us();
 
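The last hunk applies the same reset per client once its generation finishes. A hedged sketch wrapping the pattern in one helper (reset_client_seq is a hypothetical name): drop everything the client produced, then re-share the system prompt from sequence 0:

#include "llama.h"

static void reset_client_seq(llama_context * ctx, llama_seq_id seq_id) {
    llama_kv_self_seq_rm(ctx, seq_id, -1, -1);    // remove all cells of this sequence
    llama_kv_self_seq_cp(ctx, 0, seq_id, -1, -1); // re-attach the shared system prompt
}

In the example this would be called with seq_id = client.id + 1, since sequence 0 is reserved for the system prompt.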