llama : update llama_kv_self API

ggml-ci
Georgi Gerganov 2025-01-14 16:47:34 +02:00
parent fd05ab87aa
commit 17b363afd3
30 changed files with 387 additions and 205 deletions

examples/run/run.cpp

@@ -756,8 +756,7 @@ static int apply_chat_template(const common_chat_template & tmpl, LlamaData & llama_data
 // Function to tokenize the prompt
 static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                            std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
-    const llama_kv_cache * kv = llama_get_kv_cache(llama_data.context.get());
-    const bool is_first = llama_kv_cache_used_cells(kv) == 0;
+    const bool is_first = llama_kv_self_used_cells(llama_data.context.get()) == 0;
     const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
     prompt_tokens.resize(n_prompt_tokens);
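
For context, a minimal sketch of the new call pattern outside the diff (not part of the commit; the function name tokenize and the ctx/vocab parameters are illustrative stand-ins for an initialized llama_context and llama_vocab):

#include "llama.h"

#include <string>
#include <vector>

// Tokenize a prompt, adding BOS only when the KV cache is still empty.
static std::vector<llama_token> tokenize(llama_context * ctx, const llama_vocab * vocab,
                                         const std::string & prompt) {
    // With the updated API, the used-cell count is read directly from the
    // context, without fetching an intermediate llama_kv_cache handle.
    const bool is_first = llama_kv_self_used_cells(ctx) == 0;

    // First call with a NULL buffer: the negated return value is the required token count.
    const int n_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);

    std::vector<llama_token> tokens(n_tokens);
    llama_tokenize(vocab, prompt.c_str(), prompt.size(), tokens.data(), tokens.size(), is_first, true);
    return tokens;
}
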
@@ -772,10 +771,8 @@ static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt
 // Check if we have enough space in the context to evaluate this batch
 static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
-    llama_kv_cache * kv = llama_get_kv_cache(ctx.get());
     const int n_ctx = llama_n_ctx(ctx.get());
-    const int n_ctx_used = llama_kv_cache_used_cells(kv);
+    const int n_ctx_used = llama_kv_self_used_cells(ctx.get());
     if (n_ctx_used + batch.n_tokens > n_ctx) {
         printf("\033[0m\n");
         printe("context size exceeded\n");