model : avoid hardcoded chat template constant

ggml-ci
Georgi Gerganov 2025-01-09 20:02:45 +02:00
parent d8931a701c
commit 0f0229736c
3 changed files with 3 additions and 1 deletion

@@ -176,6 +176,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" },
     { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
     { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
+    { LLM_KV_TOKENIZER_CHAT_TEMPLATE, "tokenizer.chat_template" },
     { LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" },
     { LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" },
     { LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" },

@@ -174,6 +174,7 @@ enum llm_kv {
     LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
     LLM_KV_TOKENIZER_HF_JSON,
     LLM_KV_TOKENIZER_RWKV,
+    LLM_KV_TOKENIZER_CHAT_TEMPLATE,
     LLM_KV_TOKENIZER_FIM_PRE_ID,
     LLM_KV_TOKENIZER_FIM_SUF_ID,
     LLM_KV_TOKENIZER_FIM_MID_ID,

@@ -3836,7 +3836,7 @@ uint64_t llama_model_size(const struct llama_model * model) {
 }
 
 const char * llama_model_chat_template(const struct llama_model * model) {
-    const auto & it = model->gguf_kv.find("tokenizer.chat_template");
+    const auto & it = model->gguf_kv.find(LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE));
     if (it == model->gguf_kv.end()) {
         return nullptr;
     }
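
For context, a minimal sketch of how a caller might consume the accessor changed in the last hunk. Only llama_model_chat_template, its signature, and the nullptr-on-missing-key behaviour come from the diff; the loaded model pointer and the fallback template name are assumptions for illustration:

    // assumes `model` is a llama_model * loaded earlier through the llama.cpp API
    const char * tmpl = llama_model_chat_template(model);
    if (tmpl == nullptr) {
        // the GGUF file carries no tokenizer.chat_template key;
        // fall back to a template chosen by the caller (hypothetical default)
        tmpl = "chatml";
    }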