From 11c1f0c7d42825f90c9287db55476d9c7621236a Mon Sep 17 00:00:00 2001
From: Olivier Chafik
Date: Mon, 3 Feb 2025 23:52:28 +0000
Subject: [PATCH] actually we want eos_token in the template to infer tool call
 examples, explicitly skipped in new template options

---
 common/common.cpp | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index edba6fb4b..8661e164a 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1904,10 +1904,6 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
             default_template_src = CHATML_TEMPLATE_SRC;
         }
     }
-    std::string token_bos;
-    std::string token_eos;
-    // TODO: update logic that adds BOS and EOS tokens to the tokenized prompt, in favour of the template.
-#if 0
     auto vocab = llama_model_get_vocab(model);
     const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
         if (token == LLAMA_TOKEN_NULL) {
@@ -1920,9 +1916,8 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
             return common_token_to_piece(vocab, token, true);
         }
     };
-    token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
-    token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
-#endif
+    auto token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
+    auto token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
     try {
         return {
             has_explicit_template,
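
Note on the pattern the patch restores: the real BOS/EOS pieces are looked up from the vocab again (instead of the disabled #if 0 block) so that templates referencing eos_token can be rendered to infer tool-call examples. The standalone sketch below mirrors the visible get_token lambda only; FakeVocab, resolve_token, and the hard-coded template string are illustrative assumptions, not llama.cpp API.

// Minimal sketch: resolve BOS/EOS token text from a vocab, warning only when the
// chat template actually references the corresponding Jinja variable.
#include <cstdio>
#include <optional>
#include <string>

struct FakeVocab {                 // stand-in for llama_model_get_vocab(model)
    std::optional<std::string> bos;
    std::optional<std::string> eos;
};

static std::string resolve_token(const std::optional<std::string> & piece,
                                 const std::string & template_src,
                                 const char * name,
                                 const char * jinja_variable_name) {
    if (!piece) {
        // Same idea as the patched get_token lambda: a missing token only matters
        // if the template uses {{ bos_token }} / {{ eos_token }}.
        if (template_src.find(jinja_variable_name) != std::string::npos) {
            std::fprintf(stderr, "warning: vocab has no %s token, template may not render as intended\n", name);
        }
        return "";
    }
    return *piece;
}

int main() {
    FakeVocab vocab = { std::string("<s>"), std::nullopt };
    std::string template_src = "{{ bos_token }}{% for m in messages %}...{% endfor %}{{ eos_token }}";

    std::string token_bos = resolve_token(vocab.bos, template_src, "BOS", "bos_token");
    std::string token_eos = resolve_token(vocab.eos, template_src, "EOS", "eos_token"); // emits the warning

    std::printf("bos='%s' eos='%s'\n", token_bos.c_str(), token_eos.c_str());
    return 0;
}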