diff --git a/common/common.cpp b/common/common.cpp
index 6c81d18f9..5f5302074 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1871,7 +1871,6 @@ std::string common_chat_format_example(const common_chat_template & tmpl, bool use_jinja) {
 
 common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
 {
-    auto vocab = llama_model_get_vocab(model);
     std::string default_template_src = chat_template_override;
     std::string template_tool_use_src = chat_template_override;
     bool has_explicit_template = !chat_template_override.empty();
@@ -1901,6 +1900,11 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
             )";
         }
     }
+    std::string token_bos;
+    std::string token_eos;
+    // TODO: update logic that adds BOS and EOS tokens to the tokenized prompt, in favour of the template.
+#if 0
+    auto vocab = llama_model_get_vocab(model);
     const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
         if (token == LLAMA_TOKEN_NULL) {
             if (default_template_src.find(jinja_variable_name) != std::string::npos
@@ -1912,8 +1916,9 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
             return common_token_to_piece(vocab, token, true);
         }
     };
-    auto token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
-    auto token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
+    token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
+    token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
+#endif
     return {
         has_explicit_template,
         std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),
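
Note: the new `#if 0` block compiles out the path that resolved the BOS/EOS text pieces from the model vocab, so `minja::chat_template` is now constructed with empty `token_bos`/`token_eos` strings until the TODO above is addressed. Below is a minimal standalone C++ sketch of the logic being switched off; `toy_token`, `TOKEN_NULL`, `k_vocab`, and `resolve_special_token` are hypothetical stand-ins for illustration, not llama.cpp API:

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-ins for illustration only -- not llama.cpp API.
    using toy_token = int;
    constexpr toy_token TOKEN_NULL = -1;

    // Toy vocab mapping special token ids to their text pieces.
    static const std::unordered_map<toy_token, std::string> k_vocab = {
        { 1, "<s>"  }, // BOS
        { 2, "</s>" }, // EOS
    };

    // Mirrors the shape of the disabled get_token lambda: a null token yields
    // an empty string, warning only when the template source actually
    // references the corresponding jinja variable.
    static std::string resolve_special_token(
            toy_token token,
            const char * name,
            const char * jinja_variable_name,
            const std::string & template_src) {
        if (token == TOKEN_NULL) {
            if (template_src.find(jinja_variable_name) != std::string::npos) {
                std::fprintf(stderr,
                    "warning: vocab does not have a %s token, jinja template won't work as intended\n",
                    name);
            }
            return "";
        }
        const auto it = k_vocab.find(token);
        return it == k_vocab.end() ? std::string() : it->second;
    }

    int main() {
        const std::string template_src = "{{ bos_token }}{{ messages[0].content }}{{ eos_token }}";

        // BOS present in the vocab: the template would receive "<s>".
        std::printf("bos = '%s'\n",
            resolve_special_token(1, "BOS", "bos_token", template_src).c_str());

        // BOS missing: empty string, plus a warning because the template uses bos_token.
        std::printf("bos = '%s'\n",
            resolve_special_token(TOKEN_NULL, "BOS", "bos_token", template_src).c_str());
        return 0;
    }

With the block compiled out, both strings stay empty, so a `{{ bos_token }}` in a template renders as nothing; per the TODO, adding BOS/EOS to the tokenized prompt is left to the existing tokenizer-side logic for now.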