From d5fa351a2494836742b935442aefc12fdc13b4ad Mon Sep 17 00:00:00 2001
From: ochafik
Date: Sat, 18 Jan 2025 01:04:12 +0000
Subject: [PATCH] Revert LLAMA_CHATML_TEMPLATE refactor

---
 common/common.cpp            | 18 ++++++++----------
 common/common.h              |  2 --
 tests/test-chat-template.cpp |  3 ---
 3 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 8dd8912e5..b7770b02c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -74,15 +74,6 @@
 #endif
 #define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
 
-const std::string LLAMA_CHATML_TEMPLATE(R"(
-    {%- for message in messages -%}
-        {{- "<|im_start|>" + message.role + "\n" + message.content + "<|im_end|>\n" -}}
-    {%- endfor -%}
-    {%- if add_generation_prompt -%}
-        {{- "<|im_start|>assistant\n" -}}
-    {%- endif -%}
-)");
-
 //
 // CURL utils
 //
@@ -1846,7 +1837,14 @@ llama_chat_templates llama_chat_templates_from_model(const struct llama_model *
         if (!tool_use_template_src.empty()) {
             default_template_src = tool_use_template_src;
         } else {
-            default_template_src = LLAMA_CHATML_TEMPLATE;
+            default_template_src = R"(
+                {%- for message in messages -%}
+                    {{- "<|im_start|>" + message.role + "\n" + message.content + "<|im_end|>\n" -}}
+                {%- endfor -%}
+                {%- if add_generation_prompt -%}
+                    {{- "<|im_start|>assistant\n" -}}
+                {%- endif -%}
+            )";
         }
     }
     return {
diff --git a/common/common.h b/common/common.h
index 04e1272d6..2a7c3ee3c 100644
--- a/common/common.h
+++ b/common/common.h
@@ -26,8 +26,6 @@
 
 #define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"
 
-extern const std::string LLAMA_CHATML_TEMPLATE;
-
 struct common_adapter_lora_info {
     std::string path;
     float scale;
diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp
index 0c3f20f3d..3bd11a1f0 100644
--- a/tests/test-chat-template.cpp
+++ b/tests/test-chat-template.cpp
@@ -8,7 +8,6 @@
 #include "llama.h"
 #include "common.h"
 #include "chat-template.hpp"
-#include "llama-chat.h"
 
 int main(void) {
     std::vector conversation {
@@ -365,7 +364,5 @@ int main(void) {
     assert(fmt_single("llama3") == "<|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n");
     assert(fmt_single("gigachat") == "user<|role_sep|>How are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>");
 
-    assert(llm_chat_detect_template(LLAMA_CHATML_TEMPLATE) == LLM_CHAT_TEMPLATE_CHATML);
-
     return 0;
 }
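For reference, the Jinja template being inlined by this revert renders standard ChatML: one <|im_start|>{role}\n{content}<|im_end|>\n block per message, plus an assistant header when a generation prompt is requested. The standalone C++ sketch below mirrors that rendering so the expected output can be sanity-checked; it is illustrative only, and `chat_msg` and `render_chatml` are hypothetical names, not llama.cpp API.

// Illustrative sketch only (not part of the patch). Re-implements in
// plain C++ what the ChatML Jinja template above renders via minja.
// `chat_msg` and `render_chatml` are hypothetical names.
#include <cstdio>
#include <string>
#include <vector>

struct chat_msg {
    std::string role;
    std::string content;
};

// Mirrors the template: emit "<|im_start|>{role}\n{content}<|im_end|>\n"
// per message, then open an assistant turn if a generation prompt is wanted.
static std::string render_chatml(const std::vector<chat_msg> & messages, bool add_generation_prompt) {
    std::string out;
    for (const auto & msg : messages) {
        out += "<|im_start|>" + msg.role + "\n" + msg.content + "<|im_end|>\n";
    }
    if (add_generation_prompt) {
        out += "<|im_start|>assistant\n";
    }
    return out;
}

int main() {
    std::vector<chat_msg> conversation = {
        { "system", "You are a helpful assistant" },
        { "user",   "How are you" },
    };
    // Expected output:
    //   <|im_start|>system
    //   You are a helpful assistant<|im_end|>
    //   <|im_start|>user
    //   How are you<|im_end|>
    //   <|im_start|>assistant
    printf("%s", render_chatml(conversation, /*add_generation_prompt=*/true).c_str());
    return 0;
}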