diff --git a/src/llama.cpp b/src/llama.cpp
index c0d18fd26..4aa10ba69 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -19603,11 +19603,10 @@ static int32_t llama_chat_apply_template_internal(
         }
     } else if (tmpl == "minicpm" || tmpl_contains(u8"<用户>")) {
         // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
-        std::string user_tag = u8"<用户>";
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "user") {
-                ss << user_tag;
+                ss << u8"<用户>";
                 ss << trim(message->content);
                 ss << "<AI>";
             } else {
@@ -19616,7 +19615,6 @@ static int32_t llama_chat_apply_template_internal(
         }
     } else if (tmpl == "deepseek2" || tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
         // DeepSeek-V2
-        std::string eos_token = u8"<|end▁of▁sentence|>";
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "system") {
@@ -19624,7 +19622,7 @@ static int32_t llama_chat_apply_template_internal(
             } else if (role == "user") {
                 ss << "User: " << message->content << "\n\n";
             } else if (role == "assistant") {
-                ss << "Assistant: " << message->content << eos_token;
+                ss << "Assistant: " << message->content << u8"<|end▁of▁sentence|>";
             }
         }
         if (add_ass) {
diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp
index a2d1b4f86..b154038b2 100644
--- a/tests/test-chat-template.cpp
+++ b/tests/test-chat-template.cpp
@@ -60,7 +60,7 @@ int main(void) {
         "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}",
         // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
         u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
-        // DeepSeek-Coder-V2-Lite-Instruct-GGUF
+        // DeepSeek-V2
         "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
     };
     std::vector<std::string> expected_output = {
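For reference, a minimal standalone driver, not part of this patch, that exercises the "deepseek2" branch touched above through the public API (it calls llama_chat_apply_template the same way tests/test-chat-template.cpp does, with a null model and an explicit template name; the conversation contents are made up for illustration):

    #include <cstdio>
    #include <vector>
    #include "llama.h"

    int main() {
        // Hypothetical three-message conversation.
        llama_chat_message conversation[] = {
            {"system",    "You are a helpful assistant"},
            {"user",      "Hello"},
            {"assistant", "Hi there"},
        };
        std::vector<char> buf(1024);
        // Passing a named template and no model selects the matching
        // built-in formatter in llama_chat_apply_template_internal.
        int32_t res = llama_chat_apply_template(nullptr, "deepseek2",
                                                conversation, 3, /*add_ass=*/true,
                                                buf.data(), buf.size());
        // res is the required length; the buffer holds the result only
        // if it was large enough.
        if (res >= 0 && res <= (int32_t) buf.size()) {
            printf("%.*s\n", res, buf.data());
            // Expected shape per the branch above:
            // "You are a helpful assistant\n\nUser: Hello\n\n"
            // "Assistant: Hi there<|end▁of▁sentence|>Assistant:"
        }
        return 0;
    }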