diff --git a/src/llama.cpp b/src/llama.cpp
index f68024f5b..8fbac99bc 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -21752,6 +21752,21 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "[|assistant|]";
         }
+    } else if (tmpl == "nemotron" || (tmpl_contains("System") && tmpl_contains("User"))) {
+        // nvidia/Mistral-NeMo-Minitron-8B-Instruct
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                ss << "System\n" << trim(message->content) << "\n\n";
+            } else if (role == "user") {
+                ss << "User\n" << trim(message->content) << "\n\n";
+            } else if (role == "assistant") {
+                ss << "Assistant\n" << trim(message->content) << "\n\n";
+            }
+        }
+        if (add_ass) {
+            ss << "Assistant\n";
+        }
     } else {
         // template not supported
         return -1;
diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp
index 6f046249f..425c9355f 100644
--- a/tests/test-chat-template.cpp
+++ b/tests/test-chat-template.cpp
@@ -65,6 +65,8 @@ int main(void) {
         u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
         // DeepSeek-V2
         "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
+        // nvidia/Mistral-NeMo-Minitron-8B-Instruct
+        "{{'System'}}{% for message in messages %}{% if message['role'] == 'system' %}{{'\n' + message['content'].strip()}}{% endif %}{% endfor %}{{'\n'}}{% for message in messages %}{% if message['role'] == 'user' %}{{ '\nUser\n' + message['content'].strip() + '\nAssistant\n' }}{% elif message['role'] == 'assistant' %}{{ message['content'].strip() }}{% endif %}{% endfor %}",
     };
     std::vector<std::string> expected_output = {
         // teknium/OpenHermes-2.5-Mistral-7B
@@ -109,6 +111,8 @@ int main(void) {
         u8"You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>",
         // DeepSeek-V2
         u8"You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:",
+        // nvidia/Mistral-NeMo-Minitron-8B-Instruct
+        "System\nYou are a helpful assistant\n\nUser\nHello\n\nAssistant\nHi there\n\nUser\nWho are you\n\nAssistant\nI am an assistant\n\nUser\nAnother question\n\nAssistant\n",
     };
     std::vector<char> formatted_chat(1024);
     int32_t res;
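
For anyone who wants to exercise the new branch outside the test harness, below is a rough standalone sketch (not part of the patch) that drives it through the public llama_chat_apply_template API from llama.h, the same way test-chat-template.cpp does. Passing the template name "nemotron" explicitly means no model file is needed; the model pointer is only consulted when tmpl is NULL.

// Minimal caller sketch (assumption: built and linked against llama.cpp so
// that llama.h and the library are available). Prints the formatted prompt,
// which should match the expected nemotron test string above.
#include <cstdio>
#include <vector>

#include "llama.h"

int main() {
    std::vector<llama_chat_message> chat = {
        {"system",    "You are a helpful assistant"},
        {"user",      "Hello"},
        {"assistant", "Hi there"},
        {"user",      "Who are you"},
        {"assistant", "I am an assistant"},
        {"user",      "Another question"},
    };

    std::vector<char> buf(1024);
    // tmpl = "nemotron" selects the branch added above. The fallback
    // heuristic (tmpl_contains("System") && tmpl_contains("User")) would
    // also match if the raw Jinja template string were passed instead,
    // which is what the new test-chat-template.cpp entry exercises.
    const int32_t res = llama_chat_apply_template(
        /* model   */ nullptr,
        /* tmpl    */ "nemotron",
        /* chat    */ chat.data(),
        /* n_msg   */ chat.size(),
        /* add_ass */ true,
        /* buf     */ buf.data(),
        /* length  */ (int32_t) buf.size());
    if (res < 0) {
        fprintf(stderr, "template not supported\n");
        return 1;
    }
    printf("%.*s\n", res, buf.data());
    return 0;
}

Note that the return value is the total length of the formatted prompt, which can exceed the buffer size (in which case the output is truncated), so a real caller should resize the buffer and retry when res is larger than the length passed in.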