llama : add support for Deepseek-R1-Qwen distill model (#11310)

* llama : add support for Deepseek-R1-Qwen distill model

* coding style
Xuan Son Nguyen 2025-01-20 14:35:07 +01:00 committed by GitHub
parent ef6dada60c
commit ec7f3ac9ab
6 changed files with 208 additions and 45 deletions


@@ -152,7 +152,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
return LLM_CHAT_TEMPLATE_MINICPM;
} else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
} else if (tmpl_contains(LU8("'<Assistant>' + message['content'] + '<end▁of▁sentence>'"))) {
} else if (tmpl_contains(LU8("<Assistant>")) && tmpl_contains(LU8("<User>")) && tmpl_contains(LU8("<end▁of▁sentence>"))) {
return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
} else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
// ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
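For reference, below is a minimal standalone sketch of the relaxed detection shown in the hunk above. The local tmpl_contains lambda and the template fragment are illustrative assumptions, not the llama.cpp sources: the real check lives in llm_chat_detect_template() and wraps its UTF-8 literals in the LU8() macro, and the fragment only loosely mimics how the Deepseek-R1-Qwen distill template emits the assistant turn (it concatenates a local content variable rather than message['content'], which is why the old exact-substring check misses it).

// Standalone sketch (assumed, not llama.cpp code): shows why the three-marker check
// matches the R1 distill template while the old exact-substring check does not.
#include <cstdio>
#include <string>

int main() {
    // Rough stand-in for the Deepseek-R1-Qwen distill chat template: the assistant
    // turn concatenates a local `content` variable, not message['content'].
    const std::string tmpl =
        "{% if message['role'] == 'user' %}{{ '<｜User｜>' + message['content'] }}"
        "{% elif message['role'] == 'assistant' %}"
        "{% set content = message['content'].split('</think>')[-1] %}"
        "{{ '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}{% endif %}";

    auto tmpl_contains = [&](const char * needle) {
        return tmpl.find(needle) != std::string::npos;
    };

    // Old check: the exact concatenation from the DeepSeek-V3 template; absent here.
    const bool old_match = tmpl_contains("'<｜Assistant｜>' + message['content'] + '<｜end▁of▁sentence｜>'");

    // New check: each special marker on its own; matches V3 and the R1 distill alike.
    const bool new_match = tmpl_contains("<｜Assistant｜>")
                        && tmpl_contains("<｜User｜>")
                        && tmpl_contains("<｜end▁of▁sentence｜>");

    std::printf("old check: %d, new check: %d\n", old_match, new_match); // prints 0, 1
    return 0;
}

Either way the branch returns LLM_CHAT_TEMPLATE_DEEPSEEK_3, so existing DeepSeek-V3 templates keep matching while the R1-Qwen distill templates now map onto the same formatter.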