llama : add Orion chat template (#6066)

Xuan Son Nguyen, 2024-03-15 09:44:57 +01:00, committed by GitHub
parent b0bc9f4a9d
commit aab606a11f
2 changed files with 24 additions and 0 deletions

llama.cpp

@@ -14242,6 +14242,26 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "<start_of_turn>model\n";
         }
+    } else if (tmpl == "orion" || tmpl.find("'\\n\\nAssistant: ' + eos_token") != std::string::npos) {
+        // OrionStarAI/Orion-14B-Chat
+        std::string system_prompt = "";
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                // there is no system message support, we will merge it with user prompt
+                system_prompt = message->content;
+                continue;
+            } else if (role == "user") {
+                ss << "Human: ";
+                if (!system_prompt.empty()) {
+                    ss << system_prompt << "\n\n";
+                    system_prompt = "";
+                }
+                ss << message->content << "\n\nAssistant: </s>";
+            } else {
+                ss << message->content << "</s>";
+            }
+        }
     } else {
         // template not supported
         return -1;
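
For reference, the snippet below is a minimal standalone sketch (not part of the commit) of what the new Orion branch produces for a short conversation. It uses a hypothetical msg struct in place of llama_chat_message and inlines the formatting loop added above, so it compiles on its own:

    // Standalone sketch of the Orion-14B-Chat formatting added in this commit.
    // "msg" is a stand-in for llama_chat_message (role/content pairs).
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct msg {
        std::string role;
        std::string content;
    };

    int main() {
        std::vector<msg> chat = {
            { "system",    "You are a helpful assistant." },
            { "user",      "Hello" },
            { "assistant", "Hi there" },
            { "user",      "Who are you" },
        };

        std::ostringstream ss;
        std::string system_prompt;

        for (const auto & message : chat) {
            if (message.role == "system") {
                // the Orion template has no system role: prepend it to the next user turn
                system_prompt = message.content;
            } else if (message.role == "user") {
                ss << "Human: ";
                if (!system_prompt.empty()) {
                    ss << system_prompt << "\n\n";
                    system_prompt.clear();
                }
                ss << message.content << "\n\nAssistant: </s>";
            } else {
                ss << message.content << "</s>";
            }
        }

        // Prints:
        // Human: You are a helpful assistant.
        //
        // Hello
        //
        // Assistant: </s>Hi there</s>Human: Who are you
        //
        // Assistant: </s>
        std::cout << ss.str() << std::endl;
        return 0;
    }

In practice callers would go through the public llama_chat_apply_template() API, passing "orion" (or the model's built-in chat template string) as tmpl; the sketch only mirrors the string layout of the branch added here.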