Add separate template name for vicuna-orca

Kai Zau 2024-03-30 19:29:27 +09:00
parent f1a3b12ced
commit a4986dd52e


@@ -15713,13 +15713,13 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "GPT4 Correct Assistant:";
         }
-    } else if (tmpl == "vicuna" || (tmpl.find("ASSISTANT: ") != std::string::npos && tmpl.find("USER: ") != std::string::npos)) {
+    } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl.find("USER: ") != std::string::npos && tmpl.find("ASSISTANT: ") != std::string::npos)) {
         // eachadea/vicuna-13b-1.1 (and Orca variant)
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "system") {
                 // Orca-Vicuna variant uses a system prefix
-                if (tmpl.find("SYSTEM: ") != std::string::npos) {
+                if (tmpl == "vicuna-orca" || tmpl.find("SYSTEM: ") != std::string::npos) {
                     ss << "SYSTEM: " << message->content << "\n";
                 } else {
                     ss << message->content << "\n\n";
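For reference, a minimal sketch (not part of this commit) of how a caller could exercise the new curated name through the public llama_chat_apply_template API; the message contents, buffer size, and output comment are illustrative assumptions, not taken from the diff.

// Sketch only: format a short conversation with the "vicuna-orca" template name.
// Assumes the llama.h API as of this commit; passing a NULL model makes the
// library use the given template name instead of the model's embedded template.
#include <cstdio>
#include <vector>
#include "llama.h"

int main() {
    std::vector<llama_chat_message> chat = {
        { "system", "You are a concise assistant." },  // hypothetical content
        { "user",   "Hello!" },                        // hypothetical content
    };

    std::vector<char> buf(1024);  // arbitrary initial size
    int32_t len = llama_chat_apply_template(nullptr, "vicuna-orca",
                                            chat.data(), chat.size(),
                                            /*add_ass=*/true,
                                            buf.data(), (int32_t) buf.size());
    if (len > (int32_t) buf.size()) {  // buffer too small: resize and retry
        buf.resize(len);
        len = llama_chat_apply_template(nullptr, "vicuna-orca",
                                        chat.data(), chat.size(), true,
                                        buf.data(), (int32_t) buf.size());
    }
    if (len >= 0) {
        // With "vicuna-orca", the system message gets the "SYSTEM: " prefix
        // (see the diff above), unlike the plain "vicuna" template.
        printf("%.*s\n", len, buf.data());
    }
    return 0;
}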