diff --git a/llama.cpp b/llama.cpp
index 7a694e2ba..5d653efd3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -15713,13 +15713,13 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "GPT4 Correct Assistant:";
         }
-    } else if (tmpl == "vicuna" || (tmpl.find("ASSISTANT: ") != std::string::npos && tmpl.find("USER: ") != std::string::npos)) {
+    } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl.find("USER: ") != std::string::npos && tmpl.find("ASSISTANT: ") != std::string::npos)) {
         // eachadea/vicuna-13b-1.1 (and Orca variant)
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "system") {
                 // Orca-Vicuna variant uses a system prefix
-                if (tmpl.find("SYSTEM: ") != std::string::npos) {
+                if (tmpl == "vicuna-orca" || tmpl.find("SYSTEM: ") != std::string::npos) {
                     ss << "SYSTEM: " << message->content << "\n";
                 } else {
                     ss << message->content << "\n\n";
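
Not part of the patch, but a minimal sketch of how the new `vicuna-orca` path could be exercised through the public `llama_chat_apply_template` API. Passing a NULL model together with an explicit template string mirrors what the existing chat-template test does; the message contents and the expected output shown in the comments are assumptions based on the code above, not output captured from a run.

```cpp
// Sketch: exercising the "vicuna-orca" branch via the public API.
// Assumes a NULL model is accepted when an explicit template name is given.
#include <cstdio>
#include <vector>

#include "llama.h"

int main() {
    // Hypothetical conversation used only for illustration.
    const llama_chat_message chat[] = {
        { "system",    "You are a helpful assistant." },
        { "user",      "Hello!" },
        { "assistant", "Hi, how can I help?" },
        { "user",      "What is 2+2?" },
    };
    const size_t n_msg = sizeof(chat) / sizeof(chat[0]);

    std::vector<char> buf(1024);
    // "vicuna-orca" now selects the Orca variant explicitly, so the system
    // message should be rendered with the "SYSTEM: " prefix.
    const int32_t res = llama_chat_apply_template(
        nullptr, "vicuna-orca", chat, n_msg,
        /*add_ass=*/true, buf.data(), buf.size());

    if (res < 0 || (size_t) res > buf.size()) {
        fprintf(stderr, "failed to apply template\n");
        return 1;
    }
    printf("%.*s\n", (int) res, buf.data());

    // Expected shape (roughly), per the branch patched above:
    // SYSTEM: You are a helpful assistant.
    // USER: Hello!
    // ASSISTANT: Hi, how can I help?</s>
    // USER: What is 2+2?
    // ASSISTANT:
    return 0;
}
```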