Add separate template name for vicuna-orca
parent f1a3b12ced
commit a4986dd52e

1 changed file with 2 additions and 2 deletions
@@ -15713,13 +15713,13 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "GPT4 Correct Assistant:";
         }
-    } else if (tmpl == "vicuna" || (tmpl.find("ASSISTANT: ") != std::string::npos && tmpl.find("USER: ") != std::string::npos)) {
+    } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl.find("USER: ") != std::string::npos && tmpl.find("ASSISTANT: ") != std::string::npos)) {
         // eachadea/vicuna-13b-1.1 (and Orca variant)
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "system") {
                 // Orca-Vicuna variant uses a system prefix
-                if (tmpl.find("SYSTEM: ") != std::string::npos) {
+                if (tmpl == "vicuna-orca" || tmpl.find("SYSTEM: ") != std::string::npos) {
                     ss << "SYSTEM: " << message->content << "\n";
                 } else {
                     ss << message->content << "\n\n";
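
For context, a minimal caller-side sketch of how the new "vicuna-orca" name could be exercised through the public llama_chat_apply_template API. The exact signature, the nullptr model argument, and the buffer-resizing pattern are assumptions based on the llama.h of this period, not part of this commit; the USER:/ASSISTANT: line formats in the expected output come from the surrounding vicuna branch rather than from this hunk.

#include <cstdio>
#include <vector>

#include "llama.h"

int main() {
    // Hypothetical conversation; the roles match those handled by the template code.
    std::vector<llama_chat_message> chat = {
        { "system",    "You are a terse assistant." },
        { "user",      "Hello!"                     },
        { "assistant", "Hi."                        },
        { "user",      "What does this commit do?"  },
    };

    // Use the explicit template name added by this commit instead of a model's
    // built-in template string (a null model is assumed to be accepted when a
    // template override is given).
    std::vector<char> buf(4096);
    int32_t n = llama_chat_apply_template(nullptr, "vicuna-orca",
                                          chat.data(), chat.size(),
                                          /*add_ass=*/true,
                                          buf.data(), (int32_t) buf.size());
    if (n < 0) {
        fprintf(stderr, "template not supported\n");
        return 1;
    }
    if (n > (int32_t) buf.size()) {
        // The call reports the required size; grow the buffer and retry.
        buf.resize(n);
        n = llama_chat_apply_template(nullptr, "vicuna-orca",
                                      chat.data(), chat.size(), true,
                                      buf.data(), (int32_t) buf.size());
    }
    printf("%.*s\n", n, buf.data());

    // Expected shape of the rendered prompt: the system line now gets the
    // "SYSTEM: " prefix because the template name is matched explicitly:
    //
    //   SYSTEM: You are a terse assistant.
    //   USER: Hello!
    //   ASSISTANT: Hi.</s>
    //   USER: What does this commit do?
    //   ASSISTANT:
    return 0;
}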