Add granite template to llama.cpp

This commit is contained in:
arch-btw 2024-10-23 04:12:53 -07:00 committed by GitHub
parent 4c9388fb96
commit e74773ec43
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -21713,7 +21713,17 @@ static int32_t llama_chat_apply_template_internal(
ss << message->content << "\n\n";
}
}
} else {
} else if (tmpl == "granite" || tmpl_contains("<|start_of_role|>")) {
// IBM Granite template
for (const auto& message : chat) {
std::string role(message->role);
ss << "<|start_of_role|>" << role << "<|end_of_role|>" << "\n"
<< message->content << "<|end_of_text|>\n";
}
if (add_ass) {
ss << "<|start_of_role|>assistant<|end_of_role|>\n";
}
} else {
// template not supported
return -1;
}