Add granite template to llama.cpp
parent 4c9388fb96
commit e74773ec43
1 changed file with 11 additions and 1 deletion
@@ -21713,7 +21713,17 @@ static int32_t llama_chat_apply_template_internal(
                 ss << message->content << "\n\n";
             }
         }
-    } else {
+    } else if (tmpl == "granite" || tmpl_contains("<|start_of_role|>")) {
+        // IBM Granite template
+        for (const auto& message : chat) {
+            std::string role(message->role);
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>" << "\n"
+               << message->content << "<|end_of_text|>\n";
+        }
+        if (add_ass) {
+            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
+        }
+    } else {
         // template not supported
         return -1;
     }
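For reference, the prompt format this branch produces can be checked without building llama.cpp. Below is a minimal standalone sketch, assuming a simplified chat_message struct in place of llama.cpp's llama_chat_message; apply_granite_template is a hypothetical helper that mirrors the loop added above, not an actual llama.cpp function.

// Standalone sketch of the Granite prompt format added in this commit.
// chat_message and apply_granite_template are illustrative stand-ins,
// not llama.cpp types or APIs.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct chat_message {
    std::string role;
    std::string content;
};

// Mirrors the new branch: each turn is wrapped as
// <|start_of_role|>ROLE<|end_of_role|>\nCONTENT<|end_of_text|>\n,
// and add_ass opens an assistant turn for generation.
static std::string apply_granite_template(const std::vector<chat_message>& chat, bool add_ass) {
    std::stringstream ss;
    for (const auto& message : chat) {
        ss << "<|start_of_role|>" << message.role << "<|end_of_role|>" << "\n"
           << message.content << "<|end_of_text|>\n";
    }
    if (add_ass) {
        ss << "<|start_of_role|>assistant<|end_of_role|>\n";
    }
    return ss.str();
}

int main() {
    std::vector<chat_message> chat = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!" },
    };
    std::cout << apply_granite_template(chat, /*add_ass=*/true);
    return 0;
}

With add_ass = true the rendered prompt ends with an open assistant turn, which is where the model starts generating:

<|start_of_role|>system<|end_of_role|>
You are a helpful assistant.<|end_of_text|>
<|start_of_role|>user<|end_of_role|>
Hello!<|end_of_text|>
<|start_of_role|>assistant<|end_of_role|>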