From e74773ec434fea97b5c365bb9781df3b1fa6c494 Mon Sep 17 00:00:00 2001
From: arch-btw <57669023+arch-btw@users.noreply.github.com>
Date: Wed, 23 Oct 2024 04:12:53 -0700
Subject: [PATCH] Add granite template to llama.cpp

---
 src/llama.cpp | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 24e1f1f01..92865a0fa 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -21713,7 +21713,17 @@ static int32_t llama_chat_apply_template_internal(
                 ss << message->content << "\n\n";
             }
         }
-    } else {
+    } else if (tmpl == "granite" || tmpl_contains("<|start_of_role|>")) {
+        // IBM Granite template
+        for (const auto & message : chat) {
+            std::string role(message->role);
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>"
+               << message->content << "<|end_of_text|>\n";
+        }
+        if (add_ass) {
+            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
+        }
+    } else {
         // template not supported
         return -1;
     }