Refactor common_chat_* functions to accept minja template + use_jinja option

ochafik 2025-01-18 00:43:38 +00:00
parent 3ed670b6dd
commit b75d0622e4
7 changed files with 82 additions and 80 deletions
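
In short, the refactor threads a llama_chat_template object (the minja-backed wrapper) and a use_jinja flag through the common_chat_* helpers and the server's OpenAI-compatibility layer, replacing the llama_model pointer they previously took. The declarations below are a sketch inferred from the call sites in this commit; return types and parameter names are assumptions, not verbatim headers:

std::string common_chat_apply_template(
    const llama_chat_template & tmpl,
    const std::vector<common_chat_msg> & messages,
    bool add_ass,
    bool use_jinja);

std::string common_chat_format_example(
    const llama_chat_template & tmpl,
    bool use_jinja);

static json oaicompat_completion_params_parse(
    const json & body,                 // OpenAI API JSON semantics
    const llama_chat_template & tmpl,
    bool use_jinja);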

View file

@@ -3869,7 +3869,7 @@ int main(int argc, char ** argv) {
auto body = json::parse(req.body);
const auto & templates = get_chat_templates();
const auto & chat_template = body.contains("tools") && templates.tool_use_template ? *templates.tool_use_template : templates.default_template;
- json data = oaicompat_completion_params_parse(ctx_server.model, body, chat_template, params.use_jinja);
+ json data = oaicompat_completion_params_parse(body, chat_template, params.use_jinja);
return handle_completions_impl(
SERVER_TASK_TYPE_COMPLETION,
@@ -4288,7 +4288,7 @@ int main(int argc, char ** argv) {
// print sample chat example to make it clear which template is used
LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
get_chat_templates().default_template.source().c_str(),
- common_chat_format_example(ctx_server.model, get_chat_templates().default_template, ctx_server.params_base.use_jinja).c_str());
+ common_chat_format_example(get_chat_templates().default_template, ctx_server.params_base.use_jinja).c_str());
ctx_server.queue_tasks.on_new_task(std::bind(
&server_context::process_single_task, &ctx_server, std::placeholders::_1));

View file

@@ -351,7 +351,7 @@ static llama_tokens format_infill(
}
// Format given chat. If tmpl is empty, we take the template from model metadata
- inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
+ inline std::string format_chat(const llama_chat_template & tmpl, const std::vector<json> & messages) {
std::vector<common_chat_msg> chat;
for (size_t i = 0; i < messages.size(); ++i) {
@@ -379,7 +379,7 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
chat.push_back({role, content});
}
- const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
+ const auto formatted_chat = common_chat_apply_template(tmpl, chat, true, /* use_jinja= */ false);
LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());
return formatted_chat;
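
With the new signature, non-Jinja callers hand the template object to common_chat_apply_template instead of the model handle. A minimal call-site sketch, assuming common_chat_msg is the {role, content} struct filled in the loop above and that the argument order matches the call in this hunk (the messages are illustrative):

// Hypothetical call site for the legacy (non-Jinja) path.
std::vector<common_chat_msg> chat = {
    {"system", "You are a helpful assistant."},
    {"user",   "Hello!"},
};
// Third argument (true) adds the trailing assistant prompt, as in the diff;
// use_jinja = false selects the legacy (non-minja) formatting path.
std::string prompt = common_chat_apply_template(tmpl, chat, true, /* use_jinja= */ false);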
@@ -579,9 +579,8 @@ static json oaicompat_completion_params_parse(const json & body) {
}
static json oaicompat_completion_params_parse(
- const struct llama_model * model,
const json & body, /* openai api json semantics */
- const minja::chat_template & tmpl,
+ const llama_chat_template & tmpl,
bool use_jinja)
{
json llama_params;
@@ -622,7 +621,7 @@ static json oaicompat_completion_params_parse(
if (use_jinja) {
llama_params["prompt"] = tmpl.apply(body.at("messages"), tools, /* add_generation_prompt= */ true);
} else {
llama_params["prompt"] = format_chat(model, tmpl.source(), body.at("messages"));
llama_params["prompt"] = format_chat(tmpl, body.at("messages"));
}
// Handle "n" field