Finish suggested renamings

ochafik 2025-01-20 21:06:18 +00:00
parent 153e852411
commit db9dd0c1ac
5 changed files with 29 additions and 29 deletions

View file

@@ -1827,7 +1827,7 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
     auto token_bos = common_token_to_piece(vocab, llama_vocab_bos(vocab), true);
     auto token_eos = common_token_to_piece(vocab, llama_vocab_eos(vocab), true);
     std::string default_template_src = chat_template_override;
-    std::string tool_use_template_src = chat_template_override;
+    std::string template_tool_use_src = chat_template_override;
     bool has_explicit_template = !chat_template_override.empty();
     if (chat_template_override.empty()) {
         auto str = llama_model_chat_template(model, /* name */ nullptr);
@@ -1837,13 +1837,13 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
         }
         str = llama_model_chat_template(model, /* name */ "tool_use");
         if (str) {
-            tool_use_template_src = str;
+            template_tool_use_src = str;
             has_explicit_template = true;
         }
     }
     if (default_template_src.empty() || default_template_src == "chatml") {
-        if (!tool_use_template_src.empty()) {
-            default_template_src = tool_use_template_src;
+        if (!template_tool_use_src.empty()) {
+            default_template_src = template_tool_use_src;
         } else {
             default_template_src = R"(
 {%- for message in messages -%}
@@ -1857,10 +1857,10 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
     }
     return {
         has_explicit_template,
-        std::make_unique<minja::chat_template>(default_template_src, bos_token, eos_token),
-        tool_use_template_src.empty()
+        std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),
+        template_tool_use_src.empty()
             ? nullptr
-            : std::make_unique<minja::chat_template>(tool_use_template_src, bos_token, eos_token)
+            : std::make_unique<minja::chat_template>(template_tool_use_src, token_bos, token_eos)
     };
 }

View file

@@ -632,7 +632,7 @@ std::string common_chat_format_single(
 std::string common_chat_format_example(
     const llama_chat_template & tmpl, bool use_jinja);
-llama_chat_templates llama_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);
+llama_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);
 //
 // KV cache utils

View file

@@ -158,7 +158,7 @@ int main(int argc, char ** argv) {
     }
     const llama_vocab * vocab = llama_model_get_vocab(model);
-    auto chat_templates = llama_chat_templates_from_model(model, params.chat_template);
+    auto chat_templates = common_chat_templates_from_model(model, params.chat_template);
     LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads);
@@ -201,7 +201,7 @@ int main(int argc, char ** argv) {
     }
     // auto enable conversation mode if chat template is available
-    const bool has_chat_template = chat_templates.has_explicit_template && chat_templates.default_template;
+    const bool has_chat_template = chat_templates.has_explicit_template && chat_templates.template_default;
     if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
         if (has_chat_template) {
             LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
@@ -219,7 +219,7 @@ int main(int argc, char ** argv) {
     // print chat template example in conversation mode
     if (params.conversation_mode) {
         if (params.enable_chat_template) {
-            LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(*chat_templates.default_template, params.use_jinja).c_str());
+            LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(*chat_templates.template_default, params.use_jinja).c_str());
         } else {
             LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
         }
@@ -265,7 +265,7 @@ int main(int argc, char ** argv) {
     auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) {
         common_chat_msg new_msg{role, content};
-        auto formatted = common_chat_format_single(*chat_templates.default_template, chat_msgs, new_msg, role == "user", g_params->use_jinja);
+        auto formatted = common_chat_format_single(*chat_templates.template_default, chat_msgs, new_msg, role == "user", g_params->use_jinja);
         chat_msgs.push_back({role, content});
         LOG_DBG("formatted: '%s'\n", formatted.c_str());
         return formatted;

View file

@@ -936,8 +936,8 @@ static int get_user_input(std::string & user_input, const std::string & user) {
 static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_jinja) {
     int prev_len = 0;
     llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
-    auto chat_templates = llama_chat_templates_from_model(llama_data.model.get(), "");
-    GGML_ASSERT(chat_templates.default_template);
+    auto chat_templates = common_chat_templates_from_model(llama_data.model.get(), "");
+    GGML_ASSERT(chat_templates.template_default);
     static const bool stdout_a_terminal = is_stdout_a_terminal();
     while (true) {
         // Get user input
@@ -948,7 +948,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_
         add_message("user", user.empty() ? user_input : user, llama_data);
         int new_len;
-        if (apply_chat_template_with_error_handling(*chat_templates.default_template, llama_data, true, new_len, use_jinja) < 0) {
+        if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, true, new_len, use_jinja) < 0) {
             return 1;
         }
@@ -963,7 +963,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_
         }
         add_message("assistant", response, llama_data);
-        if (apply_chat_template_with_error_handling(*chat_templates.default_template, llama_data, false, prev_len, use_jinja) < 0) {
+        if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, false, prev_len, use_jinja) < 0) {
             return 1;
         }
     }

View file

@@ -1745,15 +1745,15 @@ struct server_context {
         llama_chat_message chat[] = {{"user", "test"}};
         if (use_jinja) {
-            auto templates = llama_chat_templates_from_model(model, "");
-            GGML_ASSERT(templates.default_template);
+            auto templates = common_chat_templates_from_model(model, "");
+            GGML_ASSERT(templates.template_default);
             try {
-                templates.default_template->apply({{
+                templates.template_default->apply({{
                     {"role", "user"},
                     {"content", "test"},
                 }}, json(), true);
-                if (templates.tool_use_template) {
-                    templates.tool_use_template->apply({{
+                if (templates.template_tool_use) {
+                    templates.template_tool_use->apply({{
                         {"role", "user"},
                         {"content", "test"},
                     }}, json(), true);
@@ -3631,8 +3631,8 @@ int main(int argc, char ** argv) {
     auto get_chat_templates = [&ctx_server, &chat_templates_mutex, &chat_templates]() -> const llama_chat_templates & {
         std::lock_guard<std::mutex> lock(chat_templates_mutex);
         if (!chat_templates) {
-            chat_templates = llama_chat_templates_from_model(ctx_server.model, ctx_server.params_base.chat_template);
-            GGML_ASSERT(chat_templates->default_template);
+            chat_templates = common_chat_templates_from_model(ctx_server.model, ctx_server.params_base.chat_template);
+            GGML_ASSERT(chat_templates->template_default);
         }
         return *chat_templates;
     };
@@ -3644,11 +3644,11 @@ int main(int argc, char ** argv) {
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
             { "total_slots", ctx_server.params_base.n_parallel },
             { "model_path", ctx_server.params_base.model },
-            { "chat_template", templates.default_template->source() },
+            { "chat_template", templates.template_default->source() },
             { "build_info", build_info },
         };
-        if (ctx_server.params_base.use_jinja && templates.tool_use_template) {
-            data["chat_template_tool_use"] = templates.tool_use_template->source();
+        if (ctx_server.params_base.use_jinja && templates.template_tool_use) {
+            data["chat_template_tool_use"] = templates.template_tool_use->source();
         }
         res_ok(res, data);
@@ -3871,7 +3871,7 @@ int main(int argc, char ** argv) {
         auto body = json::parse(req.body);
         const auto & templates = get_chat_templates();
-        const auto & chat_template = body.contains("tools") && templates.tool_use_template ? *templates.tool_use_template : *templates.default_template;
+        const auto & chat_template = body.contains("tools") && templates.template_tool_use ? *templates.template_tool_use : *templates.template_default;
         json data = oaicompat_completion_params_parse(body, chat_template, params.use_jinja);
         return handle_completions_impl(
@@ -4290,8 +4290,8 @@ int main(int argc, char ** argv) {
     // print sample chat example to make it clear which template is used
     LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
-        get_chat_templates().default_template->source().c_str(),
-        common_chat_format_example(*get_chat_templates().default_template, ctx_server.params_base.use_jinja).c_str());
+        get_chat_templates().template_default->source().c_str(),
+        common_chat_format_example(*get_chat_templates().template_default, ctx_server.params_base.use_jinja).c_str());
     ctx_server.queue_tasks.on_new_task(std::bind(
         &server_context::process_single_task, &ctx_server, std::placeholders::_1));
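
In short, this commit finishes the renames begun in the parent: llama_chat_templates_from_model becomes common_chat_templates_from_model, the struct members default_template and tool_use_template become template_default and template_tool_use, and the remaining bos_token/eos_token usages become token_bos/token_eos. Below is a minimal sketch of calling code after the rename, assuming the header that declares common_chat_format_single (the second changed file above); the helper print_template_info is hypothetical, for illustration only:

#include <cstdio>
#include "common.h" // assumed to declare common_chat_templates_from_model et al.

// Hypothetical helper: exercises the renamed function and struct members.
static void print_template_info(const struct llama_model * model, bool use_jinja) {
    // was: llama_chat_templates_from_model(model, "")
    auto chat_templates = common_chat_templates_from_model(model, /* chat_template_override */ "");
    GGML_ASSERT(chat_templates.template_default); // member was: default_template
    printf("chat template: %s\n", chat_templates.template_default->source().c_str());
    if (chat_templates.template_tool_use) {       // member was: tool_use_template
        printf("tool-use template: %s\n", chat_templates.template_tool_use->source().c_str());
    }
    printf("example: %s\n",
           common_chat_format_example(*chat_templates.template_default, use_jinja).c_str());
}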