add llama_chat_format_example

ngxson 2024-06-24 10:52:17 +02:00
parent 43cab6bfc6
commit a3dbfabe93
4 changed files with 19 additions and 10 deletions


@@ -3020,6 +3020,17 @@ std::string llama_chat_format_single(const struct llama_model * model,
    return formatted;
}

std::string llama_chat_format_example(const struct llama_model * model,
        const std::string & tmpl) {
    std::vector<llama_chat_msg> msgs = {
        {"system", "You are a helpful assistant"},
        {"user", "Hello"},
        {"assistant", "Hi there"},
        {"user", "How are you?"},
    };
    return llama_chat_apply_template(model, tmpl, msgs, true);
}

//
// KV cache utils
//


@@ -382,6 +382,10 @@ std::string llama_chat_format_single(const struct llama_model * model,
        const llama_chat_msg & new_msg,
        bool add_ass);

// Returns an example of formatted chat
std::string llama_chat_format_example(const struct llama_model * model,
        const std::string & tmpl);

//
// KV cache utils
//
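For reference, a minimal sketch of how a caller might use the new helper (illustrative only, not part of this commit; the wrapper name print_chat_example is hypothetical, and the empty-template fallback to the model's built-in template is an assumption mirroring the server's built_in flag below):

// Illustrative usage sketch, assuming "common.h" and <cstdio> are included
// and `model` is a llama_model * that the caller has already loaded.
// An empty template string is assumed to select the model's built-in chat template.
static void print_chat_example(const struct llama_model * model) {
    std::string example = llama_chat_format_example(model, "");
    fprintf(stderr, "chat template example:\n%s\n", example.c_str());
}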


@@ -224,6 +224,8 @@ int main(int argc, char ** argv) {
                __func__, n_ctx_train, n_ctx);
    }

    LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());

    // print system information
    {
        LOG_TEE("\n");


@@ -2606,17 +2606,9 @@ int main(int argc, char ** argv) {
    // print sample chat example to make it clear which template is used
    {
        json chat;
        chat.push_back({{"role", "system"}, {"content", "You are a helpful assistant"}});
        chat.push_back({{"role", "user"}, {"content", "Hello"}});
        chat.push_back({{"role", "assistant"}, {"content", "Hi there"}});
        chat.push_back({{"role", "user"}, {"content", "How are you?"}});
        const std::string chat_example = format_chat(ctx_server.model, params.chat_template, chat);
LOG_INFO("chat template", { LOG_INFO("chat template", {
{"chat_example", chat_example}, {"chat_example", llama_chat_format_example(model)},
{"built_in", params.chat_template.empty()}, {"built_in", params.chat_template.empty()},
}); });
} }