Format test-chat.cpp

Olivier Chafik 2025-01-30 14:09:54 +00:00
parent 5a64af6c70
commit f223df0271
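This commit is a pure formatting pass over the test file. As a sketch of how such a pass is typically reproduced locally (not taken from the commit itself; it assumes the repository ships a .clang-format config and that the file lives at tests/test-chat.cpp):

    clang-format -i tests/test-chat.cpp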


@@ -5,43 +5,42 @@
//
//   cmake -B build && cmake --build build --parallel && ./build/bin/test-chat ../minja/build/tests/*.jinja 2>/dev/null
//
#include <fstream>
#include <iostream>
#include <json.hpp>
#include <string>

#include "chat-template.hpp"
#include "chat.hpp"
#include "llama-grammar.h"
#include "unicode.h"

using json = nlohmann::ordered_json;

static common_chat_msg msg_from_json(const json & message) {
    common_chat_msg ret{
        "assistant",
        "",
        {},
    };
    if (message.contains("content") && !message.at("content").is_null()) {
        ret.content = message.at("content").get<std::string>();
    }
    auto has_tool_calls = message.contains("tool_calls");
    if (has_tool_calls) {
        for (const auto & tc : message.at("tool_calls")) {
            const auto & arguments = tc.at("function").at("arguments");
            ret.tool_calls.push_back({
                tc.at("function").at("name").get<std::string>(),
                arguments.is_string() ? arguments.get<std::string>() : arguments.dump(),
                tc.contains("id") ? tc.at("id").get<std::string>() : "",
            });
        }
    }
    return ret;
}

template <class T> static void assert_equals(const T & expected, const T & actual) {
    if (expected != actual) {
        std::cerr << "Expected: " << expected << std::endl;
        std::cerr << "Actual: " << actual << std::endl;
@@ -50,26 +49,27 @@ static void assert_equals(const T & expected, const T & actual) {
    }
}

static std::string read_file(const std::string & path) {
    std::cerr << "# Reading: " << path << std::endl << std::flush;
    std::ifstream fs(path, std::ios_base::binary);
    if (!fs.is_open()) {
        fs = std::ifstream("../" + path, std::ios_base::binary);
        if (!fs.is_open()) {
            throw std::runtime_error("Failed to open file: " + path);
        }
    }
    fs.seekg(0, std::ios_base::end);
    auto size = fs.tellg();
    fs.seekg(0);
    std::string out;
    out.resize(static_cast<size_t>(size));
    fs.read(&out[0], static_cast<std::streamsize>(size));
    return out;
}

static std::unique_ptr<llama_grammar> build_grammar(const std::string & grammar_str) {
    return std::unique_ptr<llama_grammar>(
        llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root", false, nullptr, 0, nullptr, 0));
}

// TODO: extract to common helper (copied from test-grammar-integration.cpp)
@@ -99,7 +99,7 @@ static bool match_string(const std::string & input, llama_grammar * grammar) {

// Dumps `{"a": 1}` as `"{\"a\": 1}"`, unlike nlohmann::json::dump which would dump it as `"{\"a\":1}"`.
static std::string dump(const json & j) {
    return minja::Value(j).dump(-1, /* to_json= */ true);
}

static void assert_msg_equals(const common_chat_msg & expected, const common_chat_msg & actual) {
@@ -108,7 +108,7 @@ static void assert_msg_equals(const common_chat_msg & expected, const common_chat_msg & actual) {
    assert_equals(expected.tool_calls.size(), actual.tool_calls.size());
    for (size_t i = 0; i < expected.tool_calls.size(); i++) {
        const auto & expected_tool_call = expected.tool_calls[i];
        const auto & actual_tool_call   = actual.tool_calls[i];
        assert_equals(expected_tool_call.name, actual_tool_call.name);
        assert_equals(dump(json::parse(expected_tool_call.arguments)), dump(json::parse(actual_tool_call.arguments)));
        assert_equals(expected_tool_call.id, actual_tool_call.id);
@@ -132,7 +132,7 @@ const auto special_function_tool = json::parse(R"({
    }
  }
})");
const auto python_tool = json::parse(R"({
  "type": "function",
  "function": {
    "name": "python",
@@ -166,53 +166,55 @@ const auto code_interpreter_tool = json::parse(R"({
    }
  }
})");

const json tools           = { special_function_tool, python_tool };
const json llama_3_1_tools = { special_function_tool, code_interpreter_tool };

struct delta_data {
    std::string        delta;
    std::string        grammar;
    common_chat_format format;
};

static delta_data init_delta(const common_chat_template & tmpl, const std::vector<std::string> & end_tokens,
                             const json & user_message, const json & delta_message, const json & tools,
                             const json & tool_choice) {
    common_chat_inputs inputs;
    inputs.parallel_tool_calls = true;
    inputs.messages = json::array();
    inputs.messages.push_back(user_message);
    inputs.tools = tools;
    inputs.tool_choice = tool_choice;
    auto params_prefix = common_chat_params_init(tmpl, inputs);

    inputs.messages.push_back(delta_message);
    inputs.add_generation_prompt = false;
    auto params_full = common_chat_params_init(tmpl, inputs);

    std::string prefix = params_prefix.prompt;
    std::string full = params_full.prompt;

    // Check full starts with prefix
    if (full.find(prefix) != 0) {
        fprintf(stderr, "Full:\n%s\n\nPrefix:\n%s\n\n", full.c_str(), prefix.c_str());
        throw std::runtime_error("Full message does not start with prefix");
    }

    if (full == prefix) {
        throw std::runtime_error("Full message is the same as the prefix");
    }

    auto delta = full.substr(prefix.size());

    // Strip end tokens
    for (const auto & end_token : end_tokens) {
        // rfind to find the last occurrence
        auto pos = delta.rfind(end_token);
        if (pos != std::string::npos) {
            delta = delta.substr(0, pos);
            break;
        }
    }
    return { delta, params_full.grammar, params_full.format };
}

/*
@@ -220,277 +222,300 @@ static delta_data init_delta(const common_chat_template & tmpl, const std::vector<std::string> & end_tokens,
  gets the diff, removes any end tokens and parses the result w/ the grammar, checking that
  the parsed message is the same as the test_message
*/
static void test_template(const common_chat_template & tmpl, const std::vector<std::string> & end_tokens,
                          const json & test_message, const json & tools = {}, const std::string & expected_delta = "",
                          bool skip_grammar_test = false, bool skip_parser_test = false) {
    common_chat_msg expected_msg = msg_from_json(test_message);

    auto user_message = json{
        { "role", "user" },
        { "content", "Hello, world!" }
    };

    for (const auto & tool_choice : json({ "auto", "required" })) {
        auto data = init_delta(tmpl, end_tokens, user_message, test_message, tools, tool_choice);
        if (!expected_delta.empty()) {
            assert_equals(expected_delta, data.delta);
        }

        if (!skip_parser_test) {
            const auto msg = common_chat_parse(data.delta, data.format);
            assert_msg_equals(expected_msg, msg);
        }

        if (!expected_msg.tool_calls.empty()) {
            GGML_ASSERT(!data.grammar.empty());
        }

        if (!data.grammar.empty()) {
            auto grammar = build_grammar(data.grammar);
            if (!grammar) {
                throw std::runtime_error("Failed to build grammar");
            }
            // TODO: exercice lazy grammars + triggers here, instead of skipping the test
            if (!skip_grammar_test) {
                if (!match_string(data.delta, grammar.get())) {
                    throw std::runtime_error("Failed to match delta against grammar:\n\n" + data.delta +
                                             "\n\nGrammar: " + data.grammar);
                }
            }
        }
    }
}

static void test_template_output_parsers() {
    auto text_message = json{
        { "role", "assistant" },
        { "content", "Hello, world!" },
    };
    auto tool_call_message = json{
        { "role", "assistant" },
        { "content", {} },
        { "tool_calls", json{ {
              { "type", "function" },
              { "function", { { "name", "special_function" }, { "arguments", "{\"arg1\": 1}" } } },
          } } }
    };
    auto tool_call_message_with_id = json::parse(tool_call_message.dump());
    tool_call_message_with_id["tool_calls"][0]["id"] = "123456789";

    auto python_tool_call_message = json{
        { "role", "assistant" },
        { "content", {} },
        { "tool_calls", json{ {
              { "type", "function" },
              { "function",
                {
                    { "name", "python" },
                    { "arguments",
                      {
                          { "code", "print('hey')" },
                      } },
                } },
          } } }
    };
    auto code_interpreter_tool_call_message = json{
        { "role", "assistant" },
        { "content", {} },
        { "tool_calls", json{ {
              { "type", "function" },
              { "function",
                {
                    { "name", "code_interpreter" },
                    { "arguments",
                      {
                          { "code", "print('hey')" },
                      } },
                } },
          } } }
    };

    common_chat_inputs inputs_no_tools;
    inputs_no_tools.messages = {
        { { "role", "user" }, { "content", "Hey" } }
    };

    common_chat_inputs inputs_tools = inputs_no_tools;
    inputs_tools.tools = json::array();
    inputs_tools.tools.push_back(special_function_tool);

    common_chat_inputs inputs_tools_builtin = inputs_no_tools;
    inputs_tools_builtin.tools = json::array();
    inputs_tools_builtin.tools.push_back(python_tool);

    {
        const common_chat_template tmpl(read_file("models/templates/google-gemma-2-2b-it.jinja"), "<s>", "</s>");
        std::vector<std::string> end_tokens{ "<end_of_turn>" };

        assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_params_init(tmpl, inputs_no_tools).format);
        assert_equals(COMMON_CHAT_FORMAT_GENERIC, common_chat_params_init(tmpl, inputs_tools).format);
        assert_equals(COMMON_CHAT_FORMAT_GENERIC,
                      common_chat_params_init(
                          common_chat_template(read_file("models/templates/microsoft-Phi-3.5-mini-instruct.jinja"),
                                               "<s>", "</s>"),
                          inputs_tools)
                          .format);

        // Generic tool calls doesn't generate / parse content-only messages symmetrically.

        assert_msg_equals(msg_from_json(text_message),
                          common_chat_parse("{\n"
                                            "  \"response\": \"Hello, world!\"\n"
                                            "}",
                                            common_chat_params_init(tmpl, inputs_tools).format));
        test_template(tmpl, end_tokens, tool_call_message_with_id, tools,
                      "{\n"
                      "  \"tool_calls\": [\n"
                      "    {\n"
                      "      \"name\": \"special_function\",\n"
                      "      \"arguments\": {\n"
                      "        \"arg1\": 1\n"
                      "      },\n"
                      "      \"id\": \"123456789\"\n"
                      "    }\n"
                      "  ]\n"
                      "}");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja"), "<s>",
                                        "</s>");
        std::vector<std::string> end_tokens{ "</s>" };

        assert_equals(COMMON_CHAT_FORMAT_MISTRAL_NEMO, common_chat_params_init(tmpl, inputs_tools).format);

        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true);
        test_template(
            tmpl, end_tokens, tool_call_message_with_id, tools,
            "[TOOL_CALLS][{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}, \"id\": \"123456789\"}]",
            /* skip_grammar_test= */ true);
    }
    {
        const common_chat_template tmpl(
            read_file("models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja"), "<s>", "</s>");
        std::vector<std::string> end_tokens{ "<|im_end|>" };

        assert_equals(COMMON_CHAT_FORMAT_HERMES_2_PRO, common_chat_params_init(tmpl, inputs_tools).format);
        assert_equals(
            COMMON_CHAT_FORMAT_HERMES_2_PRO,
            common_chat_params_init(
                common_chat_template(read_file("models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja"),
                                     "<s>", "</s>"),
                inputs_tools)
                .format);
        assert_equals(
            COMMON_CHAT_FORMAT_HERMES_2_PRO,
            common_chat_params_init(
                common_chat_template(read_file("models/templates/Qwen-Qwen2.5-7B-Instruct.jinja"), "<s>", "</s>"),
                inputs_tools)
                .format);

        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      "<tool_call>\n"
                      "{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}\n"
                      "</tool_call>");
        test_template(tmpl, end_tokens, python_tool_call_message, tools,
                      "<tool_call>\n"
                      "{\"name\": \"python\", \"arguments\": {\"code\": \"print('hey')\"}}\n"
                      "</tool_call>");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja"), "<s>",
                                        "</s>");
        std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" };

        assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_params_init(tmpl, inputs_tools).format);
        assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
                      common_chat_params_init(tmpl, inputs_tools_builtin).format);
        assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
                      common_chat_params_init(
                          common_chat_template(read_file("models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja"),
                                               "<s>", "</s>"),
                          inputs_tools_builtin)
                          .format);

        // test_template(tmpl, end_tokens, text_message, tools, R"(?)", /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, code_interpreter_tool_call_message, llama_3_1_tools,
                      "<|python_tag|>code_interpreter.call(code=\"print('hey')\")");
        test_template(tmpl, end_tokens, python_tool_call_message, tools,
                      "<|python_tag|>python.call(code=\"print('hey')\")");
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja"), "<s>",
                                        "</s>");
        std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" };

        assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_params_init(tmpl, inputs_tools).format);

        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/meetkai-functionary-medium-v3.1.jinja"), "<s>",
                                        "</s>");
        std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" };

        assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
                      common_chat_params_init(tmpl, inputs_tools).format);

        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      "<function=special_function>{\"arg1\": 1}</function>");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/meetkai-functionary-medium-v3.2.jinja"), "<s>",
                                        "</s>");
        std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" };

        assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, common_chat_params_init(tmpl, inputs_no_tools).format);
        assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, common_chat_params_init(tmpl, inputs_tools).format);

        test_template(tmpl, end_tokens, text_message, {},
                      "all\n"
                      "Hello, world!",
                      /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      "special_function\n"
                      "{\"arg1\": 1}");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/fireworks-ai-llama-3-firefunction-v2.jinja"), "<s>",
                                        "</s>");
        std::vector<std::string> end_tokens{ "<|eot_id|>" };

        assert_equals(COMMON_CHAT_FORMAT_FIREFUNCTION_V2, common_chat_params_init(tmpl, inputs_tools).format);

        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      " functools[{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}]");
    }
    {
        const common_chat_template tmpl(read_file("models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja"),
                                        "<s>", "</s>");
        std::vector<std::string> end_tokens{ "<end▁of▁sentence>" };

        assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format);

        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true);
        test_template(tmpl, end_tokens, tool_call_message, tools,
                      "<tool▁calls▁begin><tool▁call▁begin>function<tool▁sep>special_function\n"
                      "```json\n"
                      "{\"arg1\": 1}\n"
                      "```<tool▁call▁end>");
    }
}

int main(int argc, char ** argv) {
#ifndef _WIN32
    if (argc > 1) {
        common_chat_inputs inputs;
        inputs.messages = {
            { { "role", "user" }, { "content", "Hey" } }
        };
        inputs.tools = json::array({ special_function_tool });

        std::cout << "| Template | Format |\n";
        std::cout << "|----------|--------|\n";
        for (int i = 1; i < argc; i++) {
            std::string path = argv[i];
            if (path.rfind(".jinja") != path.size() - 6) {
                std::cerr << "Skipping non-jinja file: " << path << std::endl;
                continue;
            }
            common_chat_template tmpl(read_file(path), "", "");
            auto parts = string_split(path, "/");
            auto name  = parts[parts.size() - 1];
            std::cout << "| " << name << " | " << common_chat_format_name(common_chat_params_init(tmpl, inputs).format)
                      << " |\n";
        }
    } else
#endif
    {
        test_template_output_parsers();
        std::cout << "\n[chat] All tests passed!" << std::endl;
    }
    return 0;
}
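As a usage note grounded in main() above: when given one or more template paths, the binary prints a Markdown table mapping each .jinja template to the chat format detected by common_chat_params_init (the glob below is illustrative, mirroring the header comment):

    ./build/bin/test-chat models/templates/*.jinja

Run with no arguments, it instead executes test_template_output_parsers() and prints "[chat] All tests passed!" on success.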