diff --git a/common/arg.cpp b/common/arg.cpp
index f5e9b294f..23a9efcfc 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -1962,6 +1962,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.use_jinja = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
+    add_opt(common_arg(
+        {"--think"},
+        "*experimental* thinking mode (default: disabled)\n"
+        "returns reasoning_content in messages, forcing model to think unless it supports native <think> tags (DeepSeek R1)\n"
+        "only supported for non-streamed responses",
+        [](common_params & params) {
+            params.think = true;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
     add_opt(common_arg(
         {"--chat-template"}, "JINJA_TEMPLATE",
         string_format(
diff --git a/common/chat.cpp b/common/chat.cpp
index a72b1a899..8a04b251a 100644
--- a/common/chat.cpp
+++ b/common/chat.cpp
@@ -12,6 +12,7 @@ std::string common_chat_format_name(common_chat_format format) {
         case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x";
         case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools";
         case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1";
+        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK: return "DeepSeek R1 (extract <think>)";
         case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2";
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2";
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1";
@@ -206,83 +207,149 @@ static std::string apply(
 
 static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) {
     common_chat_params data;
-    auto tool_call_schemas = json::array();
-    foreach_function(inputs.tools, [&](const json & tool) {
-        const auto & function = tool["function"];
-        auto tool_schema = json {
+    json schema;
+    auto make_object = []() {
+        return json {
             {"type", "object"},
-            {"properties", {
-                {"name", {
-                    {"type", "string"},
-                    {"const", function["name"]},
-                }},
-                {"arguments", function["parameters"]},
-            }},
-            {"required", json::array({"name", "arguments"})},
+            {"properties", json::object()},
+            {"required", json::array()},
         };
-        if (function.contains("description")) {
-            tool_schema["description"] = function["description"];
+    };
+    auto add_property = [](json & obj, const std::string & name, const json & schema) {
+        obj["properties"][name] = schema;
+        obj["required"].push_back(name);
+    };
+    auto add_thoughts = [&](json & obj) {
+        add_property(obj, "thoughts", {
+            {"type", "string"},
+            {"description", "The assistant's thoughts"},
+        });
+    };
+    auto make_response = [&]() {
+        json response_wrapper = make_object();
+        if (inputs.think) {
+            add_thoughts(response_wrapper);
+        }
+        add_property(response_wrapper, "response", inputs.json_schema.is_null() ? json {{"type", "string"}} : inputs.json_schema);
+        return response_wrapper;
+    };
+    std::ostringstream ss;
+    if (inputs.tools.is_array() && !inputs.tools.empty()) {
+        auto tool_call_schemas = json::array();
+        foreach_function(inputs.tools, [&](const json & tool) {
+            const auto & function = tool["function"];
+            auto tool_schema = json {
+                {"type", "object"},
+                {"properties", {
+                    {"name", {
+                        {"type", "string"},
+                        {"const", function["name"]},
+                    }},
+                    {"arguments", function["parameters"]},
+                }},
+                {"required", json::array({"name", "arguments"})},
+            };
+            if (function.contains("description")) {
+                tool_schema["description"] = function["description"];
+            }
+            if (inputs.parallel_tool_calls) {
+                tool_schema["properties"]["id"] = {
+                    {"type", "string"},
+                    {"minLength", 4},
+                };
+                tool_schema["required"].push_back("id");
+            }
+            tool_call_schemas.emplace_back(tool_schema);
+        });
+        const json tool_call = tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {{"anyOf", tool_call_schemas}};
+        json tool_call_wrapper = make_object();
+        if (inputs.think) {
+            add_thoughts(tool_call_wrapper);
         }
         if (inputs.parallel_tool_calls) {
-            tool_schema["properties"]["id"] = {
-                {"type", "string"},
-                {"minLength", 4},
-            };
-            tool_schema["required"].push_back("id");
+            add_property(tool_call_wrapper, "tool_calls", {
+                {"type", "array"},
+                {"items", tool_call},
+                {"minItems", 1},
+            });
+        } else {
+            add_property(tool_call_wrapper, "tool_call", tool_call);
         }
-        tool_call_schemas.emplace_back(tool_schema);
-    });
-    const auto tool_call =
-        inputs.parallel_tool_calls
-            ? json {
-                {"type", "object"},
-                {"properties", {
-                    {"tool_calls", {
-                        {"type", "array"},
-                        {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
-                            {"anyOf", tool_call_schemas},
-                        }},
-                        {"minItems", 1},
-                    }},
-                }},
-                {"required", json::array({"tool_calls"})},
-            }
-            : json {
-                {"type", "object"},
-                {"properties", {
-                    {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
-                        {"anyOf", tool_call_schemas},
-                    }},
-                }},
-                {"required", json::array({"tool_call"})},
-            };
+        if (inputs.think) {
+            /*
+                This kind of turns any model into a thinking model by requiring the output to be (in TypeScript notation):
+
+                // ResponseSchema is json_schema if set, otherwise string
+
+                Schema                 = ({thoughts: string} & ToolCallSchema) | {thoughts: string, response: ResponseSchema}
+                SchemaToolRequired     = {thoughts: string} & ToolCallSchema
+
+                ToolCallSchema         = SingleToolCallSchema | ParallelToolCallSchema
+                SingleToolCallSchema   = {tool_call: ToolCall}
+                ParallelToolCallSchema = {tool_calls: ToolCall[]} // If parallel_tool_calls is true
+
+                ToolCall               = {name: string, arguments: ParametersSchema, id?: string} // id only if parallel_tool_calls is true
+                ParametersSchema       = tool1_params | tool2_params | ...
+            */
+
+            // TODO(ochafik): make the prompts configurable (jinja template?).
+            ss << "You are a tool-calling assistant that thinks before it acts.\n"
+                  "You respond in JSON format, as follows:\n"
+                  "- First, candidly explain your thoughts about the user's request "
+                  "and elaborate a step-by-step reasoning about your plan to satisfy it "
+                  "(including possible tool usage / function call), pondering pros and cons, "
+                  "widening your reasoning then narrowing down on a plan. "
+                  "Express all of these thoughts in the `thoughts` field.\n";
+        }
+        if (inputs.tool_choice != "required") {
+            schema = {
+                {"anyOf", json::array({tool_call_wrapper, make_response()})},
+            };
-    const auto schema =
-        inputs.tool_choice != "required"
-            ? json {
-                {"anyOf", json::array({
-                    tool_call,
-                    {
-                        {"type", "object"},
-                        {"properties", {
-                            {"response", inputs.json_schema.is_null()
-                                ? json {{"type", "string"}}
-                                : inputs.json_schema
-                            },
-                        }},
-                        {"required", json::array({"response"})},
-                    },
-                })}
-            }
-            : tool_call;
+            if (inputs.think) {
+                if (inputs.parallel_tool_calls && inputs.tools.size() > 1) {
+                    ss << "- Then if you need to perform operations or get data before responding to the user, "
+                          "call tools by providing an array of objects with name & arguments fields in the `tool_calls` field, "
+                          "or respond directly to the user's request in the `response` field.";
+                    // system = "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request";
+                } else {
+                    ss << "- Then if you need to perform an operation or get data before responding to the user, "
+                          "call a tool by providing its name & arguments in the `tool_call` field, "
+                          "or respond directly to the user's request in the `response` field.";
+                }
+            }
+        } else {
+            schema = tool_call_wrapper;
+            if (inputs.think) {
+                if (inputs.parallel_tool_calls && inputs.tools.size() > 1) {
+                    ss << "- Then call tools by providing their names and arguments in the `tool_calls` array.";
+                } else {
+                    ss << "- Then call a tool by providing its name and arguments in the `tool_call` object.";
+                }
+            }
+        }
+        if (inputs.think) {
+            ss << "- Finally, once you get results from previously requested tool calls (if you requested any), "
+                  "you iterate on your reasoning, update it if needed, and work towards a final response to the user's request "
+                  "in as many iterations as needed.";
+        }
+    } else {
+        schema = make_response();
+        if (inputs.think) {
+            ss << "You are an assistant that thinks before it acts.\n"
+                  "You respond in JSON format, as follows:\n"
+                  "- First, candidly explain your thoughts about the user's request "
+                  "and elaborate a step-by-step reasoning about your plan to satisfy it, "
+                  "pondering pros and cons, "
+                  "widening your reasoning then narrowing down on a plan. "
+                  "Express all of these thoughts in the `thoughts` field.\n"
+                  "- Then, respond directly to the user's request in the `response` field.";
+        }
+    }
+    auto system = ss.str();
 
     data.grammar_lazy = false;
     data.grammar = build_grammar([&](const common_grammar_builder & builder) {
         builder.add_schema("root", schema);
     }, grammar_options);
 
-    auto tweaked_messages = common_chat_template::add_system(
-        inputs.messages,
-        "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request");
+    auto tweaked_messages = system.empty() ? inputs.messages : common_chat_template::add_system(inputs.messages, system);
 
     data.prompt = apply(tmpl, tweaked_messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
     data.format = COMMON_CHAT_FORMAT_GENERIC;
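For reference, a minimal sketch (not part of the patch) of the JSON schema the generic handler above hands to `builder.add_schema` when `think` is on, for a hypothetical single `special_function(arg1: integer)` tool with `parallel_tool_calls` off and no caller-supplied `json_schema`. Listing `thoughts` first and making it required is what nudges the model to reason before it calls the tool or answers, since the grammar conversion follows property order:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::ordered_json;

int main() {
    // add_thoughts() attaches this to both alternatives.
    json thoughts = {
        {"type", "string"},
        {"description", "The assistant's thoughts"},
    };
    // One entry of tool_call_schemas; "special_function" and its single
    // integer parameter are assumptions for this example.
    json tool_call = {
        {"type", "object"},
        {"properties", {
            {"name",      {{"type", "string"}, {"const", "special_function"}}},
            {"arguments", {{"type", "object"},
                           {"properties", {{"arg1", {{"type", "integer"}}}}},
                           {"required", {"arg1"}}}},
        }},
        {"required", {"name", "arguments"}},
    };
    // tool_choice != "required": anyOf(tool_call_wrapper, make_response()).
    json schema = {
        {"anyOf", {
            json {
                {"type", "object"},
                {"properties", {{"thoughts", thoughts}, {"tool_call", tool_call}}},
                {"required", {"thoughts", "tool_call"}},
            },
            json {
                {"type", "object"},
                {"properties", {{"thoughts", thoughts}, {"response", {{"type", "string"}}}}},
                {"required", {"thoughts", "response"}},
            },
        }},
    };
    std::cout << schema.dump(2) << std::endl;
}
```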
@@ -292,6 +359,9 @@ static common_chat_msg common_chat_parse_generic(const std::string & input) {
     json data = json::parse(input);
     common_chat_msg result;
     result.role = "assistant";
+    if (data.contains("thoughts")) {
+        result.reasoning_content = data["thoughts"];
+    }
     if (data.contains("tool_calls")) {
         for (const auto & tool_call : data["tool_calls"]) {
             result.tool_calls.push_back({
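As a usage sketch (assuming `common/chat.hpp` is on the include path), this is how the new `thoughts` branch surfaces through the public parser for a generic think-mode completion:

```cpp
#include "chat.hpp"

#include <cassert>

int main() {
    // A completion constrained by the generic think schema above.
    auto msg = common_chat_parse(
        "{\"thoughts\": \"The user wants a sum.\", \"response\": \"109\"}",
        COMMON_CHAT_FORMAT_GENERIC);
    assert(msg.role == "assistant");
    // "thoughts" is lifted into reasoning_content; "response" stays in content.
    assert(msg.reasoning_content == "The user wants a sum.");
    assert(msg.content == "109");
}
```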
@@ -565,7 +635,7 @@ static common_chat_msg common_chat_parse_llama_3_1(const std::string & input, bo
 static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) {
     common_chat_params data;
     if (inputs.tools.is_array() && !inputs.tools.empty()) {
-        data.grammar_lazy = inputs.tool_choice != "required";
+        data.grammar_lazy = inputs.tool_choice != "required" && inputs.json_schema.is_null();
         data.grammar = build_grammar([&](const common_grammar_builder & builder) {
             std::vector<std::string> tool_rules;
             foreach_function(inputs.tools, [&](const json & tool) {
@@ -617,27 +687,32 @@ static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_
             "$1<|tool▁calls▁end|><|end▁of▁sentence|>$2");
     }
     data.prompt = prompt;
-    data.format = COMMON_CHAT_FORMAT_DEEPSEEK_R1;
+    data.format = inputs.think ? COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK : COMMON_CHAT_FORMAT_DEEPSEEK_R1;
     return data;
 }
-static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input) {
+static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input, bool think) {
     static std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n");
     static std::regex close_regex("```[\\s\\r\\n]*<|tool▁call▁end|>");
-    static std::regex reasoning_content_regex("(?:<think>([\\s\\S\\r\\n]*?)</think>)?([\\s\\S\\r\\n]*)");
+    static std::regex reasoning_content_regex("(<think>([\\s\\S\\r\\n]*?)</think>)?([\\s\\S\\r\\n]*)");
     static std::regex tool_calls_regex("[\\s\\r\\n]*(?:<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>)([\\s\\S\\r\\n]*?)<|tool▁calls▁end|>");
     common_chat_msg msg;
     msg.role = "assistant";
     std::smatch match;
     if (std::regex_match(input, match, reasoning_content_regex)) {
-        msg.reasoning_content = string_trim(match[1].str());
-        auto rest = match[2].str();
+        std::string rest;
+        if (think) {
+            msg.reasoning_content = string_trim(match[2].str());
+        } else {
+            msg.content = match[1].str();
+        }
+        rest = match[3].str();
         if (std::regex_search(rest, match, tool_calls_regex)) {
             auto tool_calls = match[1].str();
             auto msg2 = parse_json_tool_calls(tool_calls, std::nullopt, function_regex, close_regex);
             msg.tool_calls = std::move(msg2.tool_calls);
         } else {
-            msg.content = std::string(rest.begin() + rest.find_first_not_of(" \r\n"), rest.end());
+            msg.content += std::string(rest.begin() + rest.find_first_not_of(" \r\n"), rest.end());
         }
     } else {
         msg.content = input;
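A sketch of the two parsing modes side by side (same include assumption as above); it mirrors the assertions added in `tests/test-chat.cpp` further down. With the plain format the raw `<think>` block stays in `content`; with the `_THINK` format it is extracted into `reasoning_content`:

```cpp
#include "chat.hpp"

#include <cassert>
#include <string>

int main() {
    std::string output = "<think>Adding 102 and 7.</think>The sum is 109.";

    // think == false: group 1 (tags included) is kept as content.
    auto plain = common_chat_parse(output, COMMON_CHAT_FORMAT_DEEPSEEK_R1);
    assert(plain.content == "<think>Adding 102 and 7.</think>The sum is 109.");
    assert(plain.reasoning_content.empty());

    // think == true: group 2 (inner text) is trimmed into reasoning_content.
    auto think = common_chat_parse(output, COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK);
    assert(think.reasoning_content == "Adding 102 and 7.");
    assert(think.content == "The sum is 109.");
}
```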
@@ -953,47 +1028,66 @@ static common_chat_params common_chat_params_init_without_tools(const common_cha
 }
 
 common_chat_params common_chat_params_init(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) {
-    auto has_tools = !inputs.tools.is_null() && inputs.tool_choice != "none";
-    LOG_DBG("[%s] has_tools=%s\n", __func__, has_tools ? "true" : "false");
-
-    if (has_tools && !inputs.grammar.empty()) {
+    if (inputs.tools.is_array() && inputs.tool_choice != "none" && !inputs.grammar.empty()) {
         throw std::runtime_error("Cannot specify grammar with tools");
     }
 
     const auto & src = tmpl.source();
-    if (src.find(">>>all") != std::string::npos) {
-        // Functionary prepends "all\n" to plain content outputs, so we use the parser no matter when
-        return common_chat_params_init_functionary_v3_2(tmpl, inputs);
-    }
-    if (src.find(" functools[") != std::string::npos) {
-        // Firefunction v2 requires datetime and functions in the context, even w/o tools.
-        return common_chat_params_init_firefunction_v2(tmpl, inputs);
-    }
-    if (src.find("<|tool▁calls▁begin|>") != std::string::npos) {
+
+    // DeepSeek R1: use handler in all cases except json schema (thinking / tools).
+    if (src.find("<|tool▁calls▁begin|>") != std::string::npos && inputs.json_schema.is_null()) {
         return common_chat_params_init_deepseek_r1(tmpl, inputs);
     }
-    if (!has_tools) {
+
+    // Use generic handler when forcing thoughts or JSON schema for final output
+    // TODO: support thinking mode and/or JSON schema in handlers below this.
+    if (inputs.think || inputs.json_schema.is_object()) {
+        return common_chat_params_init_generic(tmpl, inputs);
+    }
+
+    // Functionary prepends "all\n" to plain content outputs, so we use its handler in all cases.
+    if (src.find(">>>all") != std::string::npos) {
+        return common_chat_params_init_functionary_v3_2(tmpl, inputs);
+    }
+
+    // Firefunction v2 requires datetime and functions in the context even w/o tools, so we also use its handler in all cases.
+    if (src.find(" functools[") != std::string::npos) {
+        return common_chat_params_init_firefunction_v2(tmpl, inputs);
+    }
+
+    // Plain handler (no tools)
+    if (inputs.tools.is_null() || inputs.tool_choice == "none") {
         return common_chat_params_init_without_tools(tmpl, inputs);
     }
+
+    // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
     if (src.find("<tool_call>") != std::string::npos) {
         return common_chat_params_init_hermes_2_pro(tmpl, inputs);
     }
+
+    // Functionary v3.1 (w/ tools)
+    if (src.find("<|start_header_id|>") != std::string::npos && src.find("<function=") != std::string::npos) {
+        return common_chat_params_init_functionary_v3_1_llama_3_1(tmpl, inputs);
+    }
+
+    // Llama 3.x (w/ builtin tools)
     if (src.find("<|start_header_id|>ipython<|end_header_id|>") != std::string::npos) {
         auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos;
         return common_chat_params_init_llama_3_1_tool_calls(tmpl, inputs, allow_python_tag_builtin_tools);
     }
+
+    // Mistral Nemo (w/ tools)
     if (src.find("[TOOL_CALLS]") != std::string::npos) {
         return common_chat_params_init_mistral_nemo(tmpl, inputs);
     }
+
+    // Command R7B (w/ tools)
     if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos) {
         return common_chat_params_init_command_r7b(tmpl, inputs);
     }
+
+    // Generic fallback
     return common_chat_params_init_generic(tmpl, inputs);
 }
 
@@ -1018,7 +1112,9 @@ common_chat_msg common_chat_parse(const std::string & input, common_chat_format
         case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS:
             return common_chat_parse_llama_3_1(input, /* with_builtin_tools= */ true);
         case COMMON_CHAT_FORMAT_DEEPSEEK_R1:
-            return common_chat_parse_deepseek_r1(input);
+            return common_chat_parse_deepseek_r1(input, /* think= */ false);
+        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK:
+            return common_chat_parse_deepseek_r1(input, /* think= */ true);
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2:
             return common_chat_parse_functionary_v3_2(input);
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1:
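For orientation, a hedged sketch of how `think` is meant to flow through the two entry points above. `run_turn` and the placeholder completion are hypothetical; the `common_chat_*` names and fields are the real ones from `common/chat.hpp`:

```cpp
#include "chat.hpp"

#include <string>

// Hypothetical helper, not part of the patch.
static common_chat_msg run_turn(const common_chat_template & tmpl,
                                const nlohmann::ordered_json & messages,
                                bool think) {
    common_chat_inputs inputs;
    inputs.messages              = messages;
    inputs.add_generation_prompt = true;
    inputs.think                 = think;  // set from --think / LLAMA_ARG_THINK

    common_chat_params params = common_chat_params_init(tmpl, inputs);
    // params.prompt now carries the think-mode system message (generic handler)
    // or the unchanged R1 prompt; params.grammar constrains generic output.
    std::string completion = "{\"thoughts\": \"...\", \"response\": \"...\"}";  // stand-in for sampled text
    return common_chat_parse(completion, params.format);
}
```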
"true" : "false"); - - if (has_tools && !inputs.grammar.empty()) { + if (inputs.tools.is_array() && inputs.tool_choice != "none" && !inputs.grammar.empty()) { throw std::runtime_error("Cannot specify grammar with tools"); } const auto & src = tmpl.source(); - if (src.find(">>>all") != std::string::npos) { - // Functionary prepends "all\n" to plain content outputs, so we use the parser no matter when - return common_chat_params_init_functionary_v3_2(tmpl, inputs); - } - if (src.find(" functools[") != std::string::npos) { - // Firefunction v2 requires datetime and functions in the context, even w/o tools. - return common_chat_params_init_firefunction_v2(tmpl, inputs); - } - if (src.find("<|tool▁calls▁begin|>") != std::string::npos) { + + // DeepSeek R1: use handler in all cases except json schema (thinking / tools). + if (src.find("<|tool▁calls▁begin|>") != std::string::npos && inputs.json_schema.is_null()) { return common_chat_params_init_deepseek_r1(tmpl, inputs); } - if (!has_tools) { + // Use generic handler when forcing thoughts or JSON schema for final output + // TODO: support thinking mode and/or JSON schema in handlers below this. + if (inputs.think || inputs.json_schema.is_object()) { + return common_chat_params_init_generic(tmpl, inputs); + } + + // Functionary prepends "all\n" to plain content outputs, so we use its handler in all cases. + if (src.find(">>>all") != std::string::npos) { + return common_chat_params_init_functionary_v3_2(tmpl, inputs); + } + + // Firefunction v2 requires datetime and functions in the context even w/o tools, so we also use its handler in all cases. + if (src.find(" functools[") != std::string::npos) { + return common_chat_params_init_firefunction_v2(tmpl, inputs); + } + + // Plain handler (no tools) + if (inputs.tools.is_null() || inputs.tool_choice == "none") { return common_chat_params_init_without_tools(tmpl, inputs); } + // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools) if (src.find("") != std::string::npos) { return common_chat_params_init_hermes_2_pro(tmpl, inputs); } + + // Functionary v3.1 (w/ tools) if (src.find("<|start_header_id|>") != std::string::npos && src.find("ipython<|end_header_id|>") != std::string::npos) { auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos; return common_chat_params_init_llama_3_1_tool_calls(tmpl, inputs, allow_python_tag_builtin_tools); } + + // Mistral Nemo (w/ tools) if (src.find("[TOOL_CALLS]") != std::string::npos) { return common_chat_params_init_mistral_nemo(tmpl, inputs); } + + // Command R7B (w/ tools) if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos) { return common_chat_params_init_command_r7b(tmpl, inputs); } + + // Generic fallback return common_chat_params_init_generic(tmpl, inputs); } @@ -1018,7 +1112,9 @@ common_chat_msg common_chat_parse(const std::string & input, common_chat_format case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return common_chat_parse_llama_3_1(input, /* with_builtin_tools= */ true); case COMMON_CHAT_FORMAT_DEEPSEEK_R1: - return common_chat_parse_deepseek_r1(input); + return common_chat_parse_deepseek_r1(input, /* think= */ false); + case COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK: + return common_chat_parse_deepseek_r1(input, /* think= */ true); case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return common_chat_parse_functionary_v3_2(input); case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: diff --git a/common/chat.hpp b/common/chat.hpp index 33e64a430..9bd9dc5ef 100644 --- a/common/chat.hpp +++ b/common/chat.hpp @@ -19,6 
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 8f098fef0..8ccce6a61 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -4052,7 +4052,7 @@ int main(int argc, char ** argv) {
         }
 
         auto body = json::parse(req.body);
-        json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates);
+        json data = oaicompat_completion_params_parse(body, params.use_jinja, params.think, ctx_server.chat_templates);
 
         return handle_completions_impl(
             SERVER_TASK_TYPE_COMPLETION,
@@ -4065,7 +4065,7 @@ int main(int argc, char ** argv) {
     // same with handle_chat_completions, but without inference part
     const auto handle_apply_template = [&ctx_server, &params, &res_ok](const httplib::Request & req, httplib::Response & res) {
         auto body = json::parse(req.body);
-        json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates);
+        json data = oaicompat_completion_params_parse(body, params.use_jinja, params.think, ctx_server.chat_templates);
         res_ok(res, {{ "prompt", std::move(data.at("prompt")) }});
     };
diff --git a/examples/server/tests/unit/test_tool_call.py b/examples/server/tests/unit/test_tool_call.py
index dc526b61d..937169d4b 100644
--- a/examples/server/tests/unit/test_tool_call.py
+++ b/examples/server/tests/unit/test_tool_call.py
@@ -439,14 +439,20 @@ def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str,
 
 
 @pytest.mark.slow
-@pytest.mark.parametrize("n_predict,expect_content,expect_reasoning_content,hf_repo,template_override", [
-    (128, "^The sum of 102 and 7 is 109.*", None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
-    (1024, "To find the sum of.*", "I need to calculate the sum of 102 and 7.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
-    (1024, "To find the sum of.*", "First, I need to add the tens place.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
+@pytest.mark.parametrize("n_predict,think,expect_content,expect_reasoning_content,hf_repo,template_override", [
+    (1024, True, "^The sum of 102 and 7 is 109.*", "^The user's request is straightforward.*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
+    (128, False, "^The sum of 102 and 7 is 109.*", None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
+
+    (1024, True, "To find the sum of.*", "I need to calculate the sum of 102 and 7.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
+    (1024, False, "<think>\nI need[\\s\\S\\r\\n]*</think>\nTo find", None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
+
+    (1024, True, "To find the sum of.*", "First, I need to add the tens place.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
+    (1024, False, "<think>\nI need[\\s\\S\\r\\n]*</think>To find", None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
 ])
-def test_reasoning_content(n_predict: int, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
+def test_thoughts(n_predict: int, think: bool, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
     global server
     server.n_slots = 1
+    server.think = think
     server.jinja = True
     server.n_ctx = 8192 * 2
     server.n_predict = n_predict
@@ -470,11 +476,15 @@ def test_reasoning_content(n_predict: int, expect_content: str | None, expect_re
     assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}'
 
     content = choice["message"].get("content")
-    if expect_content is not None:
+    if expect_content is None:
+        assert content is None, f'Expected no content in {choice["message"]}'
+    else:
         assert re.match(expect_content, content), f'Expected {expect_content}, got {content}'
 
     reasoning_content = choice["message"].get("reasoning_content")
-    if expect_reasoning_content is not None:
+    if expect_reasoning_content is None:
+        assert reasoning_content is None, f'Expected no reasoning content in {choice["message"]}'
+    else:
         assert re.match(expect_reasoning_content, reasoning_content), f'Expected {expect_reasoning_content}, got {reasoning_content}'
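For reference, a sketch (values shortened, not actual server output) of the non-streamed message shape these tests assert on when the server runs with `--think`:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main() {
    // Field names are the ones read via choice["message"] in test_thoughts.
    nlohmann::json message = {
        {"role",              "assistant"},
        {"content",           "The sum of 102 and 7 is 109."},
        {"reasoning_content", "The user's request is straightforward..."},
        // "tool_calls" stays absent for a direct answer
    };
    std::cout << message.dump(2) << std::endl;
}
```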
"bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)), + (1024, False, "\nI need[\\s\\S\\r\\n]*To find", None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)), ]) -def test_reasoning_content(n_predict: int, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None): +def test_thoughts(n_predict: int, think: bool, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None): global server server.n_slots = 1 + server.think = think server.jinja = True server.n_ctx = 8192 * 2 server.n_predict = n_predict @@ -470,11 +476,15 @@ def test_reasoning_content(n_predict: int, expect_content: str | None, expect_re assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}' content = choice["message"].get("content") - if expect_content is not None: + if expect_content is None: + assert content is None, f'Expected no content in {choice["message"]}' + else: assert re.match(expect_content, content), f'Expected {expect_content}, got {content}' reasoning_content = choice["message"].get("reasoning_content") - if expect_reasoning_content is not None: + if expect_reasoning_content is None: + assert reasoning_content is None, f'Expected no reasoning content in {choice["message"]}' + else: assert re.match(expect_reasoning_content, reasoning_content), f'Expected {expect_reasoning_content}, got {reasoning_content}' diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index ce0680662..2bddc55b6 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -78,6 +78,7 @@ class ServerProcess: draft_max: int | None = None no_webui: bool | None = None jinja: bool | None = None + think: bool | None = None chat_template: str | None = None chat_template_file: str | None = None @@ -172,6 +173,8 @@ class ServerProcess: server_args.append("--no-webui") if self.jinja: server_args.append("--jinja") + if self.think: + server_args.append("--think") if self.chat_template: server_args.extend(["--chat-template", self.chat_template]) if self.chat_template_file: diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 5f97df5fd..f006bbff8 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -578,6 +578,7 @@ static json oaicompat_completion_params_parse(const json & body) { static json oaicompat_completion_params_parse( const json & body, /* openai api json semantics */ bool use_jinja, + bool think, const common_chat_templates & chat_templates) { json llama_params; @@ -633,9 +634,10 @@ static json oaicompat_completion_params_parse( throw std::runtime_error("Cannot use custom grammar constraints with tools."); } common_chat_inputs inputs; - inputs.messages = body.at("messages"); - inputs.tools = tools; - inputs.tool_choice = tool_choice; + inputs.think = think; + inputs.messages = body.at("messages"); + inputs.tools = tools; + inputs.tool_choice = tool_choice; inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false); if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) { LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n"); diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index 1494c2443..a556098be 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -289,11 +289,19 @@ static void test_template(const 
diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp
index 1494c2443..a556098be 100644
--- a/tests/test-chat.cpp
+++ b/tests/test-chat.cpp
@@ -289,11 +289,19 @@ static void test_template(const common_chat_template & tmpl, const std::vector<st
-    json text_message {
+    json message_user {
+        { "role",    "user" },
+        { "content", "Hey\nThere" },
+    };
+    json message_assist {
         { "role",    "assistant" },
         { "content", "Hello, world!\nWhat's up?" },
     };
-    json text_reasoning_message {
+    json message_assist_thoughts_unparsed {
+        { "role",    "assistant" },
+        { "content", "<think>I'm thinking</think>Hello, world!\nWhat's up?" },
+    };
+    json message_assist_thoughts {
         { "role",    "assistant" },
         { "content", "Hello, world!\nWhat's up?" },
         { "reasoning_content", "I'm thinking" },
@@ -303,7 +311,7 @@ static void test_template_output_parsers() {
         { "function", { { "name", "special_function" }, { "arguments", "{\"arg1\": 1}" } } },
     }});
 
-    json tool_call_message {
+    json message_assist_call {
         { "role",    "assistant"},
         { "content", {}},
         { "tool_calls", {
@@ -316,7 +324,7 @@ static void test_template_output_parsers() {
         },
         }},
     };
-    json tool_call_reasoning_message = {
+    json message_assist_call_thoughts = {
         { "role",    "assistant" },
         { "content", nullptr },
         { "reasoning_content", "I'm\nthinking" },
@@ -330,7 +338,20 @@ static void test_template_output_parsers() {
         },
         }},
     };
-    json tool_call_message_with_id {
+    json message_assist_call_thoughts_unparsed = {
+        { "role",    "assistant" },
+        { "content", "<think>I'm\nthinking</think>" },
+        { "tool_calls", {
+            {
+                { "type", "function" },
+                { "function", {
+                    { "name", "special_function" },
+                    { "arguments", "{\"arg1\": 1}" },
+                }},
+            },
+        }},
+    };
+    json message_assist_call_id {
         { "role", "assistant"},
         { "content", {}},
         { "tool_calls", {
@@ -347,7 +368,7 @@ static void test_template_output_parsers() {
         { "content", {} },
         { "tool_calls", tool_calls }
     };
-    json tool_call_plan_message_with_idx {
+    json message_assist_call_idx {
         { "role", "assistant"},
         { "content", {}},
         { "tool_plan", "I'm not so sure"},
@@ -367,7 +388,7 @@ static void test_template_output_parsers() {
         { "tool_calls", tool_calls }
     };
 
-    auto python_tool_call_message = json{
+    auto python_message_assist_call = json{
         { "role",    "assistant" },
         { "content", {} },
         { "tool_calls", json{ {
@@ -382,7 +403,7 @@ static void test_template_output_parsers() {
         } },
     } } } };
 
-    auto code_interpreter_tool_call_message = json{
+    auto code_interpreter_message_assist_call = json{
         { "role",    "assistant" },
         { "content", {} },
         { "tool_calls", json{ {
@@ -399,17 +420,24 @@ static void test_template_output_parsers() {
     };
 
     common_chat_inputs inputs_no_tools;
-    inputs_no_tools.messages = {
-        { { "role", "user" }, { "content", "Hey\nThere" } }
-    };
+    inputs_no_tools.messages = json::array({message_user});
 
-    common_chat_inputs inputs_tools = inputs_no_tools;
-    inputs_tools.tools = json::array();
-    inputs_tools.tools.push_back(special_function_tool);
+    common_chat_inputs inputs_no_tools_think;
+    inputs_no_tools_think.messages = json::array({message_user});
+    inputs_no_tools_think.think = true;
 
-    common_chat_inputs inputs_tools_builtin = inputs_no_tools;
-    inputs_tools_builtin.tools = json::array();
-    inputs_tools_builtin.tools.push_back(python_tool);
+    common_chat_inputs inputs_tools;
+    inputs_tools.messages = json::array({message_user});
+    inputs_tools.tools = json::array({special_function_tool});
+
+    common_chat_inputs inputs_tools_think;
+    inputs_tools_think.messages = json::array({message_user});
+    inputs_tools_think.tools = json::array({special_function_tool});
+    inputs_tools_think.think = true;
+
+    common_chat_inputs inputs_tools_builtin;
+    inputs_tools_builtin.messages = json::array({message_user});
+    inputs_tools_builtin.tools = json::array({python_tool});
 
     {
         // Not supported yet
@@ -423,12 +451,12 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_params_init(tmpl, inputs_no_tools).format);
         assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, tool_call_plan_message_with_idx, tools,
+        test_template(tmpl, end_tokens, message_assist_call_idx, tools,
                       "<|START_THINKING|>I'm not so sure<|END_THINKING|>"
                       "<|START_ACTION|>[\n"
                       "    {\"tool_call_id\": \"0\", \"tool_name\": \"special_function\", \"parameters\": {\"arg1\": 1}}\n"
                       "]<|END_ACTION|>");
-        test_template(tmpl, end_tokens, text_message, tools,
+        test_template(tmpl, end_tokens, message_assist, tools,
                       "<|START_RESPONSE|>Hello, world!\n"
                       "What's up?<|END_RESPONSE|>",
                       /* expect_grammar_triggered= */ false);
     }
@@ -448,12 +476,12 @@ static void test_template_output_parsers() {
 
         // Generic tool calls doesn't generate / parse content-only messages symmetrically.
 
-        assert_msg_equals(msg_from_json(text_message),
+        assert_msg_equals(msg_from_json(message_assist),
                           common_chat_parse("{\n"
                                             "  \"response\": \"Hello, world!\\nWhat's up?\"\n"
                                             "}",
                                             common_chat_params_init(tmpl, inputs_tools).format));
-        test_template(tmpl, end_tokens, tool_call_message_with_id, tools,
+        test_template(tmpl, end_tokens, message_assist_call_id, tools,
                       "{\n"
                       "  \"tool_calls\": [\n"
                       "    {\n"
@@ -473,9 +501,9 @@ static void test_template_output_parsers() {
 
         assert_equals(COMMON_CHAT_FORMAT_MISTRAL_NEMO, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
         test_template(
-            tmpl, end_tokens, tool_call_message_with_id, tools,
+            tmpl, end_tokens, message_assist_call_id, tools,
             "[TOOL_CALLS][{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}, \"id\": \"123456789\"}]");
     }
     {
@@ -498,12 +526,12 @@ static void test_template_output_parsers() {
                               inputs_tools)
                               .format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       "<tool_call>\n"
                       "{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}\n"
                       "</tool_call>");
-        test_template(tmpl, end_tokens, python_tool_call_message, tools,
+        test_template(tmpl, end_tokens, python_message_assist_call, tools,
                       "<tool_call>\n"
                       "{\"name\": \"python\", \"arguments\": {\"code\": \"print('hey')\"}}\n"
                       "</tool_call>");
@@ -523,12 +551,12 @@ static void test_template_output_parsers() {
                               inputs_tools_builtin)
                               .format);
 
-        // test_template(tmpl, end_tokens, text_message, tools, R"(?)", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, code_interpreter_tool_call_message, llama_3_1_tools,
+        // test_template(tmpl, end_tokens, message_assist, tools, R"(?)", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, code_interpreter_message_assist_call, llama_3_1_tools,
                       "<|python_tag|>code_interpreter.call(code=\"print('hey')\")");
-        test_template(tmpl, end_tokens, python_tool_call_message, tools,
+        test_template(tmpl, end_tokens, python_message_assist_call, tools,
                       "<|python_tag|>python.call(code=\"print('hey')\")");
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}");
     }
     {
@@ -538,8 +566,8 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}");
     }
     {
@@ -550,8 +578,8 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
                       common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       "<function=special_function>{\"arg1\": 1}</function>");
     }
     {
@@ -562,12 +590,12 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, common_chat_params_init(tmpl, inputs_no_tools).format);
         assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, {},
+        test_template(tmpl, end_tokens, message_assist, {},
                       "all\n"
                       "Hello, world!\n"
                       "What's up?",
                       /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       "special_function\n"
                       "{\"arg1\": 1}");
     }
     {
@@ -578,8 +606,8 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_FIREFUNCTION_V2, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       " functools[{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}]");
     }
     {
@@ -590,10 +618,11 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, text_reasoning_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        assert_msg_equals(msg_from_json(text_reasoning_message), common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?", COMMON_CHAT_FORMAT_DEEPSEEK_R1));
-        // test_template(tmpl, end_tokens, tool_call_message, tools,
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist_thoughts, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        assert_msg_equals(msg_from_json(message_assist_thoughts_unparsed), common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?", COMMON_CHAT_FORMAT_DEEPSEEK_R1));
+        assert_msg_equals(msg_from_json(message_assist_thoughts), common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?", COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK));
+        // test_template(tmpl, end_tokens, message_assist_call, tools,
         //    "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n"
         //    "```json\n"
         //    "{\"arg1\": 1}\n"
@@ -610,11 +639,12 @@ static void test_template_output_parsers() {
         assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format);
 
-        test_template(tmpl, end_tokens, text_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        test_template(tmpl, end_tokens, text_reasoning_message, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
-        assert_msg_equals(msg_from_json(text_reasoning_message), common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?", COMMON_CHAT_FORMAT_DEEPSEEK_R1));
+        test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        test_template(tmpl, end_tokens, message_assist_thoughts, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
+        assert_msg_equals(msg_from_json(message_assist_thoughts_unparsed), common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?", COMMON_CHAT_FORMAT_DEEPSEEK_R1));
+        assert_msg_equals(msg_from_json(message_assist_thoughts), common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?", COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK));
 
-        assert_msg_equals(msg_from_json(tool_call_reasoning_message),
+        assert_msg_equals(msg_from_json(message_assist_call_thoughts_unparsed),
                           common_chat_parse(
                               "<think>I'm\nthinking</think>\n\n"
                               "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n"
                               "```json\n"
                               "{\"arg1\": 1}\n"
                               "```<|tool▁call▁end|><|tool▁calls▁end|>",
                               COMMON_CHAT_FORMAT_DEEPSEEK_R1));
-        test_template(tmpl, end_tokens, tool_call_message, tools,
+        assert_msg_equals(msg_from_json(message_assist_call_thoughts),
+                          common_chat_parse(
+                              "<think>I'm\nthinking</think>\n\n"
+                              "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n"
+                              "```json\n"
+                              "{\"arg1\": 1}\n"
+                              "```<|tool▁call▁end|><|tool▁calls▁end|>",
+                              COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK));
+        test_template(tmpl, end_tokens, message_assist_call, tools,
                       "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n"
                       "```json\n"
                       "{\"arg1\": 1}\n"