diff --git a/common/arg.cpp b/common/arg.cpp
index 7b99baa4f..4b34aee0e 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -1976,12 +1976,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
     add_opt(common_arg(
-        {"--think"},
-        "*experimental* thinking mode (default: disabled)\n"
-        "returns reasoning_content in messages, forcing model to think unless it supports native tags (DeepSeek R1, Command R7B)\n"
+        {"--reasoning-format"}, "FORMAT",
+        "reasoning format (default: deepseek; allowed values: deepseek, none)\n"
+        "controls whether thought tags are extracted from the response, and in which format they're returned. 'none' leaves thoughts unparsed in `message.content`, 'deepseek' puts them in `message.reasoning_content` (for DeepSeek R1 & Command R7B only).\n"
         "only supported for non-streamed responses",
-        [](common_params & params) {
-            params.think = true;
+        [](common_params & params, const std::string & value) {
+            /**/ if (value == "deepseek") { params.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK; }
+            else if (value == "none") { params.reasoning_format = COMMON_REASONING_FORMAT_NONE; }
+            else { throw std::invalid_argument("invalid value"); }
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
     add_opt(common_arg(
diff --git a/common/chat.cpp b/common/chat.cpp
index 6d32a6299..691080c63 100644
--- a/common/chat.cpp
+++ b/common/chat.cpp
@@ -12,13 +12,13 @@ std::string common_chat_format_name(common_chat_format format) {
         case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x";
         case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools";
         case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1";
-        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK: return "DeepSeek R1 (extract reasoning_content)";
+        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING: return "DeepSeek R1 (extract reasoning)";
         case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2";
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2";
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1";
         case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro";
         case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
-        case COMMON_CHAT_FORMAT_COMMAND_R7B_THINK: return "Command R7B (extract reasoning_content)";
+        case COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING: return "Command R7B (extract reasoning)";
         default:
             throw std::runtime_error("Unknown chat format");
     }
@@ -196,148 +196,83 @@ static std::string apply(
 static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) {
     common_chat_params data;
-    json schema;
-    auto make_object = []() {
-        return json {
+    auto tool_call_schemas = json::array();
+    foreach_function(inputs.tools, [&](const json & tool) {
+        const auto & function = tool["function"];
+        auto tool_schema = json {
             {"type", "object"},
-            {"properties", json::object()},
-            {"required", json::array()},
-        };
-    };
-    auto add_property = [](json & obj, const std::string & name, const json & schema) {
-        obj["properties"][name] = schema;
-        obj["required"].push_back(name);
-    };
-    auto add_thoughts = [&](json & obj) {
-        add_property(obj, "thoughts", {
-            {"type", "string"},
-            {"description", "The assistant's thoughts"},
-        });
-    };
-    auto make_response = [&]() {
-        json response_wrapper = make_object();
-        if (inputs.think) {
add_thoughts(response_wrapper); - } - add_property(response_wrapper, "response", inputs.json_schema.is_null() ? json {{"type", "string"}} : inputs.json_schema); - return response_wrapper; - }; - std::ostringstream ss; - if (inputs.tools.is_array() && !inputs.tools.empty()) { - auto tool_call_schemas = json::array(); - foreach_function(inputs.tools, [&](const json & tool) { - const auto & function = tool["function"]; - auto tool_schema = json { - {"type", "object"}, - {"properties", { - {"name", { - {"type", "string"}, - {"const", function["name"]}, - }}, - {"arguments", function["parameters"]}, - }}, - {"required", json::array({"name", "arguments"})}, - }; - if (function.contains("description")) { - tool_schema["description"] = function["description"]; - } - if (inputs.parallel_tool_calls) { - tool_schema["properties"]["id"] = { + {"properties", { + {"name", { {"type", "string"}, - {"minLength", 4}, - }; - tool_schema["required"].push_back("id"); - } - tool_call_schemas.emplace_back(tool_schema); - }); - const json tool_call = tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {{"anyOf", tool_call_schemas}}; - json tool_call_wrapper = make_object(); - if (inputs.think) { - add_thoughts(tool_call_wrapper); + {"const", function["name"]}, + }}, + {"arguments", function["parameters"]}, + }}, + {"required", json::array({"name", "arguments"})}, + }; + if (function.contains("description")) { + tool_schema["description"] = function["description"]; } if (inputs.parallel_tool_calls) { - add_property(tool_call_wrapper, "tool_calls", { - {"type", "array"}, - {"items", tool_call}, - {"minItems", 1}, - }); - } else { - add_property(tool_call_wrapper, "tool_call", tool_call); - } - if (inputs.think) { - /* - This kind of turns any model into a thinking model by requiring the output to be (in TypeScript notation): - - // ResponseSchema is json_schema if set, otherwise string - - type SchemaToolRequired = {thoughts: string} & ToolCallSchema - type Schema = ({thoughts: string} & ToolCallSchema) | {thoughts: string, response: ResponseSchema} - - type ToolCallSchema = SingleToolCallSchema | ParallelToolCallSchema - type SingleToolCallSchema = {tool_call: ToolCall} - type ParallelToolCallSchema = {tool_calls: ToolCall[]} // If parallel_tool_calls is true - - type ToolCall = {name: string, arguments: ParametersSchema, id?: string} // id only if parallel_tool_calls is true - type ParametersSchema = tool1_params | tool2_params | ... - */ - - // TODO(ochafik): make the prompts configurable (jinja template?). - ss << "You are a tool-calling assistant that thinks before it acts.\n" - "You respond in JSON format, as follows:\n" - "- First, candidly explain your thoughts about the user's request " - "and elaborate a step-by-step reasoning about your plan to satisfy it " - "(including possible tool usage / function call), pondering pros and cons, " - "widening your reasoning than narrowing down on a plan. 
" - "Express all of these thoughts in the `thoughts` field.\n"; - } - if (inputs.tool_choice == "required") { - schema = { - {"anyOf", json::array({tool_call_wrapper, make_response()})}, + tool_schema["properties"]["id"] = { + {"type", "string"}, + {"minLength", 4}, }; - if (inputs.think) { - if (inputs.parallel_tool_calls && inputs.tools.size() > 1) { - ss << "- Then if you need to perform operations or get data before responding to the user, " - "call tools by providing an array of objects with name & arguments fields in the `tool_calls` field, " - "or respond directly to the user's request in the `response` field."; - // system = "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request"; - } else { - ss << "- Then if you need to perform an operation or get data before responding to the user, " - "call a tool by providing its name & arguments in the `tool_call` field, " - "or respond directly to the user's request in the `response` field."; - } - } - } else { - schema = tool_call_wrapper; - if (inputs.think) { - if (inputs.parallel_tool_calls && inputs.tools.size() > 1) { - ss << "- Then call tools by providing their names and arguments in the `tool_calls` array."; - } else { - ss << "- Then call a tool by providing its name and arguments in the `tool_call` object."; - } - } + tool_schema["required"].push_back("id"); } - ss << "- Finally, once you get results from previously requested tool calls (if you requested anys), " - "you iterate on your reasoning, update it if needed, and work towards a final response to the user's request " - "in as many iterations as needed."; - } else if (inputs.think) { - schema = make_response(); - ss << "You are an assistant that thinks before it acts.\n" - "You respond in JSON format, as follows:\n" - "- First, candidly explain your thoughts about the user's request " - "and elaborate a step-by-step reasoning about your plan to satisfy it, " - "pondering pros and cons, " - "widening your reasoning than narrowing down on a plan. " - "Express all of these thoughts in the `thoughts` field.\n" - "- Then, respond directly to the user's request in the `response` field."; - } - auto system = ss.str(); + tool_call_schemas.emplace_back(tool_schema); + }); + const auto tool_call = + inputs.parallel_tool_calls + ? json { + {"type", "object"}, + {"properties", { + {"tool_calls", { + {"type", "array"}, + {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json { + {"anyOf", tool_call_schemas}, + }}, + {"minItems", 1}, + }}, + }}, + {"required", json::array({"tool_calls"})}, + } + : json { + {"type", "object"}, + {"properties", { + {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json { + {"anyOf", tool_call_schemas}, + }}, + }}, + {"required", json::array({"tool_call"})}, + }; + const auto schema = + inputs.tool_choice != "required" + ? json { + {"anyOf", json::array({ + tool_call, + { + {"type", "object"}, + {"properties", { + {"response", inputs.json_schema.is_null() + ? json {{"type", "string"}} + : inputs.json_schema + }, + }}, + {"required", json::array({"response"})}, + }, + })} + } + : tool_call; data.grammar_lazy = false; data.grammar = build_grammar([&](const common_grammar_builder & builder) { builder.add_schema("root", schema); }, grammar_options); - auto tweaked_messages = system.empty() ? 
inputs.messages : common_chat_template::add_system(inputs.messages, system);
+    auto tweaked_messages = common_chat_template::add_system(
+        inputs.messages,
+        "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request");
     data.prompt = apply(tmpl, tweaked_messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
     data.format = COMMON_CHAT_FORMAT_GENERIC;
@@ -471,14 +406,11 @@ static common_chat_params common_chat_params_init_command_r7b(const common_chat_
             adjusted_messages.push_back(msg);
         }
     }
-    // } else {
-    //     adjusted_messages = inputs.messages;
-    // }
     data.prompt = apply(tmpl, adjusted_messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt, {});
-    data.format = inputs.think ? COMMON_CHAT_FORMAT_COMMAND_R7B_THINK : COMMON_CHAT_FORMAT_COMMAND_R7B;
+    data.format = inputs.extract_reasoning ? COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING : COMMON_CHAT_FORMAT_COMMAND_R7B;
     return data;
 }
-static common_chat_msg common_chat_parse_command_r7b(const std::string & input, bool think) {
+static common_chat_msg common_chat_parse_command_r7b(const std::string & input, bool extract_reasoning) {
     static std::regex thought_regex("(<\\|START_THINKING\\|>([\\s\\S\\n\\r]*?)<\\|END_THINKING\\|>)([\\s\\S\\n\\r]*)");
     static std::regex action_regex("<\\|START_ACTION\\|>([\\s\\S\\n\\r]*?)<\\|END_ACTION\\|>");
     static std::regex response_regex("(?:<\\|START_RESPONSE\\|>)?([\\s\\S\\n\\r]*?)<\\|END_RESPONSE\\|>");
@@ -491,7 +423,7 @@ static common_chat_msg common_chat_parse_command_r7b(const std::string & input,
     std::string rest = input;
     if (std::regex_match(rest, match, thought_regex)) {
-        if (think) {
+        if (extract_reasoning) {
             result.reasoning_content = match[2].str();
         } else if (!match[2].str().empty()) {
             // Let the unparsed thinking tags through in content only if their insides aren't empty.
@@ -705,10 +637,10 @@ static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_
             "$1<|tool▁calls▁end|><|end▁of▁sentence|>$2");
     }
     data.prompt = prompt;
-    data.format = inputs.think ? COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK : COMMON_CHAT_FORMAT_DEEPSEEK_R1;
+    data.format = inputs.extract_reasoning ? COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING : COMMON_CHAT_FORMAT_DEEPSEEK_R1;
     return data;
 }
-static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input, bool think) {
+static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input, bool extract_reasoning) {
     static std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n");
     static std::regex close_regex("```[\\s\\r\\n]*<|tool▁call▁end|>");
     static std::regex reasoning_content_regex("(<think>([\\s\\S\\r\\n]*?)</think>)?([\\s\\S\\r\\n]*)");
@@ -718,7 +650,7 @@ static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input,
     std::smatch match;
     if (std::regex_match(input, match, reasoning_content_regex)) {
         std::string rest;
-        if (think) {
+        if (extract_reasoning) {
             msg.reasoning_content = string_strip(match[2].str());
         } else {
             msg.content = match[1].str();
@@ -1068,9 +1000,9 @@ common_chat_params common_chat_params_init(const common_chat_template & tmpl, co
         return common_chat_params_init_command_r7b(tmpl, inputs);
     }
-    // Use generic handler when forcing thoughts or JSON schema for final output
-    // TODO: support thinking mode and/or JSON schema in handlers below this.
-    if (inputs.think || (!inputs.tools.is_null() && inputs.json_schema.is_object())) {
+    // Use generic handler when mixing tools + JSON schema.
+    // TODO: support that mix in handlers below.
+    if ((inputs.tools.is_array() && inputs.json_schema.is_object())) {
         return common_chat_params_init_generic(tmpl, inputs);
     }
@@ -1136,9 +1068,9 @@ common_chat_msg common_chat_parse(const std::string & input, common_chat_format
         case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS:
             return common_chat_parse_llama_3_1(input, /* with_builtin_tools= */ true);
         case COMMON_CHAT_FORMAT_DEEPSEEK_R1:
-            return common_chat_parse_deepseek_r1(input, /* think= */ false);
-        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK:
-            return common_chat_parse_deepseek_r1(input, /* think= */ true);
+            return common_chat_parse_deepseek_r1(input, /* extract_reasoning= */ false);
+        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING:
+            return common_chat_parse_deepseek_r1(input, /* extract_reasoning= */ true);
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2:
             return common_chat_parse_functionary_v3_2(input);
         case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1:
@@ -1148,9 +1080,9 @@
         case COMMON_CHAT_FORMAT_HERMES_2_PRO:
             return common_chat_parse_hermes_2_pro(input);
         case COMMON_CHAT_FORMAT_FIREFUNCTION_V2:
             return common_chat_parse_firefunction_v2(input);
         case COMMON_CHAT_FORMAT_COMMAND_R7B:
-            return common_chat_parse_command_r7b(input, /* think= */ false);
-        case COMMON_CHAT_FORMAT_COMMAND_R7B_THINK:
-            return common_chat_parse_command_r7b(input, /* think= */ true);
+            return common_chat_parse_command_r7b(input, /* extract_reasoning= */ false);
+        case COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING:
+            return common_chat_parse_command_r7b(input, /* extract_reasoning= */ true);
         default:
             throw std::runtime_error("Unsupported format: " + common_chat_format_name(format));
     }
diff --git a/common/chat.hpp b/common/chat.hpp
index d3272f70f..ba1632f66 100644
--- a/common/chat.hpp
+++ b/common/chat.hpp
@@ -19,7 +19,7 @@ struct common_chat_inputs {
     bool stream;
     std::string grammar;
     bool add_generation_prompt = true;
-    bool think = false;
+    bool extract_reasoning = true;
 };
 
 enum common_chat_format {
@@ -29,13 +29,13 @@
     COMMON_CHAT_FORMAT_LLAMA_3_X,
     COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
     COMMON_CHAT_FORMAT_DEEPSEEK_R1,
-    COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK,
+    COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING,
     COMMON_CHAT_FORMAT_FIREFUNCTION_V2,
     COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2,
     COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
     COMMON_CHAT_FORMAT_HERMES_2_PRO,
     COMMON_CHAT_FORMAT_COMMAND_R7B,
-    COMMON_CHAT_FORMAT_COMMAND_R7B_THINK,
+    COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING,
     COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
 };
diff --git a/common/common.h b/common/common.h
index 76de599f6..3c5b4910b 100644
--- a/common/common.h
+++ b/common/common.h
@@ -202,6 +202,11 @@ struct common_params_vocoder {
     bool use_guide_tokens = false; // enable guide tokens to improve TTS accuracy // NOLINT
 };
 
+enum common_reasoning_format {
+    COMMON_REASONING_FORMAT_NONE,
+    COMMON_REASONING_FORMAT_DEEPSEEK, // Extract thinking tag contents and return as `message.reasoning_content`
+};
+
 struct common_params {
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 4096; // context size
@@ -346,7 +351,7 @@ struct common_params {
     std::string chat_template = ""; // NOLINT
     bool use_jinja = false; // NOLINT
     bool enable_chat_template = true;
-    bool think = false; // return reasoning_content, force model to think unless it supports native tags.
+    common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
 
     std::vector<std::string> api_keys;
diff --git a/examples/server/README.md b/examples/server/README.md
index 30ece095d..b0312588c 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -127,7 +127,7 @@ The project is under active development, and we are [looking for feedback and co
 | `--grammar-file FNAME` | file to read grammar from |
 | `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object<br/>
For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead | | `--jinja` | Enable experimental Jinja templating engine (required for tool use) | -| `--think` | Enable experimental thinking mode (extracts DeepSeek R1 & Command R7B's native thinking tags and forces any other model to think before responding, resulting thoughts are in the `reasoning_content` output field) (requires `--jinja`) | +| `--reasoning-format FORMAT` | Controls extraction of model thinking traces and the format / field in which they are returned (default: `deepseek`; allowed values: `deepseek`, `none`; requires `--jinja`). `none` will leave thinking traces inline in `message.content` in a model-specific format, while `deepseek` will return them separately under `message.reasoning_content` | **Example-specific params** @@ -1224,10 +1224,10 @@ curl http://localhost:8080/v1/chat/completions \ # Native support for DeepSeek R1 works best w/ our own template (official template buggy) - llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q6_K_L --think \ + llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q6_K_L \ --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja - llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M --think \ + llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M \ --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja # Native support requires the right template for these GGUFs: @@ -1241,7 +1241,7 @@ curl http://localhost:8080/v1/chat/completions \ llama-server --jinja -fa -hf bartowski/firefunction-v2-GGUF -hff firefunction-v2-IQ1_M.gguf \ --chat-template-file <( python scripts/get_chat_template.py fireworks-ai/llama-3-firefunction-v2 tool_use ) - llama-server --jinja -fa -hf bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L --think \ + llama-server --jinja -fa -hf bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L \ --chat-template-file <( python scripts/get_chat_template.py CohereForAI/c4ai-command-r7b-12-2024 tool_use ) # Generic format support diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 05b73ef73..7123d1945 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -4055,7 +4055,7 @@ int main(int argc, char ** argv) { } auto body = json::parse(req.body); - json data = oaicompat_completion_params_parse(body, params.use_jinja, params.think, ctx_server.chat_templates); + json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates); return handle_completions_impl( SERVER_TASK_TYPE_COMPLETION, @@ -4068,7 +4068,7 @@ int main(int argc, char ** argv) { // same with handle_chat_completions, but without inference part const auto handle_apply_template = [&ctx_server, ¶ms, &res_ok](const httplib::Request & req, httplib::Response & res) { auto body = json::parse(req.body); - json data = oaicompat_completion_params_parse(body, params.use_jinja, params.think, ctx_server.chat_templates); + json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates); res_ok(res, {{ "prompt", std::move(data.at("prompt")) }}); }; diff --git a/examples/server/tests/unit/test_tool_call.py b/examples/server/tests/unit/test_tool_call.py index 7fa6ffe1d..08d824acc 100644 --- a/examples/server/tests/unit/test_tool_call.py +++ b/examples/server/tests/unit/test_tool_call.py @@ -274,44 +274,44 @@ def 
test_completion_without_tool_call_slow(template_name: str, n_predict: int, t @pytest.mark.slow -@pytest.mark.parametrize("think,hf_repo,template_override", [ - (True, "bartowski/c4ai-command-r7b-12-2024-GGUF:Q4_K_M", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")), +@pytest.mark.parametrize("reasoning_format,hf_repo,template_override", [ + ('deepseek', "bartowski/c4ai-command-r7b-12-2024-GGUF:Q4_K_M", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")), - (False, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), - (False, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), - (False, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), - (False, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), - (False, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), - (False, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), - (False, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), - (False, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), - (False, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), - (False, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + (None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), - (False, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), - (False, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + (None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), - (False, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), - (False, "bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"), + (None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), + (None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"), - (False, "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), - (False, "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (None, "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"), - (True, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + ('deepseek', "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), # Note: gemma-2-2b-it knows itself as "model", not "assistant", so we don't test the ill-suited chatml on it. 
- (False, "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (None, "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), # ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), ]) -def test_weather(think: bool, hf_repo: str, template_override: Tuple[str, str | None] | None): +def test_weather(reasoning_format: Literal['deepseek', 'none'] | None, hf_repo: str, template_override: Tuple[str, str | None] | None): global server n_predict = 512 - server.think = think + server.reasoning_format = reasoning_format server.n_slots = 1 server.jinja = True server.n_ctx = 8192 @@ -440,19 +440,19 @@ def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str, @pytest.mark.slow -@pytest.mark.parametrize("n_predict,think,expect_content,expect_reasoning_content,hf_repo,template_override", [ - (1024, True, "^The sum of 102 and 7 is 109.*", "^The user's request is straightforward.*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), - (128, False, "^The sum of 102 and 7 is 109.*", None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), +@pytest.mark.parametrize("n_predict,reasoning_format,expect_content,expect_reasoning_content,hf_repo,template_override", [ + # (1024, 'deepseek', "^The sum of 102 and 7 is 109.*", "^The user's request is straightforward.*", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + # (128, None, "^The sum of 102 and 7 is 109.*", None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), - (1024, True, "To find the sum of.*", "I need to calculate the sum of 102 and 7.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), - (1024, False, "\nI need[\\s\\S\\r\\n]*?\nTo find.*", None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + (1024, 'deepseek', "To find the sum of.*", "I need to calculate the sum of 102 and 7.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + (1024, 'none', "\nI need[\\s\\S\\r\\n]*?\nTo find.*", None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), - (1024, True, "To find the sum of.*", "First, I need to add the tens place.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)), + (1024, 'deepseek', "To find the sum of.*", "First, I need to add the tens place.*", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)), ]) -def test_thoughts(n_predict: int, think: bool, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None): +def test_thoughts(n_predict: int, reasoning_format: Literal['deepseek', 'none'] | None, expect_content: str | None, expect_reasoning_content: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None): global server server.n_slots = 1 - server.think = think + server.reasoning_format = reasoning_format server.jinja = True server.n_ctx = 8192 * 2 server.n_predict = n_predict @@ -489,45 +489,44 @@ def test_thoughts(n_predict: int, think: bool, expect_content: str | None, expec @pytest.mark.slow -@pytest.mark.parametrize("think,expected_arguments_override,hf_repo,template_override", [ - (True, None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), - (True, None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", "chatml"), +@pytest.mark.parametrize("expected_arguments_override,hf_repo,template_override", [ + (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", "chatml"), - (False, None, 
"bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), - (False, None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", "chatml"), - (False, None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai-functionary-medium-v3.2", None)), - (False, None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"), + (None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai-functionary-medium-v3.2", None)), + (None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", "chatml"), - (False, None, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), - (False, '{"code":"print("}', "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + ('{"code":"print("}', "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", "chatml"), - (False, '{"code":"print("}', "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), - (False, None, "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"), + ('{"code":"print("}', "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), + (None, "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", "chatml"), - (False, '{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), - (False, '{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"), + ('{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), + ('{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", "chatml"), - (False, None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), - (False, None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", "chatml"), - (False, None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), - (False, None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", "chatml"), - (False, None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")), - (False, None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")), + (None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", "chatml"), - (False, None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), - (False, None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), + (None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + (None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", "chatml"), # Note: gemma-2-2b-it knows itself as "model", not "assistant", so we don't test the ill-suited chatml on it. 
- (False, None, "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (None, "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), ]) -def test_hello_world(think: bool, expected_arguments_override: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None): +def test_hello_world(reasoning_format: Literal['deepseek', 'none'] | None, expected_arguments_override: str | None, hf_repo: str, template_override: str | Tuple[str, str | None] | None): global server server.n_slots = 1 server.jinja = True - server.think = think server.n_ctx = 8192 server.n_predict = 512 # High because of DeepSeek R1 server.model_hf_repo = hf_repo diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 2bddc55b6..191603149 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -78,7 +78,7 @@ class ServerProcess: draft_max: int | None = None no_webui: bool | None = None jinja: bool | None = None - think: bool | None = None + reasoning_format: Literal['deepseek', 'none'] | None = None chat_template: str | None = None chat_template_file: str | None = None @@ -173,8 +173,8 @@ class ServerProcess: server_args.append("--no-webui") if self.jinja: server_args.append("--jinja") - if self.think: - server_args.append("--think") + if self.reasoning_format: + server_args.append("--reasoning-format") if self.chat_template: server_args.extend(["--chat-template", self.chat_template]) if self.chat_template_file: diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index f006bbff8..86de0e6d7 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -578,7 +578,7 @@ static json oaicompat_completion_params_parse(const json & body) { static json oaicompat_completion_params_parse( const json & body, /* openai api json semantics */ bool use_jinja, - bool think, + common_reasoning_format reasoning_format, const common_chat_templates & chat_templates) { json llama_params; @@ -634,7 +634,7 @@ static json oaicompat_completion_params_parse( throw std::runtime_error("Cannot use custom grammar constraints with tools."); } common_chat_inputs inputs; - inputs.think = think; + inputs.extract_reasoning = reasoning_format != COMMON_REASONING_FORMAT_NONE; inputs.messages = body.at("messages"); inputs.tools = tools; inputs.tool_choice = tool_choice; diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index 865e7fbfe..b9d380631 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -188,7 +188,7 @@ static delta_data init_delta(const common_chat_template & tmpl, const std::vecto inputs.messages.push_back(user_message); inputs.tools = tools; inputs.tool_choice = tool_choice; - inputs.think = think; + inputs.extract_reasoning = think; auto params_prefix = common_chat_params_init(tmpl, inputs); inputs.messages.push_back(delta_message); @@ -427,24 +427,24 @@ static void test_template_output_parsers() { }; common_chat_inputs inputs_no_tools; - inputs_no_tools.messages = json::array({message_user}); + inputs_no_tools.messages = json::array({message_user}); common_chat_inputs inputs_no_tools_think; - inputs_no_tools_think.messages = json::array({message_user}); - inputs_no_tools_think.think = true; + inputs_no_tools_think.messages = json::array({message_user}); + inputs_no_tools_think.extract_reasoning = true; common_chat_inputs inputs_tools; - inputs_tools.messages = json::array({message_user}); - inputs_tools.tools = json::array({special_function_tool}); + inputs_tools.messages = json::array({message_user}); + inputs_tools.tools = 
json::array({special_function_tool}); common_chat_inputs inputs_tools_think; - inputs_tools_think.messages = json::array({message_user}); - inputs_tools_think.tools = json::array({special_function_tool}); - inputs_tools_think.think = true; + inputs_tools_think.messages = json::array({message_user}); + inputs_tools_think.tools = json::array({special_function_tool}); + inputs_tools_think.extract_reasoning = true; common_chat_inputs inputs_tools_builtin; - inputs_tools_builtin.messages = json::array({message_user}); - inputs_tools_builtin.tools = json::array({python_tool}); + inputs_tools_builtin.messages = json::array({message_user}); + inputs_tools_builtin.tools = json::array({python_tool}); { // Not supported yet @@ -455,9 +455,9 @@ static void test_template_output_parsers() { const common_chat_template tmpl(read_file("models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja"), "", ""); std::vector end_tokens{ "<|END_OF_TURN_TOKEN|>" }; - assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B, common_chat_params_init(tmpl, inputs_no_tools).format); - assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B, common_chat_params_init(tmpl, inputs_tools).format); - assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B_THINK, common_chat_params_init(tmpl, inputs_tools_think).format); + assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B, common_chat_params_init(tmpl, inputs_no_tools).format); + assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B, common_chat_params_init(tmpl, inputs_tools).format); + assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING, common_chat_params_init(tmpl, inputs_tools_think).format); assert_msg_equals(msg_from_json(message_assist), common_chat_parse( @@ -486,7 +486,7 @@ static void test_template_output_parsers() { common_chat_parse( "<|START_THINKING|>I'm thinking<|END_THINKING|>" "<|START_RESPONSE|>Hello, world!\nWhat's up?<|END_RESPONSE|>", - COMMON_CHAT_FORMAT_COMMAND_R7B_THINK)); + COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING)); test_template(tmpl, end_tokens, message_assist_call_idx, tools, "<|START_THINKING|><|END_THINKING|>" @@ -661,8 +661,8 @@ static void test_template_output_parsers() { "", ""); std::vector end_tokens{ "<|end▁of▁sentence|>" }; - assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format); - assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK, common_chat_params_init(tmpl, inputs_tools_think).format); + assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format); + assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING, common_chat_params_init(tmpl, inputs_tools_think).format); test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, message_assist_thoughts, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false); @@ -671,7 +671,7 @@ static void test_template_output_parsers() { COMMON_CHAT_FORMAT_DEEPSEEK_R1)); assert_msg_equals(msg_from_json(message_assist_thoughts), common_chat_parse("I'm thinkingHello, world!\nWhat's up?", - COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK)); + COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING)); // test_template(tmpl, end_tokens, message_assist_call, tools, // "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n" // "```json\n" @@ -687,8 +687,8 @@ static void test_template_output_parsers() { "", ""); std::vector end_tokens{ "<|end▁of▁sentence|>" }; - assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, 
common_chat_params_init(tmpl, inputs_tools).format);
-        assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK, common_chat_params_init(tmpl, inputs_tools_think).format);
+        assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format);
+        assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING, common_chat_params_init(tmpl, inputs_tools_think).format);
 
         test_template(tmpl, end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
         test_template(tmpl, end_tokens, message_assist_thoughts, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false);
@@ -697,7 +697,7 @@
                           COMMON_CHAT_FORMAT_DEEPSEEK_R1));
         assert_msg_equals(msg_from_json(message_assist_thoughts),
                           common_chat_parse("<think>I'm thinking</think>Hello, world!\nWhat's up?",
-                                            COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK));
+                                            COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING));
         assert_msg_equals(msg_from_json(message_assist_call_thoughts_unparsed),
                           common_chat_parse(
@@ -714,7 +714,7 @@
                               "```json\n"
                               "{\"arg1\": 1}\n"
                               "```<|tool▁call▁end|><|tool▁calls▁end|>",
-                              COMMON_CHAT_FORMAT_DEEPSEEK_R1_THINK));
+                              COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING));
         test_template(tmpl, end_tokens, message_assist_call, tools,
             "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n"
            "```json\n"