This commit is contained in:
ochafik 2024-10-31 14:28:52 +00:00
parent c4a8050120
commit f5f74751b9
2 changed files with 4 additions and 4 deletions

View file

@@ -89,7 +89,7 @@ class chat_template {
if (_requires_object_arguments || !_supports_system_role || !_supports_tools) {
actual_messages = json::array();
std::string pending_system;
auto flush_sys = [&]() {
if (!pending_system.empty()) {
@@ -154,7 +154,7 @@ class chat_template {
};
if (message.contains("tool_call_id")) {
obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
}
}
message["content"] = obj.dump(2);
message.erase("name");
}

View file

@@ -24,12 +24,12 @@ Here's how to run an agent w/ local tool call:
./llama-server --jinja -fa --verbose \
-hfr bartowski/gemma-2-2b-it-GGUF -hff gemma-2-2b-it-Q4_K_M.gguf
# Native support for Mistral Nemo, Qwen 2.5, Hermes 3, Functionary 3.x
# Note that some of these GGUFs lack the right template, so we override it
# (otherwise they'd use the generic tool call support, which may be less efficient
# and consume more tokens)
./llama-server --jinja -fa -ctk q4_0 -ctv q4_0 --verbose \
-hfr bartowski/Qwen2.5-7B-Instruct-GGUF -hff Qwen2.5-7B-Instruct-Q4_K_M.gguf