diff --git a/Makefile b/Makefile
index 749925a57..6bbdcb2e3 100644
--- a/Makefile
+++ b/Makefile
@@ -55,7 +55,6 @@ TEST_TARGETS = \
 	tests/test-grammar-parser \
 	tests/test-json-schema-to-grammar \
 	tests/test-minja \
-	tests/test-tool-call \
 	tests/test-llama-grammar \
 	tests/test-log \
 	tests/test-model-load-cancel \
@@ -64,6 +63,7 @@ TEST_TARGETS = \
 	tests/test-quantize-perf \
 	tests/test-rope \
 	tests/test-sampling \
+	tests/test-tool-call \
 	tests/test-tokenizer-0 \
 	tests/test-tokenizer-1-bpe \
 	tests/test-tokenizer-1-spm
@@ -934,7 +934,6 @@ OBJ_LLAMA = \
 
 OBJ_COMMON = \
 	common/common.o \
-	common/chat-template.o \
 	common/arg.o \
 	common/log.o \
 	common/console.o \
@@ -1171,12 +1170,14 @@ $(LIB_LLAMA_S): \
 common/common.o: \
 	common/common.cpp \
 	common/common.h \
-	common/chat-template.cpp \
-	common/chat-template.h \
+	common/chat-template.hpp \
 	common/console.h \
 	common/sampling.h \
 	common/json.hpp \
 	common/json-schema-to-grammar.h \
+	common/minja.hpp \
+	common/tool-call.cpp \
+	common/tool-call.h \
 	include/llama.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
@@ -1468,9 +1469,11 @@ llama-server: \
 	examples/server/prompt-formats.js.hpp \
 	examples/server/json-schema-to-grammar.mjs.hpp \
 	examples/server/loading.html.hpp \
-	common/chat-template.h \
+	common/chat-template.hpp \
 	common/json.hpp \
+	common/minja.hpp \
 	common/stb_image.h \
+	common/tool-call.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)
diff --git a/tests/test-tool-call.cpp b/tests/test-tool-call.cpp
index 5899b9ada..4450f9aa9 100644
--- a/tests/test-tool-call.cpp
+++ b/tests/test-tool-call.cpp
@@ -243,14 +243,14 @@ static void test_parsing() {
         "{\"name\": \"unknown_function\", \"arguments\": {\"arg1\": 1}}",
         json::array());
 }
-void test_tool_call_style(const std::string & template_file, llama_tool_call_style expected) {
+static void test_tool_call_style(const std::string & template_file, llama_tool_call_style expected) {
     const minja::chat_template tmpl(read_file(template_file), "", "");
     auto tool_call_style = llama_tool_call_style_detect(tmpl);
     std::cout << "# Testing tool call style of: " << template_file << std::endl << std::flush;
     assert_equals(expected, tool_call_style);
 }
 
-void test_tool_call_style_detection() {
+static void test_tool_call_style_detection() {
     test_tool_call_style("tests/chat/templates/meetkai-functionary-medium-v3.1.jinja", FunctionaryV3Llama31);
     test_tool_call_style("tests/chat/templates/meetkai-functionary-medium-v3.2.jinja", FunctionaryV3Llama3);
     test_tool_call_style("tests/chat/templates/meta-llama-Meta-Llama-3.1-8B-Instruct.jinja", Llama31);