minimize diffs

This commit is contained in:
ochafik 2025-02-03 10:58:52 +00:00
parent 5e6f2a21ae
commit a76073cf88
3 changed files with 8 additions and 24 deletions

View file

@@ -377,8 +377,8 @@ static common_chat_params common_chat_params_init_command_r7b(const common_chat_
return data; return data;
} }
static common_chat_msg common_chat_parse_command_r7b(const std::string & input) { static common_chat_msg common_chat_parse_command_r7b(const std::string & input) {
static std::regex response_regex("<\\|START_RESPONSE\\|>([\\s\\S\\n\\r]*?)<\\|END_RESPONSE\\|>"); static std::regex response_regex("<\\|START_RESPONSE\\|>(.*?)<\\|END_RESPONSE\\|>");
static std::regex thought_action_regex("<\\|START_THINKING\\|>([\\s\\S\\n\\r]*)<\\|END_THINKING\\|><\\|START_ACTION\\|>([\\s\\S\\n\\r]*?)<\\|END_ACTION\\|>"); static std::regex thought_action_regex("<\\|START_THINKING\\|>([\\s\\S\\n\\r]*?)<\\|END_THINKING\\|><\\|START_ACTION\\|>([\\s\\S\\n\\r]*?)<\\|END_ACTION\\|>");
std::smatch match; std::smatch match;
common_chat_msg result; common_chat_msg result;
@@ -576,7 +576,7 @@ static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_
} }
static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input) { static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input) {
static std::regex trigger_regex("<tool▁calls▁begin>"); static std::regex trigger_regex("<tool▁calls▁begin>");
static std::regex function_regex(R"(<tool▁call▁begin>function<tool▁sep>([^\n]+)\n```json\n)"); static std::regex function_regex("<tool▁call▁begin>function<tool▁sep>([^\n]+)\n```json\n");
static std::regex close_regex("```<tool▁call▁end>"); static std::regex close_regex("```<tool▁call▁end>");
static std::regex think_regex(R"(<think>([\s\S\n]*)</think>([\s\S\r\n]*))"); static std::regex think_regex(R"(<think>([\s\S\n]*)</think>([\s\S\r\n]*))");
auto msg = parse_json_tool_calls(input, trigger_regex, function_regex, close_regex); auto msg = parse_json_tool_calls(input, trigger_regex, function_regex, close_regex);

View file

@@ -1869,16 +1869,9 @@ std::string common_chat_format_example(const common_chat_template & tmpl, bool u
return common_chat_apply_template(tmpl, msgs, true, use_jinja); return common_chat_apply_template(tmpl, msgs, true, use_jinja);
} }
#define CHATML_TEMPLATE_SRC \
"{%- for message in messages -%}\n" \
" {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
"{%- endfor -%}\n" \
"{%- if add_generation_prompt -%}\n" \
" {{- '<|im_start|>assistant\n' -}}\n" \
"{%- endif -%})"
common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override) common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
{ {
auto vocab = llama_model_get_vocab(model);
std::string default_template_src = chat_template_override == "chatml" ? CHATML_TEMPLATE_SRC : chat_template_override; std::string default_template_src = chat_template_override == "chatml" ? CHATML_TEMPLATE_SRC : chat_template_override;
std::string template_tool_use_src = chat_template_override == "chatml" ? CHATML_TEMPLATE_SRC : ""; std::string template_tool_use_src = chat_template_override == "chatml" ? CHATML_TEMPLATE_SRC : "";
bool has_explicit_template = !chat_template_override.empty(); bool has_explicit_template = !chat_template_override.empty();
@@ -1908,11 +1901,6 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
)"; )";
} }
} }
std::string token_bos;
std::string token_eos;
// TODO: update logic that adds BOS and EOS tokens to the tokenized prompt, in favour of the template.
#if 0
auto vocab = llama_model_get_vocab(model);
const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) { const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
if (token == LLAMA_TOKEN_NULL) { if (token == LLAMA_TOKEN_NULL) {
if (default_template_src.find(jinja_variable_name) != std::string::npos if (default_template_src.find(jinja_variable_name) != std::string::npos
@@ -1924,9 +1912,8 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
return common_token_to_piece(vocab, token, true); return common_token_to_piece(vocab, token, true);
} }
}; };
token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token"); auto token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token"); auto token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
#endif
return { return {
has_explicit_template, has_explicit_template,
std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos), std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),

View file

@@ -252,7 +252,6 @@ def test_completion_without_tool_call_slow(template_name: str, n_predict: int, t
@pytest.mark.slow @pytest.mark.slow
@pytest.mark.parametrize("hf_repo,template_override", [ @pytest.mark.parametrize("hf_repo,template_override", [
("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
("bartowski/c4ai-command-r7b-12-2024-GGUF:Q4_K_M", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")),
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), ("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),
("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), ("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
@@ -263,9 +262,8 @@ def test_completion_without_tool_call_slow(template_name: str, n_predict: int, t
("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), ("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)),
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), ("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
# ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), # ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
# ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
]) ])
def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None): def test_weather_tool_call(hf_repo: str, template_override: Tuple[str, str | None] | None):
global server global server
n_predict = 512 n_predict = 512
server.n_slots = 1 server.n_slots = 1
@@ -313,9 +311,8 @@ def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None)
(None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), (None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
(None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")), (None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")),
(None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), (None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
# (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
]) ])
def test_hello_world(expected_arguments_override: str | None, hf_repo: str, template_override: Tuple[str, str | None] | None): def test_hello_world_tool_call(expected_arguments_override: str | None, hf_repo: str, template_override: Tuple[str, str | None] | None):
global server global server
server.n_slots = 1 server.n_slots = 1
server.jinja = True server.jinja = True