Revert LLAMA_CHATML_TEMPLATE refactor

This commit is contained in:
ochafik 2025-01-18 01:04:12 +00:00
parent 81c0d437a5
commit d5fa351a24
3 changed files with 8 additions and 15 deletions

View file

@@ -74,15 +74,6 @@
#endif
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
const std::string LLAMA_CHATML_TEMPLATE(R"(
{%- for message in messages -%}
{{- "<|im_start|>" + message.role + "\n" + message.content + "<|im_end|>\n" -}}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{- "<|im_start|>assistant\n" -}}
{%- endif -%}
)");
//
// CURL utils
//
@@ -1846,7 +1837,14 @@ llama_chat_templates llama_chat_templates_from_model(const struct llama_model *
if (!tool_use_template_src.empty()) {
default_template_src = tool_use_template_src;
} else {
default_template_src = LLAMA_CHATML_TEMPLATE; default_template_src = R"(
{%- for message in messages -%}
{{- "<|im_start|>" + message.role + "\n" + message.content + "<|im_end|>\n" -}}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{- "<|im_start|>assistant\n" -}}
{%- endif -%}
)";
}
}
return { return {

View file

@@ -26,8 +26,6 @@
#define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"
extern const std::string LLAMA_CHATML_TEMPLATE;
struct common_adapter_lora_info {
std::string path;
float scale;

View file

@@ -8,7 +8,6 @@
#include "llama.h"
#include "common.h"
#include "chat-template.hpp"
#include "llama-chat.h"
int main(void) {
std::vector<llama_chat_message> conversation {
@@ -365,7 +364,5 @@ int main(void) {
assert(fmt_single("llama3") == "<|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n");
assert(fmt_single("gigachat") == "user<|role_sep|>How are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>");
assert(llm_chat_detect_template(LLAMA_CHATML_TEMPLATE) == LLM_CHAT_TEMPLATE_CHATML);
return 0;
}