gguf-py : export chat templates (#4125)
* gguf-py : export chat templates
* llama.cpp : escape new lines in gguf kv info prints
* gguf-py : bump version
* gguf-py : check chat_template type
* gguf-py : initialize chat_template
This commit is contained in:
parent
28a2e6e7d4
commit
e937066420
5 changed files with 34 additions and 15 deletions
|
@@ -1871,6 +1871,7 @@ struct llama_model_loader {
|
|||
if (value.size() > MAX_VALUE_LEN) {
|
||||
value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
|
||||
}
|
||||
replace_all(value, "\n", "\\n");
|
||||
|
||||
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue