llama : fix llama_chat_format_single for mistral (#8657)

* fix `llama_chat_format_single` for mistral

* fix typo

* use printf
This commit is contained in:
Xuan Son Nguyen 2024-07-24 13:48:46 +02:00 committed by GitHub
parent 79167d9e49
commit 96952e7181
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 26 additions and 7 deletions

View file

@@ -2723,7 +2723,7 @@ std::string llama_chat_format_single(const struct llama_model * model,
const llama_chat_msg & new_msg,
bool add_ass) {
std::ostringstream ss;
-    auto fmt_past_msg = llama_chat_apply_template(model, tmpl, past_msg, false);
+    auto fmt_past_msg = past_msg.empty() ? "" : llama_chat_apply_template(model, tmpl, past_msg, false);
std::vector<llama_chat_msg> chat_new(past_msg);
// if the past_msg ends with a newline, we must preserve it in the formatted version
if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {