diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 85a65efb9..a7a86548b 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -798,7 +798,6 @@ struct server_context {
             slot.oaicompat = false;
             slot.oaicompat_model = "";
         }
-        std::string default_empty = "";
 
         slot.params.stream = json_value(data, "stream", false);
         slot.params.cache_prompt = json_value(data, "cache_prompt", false);
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 6c51b9f12..d82bc6464 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -338,7 +338,7 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
     }
     if (!is_custom) formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
 
-    LOG_WRN("formatted_chat using '%s': '%s'\n", tmpl.c_str(), formatted_chat.c_str());
+    LOG_DBG("formatted_chat using '%s': '%s'\n", tmpl.c_str(), formatted_chat.c_str());
 
     return formatted_chat;
 }
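
Note: json_value(data, key, default) in the first hunk reads an optional field from the incoming request JSON. Below is a minimal, self-contained sketch of the assumed semantics (the server uses nlohmann::json; this helper is an illustration of the pattern, not the exact upstream implementation):

#include <nlohmann/json.hpp>
#include <iostream>
#include <string>

using json = nlohmann::json;

// Return body[key] if present, non-null, and type-compatible; otherwise default_value.
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key).get<T>();
        } catch (const json::type_error &) {
            return default_value; // wrong type in the request: fall back to the default
        }
    }
    return default_value;
}

int main() {
    json data = {{"stream", true}};
    bool stream       = json_value(data, "stream",       false); // true: field present
    bool cache_prompt = json_value(data, "cache_prompt", false); // false: field absent
    std::cout << stream << " " << cache_prompt << "\n";
}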