From 05420dbba8ba0c89a1a3b0ab074600ddb0c6fd93 Mon Sep 17 00:00:00 2001
From: Kurt Manucredo
Date: Tue, 8 Oct 2024 21:02:50 +0000
Subject: [PATCH] fix logging in examples/main/main.cpp

LOG() outputs to stdout. This interferes with the generated output in at
least 3 places: when we log "[end of text]", when we log "saving final
output to session file", and when we log "\n\n" on line 927. LOG_INF()
outputs to stderr, so switching these calls to LOG_INF() fixes that.
---
 examples/main/main.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 6bbb1e13e..c5343c91e 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -907,7 +907,7 @@ int main(int argc, char ** argv) {
 
         // end of generation
         if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.interactive)) {
-            LOG(" [end of text]\n");
+            LOG_INF(" [end of text]\n");
             break;
         }
 
@@ -920,11 +920,11 @@ int main(int argc, char ** argv) {
     }
 
     if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
-        LOG("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
+        LOG_INF("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
         llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
     }
 
-    LOG("\n\n");
+    LOG_INF("\n\n");
     gpt_perf_print(ctx, smpl);
 
     write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);