From 56360a77ae53c17d6171e111f55a85834ec2d6e4 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius <daniel.bevenius@gmail.com>
Date: Tue, 19 Mar 2024 15:29:43 +0100
Subject: [PATCH] llama: fix formatting of llm_load_tensors logs

This commit fixes the formatting of the log messages printed by
llm_load_tensors. The output currently looks like this:

```console
llm_load_tensors: ggml ctx size =    0.11 MiB
llm_load_tensors:        CPU buffer size =  3647.87 MiB
```

And after this change it will look like this:

```console
llm_load_tensors: ggml ctx size   =    0.11 MiB
llm_load_tensors: CPU buffer size = 3647.87 MiB
```

Signed-off-by: Daniel Bevenius <daniel.bevenius@gmail.com>
---
 llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 1a9fe0c4d..a195a375e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4178,7 +4178,7 @@ static bool llm_load_tensors(
         model.ctxs.push_back(ctx);
     }
 
-    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: ggml ctx size   = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
 
     // create tensors for the weights
     {
@@ -5092,7 +5092,7 @@ static bool llm_load_tensors(
 
     // print memory requirements
     for (ggml_backend_buffer_t buf : model.bufs) {
-        LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
+        LLAMA_LOG_INFO("%s: %s buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
     }
 
     // populate tensors_by_name