llama: fix formatting of llm_load_tensors logs
This commit fixes the formatting of the log messages printed by llm_load_tensors. The output currently looks like this (the backend name is right-padded to 10 characters by `%10s`, so the columns do not line up with the ctx-size line):

```console
llm_load_tensors: ggml ctx size =    0.11 MiB
llm_load_tensors:        CPU buffer size =  3647.87 MiB
```

And after this change it will look like this:

```console
llm_load_tensors: ggml ctx size =    0.11 MiB
llm_load_tensors: CPU buffer size = 3647.87 MiB
```

(NOTE: the exact column alignment above is reconstructed from the `%10s`/`%8.2f` → `%s`/`%7.2f` format-string change in the diff below; the original example's whitespace was lost when this page was captured.)

Signed-off-by: Daniel Bevenius <daniel.bevenius@gmail.com>
This commit is contained in:
parent
d0d5de42e5
commit
56360a77ae
1 changed file with 2 additions and 2 deletions
|
@@ -4178,7 +4178,7 @@ static bool llm_load_tensors(
|
|||
model.ctxs.push_back(ctx);
|
||||
}
|
||||
|
||||
LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
|
||||
|
||||
// create tensors for the weights
|
||||
{
|
||||
|
@@ -5092,7 +5092,7 @@ static bool llm_load_tensors(
|
|||
|
||||
// print memory requirements
|
||||
for (ggml_backend_buffer_t buf : model.bufs) {
|
||||
LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
|
||||
LLAMA_LOG_INFO("%s: %s buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
|
||||
}
|
||||
|
||||
// populate tensors_by_name
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue