llama: fix formatting of llm_load_tensors logs

This commit fixes the formatting of the log message printed by
llm_load_tensors.

The output currently looks like this:
```console
llm_load_tensors: ggml ctx size =    0.11 MiB
llm_load_tensors:        CPU buffer size =  3647.87 MiB
```

And after this change it will look like this:
```console
llm_load_tensors: ggml ctx size   =    0.11 MiB
llm_load_tensors: CPU buffer size = 3647.87 MiB
```

Signed-off-by: Daniel Bevenius <daniel.bevenius@gmail.com>
This commit is contained in:
Daniel Bevenius 2024-03-19 15:29:43 +01:00
parent d0d5de42e5
commit 56360a77ae
Failed to extract signature

View file

@@ -4178,7 +4178,7 @@ static bool llm_load_tensors(
model.ctxs.push_back(ctx);
}
-    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: ggml ctx size   = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
// create tensors for the weights
{
@@ -5092,7 +5092,7 @@ static bool llm_load_tensors(
// print memory requirements
for (ggml_backend_buffer_t buf : model.bufs) {
-        LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
+        LLAMA_LOG_INFO("%s: %s buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
}
// populate tensors_by_name