diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 0487c978b..e30db66e9 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1275,6 +1275,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
     const bool use_mmap_buffer = true;
 
+    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, use_mmap_buffer ? "true" : "false");
+
     // build a list of buffer types for the CPU and GPU devices
     pimpl->cpu_buft_list = make_cpu_buft_list(devices);
     for (auto * dev : devices) {