diff --git a/llama.cpp b/llama.cpp
index 3e0aec8f6..092eae8f6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3237,7 +3237,7 @@ struct llama_model_loader {
     std::vector<std::pair<size_t, size_t>> mmaps_used;

     // Returns false if cancelled by progress_callback
-    bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, std::map<uint32_t, ggml_backend_buffer_t> & bufs_mmap, std::vector<std::unique_ptr<llama_mlock>> * lmlocks) {
+    bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, std::unordered_map<uint32_t, ggml_backend_buffer_t> & bufs_mmap, std::vector<std::unique_ptr<llama_mlock>> * lmlocks) {
         GGML_ASSERT(size_data != 0 && "call init_mappings() first");

         std::vector<no_init<uint8_t>> read_buf;