From 53a941c1e50ac81926eb4af5fc19e8458c5cfb95 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 24 Mar 2023 17:17:56 +0200
Subject: [PATCH] Update llama.cpp

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 461d08214..5d56cc90e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1430,7 +1430,7 @@ struct llama_context * llama_init_from_file(
     ggml_type type_memory = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
-                          params.vocab_only, params.use_mlock)) {
+                          params.vocab_only)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
         delete ctx;
         return nullptr;