From 6d06695c7e9835b5e162d3794d9040ce84880b3a Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Sat, 2 Sep 2023 00:41:50 +0800
Subject: [PATCH] initializer syntax

---
 llama.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 3cffc7201..74ce6d8ad 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1844,11 +1844,11 @@ static void llm_load_tensors(
             model.mlock_buf.grow_to(model.buf.size);
         }
 
-        struct ggml_init_params params = {
-            /*.mem_size   =*/ model.buf.size,
-            /*.mem_buffer =*/ model.buf.data,
-            /*.no_alloc   =*/ ml.use_mmap,
-        };
+        struct ggml_init_params params;
+        /*.mem_size   =*/ params.mem_size = model.buf.size;
+        /*.mem_buffer =*/ params.mem_buffer = model.buf.data;
+        /*.no_alloc   =*/ params.no_alloc = ml.use_mmap;
+
         model.ctx = ggml_init(params);
 
         if (!model.ctx) {