From ce0d5fb9de67cbc80601f2ccff687344e7a60f8f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebasti=C3=A1n=20Aedo?=
Date: Sun, 12 Mar 2023 00:11:08 -0300
Subject: [PATCH] MSVC: Remove features that are only available on MSVC C++20.

---
 main.cpp | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/main.cpp b/main.cpp
index dd8323940..0c86a2b48 100644
--- a/main.cpp
+++ b/main.cpp
@@ -209,8 +209,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
     // create the ggml context
     {
         struct ggml_init_params params = {
-            .mem_size = ctx_size,
-            .mem_buffer = NULL,
+            /*.mem_size =*/ ctx_size,
+            /*.mem_buffer =*/ NULL,
         };
 
         model.ctx = ggml_init(params);
@@ -546,12 +546,13 @@ bool llama_eval(
     }
 
     struct ggml_init_params params = {
-        .mem_size = buf_size,
-        .mem_buffer = buf,
+        /*.mem_size =*/ buf_size,
+        /*.mem_buffer =*/ buf,
     };
 
     struct ggml_context * ctx0 = ggml_init(params);
-    struct ggml_cgraph gf = { .n_threads = n_threads };
+    struct ggml_cgraph gf;
+    gf.n_threads = n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
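
For context, a minimal sketch of the pattern this patch applies, using a hypothetical `options` struct rather than the real ggml types: designated initializers in aggregate initialization only became standard C++ in C++20, so MSVC in its default pre-C++20 modes rejects them. The patch replaces them with positional initialization (keeping the field names as comments) or, where that is awkward, with declaration followed by member assignment.

    #include <cstddef>

    // Hypothetical aggregate standing in for ggml_init_params; the real
    // struct is defined in ggml, two fields are enough to show the pattern.
    struct options {
        size_t mem_size;
        void * mem_buffer;
    };

    int main() {
        // What the patch removes: designated initializers, standard C++
        // only since C++20, so pre-C++20 MSVC rejects this line.
        // options a = { .mem_size = 1024, .mem_buffer = nullptr };

        // Portable form used in the patch: positional aggregate
        // initialization, with field names kept as comments for readability.
        options b = { /*.mem_size =*/ 1024, /*.mem_buffer =*/ nullptr };

        // Fallback used for ggml_cgraph in the patch: declare the object,
        // then assign the members individually.
        options c;
        c.mem_size = 1024;
        c.mem_buffer = nullptr;

        return (int)(b.mem_size - c.mem_size); // 0
    }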