diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index b0f0486b7..37b5b78d3 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -52,7 +52,7 @@ struct llama_server_context
         n_consumed = 0;
     }

-    bool loadModel(gpt_params params_)
+    bool loadModel(const gpt_params &params_)
     {
         params = params_;
         ctx = llama_init_from_gpt_params(params);