From 44f5e2ad77b070bc4ef0d73298197c932caf9e46 Mon Sep 17 00:00:00 2001
From: slaren
Date: Tue, 22 Aug 2023 21:19:29 +0200
Subject: [PATCH] better n_threads

---
 examples/llama2-chat/llama2-chat.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/llama2-chat/llama2-chat.cpp b/examples/llama2-chat/llama2-chat.cpp
index 4b497fd2b..93c991b3a 100644
--- a/examples/llama2-chat/llama2-chat.cpp
+++ b/examples/llama2-chat/llama2-chat.cpp
@@ -19,7 +19,7 @@ struct chat {
     llama_context * ctx;
     std::string system;
 
-    int n_threads = 8;
+    int n_threads = get_num_physical_cores();
 
     chat(const std::string & model_file, const std::string & system) : system(system) {
         lparams = llama_context_default_params();
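
Note: get_num_physical_cores() is expected to return the number of physical (rather than logical)
cores, so the default no longer assumes a machine with exactly 8 cores. Below is a minimal sketch
of what such a helper could look like, assuming a Linux sysfs scan with a
std::thread::hardware_concurrency() fallback; the name get_num_physical_cores_sketch and the exact
heuristics are illustrative and not necessarily the helper this patch actually calls.

// Hypothetical sketch only; not the implementation referenced by the patch.
#include <fstream>
#include <string>
#include <thread>
#include <unordered_set>

// Count physical cores by de-duplicating the sibling lists exposed by Linux sysfs;
// hyperthreads belonging to the same core report an identical thread_siblings string.
static int get_num_physical_cores_sketch() {
#ifdef __linux__
    std::unordered_set<std::string> siblings;
    for (unsigned int cpu = 0; ; ++cpu) {
        std::ifstream f("/sys/devices/system/cpu/cpu" + std::to_string(cpu) +
                        "/topology/thread_siblings");
        if (!f.is_open()) {
            break; // no more CPUs to enumerate
        }
        std::string line;
        if (std::getline(f, line)) {
            siblings.insert(line);
        }
    }
    if (!siblings.empty()) {
        return (int) siblings.size();
    }
#endif
    // Portable fallback: logical thread count, halved as a rough guess at physical cores.
    unsigned int n = std::thread::hardware_concurrency();
    return n > 0 ? (n <= 4 ? (int) n : (int) n / 2) : 4;
}

Pinning n_threads to the physical core count (instead of a hardcoded 8) generally avoids
oversubscribing the cores on machines with fewer cores and leaving cores idle on machines with more.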