From 22e1b45c02a4b532cd01b9330d034d2b6696e1cb Mon Sep 17 00:00:00 2001
From: jianyuzh
Date: Wed, 24 Jan 2024 17:24:24 +0800
Subject: [PATCH] fix build break on MacOS, due to CI of MacOS depend on
 external ggml, instead of internal ggml

---
 llama.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index ce74fb557..b8d0f1bba 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -6703,10 +6703,14 @@ static int llama_decode_internal(
     }
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 1;
-    if ((ggml_cpu_has_cublas() || ggml_cpu_has_sycl()) && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
         n_threads = 1;
     }
 
+#ifdef GGML_USE_SYCL
+    n_threads = 1;
+#endif
+
 #ifdef GGML_USE_MPI
     const int64_t n_layer = hparams.n_layer;
     ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);