diff --git a/llama.cpp b/llama.cpp
index ce74fb557..b8d0f1bba 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -6703,10 +6703,14 @@ static int llama_decode_internal(
     }
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 1;
-    if ((ggml_cpu_has_cublas() || ggml_cpu_has_sycl()) && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
         n_threads = 1;
     }
 
+#ifdef GGML_USE_SYCL
+    n_threads = 1;
+#endif
+
 #ifdef GGML_USE_MPI
     const int64_t n_layer = hparams.n_layer;
     ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);