From f815fe43d383a742963ee73c5c506053b9061bcf Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 27 Nov 2023 20:48:27 +0200
Subject: [PATCH 1/3] ggml : use blas even if src0 is not F32

---
 ggml.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml.c b/ggml.c
index 0c7264a36..c522a101f 100644
--- a/ggml.c
+++ b/ggml.c
@@ -9373,7 +9373,7 @@ static bool ggml_compute_forward_mul_mat_use_blas(
     // TODO: find the optimal values for these
     if (ggml_is_contiguous(src0) &&
         ggml_is_contiguous(src1) &&
-        src0->type == GGML_TYPE_F32 &&
+        //src0->type == GGML_TYPE_F32 &&
         src1->type == GGML_TYPE_F32 &&
         (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {


From e9b7a5cbd0cdbb29aa3622fe0a1e783dc7a10384 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 27 Nov 2023 20:48:44 +0200
Subject: [PATCH 2/3] llama : use n_threads_batch only when n_tokens >= 32

ggml-ci
---
 llama.cpp | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index f2b5967d7..9aa337a8d 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5433,7 +5433,7 @@ static int llama_decode_internal(

     GGML_ASSERT(n_tokens <= n_batch);

-    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

     const int64_t t_start_us = ggml_time_us();
@@ -5550,18 +5550,8 @@ static int llama_decode_internal(
         n_threads = std::min(4, n_threads);
     }

-    // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
-    const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA ||
-        model.arch == LLM_ARCH_BAICHUAN ||
-        model.arch == LLM_ARCH_FALCON ||
-        model.arch == LLM_ARCH_REFACT ||
-        model.arch == LLM_ARCH_MPT ||
-        model.arch == LLM_ARCH_STARCODER ||
-        model.arch == LLM_ARCH_STABLELM;
-
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
-    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
         n_threads = 1;
     }


From 87f4102a70b54c002ad37f966717e4f13a94410a Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 27 Nov 2023 21:21:23 +0200
Subject: [PATCH 3/3] llama : revert n_threads_batch logic

ggml-ci
---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 9aa337a8d..cb544228b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5433,7 +5433,7 @@ static int llama_decode_internal(

     GGML_ASSERT(n_tokens <= n_batch);

-    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

     const int64_t t_start_us = ggml_time_us();
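
Note appended after the patch series (not part of the patches themselves): a minimal C++ sketch of the thread-selection heuristic that patch 2/3 changes and patch 3/3 reverts. The cparams_sketch struct, its default values, and the function names are hypothetical scaffolding; only the two ternary expressions are taken from the diffs above.

#include <cstdint>

// Hypothetical stand-in for the relevant context parameters in llama.cpp;
// the real fields are cparams.n_threads and cparams.n_threads_batch.
struct cparams_sketch {
    int n_threads       = 8;  // threads for small (generation) calls
    int n_threads_batch = 16; // threads for large (prompt) batches
};

// Patch 2/3: any batch smaller than 32 tokens uses the single-token thread count.
static int pick_threads_v2(const cparams_sketch & cparams, int64_t n_tokens) {
    return n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
}

// Patch 3/3: revert to the original rule - only n_tokens == 1 uses n_threads.
static int pick_threads_v3(const cparams_sketch & cparams, int64_t n_tokens) {
    return n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
}

int main() {
    cparams_sketch cparams;
    // A 16-token batch: 8 threads under patch 2/3, 16 threads after the revert.
    return (pick_threads_v2(cparams, 16) == 8 && pick_threads_v3(cparams, 16) == 16) ? 0 : 1;
}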