From 457aaf5badfe1914f02f8024246d7c5e27aa0ade Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Thu, 1 Jun 2023 07:33:32 +0200
Subject: [PATCH 1/2] Reduce code duplication between cuda and opencl branches

---
 llama.cpp | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 52966b2c5..f70b26c0f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1012,8 +1012,10 @@ static void llama_model_load_internal(
 
 #if defined(GGML_USE_CUBLAS)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CUDA
+    fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
 #elif defined(GGML_USE_CLBLAST)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CL
+    fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
 #else
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
 #endif
@@ -1095,22 +1097,14 @@ static void llama_model_load_internal(
     fprintf(stderr, "%s: mem required  = %7.2f MB (+ %7.2f MB per state)\n", __func__,
             mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
 
-#if defined(GGML_USE_CUBLAS)
     const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
 
-    fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
+    fprintf(stderr, "%s: offloading %d layers to GPU\n", __func__, n_gpu);
     if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+        fprintf(stderr, "%s: offloading output layer to GPU\n", __func__);
     }
-    fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
-#elif defined(GGML_USE_CLBLAST)
-    const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
-
-    fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
-    if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [opencl] offloading output layer to GPU\n", __func__);
-    }
-    fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    fprintf(stderr, "%s: total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 #else
     (void) n_gpu_layers;
 #endif

From 24239f0df7e9f29cddeffd42b9b606dd89ebb819 Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Thu, 1 Jun 2023 18:57:08 +0200
Subject: [PATCH 2/2] Improve implementation

---
 ggml-opencl.cpp | 9 ++++++---
 ggml-opencl.h   | 2 +-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 73088560b..52ba3aaac 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -983,11 +983,11 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
     cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
     GGML_ASSERT(to_fp32_cl != nullptr);
 
+    size_t ev_idx = 0;
+    std::vector<cl_event> events;
+
     for (int64_t i03 = 0; i03 < ne03; i03++) {
         for (int64_t i02 = 0; i02 < ne02; i02++) {
-            size_t ev_idx = 0;
-            std::vector<cl_event> events;
-
             // copy src0 to device if necessary
             if (src0->backend == GGML_BACKEND_CPU) {
                 events.emplace_back();
@@ -1050,6 +1050,9 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             for (auto *event : events) {
                 clReleaseEvent(event);
             }
+
+            ev_idx = 0;
+            events.clear();
         }
     }
 
diff --git a/ggml-opencl.h b/ggml-opencl.h
index 797b92c12..c850bb8ad 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -17,7 +17,7 @@ void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
-void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset);
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
 
 #ifdef __cplusplus
 }