diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index e0c6f089f..8ac4686b3 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -993,11 +993,11 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
     cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
     GGML_ASSERT(to_fp32_cl != nullptr);
 
+    size_t ev_idx = 0;
+    std::vector<cl_event> events;
+
     for (int64_t i03 = 0; i03 < ne03; i03++) {
         for (int64_t i02 = 0; i02 < ne02; i02++) {
-            size_t ev_idx = 0;
-            std::vector<cl_event> events;
-
             // copy src0 to device if necessary
             if (src0->backend == GGML_BACKEND_CPU) {
                 events.emplace_back();
@@ -1061,6 +1061,9 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             for (auto *event : events) {
                 clReleaseEvent(event);
             }
+
+            ev_idx = 0;
+            events.clear();
         }
     }
 
diff --git a/ggml-opencl.h b/ggml-opencl.h
index ff419c306..20e9cd37c 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -17,7 +17,7 @@ void * ggml_cl_host_malloc(size_t size);
 void   ggml_cl_host_free(void * ptr);
 
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
-void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset);
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
 
 #ifdef __cplusplus
 }
diff --git a/llama.cpp b/llama.cpp
index 51c4168db..3df41e7f6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1012,8 +1012,10 @@ static void llama_model_load_internal(
 
 #if defined(GGML_USE_CUBLAS)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CUDA
+    fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
 #elif defined(GGML_USE_CLBLAST)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CL
+    fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
 #else
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
 #endif
@@ -1095,22 +1097,14 @@ static void llama_model_load_internal(
     fprintf(stderr, "%s: mem required  = %7.2f MB (+ %7.2f MB per state)\n", __func__,
             mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
 
-#if defined(GGML_USE_CUBLAS)
     const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
 
-    fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
+    fprintf(stderr, "%s: offloading %d layers to GPU\n", __func__, n_gpu);
     if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+        fprintf(stderr, "%s: offloading output layer to GPU\n", __func__);
     }
-    fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
-#elif defined(GGML_USE_CLBLAST)
-    const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
-
-    fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
-    if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [opencl] offloading output layer to GPU\n", __func__);
-    }
-    fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    fprintf(stderr, "%s: total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 #else
     (void) n_gpu_layers;
 #endif
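
For context, the first hunk hoists `ev_idx` and the `events` vector out of the i03/i02 loops, so the vector's capacity is reused across iterations instead of being reallocated on every pass; the event handles are still released and the vector cleared at the end of each iteration. Below is a minimal standalone sketch of that pattern. The `fake_event`/`make_event`/`release` names are hypothetical stand-ins for `cl_event`, event creation, and `clReleaseEvent`, so it compiles without an OpenCL SDK; it is not the real API.

#include <cstdio>
#include <vector>

// Stand-in for an OpenCL event handle (hypothetical, for illustration only).
struct fake_event_t {};
using fake_event = fake_event_t *;

static fake_event make_event()          { return new fake_event_t(); }
static void       release(fake_event e) { delete e; }

int main() {
    // Declared once, outside the loops: the vector keeps its capacity
    // between iterations, so later passes do not reallocate.
    std::vector<fake_event> events;

    for (int i03 = 0; i03 < 4; i03++) {
        for (int i02 = 0; i02 < 4; i02++) {
            // e.g. one event per enqueued copy or kernel launch
            events.push_back(make_event());
            events.push_back(make_event());

            // ... wait on the events, then release the handles ...
            for (auto * event : events) {
                release(event);
            }

            // Reset for the next iteration; capacity is retained.
            events.clear();
        }
    }
    std::printf("done\n");
    return 0;
}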