Improve implementation

0cc4m 2023-06-01 18:57:08 +02:00
parent 457aaf5bad
commit 24239f0df7
2 changed files with 7 additions and 4 deletions

@@ -983,11 +983,11 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
     cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
     GGML_ASSERT(to_fp32_cl != nullptr);
 
+    size_t ev_idx = 0;
+    std::vector<cl_event> events;
+
     for (int64_t i03 = 0; i03 < ne03; i03++) {
         for (int64_t i02 = 0; i02 < ne02; i02++) {
-            size_t ev_idx = 0;
-            std::vector<cl_event> events;
-
             // copy src0 to device if necessary
             if (src0->backend == GGML_BACKEND_CPU) {
                 events.emplace_back();
@@ -1050,6 +1050,9 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             for (auto *event : events) {
                 clReleaseEvent(event);
             }
+
+            ev_idx = 0;
+            events.clear();
         }
     }
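
The hunks above hoist the event bookkeeping (ev_idx and the cl_event vector) out of the per-slice i03/i02 loops and reset it at the end of each iteration, so the vector's storage is reused across passes instead of being constructed and destroyed every time. Below is a minimal, self-contained sketch of that reuse pattern; ocl_event and release_event() are hypothetical stand-ins for cl_event and clReleaseEvent(), not ggml code.

// Sketch of the reuse pattern applied in the hunks above (stand-in types, not ggml code).
#include <cstdint>
#include <cstdio>
#include <vector>

struct ocl_event { int id; };                     // stand-in for cl_event

static void release_event(const ocl_event & ev) { // stand-in for clReleaseEvent()
    std::printf("released event %d\n", ev.id);
}

int main() {
    size_t ev_idx = 0;              // declared once, before the loops, as in the commit
    std::vector<ocl_event> events;  // its capacity now survives across iterations

    for (int64_t i03 = 0; i03 < 2; i03++) {
        for (int64_t i02 = 0; i02 < 2; i02++) {
            // each enqueued operation appends an event to wait on
            events.push_back({(int) ev_idx++});
            events.push_back({(int) ev_idx++});

            // release this iteration's events ...
            for (const auto & event : events) {
                release_event(event);
            }

            // ... then reset the bookkeeping without freeing the vector's storage
            ev_idx = 0;
            events.clear();
        }
    }
    return 0;
}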

@@ -17,7 +17,7 @@ void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
-void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset);
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
 
 #ifdef __cplusplus
 }
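
The header hunk drops a top-level const from the by-value offset parameter. In C++ (and C), a top-level const on a value parameter is not part of the function's type, so the declaration and the definition may differ in this respect. The sketch below illustrates this with a hypothetical load_data() stand-in for ggml_cl_load_data(); the file name is for illustration only.

// Sketch: top-level const on a by-value parameter does not change the function type.
#include <cstddef>
#include <cstdio>

void load_data(const char * fname, size_t offset);        // declaration, as in the header after this change
void load_data(const char * fname, const size_t offset);  // redeclares the same function; top-level const is ignored

// The definition can still mark the parameter const to guard against accidental modification.
void load_data(const char * fname, const size_t offset) {
    std::printf("%s @ offset %zu\n", fname, offset);
}

int main() {
    load_data("weights.bin", 0);   // hypothetical file name, for illustration only
    return 0;
}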