From db8781dfd0db973f65bb24276a886faefe0665ec Mon Sep 17 00:00:00 2001
From: edp1096
Date: Fri, 9 Jun 2023 09:23:52 +0900
Subject: [PATCH] Rename function name

---
 ggml-opencl.cpp | 2 +-
 ggml-opencl.h   | 2 +-
 llama.cpp       | 7 +++----
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 0a34f52ff..7b6daf4a8 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -662,7 +662,7 @@ static void ggml_cl_pool_free(cl_mem mem, size_t size) {
     clReleaseMemObject(mem);
 }
 
-void ggml_cl_data_free(const struct ggml_tensor* tensor) {
+void ggml_cl_free_data(const struct ggml_tensor* tensor) {
     if (tensor->backend != GGML_BACKEND_GPU) {
         return;
     }
diff --git a/ggml-opencl.h b/ggml-opencl.h
index 293faa627..bf95e5cd0 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -16,7 +16,7 @@ void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor
 void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
-void ggml_cl_data_free(const struct ggml_tensor* tensor);
+void ggml_cl_free_data(const struct ggml_tensor* tensor);
 
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
 void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
diff --git a/llama.cpp b/llama.cpp
index 25282add1..f40c5afa2 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -210,12 +210,11 @@ struct llama_model {
         for (size_t i = 0; i < tensors_by_name.size(); ++i) {
             ggml_cuda_free_data(tensors_by_name[i].second);
         }
-#endif // GGML_USE_CUBLAS
-#if defined(GGML_USE_CLBLAST)
+#elif defined(GGML_USE_CLBLAST)
         for (size_t i = 0; i < tensors_by_name.size(); ++i) {
-            ggml_cl_data_free(tensors_by_name[i].second);
+            ggml_cl_free_data(tensors_by_name[i].second);
         }
-#endif // GGML_USE_CLBLAST
+#endif
     }
 };
 