Free cuBLAS GPU memory

I have corrected PR https://github.com/ggerganov/llama.cpp/pull/5576, which caused a crash, and streamlined the code.
Unfortunately, this does not yet free all of the occupied GPU memory (only about 15% of it). We still need to track down the remaining objects that are not released when the GPU memory is freed.
Zoli Somogyi 2024-03-06 07:56:41 +01:00
parent 8ced9f7e32
commit 5a790a38b0
2 changed files with 21 additions and 5 deletions
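
To verify how much memory ggml_free_cublas() actually returns, free device memory can be sampled before and after the call with cudaMemGetInfo(). A minimal sketch, not part of this commit: the include path, the GGML_USE_CUBLAS build, and measuring only device 0 are assumptions.

```cpp
#include <cstdio>
#include <cuda_runtime.h>
#include "ggml-cuda.h"  // assumed header exposing ggml_init_cublas()/ggml_free_cublas()

int main() {
    ggml_init_cublas();                      // create cuBLAS handles on all devices

    size_t free_before = 0, free_after = 0, total = 0;
    cudaMemGetInfo(&free_before, &total);    // free/total bytes on the current device

    ggml_free_cublas();                      // destroy the handles
    cudaMemGetInfo(&free_after, &total);

    // Bytes returned to the driver by ggml_free_cublas() on device 0.
    printf("reclaimed: %zu bytes (of %zu total)\n", free_after - free_before, total);
    return 0;
}
```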

ggml-cuda.cu

```diff
@@ -8751,10 +8751,11 @@ GGML_CALL bool ggml_cublas_loaded(void) {
     return g_cublas_loaded;
 }
 
-GGML_CALL void ggml_init_cublas() {
-    static bool initialized = false;
+static bool g_cublas_initialized = false;
 
-    if (!initialized) {
+GGML_CALL void ggml_init_cublas() {
+
+    if (!g_cublas_initialized) {
 
 #ifdef __HIP_PLATFORM_AMD__
         // Workaround for a rocBLAS bug when using multiple graphics cards:
```
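
The motivation for this refactor: a function-local static can only be touched inside ggml_init_cublas(), so no teardown routine could ever reset it and allow re-initialization. Moving the flag to file scope makes the following pattern possible (a minimal sketch with hypothetical names, not llama.cpp code):

```cpp
static bool g_initialized = false;  // file scope: a teardown function can reset it

void init_resources(void) {
    if (g_initialized) {
        return;                     // idempotent: repeated calls are no-ops
    }
    // ... acquire handles, contexts, ...
    g_initialized = true;
}

void free_resources(void) {
    // ... release handles, contexts, ...
    g_initialized = false;          // init_resources() may now run again
}
```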
```diff
@@ -8764,7 +8765,7 @@ GGML_CALL void ggml_init_cublas() {
 #endif
 
         if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
-            initialized = true;
+            g_cublas_initialized = true;
             g_cublas_loaded = false;
             fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
             return;
@@ -8835,7 +8836,7 @@ GGML_CALL void ggml_init_cublas() {
         // configure logging to stdout
         // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
 
-        initialized = true;
+        g_cublas_initialized = true;
         g_cublas_loaded = true;
     }
 }
```
```diff
@@ -12490,3 +12491,15 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
     }
     return device_count;
 }
+
+extern "C" GGML_CALL void ggml_free_cublas(void);
+GGML_CALL void ggml_free_cublas(void) {
+#ifdef GGML_USE_CUBLAS
+    for (int id = 0; id < g_device_count; ++id) {
+        CUBLAS_CHECK(cublasDestroy(g_cublas_handles[id]));
+        g_cublas_handles[id] = nullptr;
+    }
+
+    g_cublas_initialized = false;
+#endif
+}
```
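
Note that cublasDestroy() releases only the cuBLAS handle and its internal workspace; the CUDA primary context, cached allocations, and buffers owned by other parts of the backend remain resident, which is consistent with only about 15% of the memory coming back. One direction worth exploring is a full device reset, sketched below under a strong assumption: cudaDeviceReset() destroys the device's primary context, so it is only safe when nothing else in the process still uses that device. The helper name is hypothetical, not part of the commit.

```cpp
#include <cuda_runtime.h>

// Hypothetical, more aggressive teardown: only valid if no other code in the
// process still holds CUDA resources on these devices.
void reset_all_devices(int device_count) {
    for (int id = 0; id < device_count; ++id) {
        cudaSetDevice(id);      // select the device to tear down
        cudaDeviceReset();      // destroy its primary context and all allocations
    }
}
```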

ggml-cuda.h

```diff
@@ -17,6 +17,9 @@ extern "C" {
 #define GGML_CUDA_MAX_DEVICES 16
 
+// Release CUDA resources
+GGML_API GGML_CALL void ggml_free_cublas(void);
+
 // Always success. To check if CUDA is actually loaded, use `ggml_cublas_loaded`.
 GGML_API GGML_CALL void ggml_init_cublas(void);
```
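
With the new declaration exported, a caller can pair initialization and teardown; because ggml_free_cublas() clears the guard flag, ggml_init_cublas() can run again afterwards. A hedged usage sketch (the call site and program structure are assumptions, not code from the commit):

```cpp
#include "ggml-cuda.h"

int main() {
    ggml_init_cublas();   // idempotent: guarded by g_cublas_initialized
    // ... load model, run inference ...
    ggml_free_cublas();   // destroy cuBLAS handles, reset the guard flag
    ggml_init_cublas();   // safe: the flag was cleared, so init runs again
    ggml_free_cublas();
    return 0;
}
```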