From 863166b4c39662b9e86406c7ee23014fb4e2d6bf Mon Sep 17 00:00:00 2001
From: Oleksii Maryshchenko
Date: Sat, 4 Nov 2023 17:50:59 +0100
Subject: [PATCH] Skip GPUs without mem pool support.

---
 ggml-cuda.cu | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 0f2fb1921..bed020394 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -5869,7 +5869,7 @@ void ggml_init_cublas() {
             }
         } else {
             g_cudaMemPools[id] = nullptr;
-            fprintf(stderr, ", CUDA memory pool is not supported (cant load default pool)\n");
+            fprintf(stderr, ", CUDA memory pool is not supported (can't load default pool)\n");
         }
         // test alloc/dealoc
         if (err == cudaSuccess) {
@@ -5887,6 +5887,8 @@ void ggml_init_cublas() {
                 fprintf(stderr, ", CUDA memory pool is not supported (allocation failed)\n");
             }
         }
+    } else {
+        fprintf(stderr, ", CUDA memory pool is not supported\n");
     }
 #endif
         g_tensor_split[id] = total_vram;
@@ -5915,6 +5917,7 @@ void ggml_init_cublas() {
                 fprintf(stderr, "Warning: Device %d doesnt support CUDA memory pool, skipping pool access config\n", id);
+                continue;
             }

             cudaMemAccessDesc desc_device = {};
@@ -5923,13 +5926,13 @@
             desc_device.flags = cudaMemAccessFlagsProtReadWrite;
             cudaError_t err = cudaMemPoolSetAccess(main_device_pool, &desc_device, 1 /* numDescs */);
             if (err != cudaSuccess) {
-                fprintf(stderr, "Cant give access for main device memory pool to device %d\n", id);
+                fprintf(stderr, "Can't give access for main device memory pool to device %d\n", id);
             }
             cudaMemPool_t mempool;
             CUDA_CHECK(cudaDeviceGetDefaultMemPool(&mempool, id));
             err = cudaMemPoolSetAccess(mempool, &desc_main_device, 1 /* numDescs */);
             if (err != cudaSuccess) {
-                fprintf(stderr, "Can't give access for device %d memory pool to main device \n", id);
+                fprintf(stderr, "Can't give access for device %d memory pool to main device \n", id);
             }
         }
     } else {
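
Note: the hunks above follow a common stream-ordered-allocator pattern: probe each device for memory pool support, and skip the default-pool and peer-access setup entirely when it is absent. The following is a minimal standalone sketch of that pattern (not the llama.cpp code itself); the device loop, the choice of device 0 as the main device, and the message strings are illustrative assumptions.

// Sketch: skip memory-pool setup on devices that do not support it.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int device_count = 0;
    if (cudaGetDeviceCount(&device_count) != cudaSuccess) {
        return 1;
    }
    const int main_device = 0; // assumption: device 0 acts as the main device

    for (int id = 0; id < device_count; ++id) {
        int pools_supported = 0;
        // cudaDevAttrMemoryPoolsSupported reports whether the device supports
        // stream-ordered allocation (cudaMallocAsync) and memory pools.
        cudaDeviceGetAttribute(&pools_supported, cudaDevAttrMemoryPoolsSupported, id);
        if (!pools_supported) {
            fprintf(stderr, "Device %d does not support CUDA memory pools, skipping pool setup\n", id);
            continue; // same idea as the `continue;` added in the patch
        }

        cudaMemPool_t pool = nullptr;
        if (cudaDeviceGetDefaultMemPool(&pool, id) != cudaSuccess) {
            fprintf(stderr, "Device %d: can't load default memory pool\n", id);
            continue;
        }

        if (id == main_device) {
            continue; // the main device needs no extra access to its own pool
        }

        // Grant the main device read/write access to this device's default pool.
        cudaMemAccessDesc desc = {};
        desc.location.type = cudaMemLocationTypeDevice;
        desc.location.id   = main_device;
        desc.flags         = cudaMemAccessFlagsProtReadWrite;
        if (cudaMemPoolSetAccess(pool, &desc, 1 /* numDescs */) != cudaSuccess) {
            fprintf(stderr, "Can't give main device access to device %d memory pool\n", id);
        }
    }
    return 0;
}

Checking cudaDevAttrMemoryPoolsSupported up front avoids relying on a failing cudaDeviceGetDefaultMemPool call as the only signal, which is essentially what the added `continue;` and the new `else` branch in the patch guard against.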