diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index db3c24f60..9c9d6cee2 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -16022,6 +16022,18 @@ catch (sycl::exception const &exc) {
     std::exit(1);
 }
 
+// Convenience wrapper around ggml_backend_sycl_get_device_memory that
+// returns only the free-memory figure for `device`.
+GGML_CALL void ggml_backend_sycl_get_free_device_memory(int device, size_t *free) try {
+    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_free_device_memory\n");
+    size_t total;
+    ggml_backend_sycl_get_device_memory(device, free, &total);
+}
+catch (sycl::exception const &exc) {
+    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+              << ", line:" << __LINE__ << std::endl;
+    std::exit(1);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 // backend interface
diff --git a/ggml-sycl.h b/ggml-sycl.h
index a9f776fc1..2bb76f94d 100644
--- a/ggml-sycl.h
+++ b/ggml-sycl.h
@@ -33,6 +33,7 @@ GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
 GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
 GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
 GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
+GGML_API GGML_CALL void ggml_backend_sycl_get_free_device_memory(int device, size_t *free);
 GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
 
 // TODO: these are temporary
diff --git a/ggml-vulkan.cpp b/ggml-vulkan.cpp
index 1736ab736..1343260be 100644
--- a/ggml-vulkan.cpp
+++ b/ggml-vulkan.cpp
@@ -5781,6 +5781,23 @@ GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size
     }
 }
 
+// NOTE(review): heap.size is the device-local heap's total capacity, not the
+// currently free amount — a true free-memory query needs VK_EXT_memory_budget.
+GGML_CALL void ggml_backend_vk_get_free_device_memory(int device, size_t * free) {
+    GGML_ASSERT(device < (int) vk_instance.device_indices.size());
+
+    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
+
+    vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();
+
+    for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
+        if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
+            *free = heap.size;
+            break;
+        }
+    }
+}
+
 // backend registry
 GGML_CALL static ggml_backend_t ggml_backend_reg_vk_init(const char * params, void * user_data) {
     ggml_backend_t vk_backend = ggml_backend_vk_init((int) (intptr_t) user_data);
diff --git a/ggml-vulkan.h b/ggml-vulkan.h
index af661c2d7..c72072e8d 100644
--- a/ggml-vulkan.h
+++ b/ggml-vulkan.h
@@ -19,6 +19,7 @@ GGML_API GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend);
 GGML_API GGML_CALL int  ggml_backend_vk_get_device_count(void);
 GGML_API GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
 GGML_API GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
+GGML_API GGML_CALL void ggml_backend_vk_get_free_device_memory(int device, size_t * free);
 GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
 
 // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
diff --git a/llama.cpp b/llama.cpp
index 6cad1a553..da72bf0a5 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1659,12 +1659,10 @@ static size_t llama_get_available_device_memory(int device) {
 #elif defined(GGML_USE_SYCL)
-    size_t total;
     size_t free;
-    ggml_backend_sycl_get_device_memory(device, &total, &free);
+    ggml_backend_sycl_get_free_device_memory(device, &free);
     return free;
 #elif defined(GGML_USE_VULKAN)
-    size_t total;
     size_t free;
-    ggml_backend_vk_get_device_memory(device, &total, &free);
+    ggml_backend_vk_get_free_device_memory(device, &free);
     return free;
 #else
     return 1;