implement ggml_backend_sycl_get_free_device_memory for all GPU support

This commit is contained in:
Yui 2024-04-06 17:03:04 +02:00
parent 746d5fb3c9
commit 2549662cde
5 changed files with 30 additions and 2 deletions

View file

@ -16022,6 +16022,17 @@ catch (sycl::exception const &exc) {
std::exit(1);
}
// Convenience wrapper that reports only the free memory of a SYCL device.
// Delegates to ggml_backend_sycl_get_device_memory and discards the total.
// On any SYCL exception the process is terminated, matching the error
// handling convention of the other SYCL entry points in this file.
GGML_CALL void ggml_backend_sycl_get_free_device_memory(int device, size_t *free) {
    try {
        GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_free_device_memory\n");
        size_t total_unused;
        ggml_backend_sycl_get_device_memory(device, free, &total_unused);
    } catch (sycl::exception const &exc) {
        std::cerr << exc.what() << "Exception caught at file:" << __FILE__
                  << ", line:" << __LINE__ << std::endl;
        std::exit(1);
    }
}
////////////////////////////////////////////////////////////////////////////////
// backend interface

View file

@ -33,6 +33,7 @@ GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
GGML_API GGML_CALL void ggml_backend_sycl_get_free_device_memory(int device, size_t *free);
GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
// TODO: these are temporary

View file

@ -5781,6 +5781,21 @@ GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size
}
}
// Report the available memory of the given Vulkan device via *free.
// NOTE(review): this returns the SIZE of the first device-local heap, i.e. its
// total capacity, not the amount currently unused — an accurate "free" value
// would require VK_EXT_memory_budget; confirm this approximation is intended.
GGML_CALL void ggml_backend_vk_get_free_device_memory(int device, size_t * free) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];

    vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();

    // Defensive default: without this, *free is left unwritten (and the
    // caller's variable uninitialized) when no device-local heap exists.
    *free = 0;

    for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
        if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
            *free = heap.size;
            break;
        }
    }
}
// backend registry
GGML_CALL static ggml_backend_t ggml_backend_reg_vk_init(const char * params, void * user_data) {
ggml_backend_t vk_backend = ggml_backend_vk_init((int) (intptr_t) user_data);

View file

@ -19,6 +19,7 @@ GGML_API GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend);
GGML_API GGML_CALL int ggml_backend_vk_get_device_count(void);
GGML_API GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
GGML_API GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
GGML_API GGML_CALL void ggml_backend_vk_get_free_device_memory(int device, size_t * free);
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU

View file

@ -1659,12 +1659,12 @@ static size_t llama_get_available_device_memory(int device) {
#elif defined(GGML_USE_SYCL)
size_t total;
size_t free;
ggml_backend_sycl_get_device_memory(device, &total, &free);
ggml_backend_sycl_get_free_device_memory(device, &free);
return free;
#elif defined(GGML_USE_VULKAN)
size_t total;
size_t free;
ggml_backend_vk_get_device_memory(device, &total, &free);
ggml_backend_vk_get_free_device_memory(device, &free);
return free;
#else
return 1;