vulkan : add backend registry / device interfaces (#9721)

* vulkan : add backend registry / device interfaces

* llama : print devices used on model load
commit f010b77a37 (parent 2194200278)
Diego Devesa, 2024-10-17 02:46:58 +02:00, committed by GitHub
4 changed files with 226 additions and 123 deletions
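
This commit moves the Vulkan backend onto the generic ggml_backend_dev_* registry, so llama.cpp no longer needs Vulkan-specific entry points. As a rough sketch (not part of this diff; list_registry_devices is a hypothetical name), device discovery through the registry uses ggml_backend_dev_count/get/name/description from ggml-backend.h:

    #include <cstdio>
    #include "ggml-backend.h"

    // Enumerate every device exposed through the backend registry and
    // print its name and description, much like the new model-load log.
    static void list_registry_devices(void) {
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            printf("device %zu: %s (%s)\n", i,
                   ggml_backend_dev_name(dev),
                   ggml_backend_dev_description(dev));
        }
    }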

@@ -8,9 +8,7 @@
 #include "ggml-alloc.h"
 #include "ggml-backend.h"

-#if defined(GGML_USE_VULKAN)
-# include "ggml-vulkan.h"
-#elif defined(GGML_USE_SYCL)
+#if defined(GGML_USE_SYCL)
 # include "ggml-sycl.h"
 #elif defined(GGML_USE_KOMPUTE)
 # include "ggml-kompute.h"
@@ -3418,8 +3416,6 @@ static int llama_get_device_count(const llama_model & model) {
 #if defined(GGML_USE_SYCL)
     count += ggml_backend_sycl_get_device_count();
-#elif defined(GGML_USE_VULKAN)
-    count += ggml_backend_vk_get_device_count();
 #elif defined(GGML_USE_CANN)
     count += ggml_backend_cann_get_device_count();
 #endif
@@ -3451,10 +3447,6 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(const llama_mode
     }
 #elif defined(GGML_USE_CPU_HBM)
     buft = ggml_backend_cpu_hbm_buffer_type();
-#elif defined(GGML_USE_VULKAN)
-    if (host_buffer) {
-        buft = ggml_backend_vk_host_buffer_type();
-    }
 #endif

     if (buft == nullptr) {
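
The pinned host buffer type that ggml_backend_vk_host_buffer_type() used to provide is likewise available through the device handle. A hedged sketch (host_buft_for is hypothetical; it assumes ggml_backend_dev_host_buffer_type from ggml-backend.h returns nullptr when a device exposes no host buffer type):

    // Query the host (pinned) buffer type for a registry device;
    // fall through to nullptr so callers keep their CPU default.
    static ggml_backend_buffer_type_t host_buft_for(ggml_backend_dev_t dev, bool host_buffer) {
        return host_buffer ? ggml_backend_dev_host_buffer_type(dev) : nullptr;
    }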
@@ -3473,9 +3465,7 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_
     }
     device -= (int)model.devices.size();

-#if defined(GGML_USE_VULKAN)
-    buft = ggml_backend_vk_buffer_type(device);
-#elif defined(GGML_USE_SYCL)
+#if defined(GGML_USE_SYCL)
     buft = ggml_backend_sycl_buffer_type(device);
 #elif defined(GGML_USE_KOMPUTE)
     buft = ggml_backend_kompute_buffer_type(device);
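
Per-device buffer types follow the same pattern: a device recorded in model.devices answers the query itself. Sketch, assuming dev is such a handle and ggml_backend_dev_buffer_type as declared in ggml-backend.h:

    // Registry replacement for ggml_backend_vk_buffer_type(device):
    // ask the device handle for its default buffer type directly.
    ggml_backend_buffer_type_t buft = ggml_backend_dev_buffer_type(dev);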
@@ -3535,11 +3525,6 @@ static size_t llama_get_device_memory(const llama_model & model, int device) {
     size_t free;
     ggml_backend_sycl_get_device_memory(device, &free, &total);
     return free;
-#elif defined(GGML_USE_VULKAN)
-    size_t total;
-    size_t free;
-    ggml_backend_vk_get_device_memory(device, &free, &total);
-    return free;
 #elif defined(GGML_USE_CANN)
     size_t total;
     size_t free;
@@ -19095,8 +19080,7 @@ bool llama_supports_mlock(void) {
 }

 bool llama_supports_gpu_offload(void) {
-#if defined(GGML_USE_VULKAN) || \
-    defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE)
+#if defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE)
     // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
     return true;
 #else
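
Under the registry, this capability check can also be expressed at runtime rather than via #ifdefs. A hypothetical sketch (supports_gpu_offload_via_registry is not in the commit; ggml_backend_dev_by_type is from ggml-backend.h):

    // True when the registry exposes any GPU device, which is what the
    // remaining compile-time #ifdef chain approximates.
    static bool supports_gpu_offload_via_registry(void) {
        return ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU) != nullptr ||
               ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU_FULL) != nullptr;
    }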
@@ -19227,8 +19211,13 @@ struct llama_model * llama_load_model_from_file(
                 case GGML_BACKEND_DEVICE_TYPE_GPU:
                 case GGML_BACKEND_DEVICE_TYPE_GPU_FULL:
+                {
+                    size_t free, total; // NOLINT
+                    ggml_backend_dev_memory(dev, &free, &total);
+                    LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
                     model->devices.push_back(dev);
                     break;
+                }
             }
         }
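
The next hunk deletes the hand-rolled Vulkan initialization: devices collected in model->devices at load time can be brought up uniformly instead. A sketch of that generic loop (illustrative, not the commit's verbatim code; the error handling mirrors the removed block, and ggml_backend_dev_init with nullptr params is the registry call from ggml-backend.h):

    // Initialize one ggml backend per device recorded at model load.
    for (ggml_backend_dev_t dev : model->devices) {
        ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
        if (backend == nullptr) {
            LLAMA_LOG_ERROR("%s: failed to initialize %s backend\n", __func__, ggml_backend_dev_name(dev));
            llama_free(ctx);
            return nullptr;
        }
        ctx->backends.push_back(backend);
    }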
@@ -19423,32 +19412,7 @@ struct llama_context * llama_new_context_with_model(
         main_gpu -= (int)model->devices.size();
     }

-#if defined(GGML_USE_VULKAN)
-    if (model->split_mode == LLAMA_SPLIT_MODE_ROW) {
-        LLAMA_LOG_ERROR("%s: Row split not supported. Failed to initialize Vulkan backend\n", __func__);
-        llama_free(ctx);
-        return nullptr;
-    }
-    if (model->split_mode == LLAMA_SPLIT_MODE_NONE) {
-        ggml_backend_t backend = ggml_backend_vk_init(main_gpu);
-        if (backend == nullptr) {
-            LLAMA_LOG_ERROR("%s: failed to initialize Vulkan backend\n", __func__);
-            llama_free(ctx);
-            return nullptr;
-        }
-        ctx->backends.push_back(backend);
-    } else {
-        for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
-            ggml_backend_t backend = ggml_backend_vk_init(device);
-            if (backend == nullptr) {
-                LLAMA_LOG_ERROR("%s: failed to initialize Vulkan%d backend\n", __func__, device);
-                llama_free(ctx);
-                return nullptr;
-            }
-            ctx->backends.push_back(backend);
-        }
-    }
-#elif defined(GGML_USE_SYCL)
+#if defined(GGML_USE_SYCL)
     // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
     if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
         ggml_backend_t backend = ggml_backend_sycl_init(main_gpu);