Handle the review comments of this pull request

This commit is contained in:
leo-pony 2024-10-22 10:26:58 +08:00
parent 4d4ae1c9a1
commit abf5be4cfa
4 changed files with 10 additions and 19 deletions

View file

@@ -33,7 +33,6 @@ extern "C" {
  * @brief Maximum number of CANN devices supported.
  */
 #define GGML_CANN_MAX_DEVICES 16
-#define GGML_CANN_NAME "CANN"
 GGML_API ggml_backend_reg_t ggml_backend_cann_reg(void);

View file

@@ -588,14 +588,13 @@ struct ggml_backend_registry {
 #ifdef GGML_USE_RPC
     register_backend(ggml_backend_rpc_reg());
 #endif
 #ifdef GGML_USE_AMX
     register_backend(ggml_backend_amx_reg());
 #endif
 #ifdef GGML_USE_CANN
     register_backend(ggml_backend_cann_reg());
 #endif
     // TODO: kompute
     register_backend(ggml_backend_cpu_reg());

View file

@@ -39,6 +39,8 @@
 #include "ggml-common.h"
+#define GGML_CANN_NAME "CANN"
 /**
  * @brief Handles CANN errors by printing an error message and aborting.
  *

View file

@@ -10,8 +10,6 @@
 #if defined(GGML_USE_KOMPUTE)
 #   include "ggml-kompute.h"
-#elif defined(GGML_USE_CANN)
-#   include "ggml-cann.h"
 #endif
 #ifndef __AMX_INT8__
@@ -3416,11 +3414,7 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(const llama_mode
         }
     }
-#if defined(GGML_USE_CANN)
-    if (host_buffer) {
-        buft = ggml_backend_cann_host_buffer_type();
-    }
-#elif defined(GGML_USE_CPU_HBM)
+#if defined(GGML_USE_CPU_HBM)
     buft = ggml_backend_cpu_hbm_buffer_type();
 #endif
@@ -3442,8 +3436,6 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_
 #if defined(GGML_USE_KOMPUTE)
     buft = ggml_backend_kompute_buffer_type(device);
-#elif defined(GGML_USE_CANN)
-    buft = ggml_backend_cann_buffer_type(device);
 #endif
     if (buft == nullptr) {
@@ -3487,14 +3479,13 @@ static size_t llama_get_device_memory(const llama_model & model, int device) {
         return free;
     }
-#if defined(GGML_USE_CANN)
-    size_t total;
-    size_t free;
-    ggml_backend_cann_get_device_memory(device, &free, &total);
-    return free;
-#else
-    return 1;
-#endif
+    if (model.devices.size() > 0) {
+        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(model.devices[0]);
+        LLAMA_LOG_WARN("%s: failed to get free memmory of device:%d of backend:%s, for device id is out of range.\n", __func__, device, ggml_backend_reg_name(reg));
+    } else {
+        LLAMA_LOG_WARN("%s: failed to get free memmory of device, no devices in inputted model.\n", __func__);
+    }
+    return 1;
     GGML_UNUSED(model);
     GGML_UNUSED(device);