fix compiling error

parent 3c491a3263
commit 3fe07eb907

6 changed files with 72 additions and 69 deletions

ggml-qnn.cpp (56 changes)
@@ -70,27 +70,27 @@ typedef void (*ggml_qnn_func_t)(ggml_backend_qnn_context * ctx,

 static struct qnn::qcom_socinfo g_qnn_soc_info_table[] = {
     /* Qualcomm SnapDragon 8 Gen 1 */
-    [SM8450] = {
-        .soc_model = SM8450,
-        .htp_arch = V69,
+    [qnn::SM8450] = {
+        .soc_model = qnn::SM8450,
+        .htp_arch = qnn::V69,
         .vtcm_size_in_mb = 8},

     /* Qualcomm SnapDragon 8 Gen 1+ */
-    [SM8475] = {
-        .soc_model = SM8475,
-        .htp_arch = V69,
+    [qnn::SM8475] = {
+        .soc_model = qnn::SM8475,
+        .htp_arch = qnn::V69,
         .vtcm_size_in_mb = 8},

     /* Qualcomm SnapDragon 8 Gen 2 */
-    [SM8550] = {
-        .soc_model = SM8550,
-        .htp_arch = V73,
+    [qnn::SM8550] = {
+        .soc_model = qnn::SM8550,
+        .htp_arch = qnn::V73,
         .vtcm_size_in_mb = 8},

     /* Qualcomm SnapDragon 8 Gen 3 */
-    [SM8650] = {
-        .soc_model = SM8650,
-        .htp_arch = V75,
+    [qnn::SM8650] = {
+        .soc_model = qnn::SM8650,
+        .htp_arch = qnn::V75,
         .vtcm_size_in_mb = 8},

 };
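Note: the compile error fixed here comes from the soc_model/htp_arch enumerators and qcom_socinfo having moved into namespace qnn, so the unqualified names in the designated initializers no longer resolve. A minimal, self-contained sketch of the qualified pattern follows; the enumerator values are illustrative stand-ins (only SM8650 = 57 is confirmed by this diff), and it omits the non-standard array-designator syntax used in the real table:

#include <cstdint>
#include <cstddef>

namespace qnn {
// Stand-ins for the declarations in the qnn header; values are illustrative only.
enum { SM8450 = 36 };
enum { V69 = 69 };
struct qcom_socinfo {
    uint32_t soc_model;
    size_t   htp_arch;
    size_t   vtcm_size_in_mb;
};
}

// Every name is qualified with qnn::, matching the fix above (designated
// member initializers require C++20 or a compiler extension).
static qnn::qcom_socinfo g_sm8450_info = {
    .soc_model = qnn::SM8450, .htp_arch = qnn::V69, .vtcm_size_in_mb = 8};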
@@ -198,19 +198,6 @@ static const char * qnn_opname_from_ggmlop(enum ggml_op ggmlop) {
     return nullptr;
 }

-static uint32_t qnn_get_ggml_tensor_data_size(const ggml_tensor * tensor) {
-    /*
-    size_t data_size = ggml_row_size(tensor->type, tensor->ne[0]);
-    size_t n_dims = qnn_get_ggml_tensor_rank(tensor);
-    for (int i = 1; i < n_dims; i++) {
-        data_size *= tensor->ne[i];
-    }
-
-    return data_size;
-    */
-    return ggml_nbytes(tensor);
-}
-
 static bool qnn_is_valid_params(ggml_backend_qnn_context * ctx, const ggml_tensor * src0,
                                 const ggml_tensor * src1, ggml_tensor * dst) {
     if ((nullptr == ctx) || (nullptr == src0) || (nullptr == src1) || (nullptr == dst)) {
@@ -218,7 +205,7 @@ static bool qnn_is_valid_params(ggml_backend_qnn_context * ctx, const ggml_tenso
         return false;
     }

-    qnn_instance * instance = nullptr;
+    qnn_internal::qnn_instance *instance = nullptr;
     Qnn_Tensor_t *tensor_0 = nullptr;
     Qnn_Tensor_t *tensor_1 = nullptr;
     Qnn_Tensor_t *tensor_2 = nullptr;
@@ -283,13 +270,6 @@ public:
 };
 #endif

-using pfn_qnnsaver_initialize = decltype(QnnSaver_initialize);
-using pfn_qnninterface_getproviders = decltype(QnnInterface_getProviders);
-using pfn_qnnsysteminterface_getproviders = decltype(QnnSystemInterface_getProviders);
-
-#define RPCMEM_DEFAULT_FLAGS 1
-#define RPCMEM_HEAP_ID_SYSTEM 25
-
 #define VALIDATE(value, status) \
     do { \
         status = value; \
@@ -625,7 +605,7 @@ static void ggml_qnn_add(ggml_backend_qnn_context * ctx, const ggml_tensor * src
                          const ggml_tensor * src1, ggml_tensor * dst) {
     Qnn_ErrorHandle_t error = QNN_SUCCESS;
     bool graph_initialized = false;
-    qnn_instance * instance = nullptr;
+    qnn_internal::qnn_instance *instance = nullptr;
     std::string graph_name = "ggml_op_qnn_add";
     Qnn_GraphHandle_t graph_handle = nullptr;
     Qnn_Param_t qnn_params[] = {};
@@ -819,7 +799,7 @@ static void ggml_qnn_mul_mat(ggml_backend_qnn_context * ctx,
                              ggml_tensor * dst) {
     Qnn_ErrorHandle_t error = QNN_SUCCESS;
     bool graph_initialized = false;
-    qnn_instance * instance = nullptr;
+    qnn_internal::qnn_instance *instance = nullptr;
     std::string graph_name = "ggml_op_qnn_mul_mat";
     Qnn_GraphHandle_t graph_handle = nullptr;
     Qnn_Param_t qnn_params[] = {};
@@ -1492,8 +1472,9 @@ GGML_CALL static void ggml_backend_qnn_free(ggml_backend_t backend) {
     ggml_backend_qnn_context * ctx = (ggml_backend_qnn_context *) backend->context;
     QNN_LOG_INFO("idx %d, name:%s", ctx->device, g_qnn_mgr[ctx->device].name);

-    qnn_instance * instance = (qnn_instance *)g_qnn_mgr[ctx->device].instance;
+    auto *instance = g_qnn_mgr[ctx->device].instance;
     if (instance != nullptr) {
+        // TODO: this should be done inside the destructor
         std::map<std::string,
                  std::tuple<Qnn_GraphHandle_t, Qnn_Tensor_t *, Qnn_Tensor_t *,
                             Qnn_Tensor_t *>>::iterator graph_it;
@@ -1721,8 +1702,7 @@ ggml_backend_t ggml_backend_qnn_init(size_t device, const char * qnn_lib_path) {
         }
     }

-    qnn_instance * instance = nullptr;
-    instance = new qnn_instance(qnn_lib_path, g_qnn_mgr[device].lib, "");
+    auto *instance = new qnn_internal::qnn_instance(qnn_lib_path, g_qnn_mgr[device].lib, "");
     result = instance->qnn_init(nullptr);
     if (0 != result) {
         QNN_LOG_WARN(
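The remaining ggml-qnn.cpp hunks apply the same two idioms: qualify qnn_instance with its qnn_internal namespace at every declaration site, or use auto* where the initializer already names the type. A toy, self-contained illustration of the pattern (none of these stand-in types are the project's real definitions):

namespace qnn_internal {
struct qnn_instance {
    // Stand-in for the real constructor and qnn_init() used in the diff.
    int qnn_init(const void *) { return 0; }
};
}

struct backend_slot {
    qnn_internal::qnn_instance *instance = nullptr;  // must be qualified now
};

int main() {
    backend_slot slot;
    auto *instance = new qnn_internal::qnn_instance();  // auto* as in ggml_backend_qnn_init
    int result = instance->qnn_init(nullptr);
    slot.instance = instance;
    auto *same = slot.instance;  // auto* as in ggml_backend_qnn_free
    delete same;
    return result;
}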
@@ -20,5 +20,5 @@ struct ggml_backend_qnn_context {
     struct ggml_backend* backend;
     QNN_INTERFACE_VER_TYPE raw_interface;
     QNN_SYSTEM_INTERFACE_VER_TYPE raw_system_interface;
-    struct qcom_socinfo socinfo;
+    qnn::qcom_socinfo socinfo;
 };
@@ -1,6 +1,12 @@

 #pragma once

+#include "QnnTypes.h"
+#include "QnnCommon.h"
+#include "QnnInterface.h"
+#include "Saver/QnnSaver.h"
+#include "System/QnnSystemInterface.h"
+
 namespace qnn {
 // =================================================================================================
 //
@@ -30,17 +36,24 @@ namespace qnn {
     SM8650 = 57, // v75
 };

-struct qcom_socinfo {
-    uint32_t soc_model;
-    size_t htp_arch;
-    size_t vtcm_size_in_mb;
-};
-
 using pfn_rpc_mem_init = void (*)(void);
 using pfn_rpc_mem_deinit = void (*)(void);
 using pfn_rpc_mem_alloc = void* (*) (int, uint32_t, int);
 using pfn_rpc_mem_free = void (*)(void*);
 using pfn_rpc_mem_to_fd = int (*)(void*);

+struct qcom_socinfo {
+    uint32_t soc_model;
+    size_t htp_arch;
+    size_t vtcm_size_in_mb;
+};
+using pfn_qnnsaver_initialize = decltype(QnnSaver_initialize);
+using pfn_qnninterface_getproviders = decltype(QnnInterface_getProviders);
+using pfn_qnnsysteminterface_getproviders = decltype(QnnSystemInterface_getProviders);
+}
+
+#define QNN_VER_PTR(x) (&((x).v1)) // TODO: remove this macro after we have a separate header for QNN
+
 #define RPCMEM_DEFAULT_FLAGS 1
 #define RPCMEM_HEAP_ID_SYSTEM 25
+
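For context, the function-pointer aliases added to this header are meant to be used with dlsym, as the later hunks in this commit do. A hedged, self-contained sketch of that lookup pattern; the library name, the stand-in signature, and the error handling are illustrative only (the real aliases are decltype(...) of the SDK functions):

#include <dlfcn.h>
#include <cstdio>

namespace qnn {
// Stand-in for the real alias, which is `decltype(QnnInterface_getProviders)`.
using pfn_qnninterface_getproviders = int(const void ***, unsigned int *);
}

int main() {
    void *lib_handle = dlopen("libQnnCpu.so", RTLD_NOW | RTLD_LOCAL);  // illustrative library name
    if (lib_handle == nullptr) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    auto *get_providers = reinterpret_cast<qnn::pfn_qnninterface_getproviders *>(
        dlsym(lib_handle, "QnnInterface_getProviders"));
    if (get_providers == nullptr) {
        std::fprintf(stderr, "can not load symbol QnnInterface_getProviders : %s\n", dlerror());
    }
    dlclose(lib_handle);
    return 0;
}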
@@ -11,9 +11,6 @@
 #include "QnnGraph.h"
 #include "QnnProperty.h"
 #include "QnnTensor.h"
-#include "QnnInterface.h"
-#include "Saver/QnnSaver.h"
-#include "System/QnnSystemInterface.h"
 #include "HTP/QnnHtpDevice.h"
 #include "HTP/QnnHtpGraph.h"

@@ -886,7 +883,7 @@ namespace qnn_internal {
         }

         auto* get_providers =
-            reinterpret_cast<pfn_qnnsysteminterface_getproviders*>(
+            reinterpret_cast<qnn::pfn_qnnsysteminterface_getproviders*>(
                 dlsym(_system_lib_handle, "QnnSystemInterface_getProviders"));
         if (nullptr == get_providers) {
             QNN_LOG_WARN(
@@ -988,7 +985,7 @@ namespace qnn_internal {
         }

         auto get_providers =
-            load_qnn_functionpointers<pfn_qnninterface_getproviders*>(
+            qnn::load_qnn_functionpointers<qnn::pfn_qnninterface_getproviders*>(
                 lib_handle, "QnnInterface_getProviders");
         if (nullptr == get_providers) {
             QNN_LOG_WARN("can not load symbol QnnInterface_getProviders : %s", dlerror());
@@ -45,7 +45,7 @@ namespace qnn {
         QNN_VER_PTR(*_qnn_tensor)->dataType = qnn_data_type;

         if (is_npu) {
-            qnn_instance* instance = ctx->instance;
+            auto* instance = ctx->instance;
             uint8_t* qnn_buffer = static_cast<uint8_t*>(
                 instance->alloc_rpcmem(ggml_nbytes(tensor), alignof(void*)));
             if (!qnn_buffer) {
@@ -68,7 +68,7 @@ namespace qnn {
         }
         else {
             QNN_VER_PTR(*_qnn_tensor)->clientBuf = {
-                tensor->data, qnn_get_ggml_tensor_data_size(tensor) };
+                tensor->data, get_ggml_tensor_data_size(tensor) };
         }
     }

@@ -76,7 +76,7 @@ namespace qnn {
                      ggml_backend_qnn_context* ctx)
         : _tensor(tensor), _qnn_tensor(qnn_tensor), _context(ctx) {
         _old_dimensions = QNN_VER_PTR(*_qnn_tensor)->dimensions;
-        const auto qnn_data_type = qnn_datatype_from_ggml_datatype(tensor->type);
+        const auto qnn_data_type = qnn::datatype_from_ggml_datatype(tensor->type);
         const bool is_npu = ctx->device == QNN_BACKEND_NPU;

         _dimensions[0] = (uint32_t)tensor->ne[0];
@@ -84,7 +84,7 @@ namespace qnn {
         _dimensions[2] = (uint32_t)tensor->ne[2];
         _dimensions[3] = (uint32_t)tensor->ne[3];
         QNN_VER_PTR(*_qnn_tensor)->dimensions = _dimensions;
-        QNN_VER_PTR(*_qnn_tensor)->rank = qnn_get_ggml_tensor_rank(tensor);
+        QNN_VER_PTR(*_qnn_tensor)->rank = get_ggml_tensor_rank(tensor);
         QNN_VER_PTR(*_qnn_tensor)->dataType = qnn_data_type;

         if (is_npu) {
@@ -104,7 +104,7 @@ namespace qnn {
         }
         else {
             QNN_VER_PTR(*_qnn_tensor)->clientBuf = {
-                tensor->data, qnn_get_ggml_tensor_data_size(tensor) };
+                tensor->data, get_ggml_tensor_data_size(tensor) };
         }
     }

@@ -96,4 +96,17 @@ namespace qnn {
                 offset % static_cast<intptr_t>(alignment));
     }

+    uint32_t get_ggml_tensor_data_size(const ggml_tensor* tensor) {
+        /*
+        size_t data_size = ggml_row_size(tensor->type, tensor->ne[0]);
+        size_t n_dims = qnn_get_ggml_tensor_rank(tensor);
+        for (int i = 1; i < n_dims; i++) {
+            data_size *= tensor->ne[i];
+        }
+
+        return data_size;
+        */
+        return ggml_nbytes(tensor);
+    }
+
 }
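The relocated helper keeps the old per-dimension computation only as a comment and forwards to ggml_nbytes, which already accounts for every dimension and the type's block layout. A hedged sanity check of that equivalence for a contiguous float tensor, assuming only ggml.h and the public ggml API:

#include "ggml.h"
#include <cassert>
#include <cstddef>

// Reproduces the commented-out computation: row size of dim 0 times the outer dims.
static size_t manual_data_size(const ggml_tensor *t) {
    size_t size = ggml_row_size(t->type, t->ne[0]);
    for (int i = 1; i < GGML_MAX_DIMS; i++) {
        size *= t->ne[i];
    }
    return size;
}

int main() {
    ggml_init_params params = {16 * 1024 * 1024, nullptr, false};
    ggml_context *ctx = ggml_init(params);
    ggml_tensor *t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 4);
    // For a contiguous tensor the two computations agree, so forwarding to
    // ggml_nbytes is equivalent to the removed loop.
    assert(manual_data_size(t) == ggml_nbytes(t));
    ggml_free(ctx);
    return 0;
}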