fix warnings

hongruichen 2024-07-17 21:39:25 +08:00
parent 454deef83c
commit 2502b57203
5 changed files with 35 additions and 21 deletions
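
Most of the changes below silence three warning families: -Wunused-parameter (handled with GGML_UNUSED or by leaving the parameter unnamed), -Wmissing-declarations (a non-static function defined without a prior prototype), and -Wsign-compare (an int loop index compared against an unsigned bound). The following self-contained sketch shows those fix patterns; it is illustrative only, not code from this commit, all names are hypothetical, and GGML_UNUSED is redefined locally so the example compiles on its own:

#include <cstdint>
#include <cstdio>

// ggml defines GGML_UNUSED essentially like this; redefined here so the
// sketch is standalone.
#define GGML_UNUSED(x) (void)(x)

// -Wunused-parameter: mark the parameter as deliberately unused...
static const char *backend_name(void *backend) {
    GGML_UNUSED(backend);
    return "QNN";
}

// ...or leave it unnamed, keeping the name as a comment for documentation.
static void log_line(const char * /*file*/, const char *msg) { std::printf("%s\n", msg); }

// -Wmissing-declarations: a function with external linkage needs a prior
// declaration; a one-line prototype before the definition satisfies it.
void reg_devices();
void reg_devices() { std::printf("registering\n"); }

// -Wsign-compare: give the induction variable the same type as the bound.
static void list_devices(uint32_t num_devices) {
    for (uint32_t i = 0; i < num_devices; i++) {
        std::printf("device %u\n", i);
    }
}

int main() {
    std::printf("%s\n", backend_name(nullptr));
    log_line("logger.cpp", "hello");
    reg_devices();
    list_devices(2);
    return 0;
}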


@@ -173,7 +173,7 @@ static bool ggml_qnn_can_handle_op(ggml_backend_qnn_context *ctx, const struct g
         return true;
     }
 
-bool ggml_qnn_compute_forward(ggml_backend_qnn_context *ctx, struct ggml_tensor *tensor) {
+static bool ggml_qnn_compute_forward(ggml_backend_qnn_context *ctx, struct ggml_tensor *tensor) {
     auto unary_op = qnn::ggml_qnn_unary_op_array()[tensor->op];
     if (unary_op) {
         return unary_op(ctx, tensor->src[0], tensor);
@@ -260,7 +260,10 @@ static ggml_backend_buffer_i ggml_backend_qnn_buffer_interface = {
     /* .reset = */ nullptr,
 };
 
-GGML_CALL static const char *ggml_backend_qnn_buffer_type_name(ggml_backend_buffer_type_t buft) { return "QNN"; }
+GGML_CALL static const char *ggml_backend_qnn_buffer_type_name(ggml_backend_buffer_type_t buft) {
+    GGML_UNUSED(buft);
+    return "QNN";
+}
 
 GGML_CALL static ggml_backend_buffer_t ggml_backend_qnn_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                                                                  size_t size) {
@@ -291,7 +294,10 @@ GGML_CALL static bool ggml_backend_qnn_buffer_is_host(ggml_backend_buffer_type_t
     return true;
 }
 
-GGML_CALL static const char *ggml_backend_qnn_name(ggml_backend_t backend) { return "QNN"; }
+GGML_CALL static const char *ggml_backend_qnn_name(ggml_backend_t backend) {
+    GGML_UNUSED(backend);
+    return "QNN";
+}
 
 GGML_CALL static void ggml_backend_qnn_free(ggml_backend_t backend) {
     QNN_LOG_INFO("enter %s", __func__);
@@ -408,8 +414,6 @@ void ggml_backend_qnn_set_n_threads(ggml_backend_t backend, int n_threads) {
     ctx->threads = n_threads;
 }
 
-const char *ggml_backend_qnn_get_name(ggml_backend_t backend) { return backend->iface.get_name(backend); }
-
 int ggml_backend_qnn_get_device_count() { return GGML_QNN_MAX_DEVICES; }
 
 void ggml_backend_qnn_get_device_description(size_t dev_num, char *description, size_t description_size) {
@@ -534,7 +538,9 @@ ggml_backend_t ggml_backend_qnn_init(size_t device, const char *qnn_lib_path) {
     return qnn_backend;
 }
 
-extern "C" GGML_CALL void ggml_backend_qnn_reg_devices() {
+extern "C" GGML_CALL void ggml_backend_qnn_reg_devices();
+
+GGML_CALL void ggml_backend_qnn_reg_devices() {
     for (size_t idx = 0; idx < GGML_QNN_MAX_DEVICES; idx++) {
         char name[GGML_MAX_NAME];
         ggml_backend_qnn_get_device_description(idx, name, GGML_MAX_NAME);


@@ -101,10 +101,9 @@ public:
         _tensor_inputs = tensor_inputs;
         _tensor_outputs = tensor_outputs;
 
-        Qnn_Param_t qnn_params[] = {};
         Qnn_OpConfig_t op_config = { .version = QNN_OPCONFIG_VERSION_1,
                                      .v1 = { _graph_name.c_str(), QNN_OP_PACKAGE_NAME_QTI_AISW, op_name.c_str(), 0,
-                                             qnn_params, (uint32_t)_tensor_inputs.size(), _tensor_inputs.data(),
+                                             nullptr, (uint32_t)_tensor_inputs.size(), _tensor_inputs.data(),
                                              (uint32_t)_tensor_outputs.size(), _tensor_outputs.data() } };
         auto error = _qnn_interface->qnn_graph_add_node(_graph_handle, op_config);
         if (error != QNN_SUCCESS) {
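
The removed Qnn_Param_t qnn_params[] = {}; declares a zero-size array, a non-standard extension that GCC and Clang warn about under -Wpedantic; since the node already passes a parameter count of 0, a null pointer expresses the same thing. A standalone sketch of that pattern, using a hypothetical stand-in type rather than the real Qnn_Param_t:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a QNN-style "pointer + count" parameter list.
struct param_t { const char *name; };

static void add_node(const param_t *params, uint32_t num_params) {
    for (uint32_t i = 0; i < num_params; i++) {
        std::printf("param: %s\n", params[i].name);
    }
}

int main() {
    // param_t params[] = {};  // zero-size array: non-standard, warns under -Wpedantic
    add_node(nullptr, 0);      // a null pointer with count 0 means "no parameters"
    return 0;
}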


@@ -9,7 +9,8 @@
 #include <android/log.h>
 #endif
 
-void qnn::internal_log(ggml_log_level level, const char *file, const char *func, int line, const char *format, ...) {
+void qnn::internal_log(ggml_log_level level, const char * /*file*/, const char *func, int line, const char *format,
+                       ...) {
     static std::mutex qnn_internal_log_mutex;
     static char s_qnn_internal_log_buf[QNN_LOGBUF_LEN];
@@ -32,8 +33,8 @@ void qnn::internal_log(ggml_log_level level, const char *file, const char *func,
     }
 }
 
-void qnn::sdk_logcallback(const char *fmt, QnnLog_Level_t level, uint64_t timestamp, va_list argp) {
 #if ENABLE_QNNSDK_LOG
+void qnn::sdk_logcallback(const char *fmt, QnnLog_Level_t level, uint64_t timestamp, va_list argp) {
     static std::mutex log_mutex;
     static unsigned char s_ggml_qnn_logbuf[QNN_LOGBUF_LEN];
@@ -67,5 +68,7 @@ void qnn::sdk_logcallback(const char *fmt, QnnLog_Level_t level, uint64_t timest
         vsnprintf(reinterpret_cast<char *const>(s_ggml_qnn_logbuf), QNN_LOGBUF_LEN, fmt, argp);
         QNN_LOG_INFO("%8.1fms [%-7s] %s", ms, log_level_desc, s_ggml_qnn_logbuf);
     }
-#endif
 }
+#else
+void qnn::sdk_logcallback(const char *, QnnLog_Level_t, uint64_t, va_list) {}
+#endif
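
Moving the definition of sdk_logcallback inside #if ENABLE_QNNSDK_LOG and adding an empty-bodied version with unnamed parameters in the #else branch keeps the symbol defined in both configurations without tripping -Wunused-parameter in the no-op case. A minimal standalone sketch of the same pattern, with hypothetical names:

#include <cstdarg>
#include <cstdio>

#define ENABLE_SDK_LOG 1  // flip to 0 to compile the no-op stub instead

#if ENABLE_SDK_LOG
void sdk_log(const char *fmt, va_list args) { std::vprintf(fmt, args); }
#else
// Unnamed parameters: the signature stays identical, but nothing is "unused".
void sdk_log(const char *, va_list) {}
#endif

static void log_wrapper(const char *fmt, ...) {
    va_list args;
    va_start(args, fmt);
    sdk_log(fmt, args);
    va_end(args);
}

int main() {
    log_wrapper("hello %s\n", "world");
    return 0;
}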


@@ -47,6 +47,10 @@ Fn dl_sym_typed(dl_handler_t handle, const std::string &function_name) {
 // ref:https://github.com/pytorch/executorch/tree/main/backends/qualcomm
 // =================================================================================================
 
+// TODO: fix this for other compilers
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wextra-semi"
+
 class qnn_system_interface {
 
 #define DEFINE_SHIM_FUNCTION_SYS_INTERFACE(F, pointer_name) \
@@ -176,12 +180,14 @@ private:
     const QnnInterface_t _qnn_interface = {};
 };
 
+#pragma GCC diagnostic pop
+
 class qnn_instance {
 public:
     using BackendIdType = decltype(QnnInterface_t{}.backendId);
 
     explicit qnn_instance(const std::string &lib_path, const std::string &backend_name, const std::string &model_name) :
-        _lib_path(std::move(lib_path)), _backend_name(std::move(backend_name)), _model_name(std::move(model_name)) {};
+        _lib_path(std::move(lib_path)), _backend_name(std::move(backend_name)), _model_name(std::move(model_name)) {}
 
     ~qnn_instance() {}
@@ -250,7 +256,7 @@ public:
         QNN_LOG_INFO("device counts %d", p_info->v1.numHwDevices);
         QnnDevice_HardwareDeviceInfo_t *infos = p_info->v1.hwDevices;
         QnnHtpDevice_OnChipDeviceInfoExtension_t chipinfo = {};
-        for (int i = 0; i < p_info->v1.numHwDevices; i++) {
+        for (uint32_t i = 0; i < p_info->v1.numHwDevices; i++) {
             QNN_LOG_INFO("deviceID:%d, deviceType:%d, numCores %d", infos[i].v1.deviceId, infos[i].v1.deviceType,
                          infos[i].v1.numCores);
             QnnDevice_DeviceInfoExtension_t devinfo = infos[i].v1.deviceInfoExtension;
@@ -464,17 +470,17 @@
         return _qnn_interface;
     }
 
-    const Qnn_LogHandle_t get_qnn_log_handle() { return _qnn_log_handle; }
+    Qnn_LogHandle_t get_qnn_log_handle() { return _qnn_log_handle; }
 
-    const Qnn_ProfileHandle_t get_qnn_profile_handle() { return _qnn_profile_handle; }
+    Qnn_ProfileHandle_t get_qnn_profile_handle() { return _qnn_profile_handle; }
 
-    const Qnn_DeviceHandle_t get_qnn_device_handle() { return _qnn_device_handle; }
+    Qnn_DeviceHandle_t get_qnn_device_handle() { return _qnn_device_handle; }
 
-    const Qnn_BackendHandle_t get_qnn_backend_handle() { return _qnn_backend_handle; }
+    Qnn_BackendHandle_t get_qnn_backend_handle() { return _qnn_backend_handle; }
 
-    const Qnn_ContextHandle_t get_qnn_context_handle() { return _qnn_context_handle; }
+    Qnn_ContextHandle_t get_qnn_context_handle() { return _qnn_context_handle; }
 
-    const Qnn_GraphHandle_t get_qnn_graph_handle() { return _qnn_graph_handle; }
+    Qnn_GraphHandle_t get_qnn_graph_handle() { return _qnn_graph_handle; }
 
     int init_htp_perfinfra() {
         QnnDevice_Infrastructure_t device_infra = nullptr;
@@ -779,7 +785,7 @@ private:
         return 0;
     }
 
-    int load_backend(std::string &lib_path, const QnnSaver_Config_t **saver_config) {
+    int load_backend(std::string &lib_path, const QnnSaver_Config_t ** /*saver_config*/) {
         Qnn_ErrorHandle_t error = QNN_SUCCESS;
         QNN_LOG_DEBUG("lib_path:%s\n", lib_path.c_str());
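
The push/ignored/pop pair added above brackets only the shim-macro region: the DEFINE_SHIM_FUNCTION_SYS_INTERFACE expansions are complete member function definitions, so the trailing semicolon at each use site is redundant and trips -Wextra-semi, and scoping the suppression with #pragma GCC diagnostic push/pop avoids disabling the warning file-wide (the TODO notes that other compilers, e.g. MSVC with #pragma warning(push)/(disable: ...), would need their own guards). A minimal standalone sketch of the same scoping, with a hypothetical shim macro:

#include <cstdio>

// Hypothetical shim macro: its expansion is a complete in-class member
// function definition, so writing DEFINE_SHIM(...) followed by ';' leaves
// a redundant semicolon that -Wextra-semi flags.
#define DEFINE_SHIM(name) \
    void name() const { std::printf(#name "\n"); }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wextra-semi"

class shim_interface {
public:
    DEFINE_SHIM(shim_a);  // trailing ';' kept for readability; warning suppressed
    DEFINE_SHIM(shim_b);
};

#pragma GCC diagnostic pop  // diagnostics restored for the rest of the file

int main() {
    shim_interface iface;
    iface.shim_a();
    iface.shim_b();
    return 0;
}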


@@ -202,7 +202,7 @@ private:
 #else
 class qnn_perf {
 public:
-    qnn_perf(const std::string &perf_name) {}
+    qnn_perf(const std::string &) {}
     ~qnn_perf() { info(); }
     qnn_perf() = delete;
     qnn_perf(const qnn_perf &) = delete;