fix warnings

This commit is contained in:
hongruichen 2024-07-17 23:30:14 +08:00
parent 6457a68bd7
commit c76fc9aa2f
2 changed files with 7 additions and 8 deletions

View file

@@ -134,8 +134,7 @@ struct ggml_backend_qnn_buffer_type_context {
 // implementation of QNN backend for GGML
 //
 // =================================================================================================
-static bool ggml_qnn_can_handle_op(ggml_backend_qnn_context *ctx, const struct ggml_tensor *tensor,
-                                   bool b_dump_tensor_info) {
+static bool ggml_qnn_can_handle_op(ggml_backend_qnn_context *ctx, const struct ggml_tensor *tensor) {
     if (ggml_is_empty(tensor) ||
         (!qnn::ggml_qnn_unary_op_array()[tensor->op] && !qnn::ggml_qnn_binary_op_array()[tensor->op])) {
         return false;
@@ -353,13 +352,13 @@ GGML_CALL static ggml_status ggml_backend_qnn_graph_compute(ggml_backend_t backe
 GGML_CALL static bool ggml_backend_qnn_supports_op(ggml_backend_t backend, const ggml_tensor *op) {
     ggml_backend_qnn_context *ctx = (ggml_backend_qnn_context *)backend->context;
-    return (ggml_qnn_can_handle_op(ctx, op, false));
+    return ggml_qnn_can_handle_op(ctx, op);
 }

 GGML_CALL static bool ggml_backend_qnn_offload_op(ggml_backend_t backend, const ggml_tensor *tensor) {
     ggml_backend_qnn_context *ctx = (ggml_backend_qnn_context *)backend->context;
-    return ggml_qnn_can_handle_op(ctx, tensor, false);
+    return ggml_qnn_can_handle_op(ctx, tensor);
 }

 static ggml_backend_i ggml_backend_qnn_interface = {

View file

@@ -101,10 +101,10 @@ public:
         _tensor_inputs = tensor_inputs;
         _tensor_outputs = tensor_outputs;

-        Qnn_OpConfig_t op_config = { .version = QNN_OPCONFIG_VERSION_1,
-                                     .v1 = { _graph_name.c_str(), QNN_OP_PACKAGE_NAME_QTI_AISW, op_name.c_str(), 0,
+        Qnn_OpConfig_t op_config = { /*.version = */ QNN_OPCONFIG_VERSION_1,
+                                     /*.v1 = */ { _graph_name.c_str(), QNN_OP_PACKAGE_NAME_QTI_AISW, op_name.c_str(), 0,
                                              nullptr, (uint32_t)_tensor_inputs.size(), _tensor_inputs.data(),
                                              (uint32_t)_tensor_outputs.size(), _tensor_outputs.data() } };
         auto error = _qnn_interface->qnn_graph_add_node(_graph_handle, op_config);
         if (error != QNN_SUCCESS) {
             QNN_LOG_ERROR("graphAddNode.error = %d\n", error);