diff --git a/ggml/src/ggml-qnn.cpp b/ggml/src/ggml-qnn.cpp
index 8ba258d63..0e5e86e4a 100644
--- a/ggml/src/ggml-qnn.cpp
+++ b/ggml/src/ggml-qnn.cpp
@@ -134,8 +134,7 @@ struct ggml_backend_qnn_buffer_type_context {
 // implementation of QNN backend for GGML
 //
 // =================================================================================================
-static bool ggml_qnn_can_handle_op(ggml_backend_qnn_context *ctx, const struct ggml_tensor *tensor,
-                                   bool b_dump_tensor_info) {
+static bool ggml_qnn_can_handle_op(ggml_backend_qnn_context *ctx, const struct ggml_tensor *tensor) {
     if (ggml_is_empty(tensor) ||
         (!qnn::ggml_qnn_unary_op_array()[tensor->op] && !qnn::ggml_qnn_binary_op_array()[tensor->op])) {
         return false;
@@ -353,13 +352,13 @@ GGML_CALL static ggml_status ggml_backend_qnn_graph_compute(ggml_backend_t backe
 
 GGML_CALL static bool ggml_backend_qnn_supports_op(ggml_backend_t backend, const ggml_tensor *op) {
     ggml_backend_qnn_context *ctx = (ggml_backend_qnn_context *)backend->context;
-    return (ggml_qnn_can_handle_op(ctx, op, false));
+    return ggml_qnn_can_handle_op(ctx, op);
 }
 
 GGML_CALL static bool ggml_backend_qnn_offload_op(ggml_backend_t backend, const ggml_tensor *tensor) {
     ggml_backend_qnn_context *ctx = (ggml_backend_qnn_context *)backend->context;
-    return ggml_qnn_can_handle_op(ctx, tensor, false);
+    return ggml_qnn_can_handle_op(ctx, tensor);
 }
 
 static ggml_backend_i ggml_backend_qnn_interface = {
diff --git a/ggml/src/ggml-qnn/graph.hpp b/ggml/src/ggml-qnn/graph.hpp
index 9621ad1b4..462ed9203 100644
--- a/ggml/src/ggml-qnn/graph.hpp
+++ b/ggml/src/ggml-qnn/graph.hpp
@@ -101,10 +101,10 @@ public:
         _tensor_inputs = tensor_inputs;
         _tensor_outputs = tensor_outputs;
 
-        Qnn_OpConfig_t op_config = { .version = QNN_OPCONFIG_VERSION_1,
-                                     .v1 = { _graph_name.c_str(), QNN_OP_PACKAGE_NAME_QTI_AISW, op_name.c_str(), 0,
-                                             nullptr, (uint32_t)_tensor_inputs.size(), _tensor_inputs.data(),
-                                             (uint32_t)_tensor_outputs.size(), _tensor_outputs.data() } };
+        Qnn_OpConfig_t op_config = { /*.version = */ QNN_OPCONFIG_VERSION_1,
+                                     /*.v1 = */ { _graph_name.c_str(), QNN_OP_PACKAGE_NAME_QTI_AISW, op_name.c_str(), 0,
+                                                  nullptr, (uint32_t)_tensor_inputs.size(), _tensor_inputs.data(),
+                                                  (uint32_t)_tensor_outputs.size(), _tensor_outputs.data() } };
         auto error = _qnn_interface->qnn_graph_add_node(_graph_handle, op_config);
         if (error != QNN_SUCCESS) {
             QNN_LOG_ERROR("graphAddNode.error = %d\n", error);