From 38f88d5fb15eed11265bba11ddbd85e36ebffaa1 Mon Sep 17 00:00:00 2001
From: hongruichen
Date: Tue, 2 Jul 2024 19:46:17 +0800
Subject: [PATCH] fix compiling error after merge latest master

---
 ggml/src/ggml-qnn.cpp             | 21 +++++----------------
 ggml/src/ggml-qnn/backend-ops.cpp | 24 ++++++++++--------------
 2 files changed, 15 insertions(+), 30 deletions(-)

diff --git a/ggml/src/ggml-qnn.cpp b/ggml/src/ggml-qnn.cpp
index 750d5ff91..e5fc00045 100644
--- a/ggml/src/ggml-qnn.cpp
+++ b/ggml/src/ggml-qnn.cpp
@@ -321,9 +321,7 @@ static bool ggml_qnn_can_handle_op(ggml_backend_qnn_context * ctx,
     return true;
 }
 
-bool ggml_qnn_compute_forward(ggml_backend_qnn_context * ctx,
-                              struct ggml_compute_params * params,
-                              struct ggml_tensor * tensor) {
+bool ggml_qnn_compute_forward(ggml_backend_qnn_context * ctx, struct ggml_tensor * tensor) {
     auto func = qnn::ggml_qnn_op_array()[tensor->op];
     if (!func) {
         QNN_LOG_WARN("unsupported op %d", tensor->op);
@@ -515,13 +513,6 @@ GGML_CALL static size_t ggml_backend_qnn_buffer_type_get_max_size(ggml_backend_b
     return (96 * 1024 * 1024);
 }
 
-GGML_CALL static bool ggml_backend_qnn_buffer_type_supports_backend(
-    ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
-    GGML_UNUSED(buft);
-
-    return ggml_backend_is_qnn(backend) || ggml_backend_is_cpu(backend);
-}
-
 GGML_CALL static bool ggml_backend_qnn_buffer_is_host(ggml_backend_buffer_type_t buft) {
     GGML_UNUSED(buft);
     return true;
@@ -574,9 +565,6 @@ GGML_CALL static ggml_status ggml_backend_qnn_graph_compute(ggml_backend_t backe
     ggml_backend_qnn_context * ctx = (ggml_backend_qnn_context *) backend->context;
     GGML_UNUSED(ctx);
 
-    ggml_compute_params params = {};
-    params.type = GGML_TASK_TYPE_COMPUTE;
-    params.ith = 0;
     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_tensor * node = cgraph->nodes[i];
         if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE ||
@@ -584,7 +572,7 @@ GGML_CALL static ggml_status ggml_backend_qnn_graph_compute(ggml_backend_t backe
             node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
             continue;
         }
-        bool ok = ggml_qnn_compute_forward(ctx, &params, node);
+        bool ok = ggml_qnn_compute_forward(ctx, node);
         if (!ok) {
             QNN_LOG_DEBUG("error: op not supported %s (%s)\n", node->name, ggml_op_name(node->op));
         }
@@ -616,9 +604,11 @@ static ggml_backend_i ggml_backend_qnn_interface = {
    /* .synchronize          = */ nullptr,
    /* .graph_plan_create    = */ nullptr,
    /* .graph_plan_free      = */ nullptr,
+   /* .graph_plan_update    = */ nullptr,
    /* .graph_plan_compute   = */ nullptr,
    /* .graph_compute        = */ ggml_backend_qnn_graph_compute,
    /* .supports_op          = */ ggml_backend_qnn_supports_op,
+   /* .supports_buft        = */ nullptr,
    /* .offload_op           = */ ggml_backend_qnn_offload_op,
    /* .event_new            = */ nullptr,
    /* .event_free           = */ nullptr,
@@ -702,10 +692,9 @@ ggml_backend_buffer_type_t ggml_backend_qnn_buffer_type(size_t device) {
                 /* .get_alignment     = */ ggml_backend_qnn_buffer_type_get_alignment,
                 /* .get_max_size      = */ ggml_backend_qnn_buffer_type_get_max_size,
                 /* .get_alloc_size    = */ nullptr, // defaults to ggml_nbytes
-                /* .supports_backend  = */ ggml_backend_qnn_buffer_type_supports_backend,
                 /* .is_host           = */ ggml_backend_qnn_buffer_is_host
             },
-            /* .context = */ & context,
+            /* .context = */ &context,
         };
     }
     ggml_backend_qnn_buffer_type_initialized = true;
diff --git a/ggml/src/ggml-qnn/backend-ops.cpp b/ggml/src/ggml-qnn/backend-ops.cpp
index a9c94a6df..f1fe699ab 100644
--- a/ggml/src/ggml-qnn/backend-ops.cpp
+++ b/ggml/src/ggml-qnn/backend-ops.cpp
@@ -8,21 +8,17 @@ static bool qnn_is_valid_params(ggml_backend_qnn_context* ctx, const
                                 ggml_tensor* src0, const ggml_tensor* src1, ggml_tensor* dst) {
-    if ((nullptr == ctx) || (nullptr == src0) || (nullptr == src1) || (nullptr == dst)) {
+    if (!ctx || !src0 || !src1 || !dst) {
         QNN_LOG_WARN("invalid params\n");
         return false;
     }
 
-    qnn::qnn_instance* instance = nullptr;
-    Qnn_Tensor_t* tensor_0 = nullptr;
-    Qnn_Tensor_t* tensor_1 = nullptr;
-    Qnn_Tensor_t* tensor_2 = nullptr;
-    tensor_0 = (Qnn_Tensor_t*)src0->extra;
-    tensor_1 = (Qnn_Tensor_t*)src1->extra;
-    tensor_2 = (Qnn_Tensor_t*)dst->extra;
-    instance = ctx->instance;
-    if ((nullptr == instance) || (nullptr == tensor_0) || (nullptr == tensor_1) || (nullptr == tensor_2)) {
-        QNN_LOG_WARN("invalid params\n");
+    auto* instance = ctx->instance;
+    auto* tensor0 = src0->extra;
+    auto* tensor1 = src1->extra;
+    auto* tensor2 = dst->extra;
+    if (!instance || !tensor0 || !tensor1 || !tensor2) {
+        QNN_LOG_WARN("invalid tensors\n");
         return false;
     }
 
     return true;
 }
@@ -60,7 +56,7 @@ static void ggml_qnn_add(ggml_backend_qnn_context* ctx, const ggml_tensor* src0,
     qnn::qnn_perf perf("ggml_qnn_add");
     perf.start();
 
-    std::string map_entry = std::string(ggml_op_name(ggmlop));
+    std::string map_entry(ggml_op_name(ggmlop));
     if (instance->_qnn_graph_map.find(map_entry) !=
         instance->_qnn_graph_map.end()) {
         graph_initialized = true;
@@ -141,8 +137,8 @@ static void ggml_qnn_add(ggml_backend_qnn_context* ctx, const ggml_tensor* src0,
         goto failure;
     }
 
-    Qnn_Tensor_t tensor_inputs[]  = { *tensor_input0.get_qnn_tensor(), *tensor_input1.get_qnn_tensor() };
-    Qnn_Tensor_t tensor_outputs[] = { *tensor_output.get_qnn_tensor() };
+    Qnn_Tensor_t tensor_inputs[] = { *tensor_input0.get_qnn_tensor(), *tensor_input1.get_qnn_tensor() };
+    Qnn_Tensor_t tensor_outputs[] = { *tensor_output.get_qnn_tensor() };
     Qnn_OpConfig_t op_config = { (Qnn_OpConfigVersion_t)1,
                                  .v1 = {"ggml_op_add",
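
Note: the buffer-type callback ggml_backend_qnn_buffer_type_supports_backend
removed above has no direct replacement in this patch; upstream ggml moved that
decision to the backend-level .supports_buft slot, which is left as nullptr in
ggml_backend_qnn_interface here. Below is a minimal sketch of what a QNN
implementation of that slot could look like; the function name and the
host-buffer policy are illustrative assumptions, not part of this commit:

    // Sketch only: mirrors the intent of the removed supports_backend check.
    GGML_CALL static bool ggml_backend_qnn_supports_buft(ggml_backend_t backend,
                                                         ggml_backend_buffer_type_t buft) {
        GGML_UNUSED(backend);
        // The QNN buffers registered above report is_host == true, so accepting
        // any host-visible buffer type keeps interop with the CPU backend working.
        return ggml_backend_buft_is_host(buft);
    }

Such a function would then replace the nullptr in the interface table:

    /* .supports_buft        = */ ggml_backend_qnn_supports_buft,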