ggml: add Qualcomm QNN (Qualcomm Neural Network, aka Qualcomm AI Engine Direct) backend

zhou.weiguo 2024-04-24 16:28:18 +08:00
parent c90dbe026b
commit d325088dbf
7 changed files with 4368 additions and 1 deletion

ggml-qnn.cpp (new file, 3590 lines; diff suppressed because it is too large)

ggml-qnn.h (new file, 43 lines)

@@ -0,0 +1,43 @@
#pragma once
#include "ggml.h"
#include "ggml-backend.h"
#ifdef __cplusplus
extern "C" {
#endif
#define GGML_QNN_MAX_DEVICES 3
//the QNN cDSP and HTA backends are not used for now; the focus is on the QNN CPU/GPU/NPU (aka HTP/DSP) backends
enum QNNBackend {
QNN_BACKEND_CPU,
QNN_BACKEND_GPU,
QNN_BACKEND_NPU,
QNN_BACKEND_GGML, //"fake" QNN backend, used to compare performance between QNN and the original GGML
};
GGML_API int ggml_backend_qnn_reg_devices(void);
/**
*
* @param dev_num 0: QNN_BACKEND_CPU, 1: QNN_BACKEND_GPU, 2: QNN_BACKEND_NPU (aka HTP/DSP)
* @param qnn_lib_path path to the QNN libraries, such as "/data/local/tmp/" on Android, or a path specified in the JNI layer
* @return backend instance on success, NULL on failure
*/
GGML_API ggml_backend_t ggml_backend_qnn_init(size_t dev_num, const char * qnn_lib_path);
GGML_API bool ggml_backend_is_qnn(ggml_backend_t backend);
GGML_API void ggml_backend_qnn_set_n_threads(ggml_backend_t backend, int n_threads);
GGML_API int ggml_backend_qnn_get_device_count(void);
GGML_API void ggml_backend_qnn_get_device_description(size_t dev_num, char * description, size_t description_size);
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_qnn_buffer_type(size_t dev_num);
#ifdef __cplusplus
}
#endif
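For orientation, a minimal usage sketch of the API above, assuming the prebuilt QNN libraries sit in "/data/local/tmp/" as in an Android command-line setup; the helper name and the NPU-then-CPU fallback policy are illustrative, not part of this commit:

#include <cstdio>
#include "ggml-qnn.h"

// illustrative helper (not part of this commit): list the available QNN
// devices, then try the NPU backend first and fall back to the CPU backend
static ggml_backend_t init_preferred_qnn_backend(void) {
    int n_devices = ggml_backend_qnn_get_device_count();
    for (int i = 0; i < n_devices; i++) {
        char desc[256];
        ggml_backend_qnn_get_device_description(i, desc, sizeof(desc));
        printf("QNN device %d: %s\n", i, desc);
    }
    ggml_backend_t backend = ggml_backend_qnn_init(QNN_BACKEND_NPU, "/data/local/tmp/");
    if (backend == nullptr) {
        backend = ggml_backend_qnn_init(QNN_BACKEND_CPU, "/data/local/tmp/");
    }
    return backend;
}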

llama.cpp (22 additions, 1 deletion)

@@ -19,6 +19,8 @@
# include "ggml-sycl.h"
#elif defined(GGML_USE_KOMPUTE)
# include "ggml-kompute.h"
#elif defined(GGML_USE_QNN)
# include "ggml-qnn.h"
#endif
#ifdef GGML_USE_METAL
@@ -2377,6 +2379,8 @@ static size_t llama_get_device_count(const llama_model & model) {
count = ggml_backend_sycl_get_device_count();
#elif defined(GGML_USE_VULKAN)
count = ggml_backend_vk_get_device_count();
#elif defined(GGML_USE_QNN)
count = ggml_backend_qnn_get_device_count();
#endif
#if defined(GGML_USE_RPC)
count += model.rpc_servers.size();
@@ -2409,6 +2413,8 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_
if (buft == nullptr) {
LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu);
}
#elif defined(GGML_USE_QNN)
buft = ggml_backend_qnn_buffer_type(gpu);
#endif
if (buft == nullptr) {
@@ -15899,6 +15905,8 @@ size_t llama_max_devices(void) {
return GGML_SYCL_MAX_DEVICES;
#elif defined(GGML_USE_VULKAN)
return GGML_VK_MAX_DEVICES;
#elif defined(GGML_USE_QNN)
return GGML_QNN_MAX_DEVICES;
#else
return 1;
#endif
@@ -15914,7 +15922,7 @@ bool llama_supports_mlock(void) {
bool llama_supports_gpu_offload(void) {
#if defined(GGML_USE_CUDA) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)
defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC) || defined(GGML_USE_QNN)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
return true;
#else
@@ -16225,6 +16233,19 @@ struct llama_context * llama_new_context_with_model(
}
ctx->backends.push_back(backend);
}
#elif defined(GGML_USE_QNN)
if (model->n_gpu_layers > 0) {
//the second param is the path of the prebuilt QNN libs provided by Qualcomm;
//it can be hardcoded to "/data/local/tmp/" for an Android command-line application,
//or specified in the JNI layer for an Android APK application
ggml_backend_t backend = ggml_backend_qnn_init(model->main_gpu, "/data/local/tmp/");
if (nullptr == backend) {
LLAMA_LOG_ERROR("%s: failed to initialize QNN backend\n", __func__);
llama_free(ctx);
return nullptr;
}
ctx->backends.push_back(backend);
}
#endif
#if defined(GGML_USE_RPC)
if (model->n_gpu_layers > 0) {
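The lib path above is hardcoded, as the comment notes, for command-line testing; below is a small sketch of one way an application could make it overridable at runtime. The GGML_QNN_LIB_PATH environment variable and the helper are hypothetical, not part of this commit:

#include <cstdlib>

// hypothetical helper (not part of this commit): let an environment variable
// override the default Android command-line path for the prebuilt QNN libs
static const char * qnn_lib_path(void) {
    const char * path = std::getenv("GGML_QNN_LIB_PATH"); // assumed variable name
    return path != nullptr ? path : "/data/local/tmp/";
}

// usage: ggml_backend_t backend = ggml_backend_qnn_init(model->main_gpu, qnn_lib_path());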

tests/ggml-qnn/CMakeLists.txt (new file, 60 lines)

@@ -0,0 +1,60 @@
cmake_minimum_required(VERSION 3.22.1)
project(ggml-qnn-test)
set(CMAKE_VERBOSE_MAKEFILE on)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
#set to ON if the target Android phone is based on a Qualcomm Snapdragon 8 Gen 3
set(TARGET_SNAPDRAGON_8_GEN3 OFF)
set(QNN_INC_PATH ${QNN_SDK_PATH}/include/QNN)
set(QNN_LIB_PATH ${QNN_SDK_PATH}/lib/aarch64-android)
include_directories(${QNN_INC_PATH})
include_directories(../../) # ggml.h
set(SOURCE_FILES
../../ggml.c
../../ggml-alloc.c
../../ggml-backend.c
../../ggml-quants.c
../../ggml-qnn.cpp
test-qnn-ops.cpp
)
message("QNN_SDK_PATH : ${QNN_SDK_PATH}")
message("QNN_INC_PATH : ${QNN_INC_PATH}")
message("QNN_LIB_PATH : ${QNN_LIB_PATH}")
add_definitions(-D__ARM_NEON)
add_definitions(-DGGML_USE_QNN)
if(CMAKE_BUILD_TYPE STREQUAL "Release")
add_definitions(-DNDEBUG)
add_definitions(-O3)
endif()
if (TARGET_SNAPDRAGON_8_GEN3)
# the build optimizations below are only verified to work well on the Qualcomm SM8650-AB Snapdragon 8 Gen 3
add_definitions(-march=armv8.7-a)
add_definitions(-mcpu=cortex-x1)
add_definitions(-mtune=cortex-x1)
else()
# the build optimization below might work well on all mainstream Android phones based on Qualcomm mobile SoCs
add_definitions(-mcpu=cortex-a72)
endif()
add_compile_options("-Wall" "-Wno-sign-compare")
find_library(LOG_LIB log)
link_libraries(${LOG_LIB} android)
add_executable(${TARGET_NAME}
${SOURCE_FILES}
)

tests/ggml-qnn/build-ggml-qnn.sh (new file, 95 lines)

@@ -0,0 +1,95 @@
#!/bin/bash
set -e
#https://qpm.qualcomm.com/#/main/tools/details/qualcomm_ai_engine_direct
#https://developer.qualcomm.com/software/hexagon-dsp-sdk/tools
QNN_SDK_PATH=/opt/qcom/aistack/qnn/2.20.0.240223/
ANDROID_NDK=`pwd`/android-ndk-r26c
ANDROID_PLATFORM=android-34
TARGET=ggml-qnn-test
function dump_vars()
{
echo -e "ANDROID_NDK: ${ANDROID_NDK}"
echo -e "QNN_SDK_PATH: ${QNN_SDK_PATH}"
}
function show_pwd()
{
echo -e "current working path:$(pwd)\n"
}
function check_qnn_sdk()
{
if [ ! -d ${QNN_SDK_PATH} ]; then
echo -e "QNN_SDK_PATH ${QNN_SDK_PATH} not exist, pls check...\n"
exit 1
fi
}
function check_and_download_ndk()
{
is_android_ndk_exist=1
if [ ! -d ${ANDROID_NDK} ]; then
is_android_ndk_exist=0
fi
if [ ! -f ${ANDROID_NDK}/build/cmake/android.toolchain.cmake ]; then
is_android_ndk_exist=0
fi
if [ ${is_android_ndk_exist} -eq 0 ]; then
if [ ! -f android-ndk-r26c-linux.zip ]; then
wget --no-config --quiet --show-progress -O android-ndk-r26c-linux.zip https://dl.google.com/android/repository/android-ndk-r26c-linux.zip
fi
if ! unzip android-ndk-r26c-linux.zip; then
printf "failed to unzip android ndk to %s \n" "${ANDROID_NDK}"
exit 1
fi
printf "android ndk saved to ${ANDROID_NDK} \n\n"
else
printf "android ndk already exist:${ANDROID_NDK} \n\n"
fi
}
function build_arm64
{
cmake -H. -B./out/arm64-v8a -DTARGET_NAME=${TARGET} -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=${ANDROID_PLATFORM} -DANDROID_NDK=${ANDROID_NDK} -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DQNN_SDK_PATH=${QNN_SDK_PATH}
cd ./out/arm64-v8a
make
ls -lah ${TARGET}
/bin/cp ${TARGET} ../../
cd -
}
function remove_temp_dir()
{
if [ -d out ]; then
echo "remove out directory in `pwd`"
rm -rf out
fi
}
show_pwd
check_and_download_ndk
check_qnn_sdk
dump_vars
remove_temp_dir
build_arm64

tests/ggml-qnn/run-ggml-qnn.sh (new executable file, 108 lines)

@@ -0,0 +1,108 @@
#!/bin/bash
#https://qpm.qualcomm.com/#/main/tools/details/qualcomm_ai_engine_direct
#https://developer.qualcomm.com/software/hexagon-dsp-sdk/tools
QNN_SDK_PATH=/opt/qcom/aistack/qnn/2.20.0.240223/
GGML_QNN_TEST=ggml-qnn-test
REMOTE_PATH=/data/local/tmp/
function check_qnn_sdk()
{
if [ ! -d ${QNN_SDK_PATH} ]; then
echo -e "QNN_SDK_PATH ${QNN_SDK_PATH} not exist, pls check or download it from https://qpm.qualcomm.com/#/main/tools/details/qualcomm_ai_engine_direct...\n"
exit 1
fi
}
function check_qnn_libs()
{
#reuse the cached qnn libs on the Android phone
adb shell ls ${REMOTE_PATH}/libQnnCpu.so
if [ $? -eq 0 ]; then
printf "QNN libs already exist on Android phone\n"
else
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnSystem.so ${REMOTE_PATH}/
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnCpu.so ${REMOTE_PATH}/
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnGpu.so ${REMOTE_PATH}/
#the QNN NPU (aka HTP/DSP) backend has only been verified successfully on a Xiaomi 14 (Qualcomm SM8650-AB Snapdragon 8 Gen 3)
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnHtp.so ${REMOTE_PATH}/
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnHtpNetRunExtensions.so ${REMOTE_PATH}/
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnHtpPrepare.so ${REMOTE_PATH}/
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnHtpV75Stub.so ${REMOTE_PATH}/
adb push ${QNN_SDK_PATH}/lib/hexagon-v75/unsigned/libQnnHtpV75Skel.so ${REMOTE_PATH}/
fi
}
function show_usage()
{
echo "Usage:"
echo " $0 GGML_OP_ADD 0/1/2"
echo " $0 GGML_OP_MUL 0/1/2"
echo " $0 GGML_OP_MUL_MAT 0/1/2"
echo -e "\n\n\n"
}
function main()
{
check_qnn_libs
#upload the latest ggml-qnn-test binary
adb push ${GGML_QNN_TEST} ${REMOTE_PATH}
adb shell chmod +x ${REMOTE_PATH}/${GGML_QNN_TEST}
case "$ggmlop" in
GGML_OP_ADD)
echo "adb shell ${REMOTE_PATH}/${GGML_QNN_TEST} -t GGML_OP_ADD -b $qnnbackend"
adb shell ${REMOTE_PATH}/${GGML_QNN_TEST} -t GGML_OP_ADD -b $qnnbackend
;;
GGML_OP_MUL)
adb shell ${REMOTE_PATH}/${GGML_QNN_TEST} -t GGML_OP_MUL -b $qnnbackend
;;
GGML_OP_MUL_MAT)
adb shell ${REMOTE_PATH}/${GGML_QNN_TEST} -t GGML_OP_MUL_MAT -b $qnnbackend
;;
*)
printf " \n$arg not supported currently\n"
show_usage
exit 1
;;
esac
}
check_qnn_sdk
unset ggmlop
unset qnnbackend
if [ $# == 0 ]; then
show_usage
exit 1
elif [ $# == 1 ]; then
if [ "$1" == "-h" ]; then
#avoid upload command line program to Android phone in this scenario
show_usage
exit 1
elif [ "$1" == "help" ]; then
#avoid upload command line program to Android phone in this scenario
show_usage
exit 1
else
ggmlop=$1
qnnbackend=0
fi
elif [ $# == 2 ]; then
ggmlop=$1
qnnbackend=$2
else
show_usage
exit 1
fi
main

tests/ggml-qnn/test-qnn-ops.cpp (new file, 450 lines)

@@ -0,0 +1,450 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>
#include <unistd.h>
#include <inttypes.h>
#include <math.h>
#include <time.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <limits.h>
#include <signal.h>
#include <sys/types.h>
#include <string>
#include <vector>
#include <thread>
#include <mutex>
#include <map>
#include <set>
#include <tuple>
#include <queue>
#include <fstream>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <chrono>
#include <memory>
#include <regex>
#include <random>
#include <functional>
#include <unordered_map>
#include <condition_variable>
#include <cassert>
#include <unordered_set>
#include <utility>
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-qnn.h"
#define GGML_QNN_DEBUG 1
#define GGML_QNN_LOGBUF_LEN 4096
#define QNN_LOG_ERROR(...) ggml_qnn_log_internal(GGML_LOG_LEVEL_ERROR, __FILE__, __FUNCTION__, __LINE__, __VA_ARGS__)
#define QNN_LOG_WARN(...) ggml_qnn_log_internal(GGML_LOG_LEVEL_WARN, __FILE__, __FUNCTION__, __LINE__, __VA_ARGS__)
#define QNN_LOG_INFO(...) ggml_qnn_log_internal(GGML_LOG_LEVEL_INFO, __FILE__, __FUNCTION__, __LINE__, __VA_ARGS__)
#if GGML_QNN_DEBUG
#define QNN_LOG_DEBUG(...) ggml_qnn_log_internal(GGML_LOG_LEVEL_DEBUG, __FILE__, __FUNCTION__, __LINE__, __VA_ARGS__)
#else
#define QNN_LOG_DEBUG(...)
#endif
static void tensor_dump(const ggml_tensor * tensor, const char * name);
#define TENSOR_DUMP(tensor) tensor_dump(tensor, #tensor)
static void ggml_qnn_log_internal(ggml_log_level level, const char * file, const char * func, int line, const char * format, ...) {
static std::mutex ggml_qnn_log_internal_mutex;
static char s_ggml_qnn_log_internal_buf[GGML_QNN_LOGBUF_LEN];
{
std::lock_guard<std::mutex> lock(ggml_qnn_log_internal_mutex);
va_list args;
va_start(args, format);
int len_prefix = snprintf(s_ggml_qnn_log_internal_buf, GGML_QNN_LOGBUF_LEN, "[%s, %d]: ", func, line);
int len = vsnprintf(s_ggml_qnn_log_internal_buf + len_prefix, GGML_QNN_LOGBUF_LEN - len_prefix, format, args);
if (len < (GGML_QNN_LOGBUF_LEN - len_prefix)) {
//for Android command line application or WoA
printf("%s\n", s_ggml_qnn_log_internal_buf);
}
va_end(args);
}
}
static const char * get_qnn_backend_name(int n_backend_type) {
switch (n_backend_type) {
case 0:
return "QNN-CPU";
case 1:
return "QNN-GPU";
case 2:
return "QNN-NPU(HTP/DSP)";
case 3:
return "ggml";
default:
return "unknown";
}
}
static bool ggml_graph_compute_helper(
struct ggml_backend * backend,
struct ggml_cgraph * graph,
std::vector<uint8_t> & buf,
int n_threads,
ggml_abort_callback abort_callback,
void * abort_callback_data) {
struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
plan.abort_callback = abort_callback;
plan.abort_callback_data = abort_callback_data;
if (plan.work_size > 0) {
buf.resize(plan.work_size);
plan.work_data = buf.data();
}
if (ggml_backend_is_cpu(backend)) {
ggml_backend_cpu_set_n_threads(backend, n_threads);
}
#ifdef GGML_USE_QNN
if (ggml_backend_is_qnn(backend)) {
ggml_backend_qnn_set_n_threads(backend, n_threads);
}
#endif
//a new approach to mixed inference: use the specified backend when available, otherwise fall back to the default ggml CPU compute plan
if (nullptr != backend)
return ggml_backend_graph_compute(backend, graph) == GGML_STATUS_SUCCESS;
else
return ggml_graph_compute(graph, &plan);
}
static void tensor_dump_elements(const ggml_tensor * tensor) {
float value = 0;
std::ostringstream tmposs;
if (tensor->type == GGML_TYPE_F32) {
for (int h = 0; h < tensor->ne[3]; h++) {
for (int i = 0; i < tensor->ne[2]; i++) {
for (int j = 0; j < tensor->ne[1]; j++) {
for (int k = 0; k < tensor->ne[0]; k++) {
value = ((float *) tensor->data)[((h * tensor->ne[2] + i) * tensor->ne[1] + j) * tensor->ne[0] + k];
tmposs << std::setw(8) << std::fixed << std::setprecision(2) << value
<< " ";
}
if (tmposs.str().length() <= (GGML_QNN_LOGBUF_LEN - 96)) {
QNN_LOG_DEBUG("%s", tmposs.str().c_str());
}
tmposs.clear();
tmposs.str("");
//QNN_LOG_DEBUG("\n");
}
}
}
}
//QNN_LOG_DEBUG("\n");
}
static void tensor_dump(const ggml_tensor * tensor, const char * name) {
QNN_LOG_DEBUG("dump ggml tensor %s(%s)", name, tensor->name);
QNN_LOG_DEBUG("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)",
name,
tensor->type, ggml_type_name(tensor->type),
tensor->ne[0], tensor->ne[1], tensor->ne[2],
tensor->nb[0], tensor->nb[1], tensor->nb[2]);
tensor_dump_elements(tensor);
QNN_LOG_DEBUG("\n");
}
static uint32_t get_tensor_rank(const ggml_tensor * tensor) {
uint32_t rank = 0;
for (int i = 0; i < GGML_MAX_DIMS; i++) {
if ((0 != tensor->ne[i]) && (1 != tensor->ne[i])) {
rank++;
}
}
return rank;
}
static uint32_t get_tensor_data_size(const ggml_tensor * tensor) {
size_t data_size = ggml_row_size(tensor->type, tensor->ne[0]);
size_t n_dims = get_tensor_rank(tensor);
for (size_t i = 1; i < n_dims; i++) {
data_size *= tensor->ne[i];
}
QNN_LOG_DEBUG("get_tensor_data_size %zu", data_size);
QNN_LOG_DEBUG("ggml_nbytes(tensor) %zu", ggml_nbytes(tensor));
return ggml_nbytes(tensor);
}
//ref: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-backend-ops.cpp#L20
static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
// static RNG initialization (revisit if n_threads stops being constant)
static const size_t n_threads = std::thread::hardware_concurrency();
static std::vector<std::default_random_engine> generators = []() {
std::random_device rd;
std::vector<std::default_random_engine> vec;
vec.reserve(n_threads);
//for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
return vec;
}();
size_t size = ggml_nelements(tensor);
std::vector<float> data(size);
auto init_thread = [&](size_t ith, size_t start, size_t end) {
std::uniform_real_distribution<float> distribution(min, max);
for (size_t i = start; i < end; i++) {
data[i] = distribution(generators[ith]);
}
};
std::vector<std::thread> threads;
threads.reserve(n_threads);
for (size_t i = 0; i < n_threads; i++) {
size_t start = i*size/n_threads;
size_t end = (i+1)*size/n_threads;
threads.emplace_back(init_thread, i, start, end);
}
for (auto & t : threads) {
t.join();
}
if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
} else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
std::vector<uint8_t> dataq(ggml_row_size(tensor->type, size));
std::vector<float> imatrix(tensor->ne[0], 1.0f); // dummy importance matrix
const float * im = imatrix.data();
if (!ggml_quantize_requires_imatrix(tensor->type)) {
// when the imatrix is optional, we want to test both quantization with and without imatrix
// use one of the random numbers to decide
if (data[0] > 0.5f*(min + max)) {
im = nullptr;
}
}
ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size/tensor->ne[0], tensor->ne[0], im);
GGML_ASSERT(ggml_validate_row_data(tensor->type, dataq.data(), dataq.size()));
ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
} else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
// This is going to create some weird integers though.
ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
} else {
GGML_ASSERT(false);
}
}
//ref: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-backend-ops.cpp#L310
static void initialize_tensors(ggml_context * ctx) {
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
init_tensor_uniform(t);
}
}
static void show_usage() {
printf(" " \
"\nUsage: test_qnn_ops [options]\n" \
"\n" \
"Options:\n" \
" -t GGML_OP_ADD / GGML_OP_MUL / GGML_OP_MULMAT\n" \
" -b 0(QNN_CPU) 1(QNN_GPU) 2(QNN_NPU)\n" \
" ?/h print usage infomation\n\n"
);
}
int main(int argc, char * argv[]) {
int64_t n_begin_time = 0LL;
int64_t n_end_time = 0LL;
int64_t n_duration = 0LL;
size_t ctx_size = 0;
int sizey = 4;
int sizex = 4;
int num_threads = 4;
int n_backend_type = QNN_BACKEND_CPU;
int n_ggml_op_type = GGML_OP_ADD;
struct ggml_context * ctx = nullptr;
struct ggml_cgraph * gf = nullptr;
struct ggml_tensor * src0 = nullptr;
struct ggml_tensor * src1 = nullptr;
struct ggml_tensor * dst = nullptr;
ggml_backend_t backend = nullptr;
ggml_backend_buffer_t buffer= nullptr;
ggml_type qtype = GGML_TYPE_F32;
std::vector<uint8_t> work_buffer;
for (int i = 1; i < argc; i++) {
if (0 == strcmp(argv[i], "-t")) {
if (i + 1 < argc) {
if (0 == memcmp(argv[i + 1], "GGML_OP_ADD", 11)) {
n_ggml_op_type = GGML_OP_ADD;
} else if (0 == memcmp(argv[i + 1], "GGML_OP_MUL_MAT", 15)) {
n_ggml_op_type = GGML_OP_MUL_MAT;
} else if (0 == memcmp(argv[i + 1], "GGML_OP_MUL", 11)) {
n_ggml_op_type = GGML_OP_MUL;
} else {
show_usage();
return 1;
}
i++;
}
} else if (0 == strcmp(argv[i], "-b")) {
if (i + 1 < argc) {
int n_backend = atoi(argv[i + 1]);
if (n_backend >= 0 && n_backend <= QNN_BACKEND_NPU) {
n_backend_type = n_backend;
} else {
show_usage();
return 1;
}
i++;
}
} else {
show_usage();
return 1;
}
}
QNN_LOG_DEBUG("enter qnn_ggml_op\n");
QNN_LOG_DEBUG("ggml op:%d(%s)", n_ggml_op_type, ggml_op_name((enum ggml_op) n_ggml_op_type));
n_begin_time = ggml_time_us();
srand(time(NULL));
ctx_size += 1024 * 1024 * 32;
QNN_LOG_DEBUG("Allocating Memory of size %zi bytes, %zi MB\n", ctx_size,
(ctx_size / 1024 / 1024));
struct ggml_init_params params = {
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ 0
};
if (n_backend_type != QNN_BACKEND_GGML) {
params.no_alloc = true;
backend = ggml_backend_qnn_init(n_backend_type, "/data/local/tmp/");
if (nullptr == backend) {
QNN_LOG_ERROR("create qnn backend %d(%s) failed", n_backend_type, get_qnn_backend_name(n_backend_type));
return 1;
}
}
ctx = ggml_init(params);
if (!ctx) {
QNN_LOG_ERROR("%s: ggml_init() failed\n");
return 2;
}
QNN_LOG_DEBUG("creating new tensors\n");
QNN_LOG_DEBUG("ggml_blck_size(%s) %d", ggml_type_name(qtype), ggml_blck_size(qtype));
QNN_LOG_DEBUG("ggml_type_size(%s) %d", ggml_type_name(qtype), ggml_type_size(qtype));
if (qtype != GGML_TYPE_F32) {
sizex = ggml_blck_size(qtype);
}
src0 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
ggml_set_input(src0);
src1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey);
ggml_set_input(src1);
switch (n_ggml_op_type) {
case GGML_OP_ADD:
dst = ggml_add(ctx, src0, src1);
break;
case GGML_OP_MUL:
dst = ggml_mul(ctx, src0, src1);
break;
case GGML_OP_MUL_MAT:
dst = ggml_mul_mat(ctx, src0, src1);
break;
default:
QNN_LOG_WARN("ggml op %d(%s) not supported", n_ggml_op_type,
ggml_op_name((enum ggml_op) n_ggml_op_type));
ggml_free(ctx);
ggml_backend_free(backend);
return 3;
}
ggml_set_output(dst);
#ifdef GGML_USE_QNN
if (n_backend_type != QNN_BACKEND_GGML) {
buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
if (!buffer) {
QNN_LOG_ERROR("%s: failed to allocate backend buffer\n", __func__);
ggml_free(ctx);
ggml_backend_free(backend);
return 4;
}
}
#endif
QNN_LOG_DEBUG("creating compute graph\n");
gf = ggml_new_graph(ctx);
ggml_build_forward_expand(gf, dst);
#if 0
ggml_set_f32(src0, (rand() % 100 + 1));
ggml_set_f32(src1, (rand() % 100 + 1));
ggml_set_f32(dst, 0.0f);
#else
if (n_backend_type != QNN_BACKEND_GGML) {
initialize_tensors(ctx);
}
#endif
ggml_graph_compute_helper(backend, gf, work_buffer, num_threads, nullptr, nullptr);
if (get_tensor_data_size(dst) < (32 * 32)) {
QNN_LOG_DEBUG("dump tensors:\n");
TENSOR_DUMP(src0);
TENSOR_DUMP(src1);
TENSOR_DUMP(dst);
} else {
QNN_LOG_DEBUG("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
src0->name,
src0->type, ggml_type_name(src0->type), src0->ne[0], src0->ne[1], src0->ne[2],
src0->nb[0], src0->nb[1], src0->nb[2]);
QNN_LOG_DEBUG("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
src1->name,
src1->type, ggml_type_name(src1->type), src1->ne[0], src1->ne[1], src1->ne[2],
src1->nb[0], src1->nb[1], src1->nb[2]);
QNN_LOG_DEBUG("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
dst->name,
dst->type, ggml_type_name(dst->type), dst->ne[0], dst->ne[1], dst->ne[2], dst->nb[0],
dst->nb[1], dst->nb[2]);
}
ggml_free(ctx);
ggml_backend_buffer_free(buffer);
ggml_backend_free(backend);
n_end_time = ggml_time_us();
n_duration = (n_end_time - n_begin_time) / 1000;
QNN_LOG_DEBUG("duration of ut GGML_OP_%s using QNN backend %s: %lld milliseconds\n", ggml_op_name((enum ggml_op)n_ggml_op_type), get_qnn_backend_name(n_backend_type), n_duration);
return 0;
}