refactoring: remove unused functions and variables

hongruichen 2024-07-17 14:13:42 +08:00
parent 63dc587dff
commit bb13795dce
3 changed files with 0 additions and 148 deletions

View file

@@ -49,21 +49,6 @@
#define QNN_BACKEND_NAME "qnn"
static struct qnn::qcom_socinfo g_qnn_soc_info_table[] = {
/* Qualcomm SnapDragon 8 Gen 1 */
[qnn::SM8450] = { .soc_model = qnn::SM8450, .htp_arch = qnn::V69, .vtcm_size_in_mb = 8 },
/* Qualcomm SnapDragon 8 Gen 1+ */
[qnn::SM8475] = { .soc_model = qnn::SM8475, .htp_arch = qnn::V69, .vtcm_size_in_mb = 8 },
/* Qualcomm SnapDragon 8 Gen 2 */
[qnn::SM8550] = { .soc_model = qnn::SM8550, .htp_arch = qnn::V73, .vtcm_size_in_mb = 8 },
/* Qualcomm SnapDragon 8 Gen 3 */
[qnn::SM8650] = { .soc_model = qnn::SM8650, .htp_arch = qnn::V75, .vtcm_size_in_mb = 8 },
};
// according to the QNN SDK Reference Guide,
// CPU - Choose a non-quantized model.Quantized models are currently incompatible with the CPU backend
// GPU - Choose a non-quantized model.Quantized models are currently incompatible with the GPU backend
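
The SDK note kept as context above says the QNN CPU and GPU backends only accept non-quantized models. A minimal sketch of how a caller might act on that constraint is shown here; it is illustrative only and not part of this commit, and the names qnn_device and qnn_device_supports_model are hypothetical, not identifiers from this codebase.

// Hypothetical helper: per the QNN SDK guidance quoted above, quantized
// models are currently incompatible with the QNN CPU and GPU backends.
enum class qnn_device { cpu, gpu, npu };

static bool qnn_device_supports_model(qnn_device dev, bool model_is_quantized) {
    if (model_is_quantized && (dev == qnn_device::cpu || dev == qnn_device::gpu)) {
        return false; // caller should pick a different backend for quantized models
    }
    return true;
}
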
@@ -277,17 +262,6 @@ static ggml_backend_buffer_i ggml_backend_qnn_buffer_interface = {
GGML_CALL static const char *ggml_backend_qnn_buffer_type_name(ggml_backend_buffer_type_t buft) { return "QNN"; }
static void *ggml_qnn_host_malloc(size_t n) {
void *data = nullptr;
int result = posix_memalign((void **)&data, sysconf(_SC_PAGESIZE), n);
if (result != 0) {
QNN_LOG_WARN("%s: error: posix_memalign failed\n", __func__);
return nullptr;
}
return data;
}
GGML_CALL static ggml_backend_buffer_t ggml_backend_qnn_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
size_t size) {
ggml_backend_qnn_buffer_type_context *buft_ctx = (ggml_backend_qnn_buffer_type_context *)buft->context;

View file

@@ -7,20 +7,6 @@
#include "qnn-types.hpp" #include "qnn-types.hpp"
namespace {
size_t memscpy(void *dst, size_t dst_size, const void *src, size_t copy_size) {
if (!dst || !src || !dst_size || !copy_size) return 0;
size_t min_size = dst_size < copy_size ? dst_size : copy_size;
memcpy(dst, src, min_size);
return min_size;
}
} // namespace
namespace qnn {
// TODO: mapping more ggml data type to QNN data type
@@ -166,105 +152,4 @@ const char *opname_from_ggmlop(enum ggml_op ggmlop)
return nullptr;
}
void device_tensor_init(Qnn_Tensor_t &tensor, uint32_t rank, Qnn_TensorMemType_t mem_type, const char *tensor_name,
Qnn_TensorType_t qnn_tensor_type, Qnn_DataType_t qnn_data_type, uint32_t *dimensions) {
tensor = QNN_TENSOR_INIT;
tensor = { .version = QNN_TENSOR_VERSION_1,
{ .v1 = { .id = 0,
.name = tensor_name,
.type = qnn_tensor_type,
.dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
.dataType = qnn_data_type,
.quantizeParams = { QNN_DEFINITION_UNDEFINED,
QNN_QUANTIZATION_ENCODING_UNDEFINED,
{ .scaleOffsetEncoding = { .scale = 0.0000000000000000f, .offset = 0 } } },
.rank = rank,
.dimensions = dimensions,
.memType = mem_type,
{ .clientBuf = {} } } } };
}
Qnn_ErrorHandle_t device_tensor_deep_copy(const Qnn_Tensor_t &src, Qnn_Tensor_t &dst) {
Qnn_ErrorHandle_t err = validate_tensor_version(src);
if (err != QNN_SUCCESS) {
QNN_LOG_WARN("validate_tensor_version expected QNN_SUCCESS\n");
return err;
}
dst.version = src.version;
QNN_TENSOR_SET_NAME(dst, ::strndup(QNN_TENSOR_GET_NAME(src), std::string(QNN_TENSOR_GET_NAME(src)).size()));
if (nullptr == QNN_TENSOR_GET_NAME(dst)) {
return (Qnn_ErrorHandle_t)1;
}
QNN_TENSOR_SET_ID(dst, QNN_TENSOR_GET_ID(src));
QNN_TENSOR_SET_TYPE(dst, QNN_TENSOR_GET_TYPE(src));
QNN_TENSOR_SET_DATA_FORMAT(dst, QNN_TENSOR_GET_DATA_FORMAT(src));
QNN_TENSOR_SET_DATA_TYPE(dst, QNN_TENSOR_GET_DATA_TYPE(src));
QNN_TENSOR_SET_MEM_TYPE(dst, QNN_TENSOR_GET_MEM_TYPE(src));
if (QNN_TENSOR_GET_MEM_TYPE(src) == QNN_TENSORMEMTYPE_RAW) {
Qnn_ClientBuffer_t client_buf = { nullptr, 0 };
QNN_TENSOR_SET_CLIENT_BUF(dst, client_buf);
} else if (QNN_TENSOR_GET_MEM_TYPE(src) == QNN_TENSORMEMTYPE_MEMHANDLE) {
QNN_TENSOR_SET_MEM_HANDLE(dst, nullptr);
} else {
return (Qnn_ErrorHandle_t)1;
}
Qnn_QuantizeParams_t src_qparam = QNN_TENSOR_GET_QUANT_PARAMS(src);
Qnn_QuantizationEncoding_t encoding = src_qparam.quantizationEncoding;
if (encoding == QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) {
Qnn_QuantizeParams_t src_qparam_cpy = src_qparam;
Qnn_AxisScaleOffset_t &axis_scale_offset = src_qparam_cpy.axisScaleOffsetEncoding;
Qnn_ScaleOffset_t **scaleOffset = &axis_scale_offset.scaleOffset;
size_t scaleOffsetSize = axis_scale_offset.numScaleOffsets * sizeof(Qnn_ScaleOffset_t);
*scaleOffset = (Qnn_ScaleOffset_t *)malloc(scaleOffsetSize);
memscpy(*scaleOffset, scaleOffsetSize, src_qparam.axisScaleOffsetEncoding.scaleOffset, scaleOffsetSize);
QNN_TENSOR_SET_QUANT_PARAMS(dst, src_qparam_cpy);
} else if (encoding == QNN_QUANTIZATION_ENCODING_BW_AXIS_SCALE_OFFSET) {
Qnn_QuantizeParams_t src_qparam_cpy = src_qparam;
Qnn_BwAxisScaleOffset_t &bwaxis_scale_offset = src_qparam_cpy.bwAxisScaleOffsetEncoding;
size_t scaleSize = bwaxis_scale_offset.numElements * sizeof(float);
float **scales = &bwaxis_scale_offset.scales;
int32_t **offsets = &bwaxis_scale_offset.offsets;
*scales = (float *)malloc(scaleSize);
memscpy(*scales, scaleSize, src_qparam.bwAxisScaleOffsetEncoding.scales, scaleSize);
if (bwaxis_scale_offset.offsets != nullptr) {
size_t offsetSize = bwaxis_scale_offset.numElements * sizeof(int32_t);
*offsets = (int32_t *)malloc(offsetSize);
memscpy(*offsets, offsetSize, src_qparam.bwAxisScaleOffsetEncoding.offsets, offsetSize);
}
QNN_TENSOR_SET_QUANT_PARAMS(dst, src_qparam_cpy);
} else {
QNN_TENSOR_SET_QUANT_PARAMS(dst, src_qparam);
}
uint32_t rank = QNN_TENSOR_GET_RANK(src);
QNN_TENSOR_SET_RANK(dst, rank);
size_t dim_size = rank * sizeof(uint32_t);
uint32_t *dimensions = (uint32_t *)malloc(dim_size);
if (dimensions == nullptr) {
QNN_LOG_WARN(
"deep_copy_qnn_tensors() allocation error while copying "
"tensor %s\n",
QNN_TENSOR_GET_NAME(src));
return (Qnn_ErrorHandle_t)1;
}
memscpy(dimensions, dim_size, QNN_TENSOR_GET_DIMENSIONS(src), dim_size);
QNN_TENSOR_SET_DIMENSIONS(dst, dimensions);
return err;
}
void device_tensor_free(Qnn_Tensor_t &tensor) {
if (validate_tensor_version(tensor) != QNN_SUCCESS) {
QNN_LOG_WARN("validate_tensor_version expected QNN_SUCCESS\n");
return;
}
free((void *)QNN_TENSOR_GET_NAME(tensor));
free(QNN_TENSOR_GET_DIMENSIONS(tensor));
}
} // namespace qnn

View file

@@ -176,13 +176,6 @@ inline void set_qnn_tensor_memhandle(Qnn_Tensor_t &tensor, Qnn_MemHandle_t handl
Qnn_DataType_t device_datatype_from_ggml_datatype(ggml_type ggml_type);
Qnn_TensorType_t device_tensortype_from_ggml_tensor(ggml_tensor *ggml_tensor);
void device_tensor_init(Qnn_Tensor_t &tensor, uint32_t rank, Qnn_TensorMemType_t mem_type, const char *tensor_name,
Qnn_TensorType_t qnn_tensor_type, Qnn_DataType_t qnn_data_type, uint32_t *dimensions);
Qnn_ErrorHandle_t device_tensor_deep_copy(const Qnn_Tensor_t &src, Qnn_Tensor_t &dst);
void device_tensor_free(Qnn_Tensor_t &tensor);
#if ENABLE_QNNBACKEND_PERF
class qnn_perf {
public: