From 38c6fa3b8fb6c88075102fd859d04eaea27aa87c Mon Sep 17 00:00:00 2001
From: Zack Zhiyuan Li
Date: Tue, 5 Nov 2024 20:56:33 +0000
Subject: [PATCH 1/7] enable lib to be exported in nexa SDK

---
 examples/omni-vlm/CMakeLists.txt | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/examples/omni-vlm/CMakeLists.txt b/examples/omni-vlm/CMakeLists.txt
index b6d41b050..594c29897 100644
--- a/examples/omni-vlm/CMakeLists.txt
+++ b/examples/omni-vlm/CMakeLists.txt
@@ -40,7 +40,16 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 #=== for omni-vlm-wrapper
 add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
 target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
-install(TARGETS omni_vlm_wrapper_shared LIBRARY)
+
+# For Nexa SDK library installation
+set_target_properties(omni_vlm_wrapper_shared PROPERTIES
+    PUBLIC_HEADER "omni-vlm-wrapper.h"
+    POSITION_INDEPENDENT_CODE ON
+    OUTPUT_NAME "omni_vlm_wrapper_shared")
+
+install(TARGETS omni_vlm_wrapper_shared
+    LIBRARY
+    PUBLIC_HEADER DESTINATION include)
 
 # set(TARGET omni-vlm-wrapper-cli)
 # add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)

From aad0167bc3accc17ec80db5225576e4130383cc7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E4=B8=BA?=
Date: Thu, 14 Nov 2024 14:50:49 +0800
Subject: [PATCH 2/7] audio embedding free() (but still memory leakage detected)

---
 examples/qwen2-audio/qwen2.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/qwen2-audio/qwen2.cpp b/examples/qwen2-audio/qwen2.cpp
index a42b85bdc..ad6a199c7 100644
--- a/examples/qwen2-audio/qwen2.cpp
+++ b/examples/qwen2-audio/qwen2.cpp
@@ -763,6 +763,7 @@ static bool omni_eval_audio_embed(llama_context *ctx_llama, ggml_tensor *audio_e
         }
         *n_past += n_eval;
     }
+    free(audio_embed_data);
     return true;
 }
 

From 8e2e6304057af44e66c0c3a123ca798dc4d25a55 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E4=B8=BA?=
Date: Thu, 14 Nov 2024 22:04:01 +0800
Subject: [PATCH 3/7] fix mem leakage based on leaks tool (still WIP)

---
 common/common-nexa.cpp           | 3 ++-
 examples/qwen2-audio/whisper.cpp | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/common/common-nexa.cpp b/common/common-nexa.cpp
index e8a54ba04..c41f91384 100644
--- a/common/common-nexa.cpp
+++ b/common/common-nexa.cpp
@@ -150,6 +150,7 @@ bool load_hparams_and_tensors_from_gguf(const std::string &fname, NexaBaseModel
     }
 
     ggml_free(meta);
+    gguf_free(ctx_gguf);
 
     return true;
 }
@@ -314,4 +315,4 @@ struct ggml_tensor * ggml_graph_node(struct ggml_cgraph * cgraph, int i) {
     GGML_ASSERT(i < cgraph->n_nodes);
 
     return cgraph->nodes[i];
-}
\ No newline at end of file
+}
diff --git a/examples/qwen2-audio/whisper.cpp b/examples/qwen2-audio/whisper.cpp
index 6da9d268d..b2ce58475 100644
--- a/examples/qwen2-audio/whisper.cpp
+++ b/examples/qwen2-audio/whisper.cpp
@@ -9467,6 +9467,8 @@ static bool whisper_encoder_load(struct whisper_model_loader *loader, whisper_co
 
     wctx.t_load_us = ggml_time_us() - t_start_us;
 
+    gguf_free(gguf_ctx);
+
     return true;
 }
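
Note on patches 2/7 and 3/7: gguf_init_from_file() hands back two independently
owned objects -- the gguf_context itself and, when gguf_init_params.ctx is set,
a ggml_context holding the tensor metadata -- so freeing only the ggml side (as
the code did before patch 3/7) still leaks the gguf side. Below is a minimal
sketch of the pairing, assuming the gguf API declared in ggml.h at this
revision; load_metadata and the local variable names are illustrative, not
functions from this patch set:

    #include "ggml.h"

    // Sketch: load GGUF metadata, then release both contexts.
    static bool load_metadata(const char * fname) {
        struct ggml_context * meta = NULL;
        struct gguf_init_params params = {
            /*.no_alloc =*/ true,   // metadata only, no tensor data buffers
            /*.ctx      =*/ &meta,  // gguf allocates a ggml context for tensor info
        };
        struct gguf_context * ctx_gguf = gguf_init_from_file(fname, params);
        if (ctx_gguf == NULL) {
            return false;
        }
        // ... read hparams / tensor shapes from ctx_gguf and meta ...
        ggml_free(meta);      // releases the ggml metadata context
        gguf_free(ctx_gguf);  // releases the gguf context -- the call patch 3/7 adds
        return true;
    }

The same rule explains the whisper.cpp hunk: whisper_encoder_load() kept its
gguf context alive past the end of loading, so gguf_free(gguf_ctx) before the
final return closes that leak as well.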

From e4ca946c48ee6e1a848cf88e5f81680179b0fbf5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E4=B8=BA?=
Date: Fri, 15 Nov 2024 08:31:01 +0800
Subject: [PATCH 4/7] free omni_ctx heap allocation in the omni_free() API

Currently, memory leaks in qwen2-audio are almost fixed.
---
 examples/qwen2-audio/qwen2.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/qwen2-audio/qwen2.cpp b/examples/qwen2-audio/qwen2.cpp
index ad6a199c7..8a08a7ac6 100644
--- a/examples/qwen2-audio/qwen2.cpp
+++ b/examples/qwen2-audio/qwen2.cpp
@@ -724,6 +724,7 @@ void omni_free(struct omni_context *ctx_omni)
     llama_free(ctx_omni->ctx_llama);
     llama_free_model(ctx_omni->model);
     llama_backend_free();
+    free(ctx_omni);
 }
 
 static bool omni_eval_audio_embed(llama_context *ctx_llama, ggml_tensor *audio_embed, int n_batch, int *n_past)

From 460212ac2a61cd24f479bba145a9e652f01f31b3 Mon Sep 17 00:00:00 2001
From: zack Zhiyuan Li
Date: Fri, 22 Nov 2024 09:06:15 +0000
Subject: [PATCH 5/7] change template for inference

---
 examples/omni-vlm/omni-vlm-cli.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/omni-vlm/omni-vlm-cli.cpp b/examples/omni-vlm/omni-vlm-cli.cpp
index d24634fe8..2cd9eceb1 100644
--- a/examples/omni-vlm/omni-vlm-cli.cpp
+++ b/examples/omni-vlm/omni-vlm-cli.cpp
@@ -274,7 +274,7 @@ int main(int argc, char ** argv) {
     if (params.omni_vlm_version == "vlm-81-ocr") {
         params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n <|vision_start|><|image_pad|><|vision_end|><|im_end|>";
     } else if (params.omni_vlm_version == "vlm-81-instruct" || params.omni_vlm_version == "nano-vlm-instruct") {
-        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n" + params.prompt + "\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
+        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n\n<|vision_start|><|image_pad|><|vision_end|>" + params.prompt + "<|im_end|>";
     } else {
         LOG_TEE("%s : error: you set wrong vlm version info:'%s'.\n", __func__, params.omni_vlm_version.c_str());
         print_usage(argc, argv, {});

From fe8c7b45fd5eca1c38a09c257ebf8cf1ccae3a4a Mon Sep 17 00:00:00 2001
From: zack Zhiyuan Li
Date: Fri, 22 Nov 2024 09:08:44 +0000
Subject: [PATCH 6/7] revert CMakeLists.txt changes

---
 examples/omni-vlm/CMakeLists.txt | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/examples/omni-vlm/CMakeLists.txt b/examples/omni-vlm/CMakeLists.txt
index 594c29897..b6d41b050 100644
--- a/examples/omni-vlm/CMakeLists.txt
+++ b/examples/omni-vlm/CMakeLists.txt
@@ -40,16 +40,7 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 #=== for omni-vlm-wrapper
 add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
 target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
-
-# For Nexa SDK library installation
-set_target_properties(omni_vlm_wrapper_shared PROPERTIES
-    PUBLIC_HEADER "omni-vlm-wrapper.h"
-    POSITION_INDEPENDENT_CODE ON
-    OUTPUT_NAME "omni_vlm_wrapper_shared")
-
-install(TARGETS omni_vlm_wrapper_shared
-    LIBRARY
-    PUBLIC_HEADER DESTINATION include)
+install(TARGETS omni_vlm_wrapper_shared LIBRARY)
 
 # set(TARGET omni-vlm-wrapper-cli)
 # add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)
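
Note on patches 5/7 and 7/7: both commits make the same template change, first
in the CLI and then in the wrapper. The user text used to sit before the vision
tokens; after the change the <|vision_start|><|image_pad|><|vision_end|> block
comes first, immediately followed by the question, inside a single user turn,
which appears to match the Qwen2-VL-style image-before-question ordering. A
sketch of the resulting layout (string literals copied from the diffs;
build_prompt is an illustrative helper, not a function in this patch set):

    #include <string>

    // Illustrative helper showing the template shape after patches 5/7 and 7/7.
    static std::string build_prompt(const std::string & user_text) {
        return "<|im_start|>system\n"
               "You are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n"
               "<|im_start|>user\n\n"
               "<|vision_start|><|image_pad|><|vision_end|>"  // image tokens now come first...
               + user_text                                    // ...followed by the question
               + "<|im_end|>";
    }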
From 3479f516ea55c9a278986e9a300a163979be4177 Mon Sep 17 00:00:00 2001
From: Yicheng Qian
Date: Fri, 22 Nov 2024 14:01:42 -0800
Subject: [PATCH 7/7] update prompt template in wrapper

---
 examples/omni-vlm/CMakeLists.txt       | 9 ++++++++-
 examples/omni-vlm/omni-vlm-wrapper.cpp | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/examples/omni-vlm/CMakeLists.txt b/examples/omni-vlm/CMakeLists.txt
index b6d41b050..4e9413bfc 100644
--- a/examples/omni-vlm/CMakeLists.txt
+++ b/examples/omni-vlm/CMakeLists.txt
@@ -40,7 +40,14 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 #=== for omni-vlm-wrapper
 add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
 target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
-install(TARGETS omni_vlm_wrapper_shared LIBRARY)
+# For Nexa SDK library installation
+set_target_properties(omni_vlm_wrapper_shared PROPERTIES
+    PUBLIC_HEADER "omni-vlm-wrapper.h"
+    POSITION_INDEPENDENT_CODE ON
+    OUTPUT_NAME "omni_vlm_wrapper_shared")
+install(TARGETS omni_vlm_wrapper_shared
+    LIBRARY
+    PUBLIC_HEADER DESTINATION include)
 
 # set(TARGET omni-vlm-wrapper-cli)
 # add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)
diff --git a/examples/omni-vlm/omni-vlm-wrapper.cpp b/examples/omni-vlm/omni-vlm-wrapper.cpp
index ba0749d06..00d50cf15 100644
--- a/examples/omni-vlm/omni-vlm-wrapper.cpp
+++ b/examples/omni-vlm/omni-vlm-wrapper.cpp
@@ -255,7 +255,7 @@ const char* omnivlm_inference(const char *prompt, const char *imag_path) {
     if (params.omni_vlm_version == "vlm-81-ocr") {
         params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n <|ocr_start|><|vision_start|><|image_pad|><|vision_end|><|ocr_end|><|im_end|>";
     } else if (params.omni_vlm_version == "vlm-81-instruct" || params.omni_vlm_version == "nano-vlm-instruct") {
-        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n" + params.prompt + "\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
+        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n\n<|vision_start|><|image_pad|><|vision_end|>" + params.prompt + "<|im_end|>";
    } else {
         LOG_TEE("%s : error: you set wrong vlm version info:'%s'.\n", __func__, params.omni_vlm_version.c_str());
         throw std::runtime_error("You set wrong vlm_version info strings.");
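
Note on patches 1/7, 6/7, and 7/7: the install rule added in 1/7, reverted in
6/7, and reinstated in 7/7 puts the shared library in the library directory and
omni-vlm-wrapper.h under <prefix>/include, so the Nexa SDK can either link
against the wrapper at build time or load it at runtime. A hedged sketch of
runtime loading follows -- the omnivlm_inference signature is taken from the
hunk above, while the library filename, the assumption of C linkage on the
exported symbol, and the need for a prior model-loading init call are all
assumptions about code outside this patch set:

    // Sketch: load the installed wrapper with dlopen (POSIX; link with -ldl).
    #include <dlfcn.h>
    #include <cstdio>

    int main() {
        // Filename is an assumption: OUTPUT_NAME "omni_vlm_wrapper_shared"
        // yields libomni_vlm_wrapper_shared.so on Linux.
        void * h = dlopen("libomni_vlm_wrapper_shared.so", RTLD_NOW);
        if (!h) { std::fprintf(stderr, "dlopen: %s\n", dlerror()); return 1; }

        // Signature matches the declaration in the diff above; the real wrapper
        // presumably requires an init call (not shown in these patches) first.
        typedef const char * (*infer_fn)(const char * prompt, const char * image_path);
        infer_fn infer = (infer_fn) dlsym(h, "omnivlm_inference");
        if (!infer) { std::fprintf(stderr, "dlsym: %s\n", dlerror()); dlclose(h); return 1; }

        const char * out = infer("Describe this image.", "example.png");
        std::printf("%s\n", out ? out : "(null)");
        dlclose(h);
        return 0;
    }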