From 38c6fa3b8fb6c88075102fd859d04eaea27aa87c Mon Sep 17 00:00:00 2001
From: Zack Zhiyuan Li
Date: Tue, 5 Nov 2024 20:56:33 +0000
Subject: [PATCH 1/3] enable lib to be exported in nexa SDK

---
 examples/omni-vlm/CMakeLists.txt | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/examples/omni-vlm/CMakeLists.txt b/examples/omni-vlm/CMakeLists.txt
index b6d41b050..594c29897 100644
--- a/examples/omni-vlm/CMakeLists.txt
+++ b/examples/omni-vlm/CMakeLists.txt
@@ -40,7 +40,16 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 #=== for omni-vlm-wrapper
 add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
 target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
-install(TARGETS omni_vlm_wrapper_shared LIBRARY)
+
+# For Nexa SDK library installation
+set_target_properties(omni_vlm_wrapper_shared PROPERTIES
+    PUBLIC_HEADER "omni-vlm-wrapper.h"
+    POSITION_INDEPENDENT_CODE ON
+    OUTPUT_NAME "omni_vlm_wrapper_shared")
+
+install(TARGETS omni_vlm_wrapper_shared
+    LIBRARY
+    PUBLIC_HEADER DESTINATION include)
 
 # set(TARGET omni-vlm-wrapper-cli)
 # add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)

From 460212ac2a61cd24f479bba145a9e652f01f31b3 Mon Sep 17 00:00:00 2001
From: Zack Zhiyuan Li
Date: Fri, 22 Nov 2024 09:06:15 +0000
Subject: [PATCH 2/3] change template for inference

---
 examples/omni-vlm/omni-vlm-cli.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/omni-vlm/omni-vlm-cli.cpp b/examples/omni-vlm/omni-vlm-cli.cpp
index d24634fe8..2cd9eceb1 100644
--- a/examples/omni-vlm/omni-vlm-cli.cpp
+++ b/examples/omni-vlm/omni-vlm-cli.cpp
@@ -274,7 +274,7 @@ int main(int argc, char ** argv) {
     if (params.omni_vlm_version == "vlm-81-ocr") {
         params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n <|vision_start|><|image_pad|><|vision_end|><|im_end|>";
     } else if (params.omni_vlm_version == "vlm-81-instruct" || params.omni_vlm_version == "nano-vlm-instruct") {
-        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n" + params.prompt + "\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
+        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n\n<|vision_start|><|image_pad|><|vision_end|>" + params.prompt + "<|im_end|>";
     } else {
         LOG_TEE("%s : error: you set wrong vlm version info:'%s'.\n", __func__, params.omni_vlm_version.c_str());
         print_usage(argc, argv, {});

From fe8c7b45fd5eca1c38a09c257ebf8cf1ccae3a4a Mon Sep 17 00:00:00 2001
From: Zack Zhiyuan Li
Date: Fri, 22 Nov 2024 09:08:44 +0000
Subject: [PATCH 3/3] revert CMakeLists

---
 examples/omni-vlm/CMakeLists.txt | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/examples/omni-vlm/CMakeLists.txt b/examples/omni-vlm/CMakeLists.txt
index 594c29897..b6d41b050 100644
--- a/examples/omni-vlm/CMakeLists.txt
+++ b/examples/omni-vlm/CMakeLists.txt
@@ -40,16 +40,7 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 #=== for omni-vlm-wrapper
 add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
 target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
-
-# For Nexa SDK library installation
-set_target_properties(omni_vlm_wrapper_shared PROPERTIES
-    PUBLIC_HEADER "omni-vlm-wrapper.h"
-    POSITION_INDEPENDENT_CODE ON
-    OUTPUT_NAME "omni_vlm_wrapper_shared")
-
-install(TARGETS omni_vlm_wrapper_shared
-    LIBRARY
-    PUBLIC_HEADER DESTINATION include)
+install(TARGETS omni_vlm_wrapper_shared LIBRARY)
 
 # set(TARGET omni-vlm-wrapper-cli)
 # add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)