update prompt template in wrapper

Yicheng Qian 2024-11-22 14:01:42 -08:00
parent 43f41a4c00
commit 3479f516ea
2 changed files with 9 additions and 2 deletions


@@ -40,7 +40,14 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 #=== for omni-vlm-wrapper
 add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
 target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
-install(TARGETS omni_vlm_wrapper_shared LIBRARY)
+# For Nexa SDK library installation
+set_target_properties(omni_vlm_wrapper_shared PROPERTIES
+    PUBLIC_HEADER "omni-vlm-wrapper.h"
+    POSITION_INDEPENDENT_CODE ON
+    OUTPUT_NAME "omni_vlm_wrapper_shared")
+install(TARGETS omni_vlm_wrapper_shared
+    LIBRARY
+    PUBLIC_HEADER DESTINATION include)
 # set(TARGET omni-vlm-wrapper-cli)
 # add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)
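With PUBLIC_HEADER DESTINATION include, downstream consumers get both the shared library and omni-vlm-wrapper.h on install. A minimal sketch of loading the installed library at runtime and resolving omnivlm_inference, whose signature appears in the second hunk below; it assumes a POSIX host, that the symbol is exported with extern "C" linkage, that the library is on the default search path, and the input strings are made up for illustration:

// build with: g++ load_wrapper.cpp -ldl
#include <dlfcn.h>
#include <cstdio>

int main() {
    // OUTPUT_NAME "omni_vlm_wrapper_shared" yields this soname on Linux.
    void* handle = dlopen("libomni_vlm_wrapper_shared.so", RTLD_NOW);
    if (!handle) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    // Signature taken from the hunk header: const char* omnivlm_inference(const char*, const char*)
    using infer_fn = const char* (*)(const char*, const char*);
    auto infer = reinterpret_cast<infer_fn>(dlsym(handle, "omnivlm_inference"));
    if (!infer) {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(handle);
        return 1;
    }
    // Hypothetical inputs; any model-initialization calls declared in
    // omni-vlm-wrapper.h would need to happen before this in practice.
    const char* out = infer("Describe this image.", "sample.jpg");
    std::printf("%s\n", out ? out : "(null)");
    dlclose(handle);
    return 0;
}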


@@ -255,7 +255,7 @@ const char* omnivlm_inference(const char *prompt, const char *imag_path) {
     if (params.omni_vlm_version == "vlm-81-ocr") {
         params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n <|ocr_start|><|vision_start|><|image_pad|><|vision_end|><|ocr_end|><|im_end|>";
     } else if (params.omni_vlm_version == "vlm-81-instruct" || params.omni_vlm_version == "nano-vlm-instruct") {
-        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n" + params.prompt + "\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
+        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n\n<|vision_start|><|image_pad|><|vision_end|>" + params.prompt + "<|im_end|>";
     } else {
         LOG_TEE("%s : error: you set wrong vlm version info:'%s'.\n", __func__, params.omni_vlm_version.c_str());
         throw std::runtime_error("You set wrong vlm_version info strings.");
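The net effect of this hunk: for the instruct variants, the vision tokens now precede the user text in the rendered prompt instead of following it. A standalone sketch that builds the new template and prints the result; the user_text value is hypothetical, not part of the commit:

#include <iostream>
#include <string>

int main() {
    std::string user_text = "What is in this picture?";  // hypothetical user prompt
    // New template from this commit: <|vision_start|>...<|vision_end|>
    // comes before the user text inside the user turn.
    std::string prompt =
        "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. "
        "You are a helpful assistant.<|im_end|>\n<|im_start|>user\n\n"
        "<|vision_start|><|image_pad|><|vision_end|>" + user_text + "<|im_end|>";
    std::cout << prompt << "\n";
    return 0;
}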