diff --git a/.gitignore b/.gitignore
index 471cf90d5..c96dc91af 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,7 +44,7 @@ models-mnt
 /infill
 /libllama.so
 /llama-bench
-/llava
+/llava-cli
 /main
 /metal
 /perplexity
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6af42a6c2..58547b6d5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -793,6 +793,7 @@ endif()
 #
 
 add_subdirectory(common)
+add_subdirectory(llava)
 
 if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
     include(CTest)
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index fbb0ff095..ac4ce0958 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -3,6 +3,7 @@
 set(TARGET common)
 
 add_library(${TARGET} OBJECT
+    base64.hpp
     common.h
     common.cpp
     sampling.h
diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt
index d451690ad..6b41d157e 100644
--- a/examples/llava/CMakeLists.txt
+++ b/examples/llava/CMakeLists.txt
@@ -1,16 +1,3 @@
-set(TARGET llava)
-add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h)
-install(TARGETS ${TARGET} LIBRARY)
-target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
-if (NOT MSVC)
-    target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
-    endif()
-if(TARGET BUILD_INFO)
-    add_dependencies(${TARGET} BUILD_INFO)
-endif()
-
-
 set(TARGET llava-cli)
 add_executable(${TARGET} llava-cli.cpp)
 install(TARGETS ${TARGET} RUNTIME)
@@ -19,6 +6,3 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
     add_dependencies(${TARGET} BUILD_INFO)
 endif()
-
-unset(TARGET)
-llama_build_and_test_executable(test-llava.cpp)
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index 84c376246..173c2d938 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -32,7 +32,11 @@ static bool load_image(llava_context * ctx_llava, gpt_params * params, float **i
             return false;
         }
     }
-    llava_build_img_embed(ctx_llava, params->n_threads, &img, image_embd, n_image_pos);
+    bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, &img, image_embd, n_image_pos);
+    if (!image_embed_result) {
+        fprintf(stderr, "%s: couldn't embed the image\n", __func__);
+        return false;
+    }
     return true;
 }
 
diff --git a/llava/CMakeLists.txt b/llava/CMakeLists.txt
new file mode 100644
index 000000000..9f9f8871d
--- /dev/null
+++ b/llava/CMakeLists.txt
@@ -0,0 +1,13 @@
+set(TARGET llava)
+
+add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h)
+
+target_include_directories(${TARGET} PUBLIC .)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+if (NOT MSVC)
+    target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
+    endif()
+if(TARGET BUILD_INFO)
+    add_dependencies(${TARGET} BUILD_INFO)
+endif()
diff --git a/examples/llava/clip.cpp b/llava/clip.cpp
similarity index 100%
rename from examples/llava/clip.cpp
rename to llava/clip.cpp
diff --git a/examples/llava/clip.h b/llava/clip.h
similarity index 100%
rename from examples/llava/clip.h
rename to llava/clip.h
diff --git a/examples/llava/llava-utils.h b/llava/llava-utils.h
similarity index 100%
rename from examples/llava/llava-utils.h
rename to llava/llava-utils.h
diff --git a/examples/llava/llava.cpp b/llava/llava.cpp
similarity index 83%
rename from examples/llava/llava.cpp
rename to llava/llava.cpp
index 522334c7c..a20d34bfa 100644
--- a/examples/llava/llava.cpp
+++ b/llava/llava.cpp
@@ -10,8 +10,7 @@
 
 #include "base64.hpp"
 
-static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) {
-    auto ctx_clip = ctx_llava->ctx_clip;
+static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) {
     clip_image_f32 img_res;
     if (!clip_image_preprocess(ctx_clip, img, &img_res, /*pad2square =*/ true)) {
         fprintf(stderr, "%s: unable to preprocess image\n", __func__);
@@ -22,14 +21,6 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con
     *n_img_pos = clip_n_patches(ctx_clip);
     *n_img_embd = clip_n_mmproj_embd(ctx_clip);
 
-    // make sure that the correct mmproj was used, i.e., compare apples to apples
-    int n_llama_embd = llama_n_embd(llama_get_model(ctx_llava->ctx_llama));
-    if (*n_img_embd != n_llama_embd) {
-        printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, *n_img_embd, n_llama_embd);
-
-        return false;
-    }
-
     const int64_t t_img_enc_start_us = ggml_time_us();
     if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) {
         fprintf(stderr, "Unable to encode image\n");
@@ -46,9 +37,8 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con
     return true;
 }
 
-bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) {
+bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) {
 
-    auto ctx_clip = ctx_llava->ctx_clip;
     float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip));
     if (!image_embd) {
         fprintf(stderr, "Unable to allocate memory for image embeddings\n");
@@ -58,13 +48,22 @@ bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, cons
     int n_image_pos;
     int n_img_embd;
 
-    if (!encode_image_with_clip(ctx_llava, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) {
+    if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) {
         fprintf(stderr, "%s: cannot encode image, aborting\n", __func__);
         free(image_embd);
         return false;
     }
 
+    // make sure that the correct mmproj was used, i.e., compare apples to apples
+    int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama));
+    if (n_img_embd != n_llama_embd) {
+        printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd);
+        free(image_embd);
+        return false;
+    }
+
     *image_embd_out = image_embd;
     *n_image_pos_out = n_image_pos;
+
     return true;
 }
@@ -102,16 +101,15 @@ struct llava_context * llava_init(gpt_params * params) {
         return NULL;
     }
 
-
     auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
 
     ctx_llava->ctx_llama = ctx_llama;
     ctx_llava->ctx_clip = ctx_clip;
     ctx_llava->model = model;
 
     return ctx_llava;
-
 }
+
 void llava_free(struct llava_context * ctx_llava) {
     if (ctx_llava->ctx_clip) {
         clip_free(ctx_llava->ctx_clip);
diff --git a/examples/llava/llava.h b/llava/llava.h
similarity index 57%
rename from examples/llava/llava.h
rename to llava/llava.h
index 1d8b87a46..a7789ad36 100644
--- a/examples/llava/llava.h
+++ b/llava/llava.h
@@ -19,7 +19,8 @@ struct llava_context {
 
 struct llava_context * llava_init(gpt_params * params);
 void llava_free(struct llava_context * ctx_llava);
 
-bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out);
+/** build a llava image embedding from the passed-in clip image `img`. result is returned as image_embd_out, size n_image_pos_out */
+bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out);
 
 #ifdef __cplusplus
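
Reviewer note (not part of the patch): the sketch below illustrates how a caller is expected to use llava_build_img_embed() after this refactor, with the llama and clip contexts passed explicitly instead of a whole llava_context. The embed_image_file() wrapper, the clip_image_load_from_file() call, and the error messages are illustrative assumptions based on the existing clip.h/llava.h headers, not code taken from this diff.

// Sketch only: assumes clip_image_load_from_file() from clip.h and a stack-allocated
// clip_image_u8, as in the current llava-cli code; adjust to the real call site.
#include "clip.h"
#include "common.h"
#include "llava.h"

#include <cstdio>
#include <cstdlib>

static bool embed_image_file(llava_context * ctx_llava, const char * path, int n_threads) {
    clip_image_u8 img;
    if (!clip_image_load_from_file(path, &img)) {   // assumed helper from clip.h
        fprintf(stderr, "%s: failed to load image %s\n", __func__, path);
        return false;
    }

    float * image_embd  = NULL;
    int     n_image_pos = 0;

    // the llama and clip contexts are now passed separately instead of the whole llava_context
    if (!llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, n_threads,
                               &img, &image_embd, &n_image_pos)) {
        fprintf(stderr, "%s: couldn't embed the image\n", __func__);
        return false;
    }

    // ... feed the n_image_pos embedding positions to the llama context here ...

    free(image_embd);   // llava_build_img_embed() allocates the buffer with malloc()
    return true;
}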