move llava into its own subdir
commit b9f533b997 (parent f8eddcf8e8)
11 changed files with 36 additions and 34 deletions
.gitignore (2 changes)
@@ -44,7 +44,7 @@ models-mnt
 /infill
 /libllama.so
 /llama-bench
-/llava
+/llava-cli
 /main
 /metal
 /perplexity
CMakeLists.txt
@@ -793,6 +793,7 @@ endif()
 #
 
 add_subdirectory(common)
+add_subdirectory(llava)
 
 if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
     include(CTest)
common/CMakeLists.txt
@@ -3,6 +3,7 @@
 set(TARGET common)
 
 add_library(${TARGET} OBJECT
+    base64.hpp
     common.h
     common.cpp
     sampling.h
examples/llava/CMakeLists.txt
@@ -1,16 +1,3 @@
-set(TARGET llava)
-add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h)
-install(TARGETS ${TARGET} LIBRARY)
-target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
-if (NOT MSVC)
-    target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
-endif()
-if(TARGET BUILD_INFO)
-    add_dependencies(${TARGET} BUILD_INFO)
-endif()
-
-
 set(TARGET llava-cli)
 add_executable(${TARGET} llava-cli.cpp)
 install(TARGETS ${TARGET} RUNTIME)
@@ -19,6 +6,3 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
     add_dependencies(${TARGET} BUILD_INFO)
 endif()
-
-unset(TARGET)
-llama_build_and_test_executable(test-llava.cpp)
llava-cli.cpp
@@ -32,7 +32,11 @@ static bool load_image(llava_context * ctx_llava, gpt_params * params, float **i
             return false;
         }
     }
-    llava_build_img_embed(ctx_llava, params->n_threads, &img, image_embd, n_image_pos);
+    bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, &img, image_embd, n_image_pos);
+    if (!image_embed_result) {
+        fprintf(stderr, "%s: couldn't embed the image\n", __func__);
+        return false;
+    }
 
     return true;
 }
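For downstream callers the visible change is the llava_build_img_embed signature: the llama and clip contexts are now passed explicitly instead of a whole llava_context, and the bool result is worth checking. A minimal migration sketch under those assumptions (the embed_image helper name is hypothetical; it presumes llava.h, clip.h, <cstdio> and <cstdlib> are included):

// hypothetical helper showing the old call rewritten against the new API
static bool embed_image(llava_context * ctx_llava, const clip_image_u8 * img, int n_threads) {
    float * image_embd  = NULL; // filled in by llava_build_img_embed
    int     n_image_pos = 0;    // number of embedding positions produced
    // old: llava_build_img_embed(ctx_llava, n_threads, img, &image_embd, &n_image_pos);
    if (!llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, n_threads, img, &image_embd, &n_image_pos)) {
        fprintf(stderr, "%s: couldn't embed the image\n", __func__);
        return false;
    }
    // ... evaluate the n_image_pos positions stored in image_embd ...
    free(image_embd); // ownership passes to the caller (malloc'd in llava.cpp, below)
    return true;
}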
llava/CMakeLists.txt (new file, 13 lines)
@@ -0,0 +1,13 @@
+set(TARGET llava)
+
+add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h)
+
+target_include_directories(${TARGET} PUBLIC .)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+if (NOT MSVC)
+    target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
+endif()
+if(TARGET BUILD_INFO)
+    add_dependencies(${TARGET} BUILD_INFO)
+endif()
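Note the one functional addition over the old examples copy: target_include_directories(${TARGET} PUBLIC .) publishes the llava headers to anything that links the library. A small sketch of what a consumer translation unit can now rely on (assuming its target links llava via target_link_libraries):

// in a consumer target linked against the llava library, the PUBLIC
// include directory makes these headers visible without a path prefix
#include "clip.h"
#include "llava.h"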
llava.cpp
@@ -10,8 +10,7 @@
 #include "base64.hpp"
 
-static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) {
-    auto ctx_clip = ctx_llava->ctx_clip;
+static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) {
     clip_image_f32 img_res;
     if (!clip_image_preprocess(ctx_clip, img, &img_res, /*pad2square =*/ true)) {
         fprintf(stderr, "%s: unable to preprocess image\n", __func__);
         return false;
@@ -22,14 +21,6 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con
     *n_img_pos = clip_n_patches(ctx_clip);
     *n_img_embd = clip_n_mmproj_embd(ctx_clip);
 
-    // make sure that the correct mmproj was used, i.e., compare apples to apples
-    int n_llama_embd = llama_n_embd(llama_get_model(ctx_llava->ctx_llama));
-    if (*n_img_embd != n_llama_embd) {
-        printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, *n_img_embd, n_llama_embd);
-
-        return false;
-    }
-
     const int64_t t_img_enc_start_us = ggml_time_us();
     if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) {
         fprintf(stderr, "Unable to encode image\n");
@@ -46,9 +37,8 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con
     return true;
 }
 
-bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) {
-
-    auto ctx_clip = ctx_llava->ctx_clip;
+bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) {
     float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip));
     if (!image_embd) {
         fprintf(stderr, "Unable to allocate memory for image embeddings\n");
@@ -58,13 +48,22 @@ bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, cons
 
     int n_image_pos;
     int n_img_embd;
-    if (!encode_image_with_clip(ctx_llava, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) {
+    if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) {
         fprintf(stderr, "%s: cannot encode image, aborting\n", __func__);
         free(image_embd);
         return false;
     }
+
+    // make sure that the correct mmproj was used, i.e., compare apples to apples
+    int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama));
+    if (n_img_embd != n_llama_embd) {
+        printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd);
+        free(image_embd);
+        return false;
+    }
+
     *image_embd_out = image_embd;
     *n_image_pos_out = n_image_pos;
 
     return true;
 }
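One consequence of this hunk is a clearer ownership contract: llava_build_img_embed mallocs the buffer, frees it itself on every failure path (including the relocated mmproj check), and hands it to the caller only on success. A sketch, not part of this commit, of holding that result in an owning pointer on the C++ side (free_deleter and build_embed are hypothetical names):

#include <cstdlib>
#include <memory>

// free() matches the malloc() inside llava_build_img_embed
struct free_deleter { void operator()(float * p) const { std::free(p); } };
using embd_ptr = std::unique_ptr<float, free_deleter>;

// hypothetical helper: wrap the out-param in an owning pointer
static embd_ptr build_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip,
                            int n_threads, const clip_image_u8 * img, int * n_image_pos) {
    float * raw = nullptr;
    if (!llava_build_img_embed(ctx_llama, ctx_clip, n_threads, img, &raw, n_image_pos)) {
        return nullptr; // llava_build_img_embed already freed the buffer on failure
    }
    return embd_ptr(raw); // freed automatically when the caller is done
}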
@@ -102,16 +101,15 @@ struct llava_context * llava_init(gpt_params * params) {
         return NULL;
     }
 
-
     auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
 
     ctx_llava->ctx_llama = ctx_llama;
     ctx_llava->ctx_clip = ctx_clip;
     ctx_llava->model = model;
     return ctx_llava;
 }
 
 
 void llava_free(struct llava_context * ctx_llava) {
     if (ctx_llava->ctx_clip) {
         clip_free(ctx_llava->ctx_clip);
llava.h
@@ -19,7 +19,8 @@ struct llava_context {
 struct llava_context * llava_init(gpt_params * params);
 void llava_free(struct llava_context * ctx_llava);
 
-bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out);
+/** build a llava image embedding from the passed-in clip image `img`. result is returned as image_embd_out, size n_image_pos_out */
+bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out);
 
 
 #ifdef __cplusplus
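Taken together with the accessors used in llava.cpp above, the new declaration implies a simple size contract: the returned buffer holds clip_n_patches(ctx_clip) positions of clip_n_mmproj_embd(ctx_clip) floats each, and the projector width must match the LLaMA embedding width (now enforced inside the function). A sanity sketch built only from names visible in this diff, assuming clip_embd_nbytes keeps its usual definition of patches x projector width x sizeof(float); the check_embed_contract helper is hypothetical:

#include <cassert>
#include <cstddef>

// hypothetical sanity helper for the llava_build_img_embed buffer contract
static void check_embed_contract(const llama_context * ctx_llama, clip_ctx * ctx_clip) {
    const int n_pos  = clip_n_patches(ctx_clip);     // embedding positions per image
    const int n_embd = clip_n_mmproj_embd(ctx_clip); // multimodal projector output width
    // size of the buffer handed out through image_embd_out:
    assert(clip_embd_nbytes(ctx_clip) == (size_t) n_pos * n_embd * sizeof(float));
    // the mmproj/LLaMA dimension check relocated into llava_build_img_embed:
    assert(n_embd == llama_n_embd(llama_get_model(ctx_llama)));
}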