Merge 70a23863dc into 549279d804
Commit 5ff51211ef

16 changed files with 3754 additions and 13 deletions

.gitignore (vendored): 1 change

@@ -60,6 +60,7 @@ models-mnt
 /libllama.so
 /llama-bench
 /llava-cli
+/minicpmv-cli
 /lookahead
 /lookup
 /lookup-create

Makefile: 8 changes

@@ -1,7 +1,7 @@
 # Define the default target now so that it is always the first target
 BUILD_TARGETS = \
 	main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
-	simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama beam-search \
+	simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli minicpmv-cli baby-llama beam-search \
 	retrieval speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm tests/test-c.o

 # Binaries only useful for tests

@@ -878,6 +878,12 @@ llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/cli
 	$(CXX) $(CXXFLAGS) -c examples/llava/llava.cpp -o $(call GET_OBJ_FILE, examples/llava/llava.cpp)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $< examples/llava/clip.cpp examples/llava/llava.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) $(call GET_OBJ_FILE, examples/llava/llava.cpp) -o $@ $(LDFLAGS)

+minicpmv-cli: examples/minicpmv/minicpmv-cli.cpp examples/minicpmv/clip.h examples/minicpmv/clip.cpp examples/minicpmv/minicpmv.h examples/minicpmv/minicpmv.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) -c examples/minicpmv/clip.cpp -o $(call GET_OBJ_FILE, examples/minicpmv/clip.cpp) -Wno-cast-qual
+	$(CXX) $(CXXFLAGS) -c examples/minicpmv/minicpmv.cpp -o $(call GET_OBJ_FILE, examples/minicpmv/minicpmv.cpp)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $< examples/minicpmv/clip.cpp examples/minicpmv/minicpmv.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/minicpmv/clip.cpp) $(call GET_OBJ_FILE, examples/minicpmv/minicpmv.cpp) -o $@ $(LDFLAGS)
+
 baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

examples/CMakeLists.txt: 1 change

@@ -26,6 +26,7 @@ else()
     add_subdirectory(infill)
     add_subdirectory(llama-bench)
     add_subdirectory(llava)
+    add_subdirectory(minicpmv)
     if (LLAMA_SYCL)
         add_subdirectory(sycl)
     endif()

examples/minicpmv/CMakeLists.txt: 37 additions (new file)

@@ -0,0 +1,37 @@
add_library(minicpmv OBJECT
            minicpmv.cpp
            minicpmv.h
            clip.cpp
            clip.h
            )

target_link_libraries(minicpmv PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})

target_include_directories(minicpmv PUBLIC .)
target_include_directories(minicpmv PUBLIC ../..)
target_include_directories(minicpmv PUBLIC ../../common)

target_compile_features(minicpmv PRIVATE cxx_std_11)

add_library(minicpmv_static STATIC $<TARGET_OBJECTS:minicpmv>)
if (BUILD_SHARED_LIBS)
    set_target_properties(minicpmv PROPERTIES POSITION_INDEPENDENT_CODE ON)
    target_compile_definitions(minicpmv PRIVATE LLAMA_SHARED LLAMA_BUILD)
    add_library(minicpmv_shared SHARED $<TARGET_OBJECTS:minicpmv>)
    target_link_libraries(minicpmv_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
    install(TARGETS minicpmv_shared LIBRARY)
endif()

if (NOT MSVC)
    target_compile_options(minicpmv PRIVATE -Wno-cast-qual) # stb_image.h
endif()

if(TARGET BUILD_INFO)
    add_dependencies(minicpmv BUILD_INFO)
endif()

set(TARGET minicpmv-cli)
add_executable(minicpmv-cli minicpmv-cli.cpp)
install(TARGETS minicpmv-cli RUNTIME)
target_link_libraries(minicpmv-cli PRIVATE common minicpmv ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(minicpmv PRIVATE cxx_std_11)

examples/minicpmv/README.md: 19 additions (new file)

@@ -0,0 +1,19 @@
# 所有命令在 llama.cpp 根目录执行,模型位于根目录上级目录处
# All command should be executed under the root path of llama.cpp repo. We assume the MiniCPM-V-2 model are put in its parent folder.

```bash
make
make minicpmv-cli

python ./examples/minicpmv/minicpm-surgery.py -m ../MiniCPM-V-2
python ./examples/minicpmv/convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2 --llava-projector ../MiniCPM-V-2/llava.projector --output-dir ../MiniCPM-V-2 --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5
python ./convert-hf-to-gguf.py ../MiniCPM-V-2/MiniCPM
./minicpmv-cli -m ../MiniCPM-V-2/MiniCPM/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2/mmproj-model-f16.gguf -c 4096 --temp 0.6 --top-p 0.8 --top-k 100 --repeat-penalty 1.0 --image ../test.jpg -p "这张图里有什么?"

# or run quantize int4 version
./quantize ../MiniCPM-V-2/MiniCPM/ggml-model-f16.gguf ../MiniCPM-V-2/MiniCPM/ggml-model-Q4_K_M.gguf Q4_K_M
./minicpmv-cli -m ../MiniCPM-V-2/MiniCPM/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2/mmproj-model-f16.gguf -c 4096 --temp 0.6 --top-p 0.8 --top-k 100 --repeat-penalty 1.0 --image ../test.jpg -p "这张图里有什么?"

# or run in interactive mode
./minicpmv-cli -m ../MiniCPM-V-2/MiniCPM/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2/mmproj-model-f16.gguf -c 4096 --temp 0.6 --top-p 0.8 --top-k 100 --repeat-penalty 1.0 --image ../test.jpg -i
```
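
Editor's note, not part of the PR: a small sanity-check sketch that the conversion steps in the README above actually produced the two GGUF files the run commands expect. The paths are assumptions copied from those commands; adjust them if your model lives elsewhere.

```python
import os

# Outputs expected after the surgery/conversion steps in the README above
# (these paths come from the README commands, not from this script).
expected = [
    "../MiniCPM-V-2/mmproj-model-f16.gguf",        # image encoder + resampler, from convert-image-encoder-to-gguf.py
    "../MiniCPM-V-2/MiniCPM/ggml-model-f16.gguf",  # language model, from convert-hf-to-gguf.py
]

for path in expected:
    if os.path.exists(path):
        print(f"ok      {path} ({os.path.getsize(path) / 1e9:.2f} GB)")
    else:
        print(f"missing {path} - re-run the corresponding conversion step")
```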

examples/minicpmv/android/adb_run.sh: 53 additions (new executable file)

@@ -0,0 +1,53 @@
#!/bin/bash

model_dir="/Users/cxt/model/llm/mobileVLM/MobileVLM-1.7B_processed"
projector_name="mmproj-model-f16.gguf"
llama_name="ggml-model-q4_k.gguf"
img_dir="/Users/cxt/model/llm"
img_name="demo.jpg"
prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWho is the author of this book? \nAnswer the question using a single word or phrase. ASSISTANT:"
# img_name="cat.jpeg"
# prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:"

program_dir="build_64/bin"
binName="minicpmv-cli"
n_threads=4


deviceDir="/data/local/tmp"
saveDir="output"
if [ ! -d ${saveDir} ]; then
    mkdir ${saveDir}
fi


function android_run() {
    # # copy resource into device
    # adb push ${model_dir}/${projector_name} ${deviceDir}/${projector_name}
    # adb push ${model_dir}/${llama_name} ${deviceDir}/${llama_name}
    adb push ${img_dir}/${img_name} ${deviceDir}/${img_name}
    # copy program into device
    adb push ${program_dir}/${binName} ${deviceDir}/${binName}
    adb shell "chmod 0777 ${deviceDir}/${binName}"

    # run
    adb shell "echo cd ${deviceDir} ${deviceDir}/${binName} \
        -m ${deviceDir}/${llama_name} \
        --mmproj ${deviceDir}/${projector_name} \
        -t ${n_threads} \
        --image ${deviceDir}/${img_name} \
        -p \"${prompt}\" \
        > ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt"
    adb shell "cd ${deviceDir}; pwd; ${deviceDir}/${binName} \
        -m ${deviceDir}/${llama_name} \
        --mmproj ${deviceDir}/${projector_name} \
        -t ${n_threads} \
        --image ${deviceDir}/${img_name} \
        -p \"${prompt}\" \
        >> ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt 2>&1"
    adb pull ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt ${saveDir}
}

android_run

echo "android_run is Done!"

examples/minicpmv/clip.cpp: 2134 additions (new file)

File diff suppressed because it is too large.

examples/minicpmv/clip.h: 85 additions (new file)

@@ -0,0 +1,85 @@
#ifndef CLIP_H
#define CLIP_H

#include <stddef.h>
#include <stdint.h>

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define CLIP_API __declspec(dllexport)
#        else
#            define CLIP_API __declspec(dllimport)
#        endif
#    else
#        define CLIP_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define CLIP_API
#endif

struct clip_ctx;

#ifdef __cplusplus
extern "C" {
#endif

struct clip_ctx;

struct clip_image_u8_batch {
    struct clip_image_u8 * data;
    size_t size;
};

struct clip_image_f32_batch {
    struct clip_image_f32 * data;
    size_t size;
};

CLIP_API struct clip_ctx * clip_model_load    (const char * fname, int verbosity);
CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity);

CLIP_API void clip_free(struct clip_ctx * ctx);

CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);

CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx);
CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx);
CLIP_API int32_t clip_hidden_size(const struct clip_ctx * ctx);

// TODO: should be enum, not string
CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx);

CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);

CLIP_API int clip_n_patches    (const struct clip_ctx * ctx);
CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);

CLIP_API struct clip_image_u8  * clip_image_u8_init ();
CLIP_API struct clip_image_f32 * clip_image_f32_init();

CLIP_API void clip_image_u8_free (struct clip_image_u8  * img);
CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch  * batch);
CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);

CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);

/** interpret bytes as an image file with length bytes_length, and use the result to populate img */
CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

/** preprocess img and store the result in res_imgs, pad_to_square may be overriden to false depending on model configuration */
CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );

CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);

CLIP_API bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);

CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);

#ifdef __cplusplus
}
#endif

#endif // CLIP_H

examples/minicpmv/convert-image-encoder-to-gguf.py: 403 additions (new file)

@@ -0,0 +1,403 @@
import argparse
import os
import json
import re

import torch
import numpy as np
from gguf import *
import timm

TEXT = "clip.text"
VISION = "clip.vision"


def k(raw_key: str, arch: str) -> str:
    return raw_key.format(arch=arch)


def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: bool) -> bool:
    if name in (
        "logit_scale",
        "text_model.embeddings.position_ids",
        "vision_model.embeddings.position_ids",
    ):
        return True

    if has_llava and name in ["visual_projection.weight"]:
        return True

    if name.startswith("v") and not has_vision:
        return True

    if name.startswith("t") and not has_text:
        return True

    return False


def get_tensor_name(name: str) -> str:
    if "projection" in name:
        return name
    if "mm_projector" in name:
        name = name.replace("model.mm_projector", "mm")
        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
        return name

    return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")


def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True)
ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16")
ap.add_argument("--text-only", action="store_true", required=False,
                help="Save a text-only model. It can't be used to encode images")
ap.add_argument("--vision-only", action="store_true", required=False,
                help="Save a vision-only model. It can't be used to encode texts")
ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
                help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
                help="The clip model is from openclip (for ViT-SO400M type))")
ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2"], default="mlp")
ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
# Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711
# Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5
default_image_mean = [0.48145466, 0.4578275, 0.40821073]
default_image_std = [0.26862954, 0.26130258, 0.27577711]
ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)

# with proper
args = ap.parse_args()


if args.text_only and args.vision_only:
    print("--text-only and --image-only arguments cannot be specified at the same time.")
    exit(1)

if args.use_f32:
    print("WARNING: Weights for the convolution op is always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.")

# output in the same directory as the model if output_dir is None
dir_model = args.model_dir

if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
    vocab = None
    tokens = None
else:
    with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f:
        vocab = json.load(f)
        tokens = [key for key in vocab]

# possible data types
# ftype == 0 -> float32
# ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if args.use_f32:
    ftype = 0

# if args.clip_model_is_vision or args.clip_model_is_openclip:
#     model = CLIPVisionModel.from_pretrained(dir_model)
#     processor = None
# else:
#     model = CLIPModel.from_pretrained(dir_model)
#     processor = CLIPProcessor.from_pretrained(dir_model)
model = timm.create_model(
    "vit_so400m_patch14_siglip_384.webli",
    pretrained=False,
    num_classes=0,
    dynamic_img_size=True,
    dynamic_img_pad=True,
)
processor = None
if model.attn_pool is not None:
    model.attn_pool = torch.nn.Identity()

model.blocks = model.blocks[:-1]
model.load_state_dict(torch.load(os.path.join(dir_model, "llava.clip")))

fname_middle = None
has_text_encoder = True
has_vision_encoder = True
has_llava_projector = False
if args.text_only:
    fname_middle = "text-"
    has_vision_encoder = False
elif args.llava_projector is not None:
    fname_middle = "mmproj-"
    has_text_encoder = False
    has_llava_projector = True
elif args.vision_only:
    fname_middle = "vision-"
    has_text_encoder = False
else:
    fname_middle = ""

output_dir = args.output_dir if args.output_dir is not None else dir_model
os.makedirs(output_dir, exist_ok=True)
output_prefix = os.path.basename(output_dir).replace("ggml_", "")
fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf")
fout = GGUFWriter(path=fname_out, arch="clip")

fout.add_bool("clip.has_text_encoder", has_text_encoder)
fout.add_bool("clip.has_vision_encoder", has_vision_encoder)
fout.add_bool("clip.has_llava_projector", has_llava_projector)
fout.add_file_type(ftype)
if args.text_only:
    fout.add_description("text-only CLIP model")
elif args.vision_only and not has_llava_projector:
    fout.add_description("vision-only CLIP model")
elif has_llava_projector:
    fout.add_description("image encoder for LLaVA")
    # add projector type
    fout.add_string("clip.projector_type", "resampler")
else:
    fout.add_description("two-tower CLIP model")

if has_vision_encoder:
    # vision_model hparams
    fout.add_uint32("clip.vision.image_size", 448)
    fout.add_uint32("clip.vision.patch_size", 14)
    fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), 1152)
    fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), 4304)
    fout.add_uint32("clip.vision.projection_dim", 0)
    fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), 16)
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6)
    block_count = 26
    fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count)

    if processor is not None:
        image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean
        image_std = processor.image_processor.image_std if args.image_std is None or args.image_std == default_image_std else args.image_std
    else:
        image_mean = args.image_mean if args.image_mean is not None else default_image_mean
        image_std = args.image_std if args.image_std is not None else default_image_std
    fout.add_array("clip.vision.image_mean", image_mean)
    fout.add_array("clip.vision.image_std", image_std)

use_gelu = True
fout.add_bool("clip.use_gelu", use_gelu)

def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb

def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    if isinstance(grid_size, int):
        grid_h_size, grid_w_size = grid_size, grid_size
    else:
        grid_h_size, grid_w_size = grid_size[0], grid_size[1]

    grid_h = np.arange(grid_h_size, dtype=np.float32)
    grid_w = np.arange(grid_w_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_h_size, grid_w_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed

def _replace_name_resampler(s, v):
    if re.match("resampler.pos_embed", s):
        return {
            s: v,
            re.sub("pos_embed", "pos_embed_k", s): torch.from_numpy(get_2d_sincos_pos_embed(2304, (448//14, 448//14))),
        }
    if re.match("resampler.proj", s):
        return {
            re.sub("proj", "proj.weight", s): v.transpose(-1, -2).contiguous(),
        }
    if re.match("resampler.attn.in_proj_.*", s):
        return {
            re.sub("attn.in_proj_", "attn.q.", s): v.chunk(3, dim=0)[0],
            re.sub("attn.in_proj_", "attn.k.", s): v.chunk(3, dim=0)[1],
            re.sub("attn.in_proj_", "attn.v.", s): v.chunk(3, dim=0)[2],
        }
    return {s: v}

if has_llava_projector:
    projector = torch.load(args.llava_projector)
    new_state_dict = {}
    for k, v in projector.items():
        kvs = _replace_name_resampler(k, v)
        for nk, nv in kvs.items():
            new_state_dict[nk] = nv
    projector = new_state_dict
    for name, data in projector.items():
        name = get_tensor_name(name)
        data = data.squeeze().numpy()

        n_dims = len(data.shape)
        if ftype == 1:
            if name[-7:] == ".weight" and n_dims == 2:
                print("  Converting to float16")
                data = data.astype(np.float16)
                ftype_cur = 1
            else:
                print("  Converting to float32")
                data = data.astype(np.float32)
                ftype_cur = 0
        else:
            if data.dtype != np.float32:
                print("  Converting to float32")
                data = data.astype(np.float32)
                ftype_cur = 0

        fout.add_tensor(name, data)
        print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}")

    print("Projector tensors added\n")

def _replace_name(s, v):
    if re.match("blocks.([0-9]+).attn.qkv.weight", s):
        return {
            re.sub("blocks.([0-9]+).attn.qkv.weight", "vision_model.encoder.layers.\\1.self_attn.q_proj.weight", s): v.chunk(3, dim=0)[0],
            re.sub("blocks.([0-9]+).attn.qkv.weight", "vision_model.encoder.layers.\\1.self_attn.k_proj.weight", s): v.chunk(3, dim=0)[1],
            re.sub("blocks.([0-9]+).attn.qkv.weight", "vision_model.encoder.layers.\\1.self_attn.v_proj.weight", s): v.chunk(3, dim=0)[2],
        }
    if re.match("blocks.([0-9]+).attn.qkv.bias", s):
        return {
            re.sub("blocks.([0-9]+).attn.qkv.bias", "vision_model.encoder.layers.\\1.self_attn.q_proj.bias", s): v.chunk(3, dim=0)[0],
            re.sub("blocks.([0-9]+).attn.qkv.bias", "vision_model.encoder.layers.\\1.self_attn.k_proj.bias", s): v.chunk(3, dim=0)[1],
            re.sub("blocks.([0-9]+).attn.qkv.bias", "vision_model.encoder.layers.\\1.self_attn.v_proj.bias", s): v.chunk(3, dim=0)[2],
        }
    if re.match("pos_embed", s):
        from timm.layers import resample_abs_pos_embed
        s = re.sub("pos_embed", "vision_model.embeddings.position_embedding", s)
        v = resample_abs_pos_embed(v, (448//14, 448//14), num_prefix_tokens=0)
        return {s: v}

    s = re.sub("patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.proj.weight", s)
    s = re.sub("patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.proj.bias", s)

    # norm
    s = re.sub("blocks.([0-9]+).norm([0-9]+).weight", "vision_model.encoder.layers.\\1.layer_norm\\2.weight", s)
    s = re.sub("blocks.([0-9]+).norm([0-9]+).bias", "vision_model.encoder.layers.\\1.layer_norm\\2.bias", s)

    s = re.sub("blocks.([0-9]+).attn.proj.weight", "vision_model.encoder.layers.\\1.self_attn.out_proj.weight", s)
    s = re.sub("blocks.([0-9]+).attn.proj.bias", "vision_model.encoder.layers.\\1.self_attn.out_proj.bias", s)

    s = re.sub("blocks.([0-9]+).mlp.fc([0-9]+).weight", "vision_model.encoder.layers.\\1.mlp.fc\\2.weight", s)
    s = re.sub("blocks.([0-9]+).mlp.fc([0-9]+).bias", "vision_model.encoder.layers.\\1.mlp.fc\\2.bias", s)

    s = re.sub("norm.weight", "vision_model.post_layernorm.weight", s)
    s = re.sub("norm.bias", "vision_model.post_layernorm.bias", s)

    return {s: v}

state_dict = model.state_dict()
new_state_dict = {}
for k, v in state_dict.items():
    kvs = _replace_name(k, v)
    for nk, nv in kvs.items():
        new_state_dict[nk] = nv
state_dict = new_state_dict
for name, data in state_dict.items():
    if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_llava_projector):
        # we don't need this
        print(f"skipping parameter: {name}")
        continue

    name = get_tensor_name(name)
    data = data.squeeze().numpy()

    n_dims = len(data.shape)

    # ftype == 0 -> float32, ftype == 1 -> float16
    ftype_cur = 0
    if n_dims == 4:
        print(f"tensor {name} is always saved in f16")
        data = data.astype(np.float16)
        ftype_cur = 1
    elif ftype == 1:
        if name[-7:] == ".weight" and n_dims == 2:
            print("  Converting to float16")
            data = data.astype(np.float16)
            ftype_cur = 1
        else:
            print("  Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0
    else:
        if data.dtype != np.float32:
            print("  Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0

    print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}")
    fout.add_tensor(name, data)


fout.write_header_to_file()
fout.write_kv_data_to_file()
fout.write_tensors_to_file()
fout.close()

print("Done. Output file: " + fname_out)
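
Editor's note on the positional-embedding helpers above (not part of the PR): `get_1d_sincos_pos_embed_from_grid` is the standard fixed sinusoidal embedding. Writing `D` for `embed_dim` and `p` for a position, it computes

$$\omega_d = 10000^{-2d/D}, \qquad d = 0, \dots, D/2 - 1,$$
$$\mathrm{emb}(p) = \big[\sin(p\,\omega_0), \dots, \sin(p\,\omega_{D/2-1}),\ \cos(p\,\omega_0), \dots, \cos(p\,\omega_{D/2-1})\big],$$

and `get_2d_sincos_pos_embed` concatenates two such D/2-dimensional embeddings, one per grid coordinate, for every cell of the (448/14) x (448/14) grid. This is what fills the `resampler.pos_embed_k` tensor created in `_replace_name_resampler`.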

examples/minicpmv/minicpm-surgery.py: 58 additions (new file)

@@ -0,0 +1,58 @@
import argparse
import glob
import os, json
import torch
from transformers import AutoModel, AutoTokenizer

ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", help="Path to LLaVA v1.5 model")
args = ap.parse_args()

# find the model part that includes the the multimodal projector weights
model = AutoModel.from_pretrained(args.model, trust_remote_code=True)
checkpoint = model.state_dict()

# get a list of mm tensor names
mm_tensors = [k for k, v in checkpoint.items() if k.startswith("resampler")]

# store these tensors in a new dictionary and torch.save them
projector = {name: checkpoint[name].float().cpu() for name in mm_tensors}
torch.save(projector, f"{args.model}/llava.projector")

clip_tensors = [k for k, v in checkpoint.items() if k.startswith("vpm")]
if len(clip_tensors) > 0:
    clip = {name.replace("vpm.", ""): checkpoint[name].float().cpu() for name in clip_tensors}
    torch.save(clip, f"{args.model}/llava.clip")

    # added tokens should be removed to be able to convert Mistral models
    if os.path.exists(f"{args.model}/added_tokens.json"):
        with open(f"{args.model}/added_tokens.json", "w") as f:
            f.write("{}\n")

config = model.llm.config
config._name_or_path = "openbmb/CPM-2B"
config.auto_map = {
    "AutoConfig": "configuration_minicpm.MiniCPMConfig",
    "AutoModel": "modeling_minicpm.MiniCPMModel",
    "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM",
    "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM",
    "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification"
}
model.llm.save_pretrained(f"{args.model}/MiniCPM")
tok = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
tok.save_pretrained(f"{args.model}/MiniCPM")
os.system(f"cp {args.model}/modeling_minicpm.py {args.model}/MiniCPM/modeling_minicpm.py")
os.system(f"cp {args.model}/tokenizer.json {args.model}/MiniCPM/tokenizer.json")
with open(f"{args.model}/MiniCPM/tokenizer_config.json", "r") as f:
    d = json.load(f)
    d.pop("auto_map")
    d["tokenizer_class"] = "LlamaTokenizer"
    if "add_prefix_space" in d:
        d.pop("add_prefix_space")
with open(f"{args.model}/MiniCPM/tokenizer_config.json", "w") as f:
    json.dump(d, f, indent=2)


print("Done!")
print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")
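
A minimal inspection sketch (not part of the PR; the file name and the "resampler." prefix come from the script above) for checking what the surgery step wrote out before running the image-encoder conversion:

```python
import torch

# llava.projector is saved next to the original model by minicpm-surgery.py;
# "../MiniCPM-V-2" is the example path from the README and is an assumption here.
projector = torch.load("../MiniCPM-V-2/llava.projector", map_location="cpu")

for name, tensor in projector.items():
    # Keys start with "resampler." because the surgery script keeps only
    # checkpoint entries where k.startswith("resampler").
    print(f"{name}: shape={tuple(tensor.shape)}, dtype={tensor.dtype}")
```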

examples/minicpmv/minicpmv-cli.cpp: 331 additions (new file)

@@ -0,0 +1,331 @@
#include "ggml.h"
#include "log.h"
#include "common.h"
#include "clip.h"
#include "minicpmv.h"
#include "llama.h"

#include "base64.hpp"

#include <cstdio>
#include <cstdlib>
#include <vector>

static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
    int N = (int) tokens.size();
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
            LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past);
}

static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
    std::string str2 = str;
    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
    eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
    return true;
}

static const char * sample(struct llama_sampling_context * ctx_sampling,
                           struct llama_context * ctx_llama,
                           int * n_past) {
    const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL);
    llama_sampling_accept(ctx_sampling, ctx_llama, id, true);
    static std::string ret;
    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
        ret = "</s>";
    } else {
        ret = llama_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past);
    return ret.c_str();
}

static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
static const char* IMG_BASE64_TAG_END = "\">";

static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
    begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
    end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
}

static bool prompt_contains_image(const std::string& prompt) {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    return (begin != std::string::npos);
}

// replaces the base64 image tag in the prompt with `replacement`
static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
    size_t img_base64_str_start, img_base64_str_end;
    find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
    if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
        LOG_TEE("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
        return NULL;
    }

    auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
    auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
    auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count );

    auto required_bytes = base64::required_encode_size(base64_str.size());
    auto img_bytes = std::vector<unsigned char>(required_bytes);
    base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());

    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
    if (!embed) {
        LOG_TEE("%s: could not load image from base64 string.\n", __func__);
        return NULL;
    }

    return embed;
}

static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    if (begin == std::string::npos || end == std::string::npos) {
        return prompt;
    }
    auto pre = prompt.substr(0, begin);
    auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
    return pre + replacement + post;
}

struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model * model = NULL;
};

static void show_additional_info(int /*argc*/, char ** argv) {
    LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) {

    // load and preprocess the image
    llava_image_embed * embed = NULL;
    auto prompt = params->prompt;
    if (prompt_contains_image(prompt)) {
        if (!params->image.empty()) {
            LOG_TEE("using base64 encoded image instead of command line image path\n");
        }
        embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->n_threads, prompt);
        if (!embed) {
            LOG_TEE("%s: can't load image from prompt\n", __func__);
            return NULL;
        }
        params->prompt = remove_image_from_prompt(prompt);
    } else {
        embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->n_threads, params->image.c_str());
        if (!embed) {
            LOG_TEE("%s: is %s really an image file?\n", __func__, params->image.c_str());
            return NULL;
        }
    }

    return embed;
}

static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, std::vector<std::vector<struct llava_image_embed *>> image_embed_slices, gpt_params * params, int &n_past) {
    std::string system_prompt;

    system_prompt = "<用户>";

    eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, true);
    llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
    if (!image_embed_slices.empty()) {
        eval_string(ctx_llava->ctx_llama, std::string("</image><slice>").c_str(), params->n_batch, &n_past, false);
        for (int i = 0; i < image_embed_slices.size(); ++i) {
            eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
            for (int j = 0; j < image_embed_slices[i].size(); ++j) {
                llava_eval_image_embed(ctx_llava->ctx_llama, image_embed_slices[i][j], params->n_batch, &n_past);
                if (j != image_embed_slices[i].size() - 1) {
                    eval_string(ctx_llava->ctx_llama, std::string("</image><image>").c_str(), params->n_batch, &n_past, false);
                } else {
                    if (i != image_embed_slices.size() - 1) {
                        eval_string(ctx_llava->ctx_llama, std::string("</image>\n").c_str(), params->n_batch, &n_past, false);
                    } else {
                        eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
                    }
                }
            }
        }
        eval_string(ctx_llava->ctx_llama, std::string("</slice>\n").c_str(), params->n_batch, &n_past, false);

    } else {
        eval_string(ctx_llava->ctx_llama, std::string("</image>\n").c_str(), params->n_batch, &n_past, false);
    }
}

static void process_user_input(struct llava_context * ctx_llava, gpt_params * params, std::string prompt, int &n_past, bool is_first = false) {
    std::string user_prompt = prompt + "<AI>";
    if (!is_first) user_prompt = "<用户>" + prompt;
    const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;

    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);

    LOG_TEE("\n");

    // generate the response

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
    std::string response = "";
    for (int i = 0; i < max_tgt_len; i++) {
        const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
        response += tmp;
        if (strcmp(tmp, "</s>") == 0) break;
        if (strstr(tmp, "###")) break; // Yi-VL behavior
        printf("%s", tmp);
        if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
        if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
        if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
        if (strstr(response.c_str(), "<用户>")) break; // minicpm-v

        fflush(stdout);
    }

    llama_sampling_free(ctx_sampling);
    printf("\n");
}

static struct llava_context * llava_init(gpt_params * params) {
    const char * clip_path = params->mmproj.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }

    auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);

    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = llama_model_params_from_gpt_params(*params);

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n" , __func__);
        return NULL;
    }

    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
    ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

    if (ctx_llama == NULL) {
        LOG_TEE("%s: error: failed to create the llama_context\n" , __func__);
        return NULL;
    }

    auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->ctx_clip = ctx_clip;
    ctx_llava->model = model;
    return ctx_llava;
}

static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_free_model(ctx_llava->model);
    llama_backend_free();
}

static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    LOG_TEE("%s", text);
}

int main(int argc, char ** argv) {
    ggml_time_init();

    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        show_additional_info(argc, argv);
        return 1;
    }

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("llava", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
    llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS

    if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
        gpt_print_usage(argc, argv, params);
        show_additional_info(argc, argv);
        return 1;
    }

    auto ctx_llava = llava_init(&params);
    if (ctx_llava == NULL) {
        LOG_TEE("%s: error: failed to init llava\n", __func__);
        return 1;
    }

    // auto image_embed = load_image(ctx_llava, &params);
    auto image_embed_and_slices = llava_image_embed_make_with_filename_slice(ctx_llava->ctx_clip, params.n_threads, params.image.c_str());
    auto image_embed = image_embed_and_slices.first;
    auto image_embed_slices = image_embed_and_slices.second;

    if (!image_embed) {
        LOG_TEE("%s: is %s really an image file?\n", __func__, params.image.c_str());
        return 1;
    }

    // process the prompt
    if (params.prompt.empty() && params.interactive == false) {
        LOG_TEE("prompt should be given or interactive mode should be on");
        return 1;
    }

    int n_past = 0;
    process_image(ctx_llava, image_embed, image_embed_slices, &params, n_past);

    if (!params.prompt.empty()) {
        LOG_TEE("<用户>%s\n", params.prompt.c_str());
        LOG_TEE("<AI>");
        process_user_input(ctx_llava, &params, params.prompt, n_past, true);
    } else {
        while (true) {
            LOG_TEE("<用户>");
            std::string prompt;
            std::getline(std::cin, prompt);
            LOG_TEE("<AI>");
            process_user_input(ctx_llava, &params, prompt, n_past, true);
        }
    }

    llama_print_timings(ctx_llava->ctx_llama);

    llava_image_embed_free(image_embed);
    llava_free(ctx_llava);
    return 0;
}
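
For orientation (an editor's illustration, not code from the PR): the tag strings used by `process_image` and `process_user_input` above assemble a sequence of roughly the following shape, where each `[embed ...]` placeholder stands for image embeddings fed in via `llava_eval_image_embed` rather than as text tokens. The example assumes a single image sliced into a 2x2 grid.

```python
# Hypothetical layout of the evaluated sequence for a 2x2 slice grid.
prompt_layout = (
    "<用户>"                                                               # user-turn marker (system_prompt)
    "<image>" "[embed base]" "</image>"
    "<slice>"
    "<image>" "[embed 0,0]" "</image><image>" "[embed 0,1]" "</image>\n"  # first slice row
    "<image>" "[embed 1,0]" "</image><image>" "[embed 1,1]" "</image>"    # last slice row
    "</slice>\n"
    "What is in the image?" "<AI>"                                         # user text, then assistant marker
)
print(prompt_layout)
```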
556
examples/minicpmv/minicpmv.cpp
Normal file
556
examples/minicpmv/minicpmv.cpp
Normal file
|
@ -0,0 +1,556 @@
|
||||||
|
#include "clip.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "llama.h"
|
||||||
|
#include "minicpmv.h"
|
||||||
|
#include "base64.hpp"
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
#include <cstdlib>
|
||||||
|
#include <vector>
|
||||||
|
#include <numeric>
|
||||||
|
|
||||||
|
// RGB uint8 image
|
||||||
|
struct clip_image_u8 {
|
||||||
|
int nx;
|
||||||
|
int ny;
|
||||||
|
|
||||||
|
std::vector<uint8_t> buf;
|
||||||
|
};
|
||||||
|
|
||||||
|
// RGB float32 image (NHWC)
|
||||||
|
// Memory layout: RGBRGBRGB...
|
||||||
|
struct clip_image_f32 {
|
||||||
|
int nx;
|
||||||
|
int ny;
|
||||||
|
|
||||||
|
std::vector<float> buf;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct clip_image_grid_shape {
|
||||||
|
int first;
|
||||||
|
int second;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Selects the best resolution from a list of possible resolutions based on the original size.
|
||||||
|
*
|
||||||
|
* @param original_size The original size of the image in the format (width, height).
|
||||||
|
* @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
|
||||||
|
* @return The best fit resolution in the format (width, height).
|
||||||
|
*/
|
||||||
|
static std::pair<int, int> select_best_resolution(const std::pair<int, int>& original_size, const std::vector<std::pair<int, int>>& possible_resolutions) {
|
||||||
|
int original_width = original_size.first;
|
||||||
|
int original_height = original_size.second;
|
||||||
|
|
||||||
|
std::pair<int, int> best_fit;
|
||||||
|
int max_effective_resolution = 0;
|
||||||
|
int min_wasted_resolution = std::numeric_limits<int>::max();
|
||||||
|
|
||||||
|
for (const auto& resolution : possible_resolutions) {
|
||||||
|
int width = resolution.first;
|
||||||
|
int height = resolution.second;
|
||||||
|
float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
|
||||||
|
int downscaled_width = static_cast<int>(original_width * scale);
|
||||||
|
int downscaled_height = static_cast<int>(original_height * scale);
|
||||||
|
int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
|
||||||
|
int wasted_resolution = (width * height) - effective_resolution;
|
||||||
|
// LOG_TEE("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
|
||||||
|
if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
|
||||||
|
max_effective_resolution = effective_resolution;
|
||||||
|
min_wasted_resolution = wasted_resolution;
|
||||||
|
best_fit = resolution;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return best_fit;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Get the anyres image grid shape object
|
||||||
|
*
|
||||||
|
* @param image_size
|
||||||
|
* @param grid_pinpoints
|
||||||
|
* @param image_patch_size
|
||||||
|
* @return <int, int>
|
||||||
|
*/
|
||||||
|
static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<int, int> & image_size, const std::vector<std::pair<int, int>> & grid_pinpoints, int image_patch_size) {
|
||||||
|
/**
|
||||||
|
Conversion from gguf flat array to vector:
|
||||||
|
std::vector<std::pair<int, int>> possible_resolutions;
|
||||||
|
for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
|
||||||
|
possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
auto best_resolution = select_best_resolution(image_size, grid_pinpoints);
|
||||||
|
return {best_resolution.first / image_patch_size, best_resolution.second / image_patch_size};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
|
||||||
|
static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) {
|
||||||
|
struct {
|
||||||
|
struct ggml_tensor * newline;
|
||||||
|
struct ggml_context * ctx;
|
||||||
|
} model;
|
||||||
|
|
||||||
|
const int32_t image_size = clip_image_size(ctx_clip);
|
||||||
|
const int32_t patch_size = clip_patch_size(ctx_clip);
|
||||||
|
|
||||||
|
int32_t num_patches_per_side = image_size / patch_size; // 336 / 14 = 24 - used for embedding-patching boxes (24*24 = 576 patches)
|
||||||
|
|
||||||
|
int num_patches_width = grid_shape.first; // grid 1-4
|
||||||
|
int num_patches_height = grid_shape.second; // grid 1-4
|
||||||
|
|
||||||
|
const size_t num_images = num_patches_width * num_patches_height + 1;
|
||||||
|
|
||||||
|
// TODO: size calculation is not calculated - it's only tens of MB
|
||||||
|
size_t ctx_size = 0;
|
||||||
|
|
||||||
|
{
|
||||||
|
ctx_size += clip_embd_nbytes(ctx_clip) * num_images * 8; // image_features
|
||||||
|
ctx_size += 1024*1024 * ggml_type_size(GGML_TYPE_F32);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_init_params params {
|
||||||
|
/*.mem_size =*/ ctx_size,
|
||||||
|
/*.mem_buffer =*/ NULL,
|
||||||
|
/*.no_alloc =*/ false, // NOTE: this should be false when using the legacy API
|
||||||
|
};

    // Python reference code for full unpad:
    /*
        base_image_feature = image_feature[0]
        image_feature = image_feature[1:]
        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
        image_feature = unpad_image(image_feature, image_sizes[image_idx])
        image_feature = torch.cat((
            image_feature,
            self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)
        ), dim=-1)
        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
    */
    // We now have two options: unpad or no unpad. Unpad removes tokens for faster llm eval.
    // In terms of result quality it appears to make no difference, so we'll start with the easier approach given 5D tensors are not supported in ggml yet.
    // Without unpad we have to split the sub-image embeddings into patches of 24 features each and permute them.
    // Once all images are processed, the base_image_features are prepended without any changes.

    // Pytorch reference simplified, modified for ggml compatibility - confirmed identical output in python (for a 2x2 grid image (676x676 scaling))
    /*
        image_feature = image_feature.view(2, 2, 24, 24, 4096)
        image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
        image_feature = image_feature.view(2, 24, 2, 24, 4096)
        image_feature = image_feature.flatten(0, 3)

        // Reshape to 4D tensor by merging the last two dimensions
        image_feature = image_feature.view(2, 2, 24, 24*4096)
        image_feature = image_feature.permute(0, 2, 1, 3).contiguous()
        image_feature = image_feature.view(-1, 4096)
    */

    model.ctx = ggml_init(params);

    ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip);
    model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]);
    if (newline_tmp->backend != GGML_BACKEND_TYPE_CPU) {
        if (newline_tmp->buffer == NULL) {
            LOG_TEE("newline_tmp tensor buffer is NULL\n");
        }
        ggml_backend_tensor_get(newline_tmp, model.newline->data, 0, ggml_nbytes(newline_tmp));
    } else {
        model.newline->data = newline_tmp->data;
        if (model.newline->data == NULL) {
            LOG_TEE("newline_tmp tensor data is NULL\n");
        }
    }

    struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4
    // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
    // fill it with the image embeddings, ignoring the base
    for (size_t i = 1; i < num_images; i++) {
        size_t offset = (i-1) * clip_embd_nbytes(ctx_clip);
        memcpy((uint8_t *)(image_features->data) + offset, image_embd_v[i], clip_embd_nbytes(ctx_clip));
    }

    struct ggml_cgraph * gf = ggml_new_graph(model.ctx);
    size_t size_ele = ggml_type_size(GGML_TYPE_F32);
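    // view the grid-cell embeddings as a 4D tensor:
    //   ne0 = n_embd * n_patches_per_side  (one row of patches inside one cell)
    //   ne1 = n_patches_per_side           (patch rows inside one cell)
    //   ne2 = n_patches_width              (grid cells per row)
    //   ne3 = n_patches_height             (grid rows)
    // permuting ne1 and ne2 (and making the result contiguous) orders the data as
    // grid row -> patch row -> grid column -> patches, i.e. full image-width rows of
    // patch embeddings, which the 2D "flatten" view below reads out in one pass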

    struct ggml_tensor *image_features_patchview = ggml_view_4d(model.ctx, image_features,
                                                                num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
                                                                num_patches_per_side,
                                                                num_patches_width,
                                                                num_patches_height,
                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side,
                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side * num_patches_width, 0);
    // ggml_tensor_printf(image_features_patchview,"image_features_patchview",__LINE__,false,false);
    struct ggml_tensor *permuted_cont = ggml_cont(model.ctx, ggml_permute(model.ctx, image_features_patchview, 0, 2, 1, 3));
    /**
        At the end of each row we have to add the row_end embeddings, which are the same as the newline embeddings
            image_feature = torch.cat((
                image_feature,
                self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
            ), dim=-1)
     */

    // ggml_tensor_printf(permuted_cont,"permuted_cont",__LINE__,false,false);
    struct ggml_tensor *flatten = ggml_view_2d(model.ctx, permuted_cont, clip_n_mmproj_embd(ctx_clip), num_patches_height * num_patches_width * num_patches_per_side * num_patches_per_side, size_ele * clip_n_mmproj_embd(ctx_clip), 0);
    // ggml_tensor_printf(flatten,"flatten",__LINE__,false,false);
    ggml_build_forward_expand(gf, flatten);
    ggml_graph_compute_with_ctx(model.ctx, gf, 1);
    struct ggml_tensor* result = gf->nodes[gf->n_nodes - 1];

    memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as global context
    // append without newline tokens (default behavior in llava_arch when not using unpad):
    memcpy(image_embd_out + clip_n_patches(ctx_clip) * clip_n_mmproj_embd(ctx_clip), (float*)result->data, clip_embd_nbytes(ctx_clip) * (num_images-1)); // grid patches
    *n_img_pos_out = static_cast<int>(result->ne[1]+clip_n_patches(ctx_clip));
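    // image_embd_out now holds clip_n_patches() embeddings for the base image,
    // followed by the grid-cell patch embeddings row by row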

    // Debug: Test single segments
    // Current findings: sending base image, sending a segment embedding all works similar to python
    // However, permuted embeddings do not work yet (stride issue?)
    // memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as context
    // memcpy(image_embd_out, (float*)prepared_cont->data, clip_embd_nbytes(ctx_clip)); // main image as context
    // *n_img_pos_out=576;

    ggml_free(model.ctx);
    return true;
}


static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) {
    // std::vector<clip_image_f32*> img_res_v; // format VectN x H x W x RGB (N x 336 x 336 x 3), so interleaved RGB - different to the python implementation which is N x 3 x 336 x 336
    clip_image_f32_batch img_res_v;
    img_res_v.size = 0;
    img_res_v.data = nullptr;
    if (!clip_image_preprocess(ctx_clip, img, &img_res_v)) {
        LOG_TEE("%s: unable to preprocess image\n", __func__);
        delete[] img_res_v.data;
        return false;
    }

    const int64_t t_img_enc_start_us = ggml_time_us();

    const char * mm_patch_merge_type = clip_patch_merge_type(ctx_clip);

    if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
        // flat / default llava-1.5 type embedding
        *n_img_pos = clip_n_patches(ctx_clip);
        bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); // image_embd shape is 576 x 4096
        delete[] img_res_v.data;
        if (!encoded) {
            LOG_TEE("Unable to encode image\n");

            return false;
        }
    } else {
        // spatial_unpad llava-1.6 type embedding
        // TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a solution to quickly get batching working
        std::vector<float *> image_embd_v;
        image_embd_v.resize(img_res_v.size);
        for (size_t i = 0; i < img_res_v.size; i++) {
            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184
            const bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside
            if (!encoded) {
                LOG_TEE("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
                return false;
            }
        }
        const int64_t t_img_enc_batch_us = ggml_time_us();
        LOG_TEE("%s: %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);

        const int32_t * image_grid = clip_image_grid(ctx_clip);

        std::vector<std::pair<int, int>> grid_pinpoints;
        for (int i = 0; i < 32 && image_grid[i] != 0; i += 2) {
            grid_pinpoints.push_back({image_grid[i], image_grid[i+1]});
        }

        // free all img_res_v - not needed anymore
        delete[] img_res_v.data;
        img_res_v.size = 0;
        img_res_v.data = nullptr;

        const int32_t image_size = clip_image_size(ctx_clip);

        struct clip_image_grid_shape grid_shape = get_anyres_image_grid_shape({img->nx,img->ny}, grid_pinpoints, image_size);

        int n_img_pos_out;
        clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out);
        *n_img_pos = n_img_pos_out;

        for (size_t i = 0; i < image_embd_v.size(); i++) {
            free(image_embd_v[i]);
        }
        image_embd_v.clear();

        // debug image/segment/normalization content:
        // clip_image_u8 * tmp = clip_image_u8_init();
        // clip_image_convert_f32_to_u8(*image_feature, *tmp);
        // clip_image_save_to_bmp(*tmp, "image_feature.bmp");
    }

    LOG_TEE("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);

    const int64_t t_img_enc_end_us = ggml_time_us();
    float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0;

    LOG_TEE("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos);

    return true;
}

bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) {
    // make sure that the correct mmproj was used, i.e., compare apples to apples
    int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama));
    auto n_image_embd = clip_n_mmproj_embd(ctx_clip);
    if (n_image_embd != n_llama_embd) {
        LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd);
        return false;
    }
    return true;
}

bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) {
    float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*6); // TODO: base on gridsize/llava model
    if (!image_embd) {
        LOG_TEE("Unable to allocate memory for image embeddings\n");
        return false;
    }

    int n_img_pos;
    if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) {
        LOG_TEE("%s: cannot encode image, aborting\n", __func__);
        free(image_embd);
        return false;
    }
    *image_embd_out = image_embd;
    *n_img_pos_out = n_img_pos;

    return true;
}

bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
    int n_embd = llama_n_embd(llama_get_model(ctx_llama));

    for (int i = 0; i < image_embed->n_image_pos; i += n_batch) {
        int n_eval = image_embed->n_image_pos - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
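        // a batch of n_eval embedding vectors (no token ids), evaluated at positions starting at *n_past in sequence 0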
        if (llama_decode(ctx_llama, batch)) {
            LOG_TEE("%s : failed to eval\n", __func__);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
    clip_image_u8 * img = clip_image_u8_init();
    if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
        clip_image_u8_free(img);
        LOG_TEE("%s: can't load image from bytes, is it a valid image?", __func__);
        return NULL;
    }

    float* image_embed = NULL;
    int n_image_pos = 0;
    bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos);
    if (!image_embed_result) {
        clip_image_u8_free(img);
        LOG_TEE("%s: couldn't embed the image\n", __func__);
        return NULL;
    }

    clip_image_u8_free(img);
    auto result = (llava_image_embed*)malloc(sizeof(llava_image_embed));
    result->embed = image_embed;
    result->n_image_pos = n_image_pos;
    return result;
}

std::vector<std::vector<clip_image_u8 *>> slice_image(const clip_image_u8 * img, const int max_slice_nums=9, const int scale_resolution=448, const int patch_size=14, const bool never_split=false) {

    const int original_width  = img->nx;
    const int original_height = img->ny;
    const float log_ratio = log(1.0*original_width/original_height);
    const float ratio = 1.0 * original_width * original_height / (scale_resolution * scale_resolution);
    const int multiple = fmin(ceil(ratio), max_slice_nums);
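    // ratio compares the image area against scale_resolution^2 (448^2 by default);
    // multiple is the number of slices to aim for, capped at max_slice_nums
    // e.g. a 1344x896 image gives ratio = 6.0 -> multiple = 6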

    std::vector<std::vector<clip_image_u8 *>> images;
    LOG_TEE("%s: multiple %d\n", __func__, multiple);
    if (multiple > 1) {

        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<std::pair<int, int>> candidate_grids;

        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.emplace_back(m, split_grids_nums / m);
                }
                ++m;
            }
        }

        std::pair<int, int> best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();

        for (const auto& grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.first / grid.second));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        LOG_TEE("%s: image_size: %d %d; best_grid: %d %d\n", __func__, img->nx, img->ny, best_grid.first, best_grid.second);

        // split_to_patches
        int width  = img->nx;
        int height = img->ny;
        int grid_x = int(width  / best_grid.first);
        int grid_y = int(height / best_grid.second);
        for (int patches_i = 0, ic = 0; patches_i < height && ic < best_grid.second; patches_i += grid_y, ic += 1) {
            images.push_back(std::vector<clip_image_u8 *>());
            for (int patches_j = 0, jc = 0; patches_j < width && jc < best_grid.first; patches_j += grid_x, jc += 1) {
                clip_image_u8 * patch = clip_image_u8_init();
                patch->nx = grid_x;
                patch->ny = grid_y;
                patch->buf.resize(3 * patch->nx * patch->ny);
                for (int y = patches_i; y < patches_i + grid_y; ++y) {
                    for (int x = patches_j; x < patches_j + grid_x; ++x) {
                        const int i = 3 * (y * img->nx + x);
                        const int j = 3 * ((y-patches_i) * patch->nx + (x-patches_j));
                        patch->buf[j]   = img->buf[i];
                        patch->buf[j+1] = img->buf[i+1];
                        patch->buf[j+2] = img->buf[i+2];
                    }
                }
                images[images.size()-1].push_back(patch);
            }
        }

    }

    return images;
}


std::vector<std::vector<struct llava_image_embed *>> llava_image_embed_make_with_bytes_slice(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
    clip_image_u8 * img = clip_image_u8_init();
    if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
        clip_image_u8_free(img);
        LOG_TEE("%s: can't load image from bytes, is it a valid image?", __func__);
        return std::vector<std::vector<struct llava_image_embed *>>();
    }
    std::vector<std::vector<clip_image_u8 *>> imgs = slice_image(img);
    for (size_t i = 0; i < imgs.size(); ++i) {
        for (size_t j = 0; j < imgs[i].size(); ++j) {
            LOG_TEE("%s: %d %d\n", __func__, imgs[i][j]->nx, imgs[i][j]->ny);
        }
    }
    std::vector<std::vector<llava_image_embed *>> results;

    for (size_t i = 0; i < imgs.size(); ++i) {
        results.push_back(std::vector<llava_image_embed *>());
        for (size_t j = 0; j < imgs[i].size(); ++j) {
            float* image_embed = NULL;
            int n_image_pos = 0;
            bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, imgs[i][j], &image_embed, &n_image_pos);
            if (!image_embed_result) {
                clip_image_u8_free(img);
                LOG_TEE("%s: couldn't embed the image\n", __func__);
                return std::vector<std::vector<struct llava_image_embed *>>();
            }

            auto result = (llava_image_embed*)malloc(sizeof(llava_image_embed));
            result->embed = image_embed;
            result->n_image_pos = n_image_pos;
            results[i].push_back(result);
        }
    }
    clip_image_u8_free(img);
    return results;
}


static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) {
    auto file = fopen(path, "rb");
    if (file == NULL) {
        LOG_TEE("%s: can't read file %s\n", __func__, path);
        return false;
    }

    fseek(file, 0, SEEK_END);
    auto fileSize = ftell(file);
    fseek(file, 0, SEEK_SET);

    auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data
    if (buffer == NULL) {
        LOG_TEE("%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path);
        perror("Memory allocation error");
        fclose(file);
        return false;
    }
    errno = 0;
    size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer
    if (ferror(file)) {
        die_fmt("read error: %s", strerror(errno));
    }
    if (ret != (size_t) fileSize) {
        die("unexpectedly reached end of file");
    }
    fclose(file); // Close the file

    *bytesOut = buffer;
    *sizeOut = fileSize;
    return true;
}

struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
    unsigned char* image_bytes;
    long image_bytes_length;
    auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);
    if (!loaded) {
        LOG_TEE("%s: failed to load %s\n", __func__, image_path);
        return NULL;
    }

    llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
    free(image_bytes);

    return embed;
}

std::pair<struct llava_image_embed *, std::vector<std::vector<struct llava_image_embed *>>> llava_image_embed_make_with_filename_slice(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
    unsigned char* image_bytes;
    long image_bytes_length;
    auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);
    if (!loaded) {
        LOG_TEE("%s: failed to load %s\n", __func__, image_path);
        return {NULL, std::vector<std::vector<struct llava_image_embed *>>()};
    }

    llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
    std::vector<std::vector<struct llava_image_embed *>> embeds = llava_image_embed_make_with_bytes_slice(ctx_clip, n_threads, image_bytes, image_bytes_length);
    free(image_bytes);
    return {embed, embeds};
}


void llava_image_embed_free(struct llava_image_embed * embed) {
    free(embed->embed);
    free(embed);
}
53
examples/minicpmv/minicpmv.h
Normal file
@@ -0,0 +1,53 @@
#ifndef LLAVA_H
#define LLAVA_H

#include "ggml.h"

#ifdef __cplusplus
#include <utility>
#include <vector>
#endif

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define LLAVA_API __declspec(dllexport)
#        else
#            define LLAVA_API __declspec(dllimport)
#        endif
#    else
#        define LLAVA_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define LLAVA_API
#endif

struct clip_ctx;

#ifdef __cplusplus
extern "C" {
#endif

struct llava_image_embed {
    float * embed;
    int n_image_pos;
};

/** sanity check for clip <-> llava embed size match */
LLAVA_API bool llava_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip);

LLAVA_API bool llava_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);

/** build an image embed from image file bytes */
LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
/** build an image embed from a path to an image filename */
LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
/** free an embedding made with llava_image_embed_make_* */
LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);

/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);

#ifdef __cplusplus
}
#endif

LLAVA_API std::vector<std::vector<struct llava_image_embed *>> llava_image_embed_make_with_bytes_slice(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
LLAVA_API std::pair<struct llava_image_embed *, std::vector<std::vector<struct llava_image_embed *>>> llava_image_embed_make_with_filename_slice(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);

#endif
3
examples/minicpmv/requirements.txt
Normal file
@@ -0,0 +1,3 @@
-r ../../requirements/requirements-convert.txt
pillow~=10.2.0
torch~=2.1.1

@@ -645,6 +645,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
     ],
     MODEL_ARCH.MINICPM: [
         MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_NORM,
24
llama.cpp
@@ -5120,18 +5120,16 @@
         case LLM_ARCH_MINICPM:
             {
                 model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                // if output is NULL, init from the input tok embed
+                if (model.output == NULL) {
+                    model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                    ml.n_created--; // artificial tensor
+                    ml.size_data += ggml_nbytes(model.output);
+                }
 
                 // output
-                {
-                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-                    if (model.arch != LLM_ARCH_MINICPM){
-                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        // if output is NULL, init from the input tok embed
-                        if (model.output == NULL) {
-                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                        }
-                    }
-                }
+                model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
 
                 for (int i = 0; i < n_layer; ++i) {
                     ggml_context * ctx_layer = ctx_for_layer(i);
@@ -10096,7 +10094,9 @@ struct llm_build_context {
         inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
 
         // scale the input embeddings
-        inpL = ggml_scale(ctx0, inpL, scale_embd);
+        if (batch.token) {
+            inpL = ggml_scale(ctx0, inpL, scale_embd);
+        }
         cb(inpL, "inp_scaled", -1);
 
         // inp_pos - contains the positions
@@ -10212,7 +10212,7 @@ struct llm_build_context {
         cb(cur, "lmhead_scaling", -1);
 
         // lm_head
-        cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
+        cur = ggml_mul_mat(ctx0, model.output, cur);
         cb(cur, "result_output", -1);
 
         ggml_build_forward_expand(gf, cur);