vit v_tokenizer integration

root 2024-08-28 19:21:01 +00:00
parent 07baee57c9
commit f645b0bc8c
9 changed files with 505 additions and 53 deletions

@@ -20,6 +20,7 @@ BUILD_TARGETS = \
llama-infill \
llama-llava-cli \
llama-minicpmv-cli\
xgenmm-cli\
llama-lookahead \
llama-lookup \
llama-lookup-create \
@@ -1473,6 +1474,14 @@ llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp \
$(OBJ_ALL)
$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual
xgenmm-cli: examples/xgenmm/xgenmm-cli.cpp \
examples/xgenmm/xgenmm.cpp \
examples/xgenmm/xgenmm.h \
examples/xgenmm/clip.cpp \
examples/xgenmm/clip.h \
$(OBJ_ALL)
$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual
ifeq ($(UNAME_S),Darwin)
swift: examples/batched.swift
(cd examples/batched.swift; make build)

@@ -151,7 +151,7 @@ enum projector_type
PROJECTOR_TYPE_LDP,
PROJECTOR_TYPE_LDPV2,
PROJECTOR_TYPE_RESAMPLER,
PROJECTOR_TYPE_PERCIVER_RESAMPLER,
PROJECTOR_TYPE_PERCEIVER_RESAMPLER,
PROJECTOR_TYPE_UNKNOWN,
};
@@ -160,7 +160,7 @@ static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
{ PROJECTOR_TYPE_LDP, "ldp" },
{ PROJECTOR_TYPE_LDPV2, "ldpv2"},
{ PROJECTOR_TYPE_RESAMPLER, "resampler"},
{ PROJECTOR_TYPE_PERCIVER_RESAMPLER, "PercevierResampler"}
{ PROJECTOR_TYPE_PERCEIVER_RESAMPLER, "PerceiverResampler"} // CT: fix name
};
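Note: the right-hand string must match the clip.projector_type value that the GGUF converter writes, because clip_model_load resolves the projector by an exact name lookup over this map. A sketch of that lookup, assuming it keeps the shape of the existing clip_projector_type_from_string helper in clip.cpp:

static projector_type clip_projector_type_from_string(const std::string & name) {
    // exact string match against the names registered above
    for (const auto & kv : PROJECTOR_TYPE_NAMES) {
        if (kv.second == name) {
            return kv.first;
        }
    }
    return PROJECTOR_TYPE_UNKNOWN;
}

So the rename to "PerceiverResampler" only takes effect if the image-encoder conversion script emits the same spelling into the GGUF metadata.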
@@ -626,6 +626,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
image_size_height = imgs->data->ny;
}
}
if (ctx->has_xgenmm_projector) {
// TODO: implement additional inputs here, e.g. image masks
printf("use has_xgenmm_projector\n");
@@ -650,16 +651,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
/*.mem_buffer =*/ ctx->buf_compute_meta.data(),
/*.no_alloc =*/ true,
};
struct ggml_context * ctx0 = ggml_init(params);
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size);
ggml_set_name(inp_raw, "inp_raw");
ggml_set_input(inp_raw);
struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
@@ -669,7 +666,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
}
struct ggml_tensor * embeddings = inp;
struct ggml_tensor * pos_embed = nullptr;
if (ctx->has_llava_projector) {
printf("use has_llava_projector\n");
// concat class_embeddings and patch_embeddings
@@ -686,15 +682,15 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
}
}
printf("hi1!");
// printf("hi1!");
struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
ggml_set_name(positions, "positions");
ggml_set_input(positions);
printf("hi2!");
// printf("hi2!");
embeddings =
ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));
printf("hi3!");
// printf("hi3!");
if (ctx->has_minicpmv_projector) {
int pos_w = image_size_width/patch_size;
int pos_h = image_size_height/patch_size;
@@ -707,7 +703,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
ggml_set_name(pos_embed, "pos_embed");
ggml_set_input(pos_embed);
}
// pre-layernorm
if (ctx->has_pre_norm) {
embeddings = ggml_norm(ctx0, embeddings, eps);
@@ -715,7 +710,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
}
// loop over layers
if (ctx->has_minicpmv_projector) {
n_layer += 1;
@@ -766,7 +760,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size);
}
// attention output
cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
@@ -774,7 +767,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
cur = ggml_add(ctx0, cur, embeddings);
embeddings = cur; // embeddings = residual, cur = hidden_states
// layernorm2
{
cur = ggml_norm(ctx0, cur, eps);
@@ -799,7 +791,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings = cur;
}
// post-layernorm
if (ctx->has_post_norm) {
embeddings = ggml_norm(ctx0, embeddings, eps);
@@ -807,7 +798,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
}
// llava projector
if (ctx->has_llava_projector) {
embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
@@ -1062,7 +1052,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
// xgenmm-projector
else if (ctx->has_xgenmm_projector)
{
if (ctx->proj_type == PROJECTOR_TYPE_PERCIVER_RESAMPLER)
if (ctx->proj_type == PROJECTOR_TYPE_PERCEIVER_RESAMPLER)
{
struct ggml_tensor * self_latents = model.mm_model_latents;
struct ggml_tensor *img_embeddings = embeddings;
@@ -1287,8 +1277,8 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
{
int idx = gguf_find_key(ctx, KEY_PROJ_TYPE);
if (idx != -1) {
const std::string proj_type = gguf_get_val_str(ctx, idx);
new_clip->proj_type = clip_projector_type_from_string(proj_type);
const std::string proj_type = gguf_get_val_str(ctx, idx); // CT: assign projector name
new_clip->proj_type = clip_projector_type_from_string(proj_type);
} else {
new_clip->proj_type = PROJECTOR_TYPE_MLP;
}
@@ -1333,7 +1323,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
if (idx != -1) {
new_clip->has_llava_projector = gguf_get_val_bool(ctx, idx);
}
idx = gguf_find_key(ctx, KEY_HAS_MINICPMV_PROJ);
if (idx != -1) {
new_clip->has_minicpmv_projector = gguf_get_val_bool(ctx, idx);
@@ -1344,6 +1334,11 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx);
}
idx = gguf_find_key(ctx, KEY_HAS_XGENMM_PROJ); // CT: checked.
if (idx != -1) {
new_clip->has_xgenmm_projector = gguf_get_val_bool(ctx, idx);
}
// GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search
GGML_ASSERT(new_clip->has_vision_encoder);
@@ -1357,6 +1352,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
LOG_TEE("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder);
LOG_TEE("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector);
LOG_TEE("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector);
LOG_TEE("%s: xgenmm_projector: %d\n", __func__, new_clip->has_xgenmm_projector);
LOG_TEE("%s: model size: %.2f MB\n", __func__, model_size / 1024.0 / 1024.0);
LOG_TEE("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
}
@@ -1437,7 +1433,6 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
hparams.patch_size = get_u32(ctx, KEY_PATCH_SIZE);
hparams.projection_dim = get_u32(ctx, format(KEY_PROJ_DIM, "vision"));
hparams.eps = get_f32(ctx, format(KEY_LAYER_NORM_EPS, "vision"));
try {
int idx = get_key_idx(ctx, KEY_IMAGE_GRID_PINPOINTS);
int n = gguf_get_arr_n(ctx, idx);
@@ -1619,7 +1614,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight"));
vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias"));
}
else if(new_clip->proj_type == PROJECTOR_TYPE_PERCIVER_RESAMPLER){
else if(new_clip->proj_type == PROJECTOR_TYPE_PERCEIVER_RESAMPLER){
vision_model.mm_model_latents = ggml_get_tensor(new_clip->ctx_data, "perceiver_resampler.latents");
vision_model.mm_model_projection_w =
ggml_get_tensor(new_clip->ctx_data, "perceiver_resampler.projection.weight");
@@ -1706,8 +1701,10 @@ void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size
struct clip_image_size * clip_image_size_init() {
struct clip_image_size * load_image_size = new struct clip_image_size();
load_image_size->width = 448;
load_image_size->height = 448;
// load_image_size->width = 448; // CT: this part is hard coded, need check
// load_image_size->height = 448;
load_image_size->width = 384; // CT: this part is hard coded, need check
load_image_size->height = 384;
return load_image_size;
}
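Note: the 384 default matches the SigLIP encoder's input resolution but, as the CT comment says, it is hard-coded. One possible follow-up (a sketch only; the helper name is hypothetical and not part of this commit) is to derive the default from the vision hparams that clip_model_load already reads from clip.vision.image_size:

// hypothetical variant: take the default from the loaded vision hparams instead of hard-coding 384
static struct clip_image_size * clip_image_size_init_from_hparams(const struct clip_ctx * ctx) {
    struct clip_image_size * s = new struct clip_image_size();
    s->width  = ctx->vision_model.hparams.image_size;
    s->height = ctx->vision_model.hparams.image_size;
    return s;
}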
@@ -2504,11 +2501,9 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
// build the inference graph
ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true);
ggml_gallocr_alloc_graph(ctx->compute_alloc, gf);
// set inputs
const auto & model = ctx->vision_model;
const auto & hparams = model.hparams;
const int image_size = hparams.image_size;
int image_size_width = image_size;
int image_size_height = image_size;
@@ -2522,9 +2517,9 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
if(ctx->load_image_size==nullptr){
ctx->load_image_size= clip_image_size_init();
}
ctx->load_image_size= clip_image_size_init(); // CT: hard code
const int pos_w = ctx->load_image_size->width/patch_size;
const int pos_h = ctx->load_image_size->height/patch_size;
{
struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
float * data = (float *)malloc(ggml_nbytes(inp_raw));
@@ -2611,7 +2606,6 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
free(zero_mem);
}
}
{
struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
@@ -2622,18 +2616,19 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
free(positions_data);
}
{
struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
int* patches_data = (int*)malloc(ggml_nbytes(patches));
for (int i = 0; i < num_patches; i++) {
patches_data[i] = i + 1;
}
ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
free(patches_data);
}
// {
// std::cout << __LINE__ << std::endl;
// struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
// std::cout << __LINE__ << std::endl;
// int* patches_data = (int*)malloc(ggml_nbytes(patches));
// std::cout << __LINE__ << std::endl;
// for (int i = 0; i < num_patches; i++) {
// patches_data[i] = i + 1;
// }
// ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
// free(patches_data);
// }
}
if (ggml_backend_is_cpu(ctx->backend)) {
ggml_backend_cpu_set_n_threads(ctx->backend, n_threads);
}
@@ -2643,15 +2638,11 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
ggml_backend_metal_set_n_cb(ctx->backend, n_threads);
}
#endif
ggml_backend_graph_compute(ctx->backend, gf);
// the last node is the embedding tensor
struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1];
// copy the embeddings to the location passed by the user
ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));
return true;
}
@@ -2809,6 +2800,9 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
return 3584;
}
}
if (ctx->proj_type == PROJECTOR_TYPE_PERCEIVER_RESAMPLER) {
return 3072; // CT: hard coded
}
std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
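Note: the 3072 returned for the perceiver resampler is the hidden size of the Phi-3-mini language model that the projector feeds. A possible way to avoid the hard-coding (an assumption, not part of this commit) is to read the width off the already-loaded projection weight, provided the converter stores it the way ggml linear weights are usually laid out (ne[0] = input dim, ne[1] = output dim):

if (ctx->proj_type == PROJECTOR_TYPE_PERCEIVER_RESAMPLER) {
    // hypothetical: derive the output width from perceiver_resampler.projection.weight
    return ctx->vision_model.mm_model_projection_w->ne[1];
}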
@@ -2820,3 +2814,10 @@ int clip_is_minicpmv(const struct clip_ctx * ctx) {
}
return 0;
}
int clip_is_xgenmm(const struct clip_ctx * ctx) {
if (ctx->has_xgenmm_projector) {
return 1;
}
return 0;
}

@@ -91,6 +91,7 @@ CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out
CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx);
CLIP_API int clip_is_xgenmm(const struct clip_ctx * ctx);
#ifdef __cplusplus
}
#endif

@@ -1,14 +1,13 @@
source /export/share/yutong/miniconda3/bin/activate
conda activate xgenmm-flamingo
which python
# source /export/share/yutong/miniconda3/bin/activate
# conda activate xgenmm-flamingo
# which python
# # step 1: surgery
# python xgenmm_surgery.py
# step 2: convert to gguf (vit + projector)
python xgenmm_convert_image_encoder_to_gguf.py \
python examples/xgenmm/xgenmm_convert_image_encoder_to_gguf.py \
--surgery_dir /export/share/yutong/xgenmm/llamacpp_wd \
--output_dirname gguf_test \
--version siglip_kosmos_phi3_4k_instruct \
--use_f32 \
--use_f32

examples/xgenmm/llava.h (new file, 49 lines)

@@ -0,0 +1,49 @@
#ifndef LLAVA_H
#define LLAVA_H
#include "ggml.h"
#ifdef LLAMA_SHARED
# if defined(_WIN32) && !defined(__MINGW32__)
# ifdef LLAMA_BUILD
# define LLAVA_API __declspec(dllexport)
# else
# define LLAVA_API __declspec(dllimport)
# endif
# else
# define LLAVA_API __attribute__ ((visibility ("default")))
# endif
#else
# define LLAVA_API
#endif
#ifdef __cplusplus
extern "C" {
#endif
struct clip_ctx;
struct llava_image_embed {
float * embed;
int n_image_pos;
};
/** sanity check for clip <-> llava embed size match */
LLAVA_API bool llava_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip);
LLAVA_API bool llava_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);
/** build an image embed from image file bytes */
LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
/** build an image embed from a path to an image filename */
LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
/** free an embedding made with llava_image_embed_make_* */
LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);
/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);
#ifdef __cplusplus
}
#endif
#endif
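The header only declares the embed helpers; a minimal usage sketch of the intended flow (ctx_clip, ctx_llama and the literal values are placeholders, not part of this commit):

// make an embed from an image file, feed it into the llama context, then free it
struct llava_image_embed * embed =
    llava_image_embed_make_with_filename(ctx_clip, /*n_threads=*/4, "image.jpg");
if (embed != NULL) {
    if (llava_validate_embed_size(ctx_llama, ctx_clip)) {
        int n_past = 0;
        llava_eval_image_embed(ctx_llama, embed, /*n_batch=*/512, &n_past);
    }
    llava_image_embed_free(embed);
}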

examples/xgenmm/xgenmm-cli.cpp (new file, 374 lines)

@@ -0,0 +1,374 @@
// refer to examples/llava/minicpmv-cli.cpp
#include "ggml.h"
#include "log.h"
#include "common.h"
#include "clip.h"
#include "llava.h"
#include "llama.h" // TODO: check if this head filde is necessary
#include <cstdio>
#include <cstdlib>
#include <cstring>   // std::memcpy used in process_eval_image_embed
#include <iostream>  // std::cout / std::cerr diagnostics below
#include <vector>
struct llava_context {
struct clip_ctx * ctx_clip = NULL;
struct llama_context * ctx_llama = NULL;
struct llama_model * model = NULL;
};
static void show_additional_info(int /*argc*/, char ** argv) {
LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
}
static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
(void) level;
(void) user_data;
LOG_TEE("%s", text);
}
static struct llama_model * llava_init(gpt_params * params) {
llama_backend_init();
llama_numa_init(params->numa);
llama_model_params model_params = llama_model_params_from_gpt_params(*params);
llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
if (model == NULL) {
LOG_TEE("%s: error: unable to load model\n" , __func__);
return NULL;
}
return model;
}
static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
auto prompt = params->prompt;
if (prompt.empty()) {
prompt = "describe the image in detail.";
}
llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
if (params->n_ctx < 2048) {
// warn user here, "Image processing requires at least 2048 context, setting context to 2048"
LOG_TEE("%s: warn: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
ctx_params.n_ctx = 2048;
} else {
ctx_params.n_ctx = params->n_ctx;
}
llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
if (ctx_llama == NULL) {
LOG_TEE("%s: error: failed to create the llama_context\n" , __func__);
return NULL;
}
auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
ctx_llava->ctx_llama = ctx_llama;
ctx_llava->model = model;
return ctx_llava;
}
static void llava_free(struct llava_context * ctx_llava) {
if (ctx_llava->ctx_clip) {
clip_free(ctx_llava->ctx_clip);
ctx_llava->ctx_clip = NULL;
}
llama_free(ctx_llava->ctx_llama);
llama_free_model(ctx_llava->model);
llama_backend_free();
}
static struct clip_ctx * clip_init_context(gpt_params * params) {
const char * clip_path = params->mmproj.c_str();
auto prompt = params->prompt;
if (prompt.empty()) {
prompt = "describe the image in detail.";
}
// std::cout << __LINE__ << std::endl;
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
return ctx_clip;
}
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
int N = (int) tokens.size();
for (int i = 0; i < N; i += n_batch) {
int n_eval = (int) tokens.size() - i;
if (n_eval > n_batch) {
n_eval = n_batch;
}
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
return false;
}
*n_past += n_eval;
}
return true;
}
static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
std::vector<llama_token> tokens;
tokens.push_back(id);
return eval_tokens(ctx_llama, tokens, 1, n_past);
}
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
std::string str2 = str;
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}
static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));
auto slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
slice_embed->embed = image_embed;
slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
llava_image_embed_free(slice_embed);
}
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, gpt_params * params, int &n_past) {
std::string system_prompt;
int idx = 0;
int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
if (has_minicpmv_projector == 2) {
system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
}
else if (has_minicpmv_projector == 3) {
system_prompt = "<|im_start|>user\n";
}
LOG_TEE("%s: image token past: %d\n", __func__, n_past);
eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
if (num_image_embeds > 1) {
size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
for (size_t j = 0; j < num_image_embeds_col; ++j) {
eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
if (j == num_image_embeds_col - 1) {
eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
}
}
}
eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
}
LOG_TEE("%s: image token past: %d\n", __func__, n_past);
}
static const char * sample(struct llama_sampling_context * ctx_sampling,
struct llama_context * ctx_llama,
int * n_past) {
const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL);
llama_sampling_accept(ctx_sampling, ctx_llama, id, true);
static std::string ret;
if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
ret = "</s>";
} else {
ret = llama_token_to_piece(ctx_llama, id);
}
eval_id(ctx_llama, id, n_past);
return ret.c_str();
}
// static struct llava_context * minicpmv_init(gpt_params * params, const std::string & fname, int &n_past){
// auto ctx_clip = clip_init_context(params);
// auto embeds = llava_image_embed_make_with_filename(ctx_clip, params->n_threads, fname.c_str());
// if (!embeds) {
// std::cerr << "error: failed to load image " << fname << ". Terminating\n\n";
// return NULL;
// }
// // process the prompt
// if (params->prompt.empty() && params->interactive == false) {
// LOG_TEE("prompt should be given or interactive mode should be on");
// return NULL;
// }
// auto model = llava_init(params);
// if (model == NULL) {
// fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
// return NULL;
// }
// const int64_t t_llava_init_start_us = ggml_time_us();
// auto ctx_llava = llava_init_context(params, model);
// ctx_llava->ctx_clip = ctx_clip;
// const int64_t t_llava_init_end_us = ggml_time_us();
// float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
// LOG_TEE("\n%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);
// const int64_t t_process_image_start_us = ggml_time_us();
// process_image(ctx_llava, embeds, params, n_past);
// const int64_t t_process_image_end_us = ggml_time_us();
// float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
// LOG_TEE("\n%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
// llava_image_embed_free(embeds);
// return ctx_llava;
// }
static struct llava_context * xgenmm_init(gpt_params * params, const std::string & fname, int &n_past){
auto ctx_clip = clip_init_context(params);
std::cout << "clip model has been loaded \n\n";
auto embeds = llava_image_embed_make_with_filename(ctx_clip, params->n_threads, fname.c_str());
if (!embeds) {
std::cerr << "error: failed to load image " << fname << ". Terminating\n\n";
return NULL;
}
std::cout<< "Start Processing Prompt" << std::endl;
exit(1);
// process the prompt
if (params->prompt.empty() && params->interactive == false) {
LOG_TEE("prompt should be given or interactive mode should be on");
return NULL;
}
auto model = llava_init(params);
if (model == NULL) {
fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
return NULL;
}
const int64_t t_llava_init_start_us = ggml_time_us();
auto ctx_llava = llava_init_context(params, model);
ctx_llava->ctx_clip = ctx_clip;
const int64_t t_llava_init_end_us = ggml_time_us();
float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
LOG_TEE("\n%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);
const int64_t t_process_image_start_us = ggml_time_us();
process_image(ctx_llava, embeds, params, n_past);
const int64_t t_process_image_end_us = ggml_time_us();
float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
LOG_TEE("\n%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
llava_image_embed_free(embeds);
return ctx_llava;
}
static struct llama_sampling_context * llama_init(struct llava_context * ctx_llava, gpt_params * params, std::string prompt, int &n_past, bool is_first = false){
std::string user_prompt = prompt;
int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
if (!is_first) {
if (has_minicpmv_projector == 2) {
user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
}
else if (has_minicpmv_projector == 3) {
user_prompt = "<|im_start|>user\n" + prompt;
}
}
eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
if (has_minicpmv_projector == 2) {
eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
}
else if (has_minicpmv_projector == 3) {
eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
}
// generate the response
LOG_TEE("\n");
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
return ctx_sampling;
}
static const char * llama_loop(struct llava_context * ctx_llava,struct llama_sampling_context * ctx_sampling, int &n_past){
const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
return tmp;
}
int main(int argc, char ** argv) {
ggml_time_init();
gpt_params params;
if (!gpt_params_parse(argc, argv, params)) {
show_additional_info(argc, argv);
return 1;
}
#ifndef LOG_DISABLE_LOGS
log_set_target(log_filename_generator("llava", "log"));
LOG_TEE("Log start\n");
log_dump_cmdline(argc, argv);
llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS
if (params.mmproj.empty() || (params.image.empty())) {
gpt_params_print_usage(argc, argv, params);
show_additional_info(argc, argv);
return 1;
}
for (auto & image : params.image) { // only single image for now
int n_past = 0;
// auto ctx_llava = minicpmv_init(&params, image, n_past);
auto ctx_llava = xgenmm_init(&params, image, n_past); // generate vision tokens
std::cout << "Start llava generation: " << std::endl;
// // TODO: integrate base llm
// if (!params.prompt.empty()) {
// LOG_TEE("<user>%s\n", params.prompt.c_str());
// LOG_TEE("<assistant>");
// auto ctx_sampling = llama_init(ctx_llava, &params, params.prompt.c_str(), n_past, true);
// const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
// std::string response = "";
// bool have_tmp = false;
// for (int i = 0; i < max_tgt_len; i++) {
// auto tmp = llama_loop(ctx_llava, ctx_sampling, n_past);
// response += tmp;
// if (strcmp(tmp, "</s>") == 0){
// if(!have_tmp)continue;
// else break;
// }
// if (strstr(tmp, "###")) break; // Yi-VL behavior
// have_tmp = true;
// printf("%s", tmp);
// if (strstr(response.c_str(), "<user>")) break; // minicpm-v
// fflush(stdout);
// }
// llama_sampling_free(ctx_sampling);
// }else {
// while (true) {
// LOG_TEE("<user>");
// std::string prompt;
// std::getline(std::cin, prompt);
// LOG_TEE("<assistant>");
// auto ctx_sampling = llama_init(ctx_llava, &params, prompt, n_past, true);
// const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
// std::string response = "";
// for (int i = 0; i < max_tgt_len; i++) {
// auto tmp = llama_loop(ctx_llava, ctx_sampling, n_past);
// response += tmp;
// if (strcmp(tmp, "</s>") == 0) break;
// if (strstr(tmp, "###")) break; // Yi-VL behavior
// printf("%s", tmp);// mistral llava-1.6
// if (strstr(response.c_str(), "<user>")) break; // minicpm-v
// fflush(stdout);
// }
// llama_sampling_free(ctx_sampling);
// }
// }
// printf("\n");
// llama_print_timings(ctx_llava->ctx_llama);
ctx_llava->model = NULL;
llava_free(ctx_llava);
}
return 0;
}

@@ -285,7 +285,6 @@ static bool encode_image_with_clip(clip_ctx *ctx_clip, int n_threads, const clip
const int64_t t_img_enc_start_us = ggml_time_us();
const char *mm_patch_merge_type = clip_patch_merge_type(ctx_clip);
if (clip_is_minicpmv(ctx_clip))
{
std::vector<float *> image_embd_v;
@@ -342,6 +341,20 @@ static bool encode_image_with_clip(clip_ctx *ctx_clip, int n_threads, const clip
clip_add_load_image_size(ctx_clip, load_image_size);
LOG_TEE("%s: load_image_size %d %d\n", __func__, load_image_size->width, load_image_size->height);
}
else if (clip_is_xgenmm(ctx_clip))
{
// xgenmm embedding
*n_img_pos = clip_n_patches(ctx_clip);
bool encoded =
clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); // image_embd shape is 729 x
delete[] img_res_v.data;
if (!encoded)
{
LOG_TEE("Unable to encode image\n");
return false;
}
}
else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0)
{
// flat / default llava-1.5 type embedding
@@ -414,7 +427,7 @@ static bool encode_image_with_clip(clip_ctx *ctx_clip, int n_threads, const clip
// clip_image_convert_f32_to_u8(*image_feature, *tmp);
// clip_image_save_to_bmp(*tmp, "image_feature.bmp");
}
std::cout << __LINE__ << std::endl;
LOG_TEE("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);
const int64_t t_img_enc_end_us = ggml_time_us();
@@ -458,7 +471,7 @@ bool llava_image_embed_make_with_clip_img(clip_ctx *ctx_clip, int n_threads, con
return false;
}
int n_img_pos;
int n_img_pos; // 0
if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos))
{
LOG_TEE("%s: cannot encode image, aborting\n", __func__);
@@ -583,7 +596,6 @@ struct llava_image_embed *llava_image_embed_make_with_filename(struct clip_ctx *
LOG_TEE("%s: failed to load %s\n", __func__, image_path);
return NULL;
}
llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
free(image_bytes);

@@ -0,0 +1,5 @@
python examples/xgenmm/xgenmm_convert_image_encoder_to_gguf.py\
--surgery_dir /export/share/yutong/xgenmm/llamacpp_wd \
--version siglip_kosmos_phi3_4k_instruct \
--xgenmm_projector /export/home/Projects/xgenmm-quantization/target_models/MiniCPM-Llama3-V-2_5/minicpmv.projector \
--use_f32

@@ -70,6 +70,8 @@ if __name__ == "__main__":
tokenizer_path=cfg.lm_path,
model_family=cfg.model_family,
**additional_kwargs)
print(model)
exit(1)
model.load_state_dict(ckpt, strict=True)
end = time.time()
print(f"🟢 time used: [{end-start:.3f} s] | Done with instaiating the model.")