From 9f03ac7086e2dcfe76766991e3edaa27d89b3b9d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?=
Date: Mon, 6 Nov 2023 13:40:20 +0300
Subject: [PATCH] Fix according to review comments

---
 Makefile                     |   4 +-
 examples/llava/clip.h        |  29 +++--
 examples/llava/llava-cli.cpp | 185 ++++++++++++++++++++++++++++---
 examples/llava/llava-utils.h | 207 -----------------------------------
 examples/llava/llava.cpp     |  23 +---
 examples/llava/llava.h       |  26 ++++-
 6 files changed, 218 insertions(+), 256 deletions(-)
 delete mode 100644 examples/llava/llava-utils.h

diff --git a/Makefile b/Makefile
index 68710ad43..f2d4fd031 100644
--- a/Makefile
+++ b/Makefile
@@ -617,10 +617,10 @@ convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggm
 llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/llava-utils.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ $(LDFLAGS) -Wno-cast-qual
 
-llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp examples/llava/llava-utils.h ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -Wno-cast-qual
 
 baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)

diff --git a/examples/llava/clip.h b/examples/llava/clip.h
index 955b23928..f11df85de 100644
--- a/examples/llava/clip.h
+++ b/examples/llava/clip.h
@@ -1,7 +1,22 @@
 #ifndef CLIP_H
 #define CLIP_H
 
-#include "llama.h"
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef LLAMA_SHARED
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#        ifdef LLAMA_BUILD
+#            define CLIP_API __declspec(dllexport)
+#        else
+#            define CLIP_API __declspec(dllimport)
+#        endif
+#    else
+#        define CLIP_API __attribute__ ((visibility ("default")))
+#    endif
+#else
+#    define CLIP_API
+#endif
 
 struct clip_ctx;
 
@@ -21,9 +36,9 @@ struct clip_vision_hparams {
 };
 
 /** load mmproj model */
-LLAMA_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity);
+CLIP_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity);
 /** free mmproj model */
-LLAMA_API void clip_free(struct clip_ctx * ctx);
+CLIP_API void clip_free(struct clip_ctx * ctx);
 
 size_t clip_embd_nbytes(const struct clip_ctx * ctx);
 int clip_n_patches(const struct clip_ctx * ctx);
@@ -58,11 +73,11 @@ struct clip_image_f32_batch {
 struct clip_image_u8 * make_clip_image_u8();
 struct clip_image_f32 * make_clip_image_f32();
-LLAMA_API void clip_image_u8_free(clip_image_u8 * img);
-LLAMA_API void clip_image_f32_free(clip_image_f32 * img);
-LLAMA_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
+CLIP_API void clip_image_u8_free(clip_image_u8 * img);
+CLIP_API void clip_image_f32_free(clip_image_f32 * img);
+CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
 /** interpret bytes as an image file with length bytes_length, and use the result to populate img */
-LLAMA_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
+CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
 
 bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square);
 bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec);
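For reference, the clip.h surface exported above is already enough to load a projector model and an image on its own. Below is a minimal sketch built only from the declarations shown in this header; the file paths are placeholders and error handling is intentionally thin, so this is an illustration rather than code from the patch.

    // sketch: load a mmproj model and an image via the clip.h API above (paths are hypothetical)
    #include "clip.h"
    #include <cstdio>

    int main() {
        struct clip_ctx * ctx = clip_model_load("mmproj-model-f16.gguf", /*verbosity=*/1);
        if (!ctx) {
            fprintf(stderr, "failed to load mmproj model\n");
            return 1;
        }
        clip_image_u8 * img = make_clip_image_u8();
        if (!clip_image_load_from_file("image.jpg", img)) {
            fprintf(stderr, "failed to load image\n");
        }
        clip_image_u8_free(img);
        clip_free(ctx);
        return 0;
    }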
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index dd2756969..19374c67f 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -1,11 +1,171 @@
-#include <cstdio>
-#include <cstdlib>
-
 #include "ggml.h"
 #include "common.h"
 #include "clip.h"
 #include "llava.h"
-#include "llava-utils.h"
+#include "llama.h"
+
+#include "base64.hpp"
+
+#include <cstdio>
+#include <cstdlib>
+#include <vector>
+
+static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
+    int N = (int) tokens.size();
+    for (int i = 0; i < N; i += n_batch) {
+        int n_eval = (int) tokens.size() - i;
+        if (n_eval > n_batch) {
+            n_eval = n_batch;
+        }
+        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
+            fprintf(stderr, "%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
+            return false;
+        }
+        *n_past += n_eval;
+    }
+    return true;
+}
+
+static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
+    std::vector<llama_token> tokens;
+    tokens.push_back(id);
+    return eval_tokens(ctx_llama, tokens, 1, n_past);
+}
+
+static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
+    std::string str2 = str;
+    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos);
+    eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
+    return true;
+}
+
+// TODO: use common/sampling.h
+static llama_token sample_id(llama_context * ctx_llama, gpt_params & params) {
+    auto & sparams = params.sparams;
+
+    // out of user input, sample next token
+    const float   temp      = sparams.temp;
+    const int32_t top_k     = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k;
+    const float   top_p     = sparams.top_p;
+    const float   tfs_z     = sparams.tfs_z;
+    const float   typical_p = sparams.typical_p;
+    // const int32_t repeat_last_n   = sparams.repeat_last_n < 0 ? n_ctx : sparams.repeat_last_n;
+    // const float   repeat_penalty  = sparams.repeat_penalty;
+    // const float   alpha_presence  = sparams.presence_penalty;
+    // const float   alpha_frequency = sparams.frequency_penalty;
+    const int     mirostat     = sparams.mirostat;
+    const float   mirostat_tau = sparams.mirostat_tau;
+    const float   mirostat_eta = sparams.mirostat_eta;
+    // const bool    penalize_nl = sparams.penalize_nl;
+
+    llama_token id = 0;
+    {
+        auto logits  = llama_get_logits(ctx_llama);
+        auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama));
+
+        // Apply params.logit_bias map
+        for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) {
+            logits[it->first] += it->second;
+        }
+
+        std::vector<llama_token_data> candidates;
+        candidates.reserve(n_vocab);
+        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
+            candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
+        }
+
+        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
+
+        if (temp <= 0) {
+            // Greedy sampling
+            id = llama_sample_token_greedy(ctx_llama, &candidates_p);
+        } else {
+            if (mirostat == 1) {
+                static float mirostat_mu = 2.0f * mirostat_tau;
+                const int mirostat_m = 100;
+                llama_sample_temp(ctx_llama, &candidates_p, temp);
+                id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
+            } else if (mirostat == 2) {
+                static float mirostat_mu = 2.0f * mirostat_tau;
+                llama_sample_temp(ctx_llama, &candidates_p, temp);
+                id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
+            } else {
+                // Temperature sampling
+                llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1);
+                llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1);
+                llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1);
+                llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1);
+                llama_sample_temp(ctx_llama, &candidates_p, temp);
+                id = llama_sample_token(ctx_llama, &candidates_p);
+            }
+        }
+    }
+
+    return id;
+}
+
+static const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) {
+    int id = sample_id(ctx_llama, params);
+    static std::string ret;
+    if (id == llama_token_eos(llama_get_model(ctx_llama))) {
+        ret = "</s>";
+    } else {
+        ret = llama_token_to_piece(ctx_llama, id);
+    }
+    eval_id(ctx_llama, id, n_past);
+    return ret.c_str();
+}
+
+static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
+static const char* IMG_BASE64_TAG_END = "\">";
+
+static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
+    begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
+    end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
+}
+
+static bool prompt_contains_image(const std::string& prompt) {
+    size_t begin, end;
+    find_image_tag_in_prompt(prompt, begin, end);
+    return (begin != std::string::npos);
+}
+
+// replaces the base64 image tag in the prompt with `replacement`
+static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
+    size_t img_base64_str_start, img_base64_str_end;
+    find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
+    if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
+        fprintf(stderr, "%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
+        return NULL;
+    }
+
+    auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
+    auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
+    auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count );
+
+    auto required_bytes = base64::required_encode_size(base64_str.size());
+    auto img_bytes = std::vector<unsigned char>(required_bytes);
+    base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());
+
+    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
+    if (!embed) {
+        fprintf(stderr, "%s: could not load image from base64 string.\n", __func__);
+        return NULL;
+    }
+
+    return embed;
+}
+
+static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
+    size_t begin, end;
+    find_image_tag_in_prompt(prompt, begin, end);
+    if (begin == std::string::npos || end == std::string::npos) {
+        return prompt;
+    }
+    auto pre = prompt.substr(0, begin);
+    auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
+    return pre + replacement + post;
+}
 
 struct llava_context {
     struct clip_ctx * ctx_clip = NULL;
@@ -13,8 +173,6 @@ struct llava_context {
     struct llama_model * model = NULL;
 };
 
-
-
 static void show_additional_info(int /*argc*/, char ** argv) {
     printf("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
     printf("  note: a lower temperature value like 0.1 is recommended for better quality.\n");
@@ -46,20 +204,15 @@ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_para
     return embed;
 }
 
-static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const char * prompt) {
+static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const std::string & prompt) {
     int n_past = 0;
 
     const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
 
-    // llava chat format is "USER: <image_embeddings>\n<textual_prompt>\nASSISTANT:"
-    printf("evaluating system prompt\n");
+    // llava chat format is "<system_prompt>\nUSER:<image_embeddings>\n<textual_prompt>\nASSISTANT:"
     eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:", params->n_batch, &n_past, true);
-    printf("evaluating image embed\n");
     llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
-    printf("evaluating prompt\n");
-    eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past, false);
-    eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past, false);
-    printf("awaiting response\n");
+    eval_string(ctx_llava->ctx_llama, (prompt + "\nASSISTANT:").c_str(), params->n_batch, &n_past, false);
 
     // generate the response
 
@@ -117,7 +270,6 @@ static struct llava_context * llava_init(gpt_params * params) {
     return ctx_llava;
 }
 
-
 static void llava_free(struct llava_context * ctx_llava) {
     if (ctx_llava->ctx_clip) {
         clip_free(ctx_llava->ctx_clip);
@@ -129,7 +281,6 @@ static void llava_free(struct llava_context * ctx_llava) {
     llama_backend_free();
 }
 
-
 int main(int argc, char ** argv) {
     ggml_time_init();
 
@@ -154,7 +305,7 @@ int main(int argc, char ** argv) {
     auto image_embed = load_image(ctx_llava, &params);
 
     // process the prompt
-    process_prompt(ctx_llava, image_embed, &params, params.prompt.c_str());
+    process_prompt(ctx_llava, image_embed, &params, params.prompt);
 
     llama_print_timings(ctx_llava->ctx_llama);
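The helpers above also accept the image inline in the prompt: prompt_contains_image() and llava_image_embed_make_with_prompt_base64() look for a base64 data tag delimited by IMG_BASE64_TAG_BEGIN/IMG_BASE64_TAG_END. A small sketch of how a caller could assemble such a prompt follows; the exact tag literals mirror the reconstruction above and should be treated as an assumption, and the payload is a placeholder rather than real image data.

    // sketch: build a prompt carrying the image as a base64 tag, matching the
    // IMG_BASE64_TAG_BEGIN/IMG_BASE64_TAG_END delimiters assumed above
    #include <string>

    static std::string make_base64_image_prompt(const std::string & img_base64, const std::string & question) {
        return "<img src=\"data:image/jpeg;base64," + img_base64 + "\">\n" + question;
    }

    // usage (placeholder payload):
    // std::string prompt = make_base64_image_prompt("...base64 bytes...", "describe the image in detail.");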
diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h
deleted file mode 100644
index 3303f7a87..000000000
--- a/examples/llava/llava-utils.h
+++ /dev/null
@@ -1,207 +0,0 @@
-#ifndef LLA_UTILS_H
-#define LLAVA_UTILS_H
-
-#pragma once
-
-// this one and clip lib will be eventually merged to a single lib, let's keep it this way for now
-
-#include "common.h"
-#include "llama.h"
-#include "llava.h"
-
-#include "base64.hpp"
-
-#include <cstdio>
-#include <cstdlib>
-#include <vector>
-
-// todo: remove this
-inline bool eval_image_embd(llama_context * ctx_llama, float * embd, int N, int n_batch, int * n_past) {
-    int n_embd = llama_n_embd(llama_get_model(ctx_llama));
-
-    for (int i = 0; i < N; i += n_batch) {
-        int n_eval = N - i;
-        if (n_eval > n_batch) {
-            n_eval = n_batch;
-        }
-        llama_batch batch = {int32_t(n_eval), nullptr, (embd+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
-        if (llama_decode(ctx_llama, batch)) {
-            fprintf(stderr, "%s : failed to eval\n", __func__);
-            return false;
-        }
-        *n_past += n_eval;
-    }
-    return true;
-}
-
-inline bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
-    int N = (int) tokens.size();
-    for (int i = 0; i < N; i += n_batch) {
-        int n_eval = (int) tokens.size() - i;
-        if (n_eval > n_batch) {
-            n_eval = n_batch;
-        }
-        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
-            fprintf(stderr, "%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
-            return false;
-        }
-        *n_past += n_eval;
-    }
-    return true;
-}
-
-inline bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
-    std::vector<llama_token> tokens;
-    tokens.push_back(id);
-    return eval_tokens(ctx_llama, tokens, 1, n_past);
-}
-
-inline bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
-    std::string str2 = str;
-    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos);
-    eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
-    return true;
-}
-
-// TODO: use common/sampling.h
-inline llama_token sample_id(llama_context * ctx_llama, gpt_params & params) {
-    auto & sparams = params.sparams;
-
-    // out of user input, sample next token
-    const float   temp      = sparams.temp;
-    const int32_t top_k     = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k;
-    const float   top_p     = sparams.top_p;
-    const float   tfs_z     = sparams.tfs_z;
-    const float   typical_p = sparams.typical_p;
-    // const int32_t repeat_last_n   = sparams.repeat_last_n < 0 ? n_ctx : sparams.repeat_last_n;
-    // const float   repeat_penalty  = sparams.repeat_penalty;
-    // const float   alpha_presence  = sparams.presence_penalty;
-    // const float   alpha_frequency = sparams.frequency_penalty;
-    const int     mirostat     = sparams.mirostat;
-    const float   mirostat_tau = sparams.mirostat_tau;
-    const float   mirostat_eta = sparams.mirostat_eta;
-    // const bool    penalize_nl = sparams.penalize_nl;
-
-    llama_token id = 0;
-    {
-        auto logits  = llama_get_logits(ctx_llama);
-        auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama));
-
-        // Apply params.logit_bias map
-        for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) {
-            logits[it->first] += it->second;
-        }
-
-        std::vector<llama_token_data> candidates;
-        candidates.reserve(n_vocab);
-        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
-            candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
-        }
-
-        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
-
-        // TODO: Apply penalties
-        // float nl_logit = logits[llama_token_nl(ctx)];
-        // auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
-        // llama_sample_repetition_penalty(ctx, &candidates_p,
-        //      last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-        //      last_n_repeat, repeat_penalty);
-        // llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
-        //      last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-        //      last_n_repeat, alpha_frequency, alpha_presence);
-        // if (!penalize_nl) {
-        //     logits[llama_token_nl(ctx)] = nl_logit;
-        // }
-
-        if (temp <= 0) {
-            // Greedy sampling
-            id = llama_sample_token_greedy(ctx_llama, &candidates_p);
-        } else {
-            if (mirostat == 1) {
-                static float mirostat_mu = 2.0f * mirostat_tau;
-                const int mirostat_m = 100;
-                llama_sample_temp(ctx_llama, &candidates_p, temp);
-                id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
-            } else if (mirostat == 2) {
-                static float mirostat_mu = 2.0f * mirostat_tau;
-                llama_sample_temp(ctx_llama, &candidates_p, temp);
-                id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
-            } else {
-                // Temperature sampling
-                llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1);
-                llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1);
-                llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1);
-                llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1);
-                llama_sample_temp(ctx_llama, &candidates_p, temp);
-                id = llama_sample_token(ctx_llama, &candidates_p);
-            }
-        }
-    }
-
-    return id;
-}
-
-inline const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) {
-    int id = sample_id(ctx_llama, params);
-    static std::string ret;
-    if (id == llama_token_eos(llama_get_model(ctx_llama))) {
-        ret = "</s>";
-    } else {
-        ret = llama_token_to_piece(ctx_llama, id);
-    }
-    eval_id(ctx_llama, id, n_past);
-    return ret.c_str();
-}
-
-static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
-static const char* IMG_BASE64_TAG_END = "\">";
-
-inline void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
-    begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
-    end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
-}
-
-inline bool prompt_contains_image(const std::string& prompt) {
-    size_t begin, end;
-    find_image_tag_in_prompt(prompt, begin, end);
-    return (begin != std::string::npos);
-}
-
-// replaces the base64 image tag in the prompt with `replacement`
-inline llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
-    size_t img_base64_str_start, img_base64_str_end;
-    find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
-    if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
-        fprintf(stderr, "%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
-        return NULL;
-    }
-
-    auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
-    auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
-    auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count );
-
-    auto required_bytes = base64::required_encode_size(base64_str.size());
-    auto img_bytes = std::vector<unsigned char>(required_bytes);
-    base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());
-
-    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
-    if (!embed) {
-        fprintf(stderr, "%s: could not load image from base64 string.\n", __func__);
-        return NULL;
-    }
-
-    return embed;
-}
-
-inline std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
-    size_t begin, end;
-    find_image_tag_in_prompt(prompt, begin, end);
-    if (begin == std::string::npos || end == std::string::npos) {
-        return prompt;
-    }
-    auto pre = prompt.substr(0, begin);
-    auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
-    return pre + replacement + post;
-}
-
-#endif
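For reference, the temp <= 0 branch of sample_id() above reduces to a plain argmax over the final logits row. A standalone sketch of that reduction, kept separate from the llama.cpp sampling helpers; logits and n_vocab stand in for what llama_get_logits()/llama_n_vocab() return.

    // sketch: greedy token selection is an argmax over the logits
    static int argmax_token(const float * logits, int n_vocab) {
        int best = 0;
        for (int i = 1; i < n_vocab; ++i) {
            if (logits[i] > logits[best]) {
                best = i;
            }
        }
        return best; // same choice llama_sample_token_greedy makes, ignoring tie-breaking
    }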
diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp
index d2ce65df6..d10bcf2d2 100644
--- a/examples/llava/llava.cpp
+++ b/examples/llava/llava.cpp
@@ -1,5 +1,4 @@
 #include "clip.h"
-#include "llava-utils.h"
 #include "common.h"
 #include "llama.h"
 #include "llava.h"
@@ -28,12 +27,11 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli
         return false;
     }
 
+
     const int64_t t_img_enc_end_us = ggml_time_us();
     float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0;
 
-    {
-        printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos);
-    }
+    printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos);
 
     return true;
 }
@@ -50,7 +48,6 @@ bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx *
 }
 
 static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) {
-
     float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip));
     if (!image_embd) {
         fprintf(stderr, "Unable to allocate memory for image embeddings\n");
@@ -70,8 +67,6 @@ static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_thre
     return true;
 }
 
-
-
 bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
     int n_embd = llama_n_embd(llama_get_model(ctx_llama));
 
@@ -90,9 +85,7 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
     return true;
 }
 
-
-LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length)
-{
+LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
     clip_image_u8 * img = make_clip_image_u8();
     if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
         clip_image_u8_free(img);
@@ -116,8 +109,7 @@ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct cl
     return result;
 }
 
-static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut)
-{
+static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) {
     auto file = fopen(path, "rb");
     if (file == NULL) {
         fprintf(stderr, "%s: can't read file %s\n", __func__, path);
@@ -141,11 +133,9 @@ static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long
     *bytesOut = buffer;
     *sizeOut = fileSize;
     return true;
-
 }
 
-LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path)
-{
+LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
    unsigned char* image_bytes;
    long image_bytes_length;
    auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);
@@ -160,8 +150,7 @@ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct
     return embed;
 }
 
-
-LLAMA_API void llava_image_embed_free(struct llava_image_embed * embed) {
+LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed) {
     free(embed->embed);
     free(embed);
 }
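Taken together, the llava.cpp functions above define the consumer-side lifecycle: check that the projector and the language model agree on the embedding size, build an embed, write it into the context, then free it. A hedged sketch of that sequence, assuming ctx_clip and ctx_llama were created elsewhere (for example by llava_init() in llava-cli.cpp) and using a placeholder image path:

    // sketch: typical llava_image_embed lifecycle using the API implemented above
    #include "clip.h"
    #include "llava.h"
    #include "llama.h"

    static bool describe_image(struct clip_ctx * ctx_clip, struct llama_context * ctx_llama, int n_threads, int n_batch) {
        if (!llava_validate_embed_size(ctx_llama, ctx_clip)) {
            return false; // projector and LLM embedding sizes do not match
        }
        struct llava_image_embed * embed = llava_image_embed_make_with_filename(ctx_clip, n_threads, "image.jpg");
        if (!embed) {
            return false;
        }
        int n_past = 0;
        const bool ok = llava_eval_image_embed(ctx_llama, embed, n_batch, &n_past);
        llava_image_embed_free(embed);
        return ok; // on success, n_past points just past the image in the context
    }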
diff --git a/examples/llava/llava.h b/examples/llava/llava.h
index 637fe4c09..e08ce7883 100644
--- a/examples/llava/llava.h
+++ b/examples/llava/llava.h
@@ -2,7 +2,21 @@
 #define LLAVA_H
 
 #include "ggml.h"
-#include "common.h"
+
+
+#ifdef LLAMA_SHARED
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#        ifdef LLAMA_BUILD
+#            define LLAVA_API __declspec(dllexport)
+#        else
+#            define LLAVA_API __declspec(dllimport)
+#        endif
+#    else
+#        define LLAVA_API __attribute__ ((visibility ("default")))
+#    endif
+#else
+#    define LLAVA_API
+#endif
 
 struct clip_ctx;
 
@@ -16,17 +30,17 @@ struct llava_image_embed {
 };
 
 /** sanity check for clip <-> llava embed size match */
-LLAMA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip);
+LLAVA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip);
 
 /** build an image embed from image file bytes */
-LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
+LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
 /** build an image embed from a path to an image filename */
-LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
-LLAMA_API void llava_image_embed_free(struct llava_image_embed * embed);
+LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
+LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);
 /** free an embedding made with llava_image_embed_make_* */
 
 /** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
-LLAMA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);
+LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);
 
 #ifdef __cplusplus