Merge branch 'master' into gg/flash-attn

ggml-ci
Georgi Gerganov 2024-04-29 18:37:04 +03:00
commit ca0275ceb7
5 changed files with 44 additions and 35 deletions

@@ -893,7 +893,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
             invalid_param = true;
             return true;
         }
-        params.image = argv[i];
+        params.image.emplace_back(argv[i]);
         return true;
     }
     if (arg == "-i" || arg == "--interactive") {
@@ -1500,7 +1500,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  -cb, --cont-batching      enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
     printf("  -fa, --flash-attn         enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
     printf("  --mmproj MMPROJ_FILE      path to a multimodal projector file for LLaVA. see examples/llava/README.md\n");
-    printf("  --image IMAGE_FILE        path to an image file. use with multimodal models\n");
+    printf("  --image IMAGE_FILE        path to an image file. use with multimodal models. Specify multiple times for batching\n");
     if (llama_supports_mlock()) {
         printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
     }
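
Note: with this change, --image can be passed more than once to process several images in a single run. A hypothetical invocation of the llava example (binary, model, and file names here are placeholders, not taken from this commit):

    ./llava-cli -m model.gguf --mmproj mmproj.gguf --image first.jpg --image second.jpg -p "describe the images"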

@@ -168,8 +168,8 @@ struct gpt_params {
     std::string cache_type_v = "f16"; // KV cache data type for the V
 
     // multimodal models (see examples/llava)
-    std::string mmproj = ""; // path to multimodal projector
-    std::string image = "";  // path to an image file
+    std::string mmproj = "";        // path to multimodal projector
+    std::vector<std::string> image; // path to image file(s)
 };
 
 bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
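
For downstream code, the switch from std::string to std::vector<std::string> means consumers loop over params.image instead of reading a single path. A minimal self-contained sketch of the accumulate-and-iterate pattern (a standalone illustration only; the struct and parsing below are simplified stand-ins, not the project's actual code):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct params_sketch {
        std::vector<std::string> image; // path to image file(s), mirroring the new gpt_params field
    };

    int main(int argc, char ** argv) {
        params_sketch params;
        // every "--image FILE" occurrence appends to the vector, as in the parsing change above
        for (int i = 1; i < argc - 1; i++) {
            if (std::string(argv[i]) == "--image") {
                params.image.emplace_back(argv[++i]);
            }
        }
        // consumers now iterate over all supplied images
        for (const std::string & path : params.image) {
            printf("would process image: %s\n", path.c_str());
        }
        return 0;
    }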