From 0acc56719a881048785af6b0be6f4a32335192a5 Mon Sep 17 00:00:00 2001
From: cpumaxx <163466046+cpumaxx@users.noreply.github.com>
Date: Fri, 5 Apr 2024 11:20:34 -0700
Subject: [PATCH] Update common.cpp

Update help text
---
 common/common.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/common.cpp b/common/common.cpp
index 20c432c70..f37dc8fb7 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1395,7 +1395,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  -ps N, --p-split N    speculative decoding split probability (default: %.1f)\n", (double)params.p_split);
     printf("  -cb, --cont-batching  enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
     printf("  --mmproj MMPROJ_FILE  path to a multimodal projector file for LLaVA. see examples/llava/README.md\n");
-    printf("  --image IMAGE_FILE    path to an image file. use with multimodal models\n");
+    printf("  --image IMAGE_FILE    path to an image file. use with multimodal models. Specify multiple times for batching\n");
     if (llama_supports_mlock()) {
         printf("  --mlock               force system to keep model in RAM rather than swapping or compressing\n");
     }