Added a note on GPU support to the qwen2vl readme
Undid the changes on qwen2vl-cli
parent 8777473a43
commit 185e1b107e
2 changed files with 4 additions and 12 deletions
qwen2vl readme:

@@ -57,6 +57,7 @@ Now the model is ready to use in the `model_path` directory. You can quantize th
 
 *Have fun with the models ! :)*
 
-## Limitations
+## Current limitations
 
-* Currently, only support the image to be in the very beginning of the input prompt to the LLM.
+* This only supports the image to be in the very beginning of the input prompt to the LLM.
+* The vision model (clip.cpp)'s GPU backend support, which Qwen2VL uses, is disabled.
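On the first limitation: the llava-style CLIs evaluate the image as a block of embeddings before any prompt tokens, so the picture can only sit at the very front of the context. A minimal sketch of that flow, assuming the helpers from the llava example sources (llava_eval_image_embed is the public llava.h API; eval_string mirrors the example's static helper, and its exact signature is an assumption here):

#include "llava.h"   // llava_image_embed, llava_eval_image_embed

// Mirrors the static helper in the llava example sources; the exact
// signature is an assumption for this sketch.
static bool eval_string(llama_context * ctx_llama, const char * str,
                        int n_batch, int * n_past, bool add_bos);

static void eval_image_then_prompt(llama_context * ctx_llama,
                                   const llava_image_embed * image_embed,
                                   const char * user_prompt,
                                   int n_batch) {
    int n_past = 0;
    // The image embeddings are pushed into the context first...
    llava_eval_image_embed(ctx_llama, image_embed, n_batch, &n_past);
    // ...and only then the text, which is why an image anywhere but
    // the very beginning of the prompt is not supported.
    eval_string(ctx_llama, user_prompt, n_batch, &n_past, /*add_bos=*/false);
}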
qwen2vl-cli:

@@ -524,7 +524,7 @@ int main(int argc, char ** argv) {
 
     common_init();
 
-    if (params.mmproj.empty()) {
+    if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
         print_usage(argc, argv);
         return 1;
     }
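The restored condition still accepts a run with no --image as long as the prompt itself embeds a picture. In the llava example code, prompt_contains_image boils down to a substring search for an inline base64 image tag; a self-contained sketch of that idea (the marker string is an assumption for illustration, not necessarily the verbatim constant from the repository):

#include <string>

// Sketch of a prompt_contains_image-style check: look for an inline
// base64 image tag in the prompt text. The marker constant below is
// an illustrative assumption.
static bool prompt_contains_image(const std::string & prompt) {
    static const std::string IMG_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
    return prompt.find(IMG_TAG_BEGIN) != std::string::npos;
}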
@@ -547,15 +547,6 @@ int main(int argc, char ** argv) {
         llava_image_embed_free(image_embed);
         ctx_llava->model = NULL;
         llava_free(ctx_llava);
-    } else if (params.image.empty()) {
-        auto ctx_llava = llava_init_context(&params, model);
-
-        // process the prompt
-        process_prompt(ctx_llava, nullptr, &params, params.prompt);
-
-        llama_perf_context_print(ctx_llava->ctx_llama);
-        ctx_llava->model = NULL;
-        llava_free(ctx_llava);
 #ifndef NDEBUG
     } else if (params.image[0].empty()) {
         auto ctx_llava = llava_init_context(&params, model);
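Net effect of the undo: the text-only fallback (the params.image.empty() branch, which ran process_prompt without an image embed) is gone, and only the debug-only params.image[0].empty() path remains. A condensed, self-contained model of the resulting argument check, where the struct is a stand-in for the relevant fields of the real common_params from common/common.h:

#include <string>
#include <vector>

// Stand-in for the fields of common_params that matter here; the real
// struct lives in common/common.h.
struct cli_params {
    std::string mmproj;                // vision projector path (--mmproj)
    std::vector<std::string> image;    // image paths (--image)
    std::string prompt;
};

static bool prompt_contains_image(const std::string & prompt) {
    // same substring check as sketched above (marker is an assumption)
    return prompt.find("<img src=\"data:image/jpeg;base64,") != std::string::npos;
}

// With the text-only branch deleted, a run with no projector, or with
// no image either on the command line or inline in the prompt, is
// rejected up front rather than falling through to a text-only path.
static bool args_are_usable(const cli_params & params) {
    return !params.mmproj.empty() &&
           (!params.image.empty() || prompt_contains_image(params.prompt));
}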