From 587bde8e0c1dc702b91db61cfa11f43bbaa4aaf7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?=
Date: Wed, 11 Oct 2023 06:40:52 +0300
Subject: [PATCH] Maybe seed is unlucky?

---
 examples/llava/llava.cpp | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp
index a9c11d200..d2716e046 100644
--- a/examples/llava/llava.cpp
+++ b/examples/llava/llava.cpp
@@ -80,10 +80,9 @@ int main(int argc, char ** argv) {
     }

     llama_context_params ctx_params = llama_context_default_params();
-    ctx_params.seed = 1234;
-    ctx_params.n_ctx = 2048;
-    ctx_params.n_threads = params.n_threads;
-    ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+    ctx_params.n_ctx           = 2048;
+    ctx_params.n_threads       = params.n_threads;
+    ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
    if (ctx_llama == NULL) {
@@ -92,14 +91,14 @@ int main(int argc, char ** argv) {
     }

     // process the prompt
-    // llava chat format is "user: <image>\n<prompt>\nassistant:"
+    // llava chat format is "USER: <image>\n<prompt>\nASSISTANT:"

     int n_past = 0;
     int max_tgt_len = 256;

-    eval_string(ctx_llama, "user: ", params.n_batch, &n_past);
+    eval_string(ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params.n_batch, &n_past);
     eval_image_embd(ctx_llama, image_embd, n_img_pos, params.n_batch, &n_past);
     eval_string(ctx_llama, params.prompt.c_str(), params.n_batch, &n_past);
-    eval_string(ctx_llama, "\nassistant:", params.n_batch, &n_past);
+    eval_string(ctx_llama, "\nASSISTANT:", params.n_batch, &n_past);

     // generate the response