llama : switch to floating-point token positions

ggml-ci
Georgi Gerganov 2024-02-23 12:18:30 +02:00
parent 15499eb942
commit fc775366f1
14 changed files with 68 additions and 61 deletions


@@ -338,7 +338,7 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
         if (n_eval > n_batch) {
             n_eval = n_batch;
         }
-        llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
+        llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, (float) *n_past, 1, 0, };
         if (llama_decode(ctx_llama, batch)) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return false;
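
The hunk above is representative of the pattern applied across the 14 changed files: integer positions such as *n_past are now cast to float, consistent with the commit title's switch of token positions to a floating-point type. As a hedged sketch of why fractional positions can matter (not code from this commit; all names below are illustrative), the snippet applies a standard RoPE rotation at a scaled, non-integer position, which an int32_t position type would silently truncate:

// Hedged sketch: with linear RoPE position scaling, a token at integer
// index idx is rotated as if at position idx/scale. An integer position
// type would round 84.5 down to 84; a float position keeps the half-step.
#include <cmath>
#include <cstdio>

static void rope_rotate_pair(float & x0, float & x1, float pos, int pair_idx, int n_dims, float freq_base) {
    // per-pair frequency, as in standard RoPE
    const float theta = pos * std::pow(freq_base, -2.0f * pair_idx / n_dims);
    const float c = std::cos(theta);
    const float s = std::sin(theta);
    const float t = x0;
    x0 = t * c - x1 * s;
    x1 = t * s + x1 * c;
}

int main() {
    float q[2] = { 1.0f, 0.0f };
    const float scale = 4.0f;                // position interpolation factor
    const float pos   = 338.0f / scale;      // fractional position: 84.5
    rope_rotate_pair(q[0], q[1], pos, /*pair_idx=*/0, /*n_dims=*/128, /*freq_base=*/10000.0f);
    printf("rotated: %.4f %.4f\n", q[0], q[1]);
}

With scale = 4, token index 338 maps to position 84.5 rather than 84; that fractional offset survives only because the position is carried as a float end to end.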