llama : switch to floating-point token positions

ggml-ci
Author:  Georgi Gerganov
Date:    2024-02-23 12:18:30 +02:00
Commit:  fc775366f1 (parent 15499eb942)
14 changed files with 68 additions and 61 deletions


@@ -1015,9 +1015,9 @@ static struct ggml_tensor * forward_lora(
     struct ggml_tensor * kc = kv_self.k;
     struct ggml_tensor * vc = kv_self.v;
-    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, N);
     {
-        int * data = (int *) KQ_pos->data;
+        float * data = (float *) KQ_pos->data;
         for (int i = 0; i < N; ++i) {
             data[i] = n_past + i;
         }
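Every KQ_pos change in this commit follows the same two-step pattern: allocate the positions tensor as GGML_TYPE_F32 instead of GGML_TYPE_I32, then fill it through a float pointer. A minimal standalone sketch of the new convention (make_pos_tensor is a hypothetical helper; ggml_new_tensor_1d and GGML_TYPE_F32 are the actual ggml API, and the context is assumed to allocate tensor data eagerly):

#include "ggml.h"

// Hypothetical helper mirroring the pattern above: positions keep their
// integral values (n_past, n_past + 1, ...) but are stored as floats.
static struct ggml_tensor * make_pos_tensor(struct ggml_context * ctx, int n_past, int N) {
    struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, N);
    float * data = (float *) pos->data;
    for (int i = 0; i < N; ++i) {
        data[i] = (float) (n_past + i); // int -> float conversion is exact for these values
    }
    return pos;
}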


@@ -554,7 +554,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     };
     // KQ_pos - contains the positions
-    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
+    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, N);
     ggml_set_input(KQ_pos);
     // rope has so many parameters that we make a custom function for it
@@ -743,7 +743,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     // set KQ_pos
     {
-        int * data = (int *) KQ_pos->data;
+        float * data = (float *) KQ_pos->data;
         for (int i = 0; i < N; ++i) {
             data[i] = n_past + i;
         }


@@ -338,7 +338,7 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
         if (n_eval > n_batch) {
             n_eval = n_batch;
         }
-        llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
+        llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, (float) *n_past, 1, 0, };
         if (llama_decode(ctx_llama, batch)) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return false;
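The batch call sites need only a cast: the trailing scalar position field of llama_batch now holds a float instead of a 32-bit integer. A hedged restatement of the initializer above with each positional field labeled (the field names follow the llama_batch layout of this period and should be treated as an assumption, not a quote from llama.h):

llama_batch batch = {
    /*n_tokens   =*/ int32_t(n_eval),
    /*token      =*/ nullptr,
    /*embd       =*/ image_embed->embed + i*n_embd, // image embeddings instead of tokens
    /*pos        =*/ nullptr,                       // no per-token positions ...
    /*n_seq_id   =*/ nullptr,
    /*seq_id     =*/ nullptr,
    /*logits     =*/ nullptr,
    /*all_pos_0  =*/ (float) *n_past,               // ... so the first position is given here,
    /*all_pos_1  =*/ 1,                             // with stride 1 between consecutive tokens
    /*all_seq_id =*/ 0,
};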


@@ -1281,7 +1281,7 @@ struct llama_server_context
             }
             const int n_embd = llama_n_embd(model);
-            llama_batch batch_img = { n_eval, nullptr, (img.image_embedding + i * n_embd), nullptr, nullptr, nullptr, nullptr, slot.n_past, 1, 0, };
+            llama_batch batch_img = { n_eval, nullptr, (img.image_embedding + i * n_embd), nullptr, nullptr, nullptr, nullptr, (float) slot.n_past, 1, 0, };
             if (llama_decode(ctx, batch_img))
             {
                 LOG_TEE("%s : failed to eval image\n", __func__);


@@ -291,7 +291,7 @@ static struct ggml_tensor * llama_build_train_graphs(
     };
     // KQ_pos - contains the positions
-    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
+    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, N);
     ggml_set_input(KQ_pos);
     // rope has so many parameters that we make a custom function for it
@@ -419,7 +419,7 @@ static struct ggml_tensor * llama_build_train_graphs(
     ggml_gallocr_alloc_graph(alloc, gb);
     if (!measure_only) {
-        int * data = (int *) KQ_pos->data;
+        float * data = (float *) KQ_pos->data;
         for (int i = 0; i < N; ++i) {
             data[i] = n_past + i;
         }
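The motivation for float positions shows up in RoPE: the position enters the kernel only as a real-valued angle multiplier, so integer storage was never a mathematical requirement, and floats allow fractional positions (e.g. for position-interpolation style context scaling). A self-contained sketch of the rotation for one dimension pair, following the standard RoPE formulation rather than any specific ggml code path:

#include <math.h>

// Rotate the pair (x0, x1) at dimension pair index i by the RoPE angle
// theta = pos * freq_base^(-2*i/n_dims). pos is a float: integral in this
// commit, but nothing in the math requires it to stay that way.
static void rope_pair(float * x0, float * x1, float pos, int i, int n_dims, float freq_base) {
    const float theta = pos * powf(freq_base, -2.0f * (float) i / (float) n_dims);
    const float c = cosf(theta);
    const float s = sinf(theta);
    const float a = *x0;
    const float b = *x1;
    *x0 = a*c - b*s;
    *x1 = a*s + b*c;
}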