fix finetune rope call to use correct default attn_factor of 1.0f

commit 93e65d32a4 (parent ff0a3645d8)
Author: xaedes
Date:   2023-11-06 22:42:39 +01:00

@@ -643,7 +643,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         return ggml_rope_custom(ctx,
             t, KQ_pos, n_rot, rope_mode, n_ctx, 0,
-            rope_freq_base, rope_freq_scale, 0.0f, 0.0f, 0.0f, 0.0f
+            rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
         );
     };