fix finetune rope call to use correct default attn_factor of 1.0f
parent ff0a3645d8
commit 93e65d32a4
1 changed file with 1 addition and 1 deletion
@@ -643,7 +643,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         return ggml_rope_custom(ctx,
             t, KQ_pos, n_rot, rope_mode, n_ctx, 0,
-            rope_freq_base, rope_freq_scale, 0.0f, 0.0f, 0.0f, 0.0f
+            rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
         );
     };
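The changed argument is the attn_factor parameter of ggml_rope_custom. Assuming the YaRN-style scaling in ggml's RoPE kernel, attn_factor multiplies the cos/sin magnitude of the rotation, so passing 0.0f would collapse the rotated activations to zero, while 1.0f is the neutral default used elsewhere in llama.cpp. A minimal sketch of that scaling (illustrative names, not the actual ggml kernel):

/* Sketch of how attn_factor enters a RoPE rotation: it scales the cos/sin
 * magnitude, so 0.0f zeroes every rotated value and 1.0f leaves the
 * standard rotation unchanged. */
#include <math.h>
#include <stdio.h>

static void rope_pair(float x0, float x1, float theta, float attn_factor,
                      float * y0, float * y1) {
    const float c = cosf(theta) * attn_factor;  /* magnitude-scaled rotation */
    const float s = sinf(theta) * attn_factor;
    *y0 = x0 * c - x1 * s;
    *y1 = x0 * s + x1 * c;
}

int main(void) {
    float y0, y1;
    rope_pair(1.0f, 2.0f, 0.5f, 0.0f, &y0, &y1);
    printf("attn_factor = 0.0f -> (%f, %f)\n", y0, y1);  /* (0, 0): activations lost */
    rope_pair(1.0f, 2.0f, 0.5f, 1.0f, &y0, &y1);
    printf("attn_factor = 1.0f -> (%f, %f)\n", y0, y1);  /* plain rotation */
    return 0;
}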