From 93e65d32a4f6dff77661b4f6b46b0a1e414204e5 Mon Sep 17 00:00:00 2001
From: xaedes
Date: Mon, 6 Nov 2023 22:42:39 +0100
Subject: [PATCH] fix finetune rope call to use correct default attn_factor of 1.0f

---
 examples/finetune/finetune.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 649a3b7c1..fa7dbe496 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -643,7 +643,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
 
         return ggml_rope_custom(ctx,
             t, KQ_pos, n_rot, rope_mode, n_ctx, 0,
-            rope_freq_base, rope_freq_scale, 0.0f, 0.0f, 0.0f, 0.0f
+            rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
         );
     };
 
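
For reference, the four trailing floats map onto ggml_rope_custom's
(ext_factor, attn_factor, beta_fast, beta_slow) parameters. In ggml's
YaRN rope path, attn_factor multiplies the rotation magnitude, so an
attn_factor of 0.0f scales the rotated values to zero; 1.0f is the
neutral default. A sketch of the corrected call with each argument
annotated, assuming the ggml_rope_custom declaration from ggml.h at
this revision (the parameter-name comments are an annotation, not a
quote from the tree):

    return ggml_rope_custom(ctx,
        t,               // tensor to rotate
        KQ_pos,          // token positions
        n_rot,           // n_dims: number of rotary dimensions
        rope_mode,       // mode
        n_ctx,           // n_ctx
        0,               // n_orig_ctx
        rope_freq_base,  // freq_base
        rope_freq_scale, // freq_scale
        0.0f,            // ext_factor: YaRN extrapolation mix (disabled)
        1.0f,            // attn_factor: magnitude scale; 0.0f zeroed the output
        0.0f,            // beta_fast
        0.0f);           // beta_slow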