From 37873ae77ae114d8cc28ebef017520278037ad95 Mon Sep 17 00:00:00 2001
From: jameswu2014 <545426914@qq.com>
Date: Mon, 4 Sep 2023 19:10:22 +0800
Subject: [PATCH] update format

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index dc82037ad..aea30c6ed 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2779,7 +2779,7 @@ static struct ggml_cgraph * llm_build_baichaun(
             switch (model.type) {
                 case MODEL_7B:
                     Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
-                    Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
+                    Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
                     break;
                 case MODEL_13B:
                     Kcur = ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N);