diff --git a/llama.cpp b/llama.cpp
index e46147b17..512805980 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -790,6 +790,7 @@ static bool llama_eval_internal(
         memcpy(embd->data, tokens, N*ggml_element_size(embd));

     struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
+    struct ggml_tensor * scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));

     for (int il = 0; il < n_layer; ++il) {
         struct ggml_tensor * inpSA = inpL;
@@ -850,7 +851,7 @@ static bool llama_eval_internal(
             struct ggml_tensor * KQ_scaled =
                 ggml_scale_inplace(ctx0,
                         KQ,
-                        ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
+                        scale);

             // KQ_masked = mask_past(KQ_scaled)
             struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);