From 41e8d2b434132616321193e252cadc73a6726a95 Mon Sep 17 00:00:00 2001
From: Howard Su
Date: Sat, 1 Apr 2023 01:51:44 +0800
Subject: [PATCH] Move constant out of loop

---
 llama.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index e46147b17..512805980 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -790,6 +790,7 @@ static bool llama_eval_internal(
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
 
     struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
+    struct ggml_tensor * scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));
 
     for (int il = 0; il < n_layer; ++il) {
         struct ggml_tensor * inpSA = inpL;
@@ -850,7 +851,7 @@ static bool llama_eval_internal(
             struct ggml_tensor * KQ_scaled =
                 ggml_scale_inplace(ctx0,
                         KQ,
-                        ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
+                        scale);
 
             // KQ_masked = mask_past(KQ_scaled)
             struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
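
Not part of the patch above: a minimal standalone sketch of the same loop-invariant hoisting pattern, assuming the ggml API as used by llama.cpp at the time (ggml_init, ggml_new_f32, ggml_free); the n_embd, n_head, and n_layer values are placeholders, not taken from the patch. The point of the change is that ggml_new_f32() allocates a new scalar tensor in the graph context on every call, so building the attention scale once before the layer loop avoids creating n_layer identical tensors on each evaluation.

/* Illustrative sketch, not the patched llama.cpp code. */
#include "ggml.h"
#include <math.h>

int main(void) {
    // Small scratch context for the example (placeholder size).
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
    };
    struct ggml_context * ctx0 = ggml_init(params);

    // Placeholder model dimensions.
    const int n_embd  = 4096;
    const int n_head  = 32;
    const int n_layer = 32;

    // Hoisted out of the loop: one scalar tensor shared by every layer.
    // Calling ggml_new_f32() inside the loop instead would allocate
    // n_layer identical tensors in ctx0 on every graph build.
    struct ggml_tensor * scale = ggml_new_f32(ctx0, 1.0f/sqrtf((float) n_embd/n_head));

    for (int il = 0; il < n_layer; ++il) {
        // Per-layer graph construction would reuse `scale` here,
        // e.g. ggml_scale_inplace(ctx0, KQ, scale), rather than
        // rebuilding the constant for each layer.
        (void) il;
        (void) scale;
    }

    ggml_free(ctx0);
    return 0;
}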