From 68afe4f71b994db322314d9d1ee54123ec67f165 Mon Sep 17 00:00:00 2001
From: manikbhandari
Date: Wed, 27 Dec 2023 07:33:27 -0500
Subject: [PATCH] remove unused code

---
 llama.cpp | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 017204783..68c7cced6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5851,10 +5851,6 @@ struct llm_build_context {
         struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
         cb(inp_pos, "inp_pos", -1);
 
-        // KQ_scale
-        struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
-        cb(KQ_scale, "KQ_scale", -1);
-
         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
         struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
         cb(KQ_mask, "KQ_mask", -1);
@@ -5914,6 +5910,7 @@ struct llm_build_context {
                     model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                     NULL,                      NULL,
                     model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+                    NULL,
                     LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
             cb(cur, "ffn_out", il);
         }