From 3cbeffc50fc2a61ed71e67ac86b96f073eabc2f2 Mon Sep 17 00:00:00 2001
From: Layl Bongers <3094382+LaylBongers@users.noreply.github.com>
Date: Mon, 13 May 2024 14:39:50 +0200
Subject: [PATCH] Add time mix output loading

---
 src/llama.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/llama.cpp b/src/llama.cpp
index 287a36520..a878980f8 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -531,6 +531,7 @@ enum llm_tensor {
     LLM_TENSOR_TIME_MIX_RECEPTANCE,
     LLM_TENSOR_TIME_MIX_GATE,
     LLM_TENSOR_TIME_MIX_LN,
+    LLM_TENSOR_TIME_MIX_OUTPUT,
     LLM_TENSOR_ATTN_Q_A,
     LLM_TENSOR_ATTN_Q_B,
     LLM_TENSOR_ATTN_KV_A_MQA,
@@ -1372,6 +1373,7 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix.receptance" },
             { LLM_TENSOR_TIME_MIX_GATE,       "blk.%d.time_mix.gate" },
             { LLM_TENSOR_TIME_MIX_LN,         "blk.%d.time_mix.ln" },
+            { LLM_TENSOR_TIME_MIX_OUTPUT,     "blk.%d.time_mix.output" },
         },
     },
     {
@@ -2551,6 +2553,7 @@ struct llama_layer {
 
     struct ggml_tensor * time_mix_ln;
     struct ggml_tensor * time_mix_ln_b;
+    struct ggml_tensor * time_mix_output;
 
     // long rope factors
     struct ggml_tensor * rope_long = nullptr;
@@ -8313,6 +8316,7 @@ static bool llm_load_tensors(
 
                     layer.time_mix_ln = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd});
                     layer.time_mix_ln_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd});
+                    layer.time_mix_output = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, n_embd});
                 }
             }
 
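
Note: this commit only loads the tensor; nothing in the patch consumes it yet. For orientation, below is a minimal sketch of where such an output projection is typically applied in an RWKV-style time-mix block. The helper name, its signature, and the surrounding shapes are assumptions for illustration, not code from this patch; only the ggml calls (ggml_norm, ggml_add, ggml_mul, ggml_silu, ggml_mul_mat) are real API.

// Hypothetical sketch, not part of this patch: one plausible consumer of
// layer.time_mix_output when the time-mix graph is built. The wkv result
// is layer-normed with time_mix_ln/time_mix_ln_b, gated, then projected
// back to n_embd with the [n_embd, n_embd] output matrix loaded above.
static struct ggml_tensor * build_time_mix_out( // illustrative name
        struct ggml_context      * ctx,
        const struct llama_layer * layer,
        struct ggml_tensor       * wkv,     // [n_embd, n_tokens] time-mix result (assumed)
        struct ggml_tensor       * gate) {  // [n_embd, n_tokens] gate branch (assumed)
    // normalize using the weights loaded as LLM_TENSOR_TIME_MIX_LN
    struct ggml_tensor * cur = ggml_norm(ctx, wkv, 1e-5f);
    cur = ggml_add(ctx, ggml_mul(ctx, cur, layer->time_mix_ln), layer->time_mix_ln_b);
    // SiLU-gated, as is common in RWKV-style blocks
    cur = ggml_mul(ctx, cur, ggml_silu(ctx, gate));
    // output projection: ggml_mul_mat contracts the first dimension of both
    // operands, so the {n_embd, n_embd} shape in the loader lines up with
    // cur's leading n_embd dimension.
    return ggml_mul_mat(ctx, layer->time_mix_output, cur);
}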