llama: rwkv6: Fix tensor loading for 7B/14B models

Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
Molly Sophia 2024-08-13 17:06:07 +08:00
parent b0f4fe5279
commit 683d70cb68


@@ -8364,10 +8364,9 @@ static bool llm_load_tensors(
             model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
             model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
-            // TODO: Parameterize this
-            const int time_mix_extra_dim = 32;
-            const int time_decay_extra_dim = 64;
-            const int head_size = 64;
+            const int time_mix_extra_dim = (n_embd == 4096) ? 64 : 32;
+            const int time_decay_extra_dim = (n_embd == 4096) ? 128 : 64;
+            const int head_size = hparams.wkv_head_size;
             const int attn_hidden_size = n_embd;
             const int ffn_size = (int)(n_embd * 3.5 / 32) * 32;
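
For context, the change replaces hard-coded RWKV6 dimensions with values derived from the model: the time-mix and time-decay extra dimensions now depend on n_embd (4096, which per the commit title corresponds to the 7B/14B models), and the head size is read from hparams.wkv_head_size instead of being fixed at 64. The snippet below is a minimal standalone sketch of that selection logic, not code from llama.cpp; the names rwkv6_dims and select_rwkv6_dims are made up for illustration.

#include <cstdio>

// Illustrative only: mirrors the dimension selection introduced by this
// commit. Struct and function names are hypothetical, not llama.cpp API.
struct rwkv6_dims {
    int time_mix_extra_dim;
    int time_decay_extra_dim;
};

static rwkv6_dims select_rwkv6_dims(int n_embd) {
    rwkv6_dims d;
    // n_embd == 4096 selects the wider extra dimensions used by the
    // larger RWKV6 models; smaller models keep the previous 32/64 values.
    d.time_mix_extra_dim   = (n_embd == 4096) ? 64  : 32;
    d.time_decay_extra_dim = (n_embd == 4096) ? 128 : 64;
    return d;
}

int main() {
    const int widths[] = {2048, 2560, 4096};
    for (int n_embd : widths) {
        const rwkv6_dims d = select_rwkv6_dims(n_embd);
        printf("n_embd=%d -> time_mix_extra_dim=%d, time_decay_extra_dim=%d\n",
               n_embd, d.time_mix_extra_dim, d.time_decay_extra_dim);
    }
    return 0;
}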