llama : correct llm_build_moe_ffn() arguments in build_arctic()
parent bb9c361802
commit f3b5e7d436
1 changed file with 1 addition and 1 deletion
@@ -11142,7 +11142,7 @@ struct llm_build_context {
                     model.layers[il].ffn_down_exps,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true,
-                    true, hparams.expert_weights_scale,
+                    false, 0.0,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
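For readers of the raw hunk, here is a sketch of how the corrected call in build_arctic() plausibly reads in full. Only the lines from model.layers[il].ffn_down_exps onward are confirmed by this commit; the preceding ffn_gate_inp/ffn_up_exps/ffn_gate_exps arguments are assumed by analogy with other MoE call sites, and the reading of the changed pair as an expert-weight-scaling flag plus scale value is inferred from the removed identifier hparams.expert_weights_scale. The net effect of the fix is that Arctic's MoE FFN no longer applies an expert-weight scale.

    // Sketch, not part of the commit: plausible call site in build_arctic()
    // after this fix. Lines not shown in the hunk above are assumptions.
    cur = llm_build_moe_ffn(ctx0, cur,
            model.layers[il].ffn_gate_inp,   // assumed: expert router tensor
            model.layers[il].ffn_up_exps,    // assumed: per-expert up projection
            model.layers[il].ffn_gate_exps,  // assumed: per-expert gate projection
            model.layers[il].ffn_down_exps,  // confirmed by the hunk
            n_expert, n_expert_used,
            LLM_FFN_SILU, true,
            false, 0.0,                      // was: true, hparams.expert_weights_scale
            cb, il);
    cb(cur, "ffn_moe_out", il);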