llama : fix compile warning (#8304)
parent 1d894a790e
commit 7ed03b8974
1 changed file with 1 addition and 1 deletion
@@ -7261,7 +7261,7 @@ static bool llm_load_tensors(
             layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

-            if (i < hparams.n_layer_dense_lead) {
+            if (i < (int) hparams.n_layer_dense_lead) {
                 layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                 layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                 layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
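The warning being silenced is most likely a signed/unsigned comparison: the loop index i is a signed int, while n_layer_dense_lead appears to be an unsigned field in the hparams struct, so comparing them directly trips -Wsign-compare. The sketch below is not the llama.cpp source; it is a minimal standalone reproduction, assuming a uint32_t n_layer_dense_lead and a hypothetical hparams_t struct, showing how the explicit (int) cast from this commit removes the warning.

// Minimal sketch (assumed types, not the real llama.cpp code):
// comparing a signed loop index with an unsigned hyperparameter
// triggers -Wsign-compare; casting mirrors the fix in this commit.
#include <cstdint>
#include <cstdio>

struct hparams_t {
    uint32_t n_layer_dense_lead = 3;  // assumed unsigned, as in typical hparams fields
};

int main() {
    hparams_t hparams;
    for (int i = 0; i < 8; ++i) {
        // if (i < hparams.n_layer_dense_lead) {     // warning: comparison of integer
        //                                           // expressions of different signedness
        if (i < (int) hparams.n_layer_dense_lead) {  // explicit cast silences the warning
            printf("layer %d uses the dense FFN\n", i);
        }
    }
    return 0;
}

Casting the unsigned side to int (rather than the index to unsigned) keeps the comparison well defined for the small values involved here and matches the one-character-class change the commit makes.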