llama : fix compile warning (#8304)

Authored by Georgi Gerganov on 2024-07-05 17:32:09 +03:00, committed by Neo Zhang
parent c667e897e9
commit 401892e563


@@ -7262,7 +7262,7 @@ static bool llm_load_tensors(
     layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
-    if (i < hparams.n_layer_dense_lead) {
+    if (i < (int) hparams.n_layer_dense_lead) {
         layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
         layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
         layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
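For context, the change silences a signed/unsigned comparison warning (-Wsign-compare): the loop counter i is an int while n_layer_dense_lead is an unsigned hyperparameter, as the added cast suggests. The following is a minimal standalone sketch of the same pattern; the struct and values below are hypothetical stand-ins, not the actual llama.cpp definitions.

// sketch: comparing a signed loop counter against an unsigned field
#include <cstdint>
#include <cstdio>

struct hparams_t {
    uint32_t n_layer_dense_lead = 3; // assumed unsigned 32-bit, as implied by the cast in the fix
    int      n_layer            = 8;
};

int main() {
    hparams_t hparams;

    for (int i = 0; i < hparams.n_layer; ++i) {
        // if (i < hparams.n_layer_dense_lead)      // warns: comparison of int with uint32_t
        if (i < (int) hparams.n_layer_dense_lead) { // explicit cast, mirroring the commit
            printf("layer %d: dense lead layer\n", i);
        } else {
            printf("layer %d: remaining layer\n", i);
        }
    }
    return 0;
}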