fix: fix the usage of the code model

commit 9a65c7a273
parent 21936ddb5d
Author: Joan Martinez
Date:   2024-05-31 15:10:43 +02:00
2 changed files with 5 additions and 3 deletions

File: convert-hf-to-gguf.py

@@ -2442,10 +2442,10 @@ class JinaBertV2Model(BertModel):
 if 'gated_layer' in name:
     d1 = data[:self.intermediate_size, :]
     name1 = name.replace('gated_layers', 'gated_layers_w')
-    name1 = name.replace('up_gated_layer', 'gated_layers_w')
+    name1 = name.replace('up_gated_layer', 'gated_layers_v')
     d2 = data[self.intermediate_size:, :]
     name2 = name.replace('gated_layers', 'gated_layers_v')
-    name2 = name.replace('up_gated_layer', 'gated_layers_v')
+    name2 = name.replace('up_gated_layer', 'gated_layers_w')
     yield name1, d1
     yield name2, d2
     continue
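
Note: this hunk splits the checkpoint's fused GEGLU weight into two GGUF tensors; the fix swaps which half receives the '_w' and '_v' suffix for checkpoints that use the 'up_gated_layer' naming (presumably the code model named in the commit title). A minimal NumPy sketch of the split itself, with toy shapes and hypothetical values, not the converter's real surroundings:

    import numpy as np

    intermediate_size, n_embd = 4, 3  # toy sizes

    # The checkpoint stores the gate and up projections fused into one
    # (2 * intermediate_size, n_embd) matrix.
    fused = np.arange(2 * intermediate_size * n_embd, dtype=np.float32)
    fused = fused.reshape(2 * intermediate_size, n_embd)

    # Same row slicing as the converter above.
    d1 = fused[:intermediate_size, :]  # first half  -> '..._w' or '..._v'
    d2 = fused[intermediate_size:, :]  # second half -> the other suffix

    assert d1.shape == d2.shape == (intermediate_size, n_embd)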

File: llama.cpp

@@ -5498,7 +5498,7 @@ static bool llm_load_tensors(
         layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
     } else {
-        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
     }
     layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
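
Note: the ffn_gate created here pairs with ffn_up and ffn_down in a GEGLU feed-forward. For reference, a rough NumPy sketch of the standard GEGLU formulation that consumes such a pair (an illustration only, not llama.cpp's actual kernels):

    import numpy as np

    def gelu(x):
        # tanh approximation of GELU
        return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))

    def geglu_ffn(x, w_gate, w_up, w_down):
        # elementwise product of the activated gate path and the up path
        return (gelu(x @ w_gate.T) * (x @ w_up.T)) @ w_down.T

    n_embd, n_ff = 8, 16
    rng = np.random.default_rng(0)
    x      = rng.standard_normal((2, n_embd)).astype(np.float32)
    w_gate = rng.standard_normal((n_ff, n_embd)).astype(np.float32)  # ffn_gate
    w_up   = rng.standard_normal((n_ff, n_embd)).astype(np.float32)  # ffn_up
    w_down = rng.standard_normal((n_embd, n_ff)).astype(np.float32)  # ffn_down

    print(geglu_ffn(x, w_gate, w_up, w_down).shape)  # (2, 8)
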
@@ -8506,6 +8506,8 @@ struct llm_build_context {
     cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);
     if (model.layers[il].attn_norm_2 != nullptr) {
+        // re-add the layer input
+        cur = ggml_add(ctx0, cur, inpL);
         cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il);
     }
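
Note: only some checkpoints ship a second attention norm (the code model does), so the graph applies attn_norm_2, after re-adding the layer input, only when the tensor is present. A toy sketch of that control flow, using an unscaled LayerNorm as a stand-in for llm_build_norm (simplified, assumptions mine):

    import numpy as np

    def layer_norm(x, eps=1e-6):
        # stand-in for llm_build_norm: no learned scale/bias, illustration only
        mu = x.mean(-1, keepdims=True)
        return (x - mu) / np.sqrt(x.var(-1, keepdims=True) + eps)

    def post_attention(cur, inp_l, attn_norm_2_present):
        cur = layer_norm(cur)            # attn_out_norm
        if attn_norm_2_present:
            cur = cur + inp_l            # re-add the layer input
            cur = layer_norm(cur)        # attn_norm_2
        return cur

    rng = np.random.default_rng(0)
    inp = rng.standard_normal((2, 4)).astype(np.float32)
    out = post_attention(inp * 2.0, inp, attn_norm_2_present=True)
    print(out.shape)  # (2, 4)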