llama : minor / style

Georgi Gerganov 2024-04-24 09:39:22 +03:00
parent 1bf93ced81
commit 32661ac8b4

llama.cpp

@@ -9029,7 +9029,6 @@ struct llm_build_context {
    }
    struct ggml_cgraph * build_phi3() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
        const int64_t n_embd_head = hparams.n_embd_head_v;
@@ -9048,7 +9047,6 @@ struct llm_build_context {
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
        for (int il = 0; il < n_layer; ++il) {
            auto residual = inpL;
            // self-attention
@@ -9120,6 +9118,8 @@ struct llm_build_context {
            cb(cur, "ffn_norm", il);
            // FF
            // special-case: the up and gate tensors are merged into a single tensor
            // TODO: support into llm_build_ffn
            {
                struct ggml_tensor* up = ggml_mul_mat(ctx0, model.layers[il].ffn_up, cur);
                cb(up, "ffn_up", il);
@@ -9152,9 +9152,6 @@ struct llm_build_context {
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);
        //cur = ggml_add(ctx0, cur, NULL);
        //cb(cur, "result_output", -1);
        ggml_build_forward_expand(gf, cur);
        return gf;
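
Note on the special-cased FF block above: Phi-3 stores the FFN up and gate projections merged into a single ffn_up tensor, so build_phi3 runs one matmul and then splits the result in two before the SwiGLU-style combination, instead of going through llm_build_ffn (hence the TODO). The following is a minimal standalone ggml sketch of that idea, not the llama.cpp implementation itself: dimensions are toy values, the gate-first/up-second layout of the merged tensor is an assumption for illustration, and it assumes ggml is available to build against.

// Standalone sketch: handling a merged up+gate FFN weight with ggml,
// i.e. one matmul followed by a split into two halves.
#include "ggml.h"

#include <stdio.h>

int main(void) {
    const int64_t n_embd = 8;   // toy embedding size   (assumption)
    const int64_t n_ff   = 16;  // toy FFN inner size   (assumption)
    const int64_t n_tok  = 4;   // toy number of tokens (assumption)

    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // merged weight: the first n_ff output rows act as the gate,
    // the last n_ff rows as the up projection (assumed layout)
    struct ggml_tensor * w_up_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, 2*n_ff);
    struct ggml_tensor * inp       = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tok);
    ggml_set_f32(w_up_gate, 0.01f);
    ggml_set_f32(inp,       1.00f);

    // a single matmul produces a [2*n_ff, n_tok] activation
    struct ggml_tensor * up = ggml_mul_mat(ctx, w_up_gate, inp);

    // split it into two [n_ff, n_tok] halves with views (offset is in bytes)
    struct ggml_tensor * gate = ggml_cont(ctx, ggml_view_2d(ctx, up, n_ff, n_tok, up->nb[1], 0));
    struct ggml_tensor * upv  = ggml_cont(ctx, ggml_view_2d(ctx, up, n_ff, n_tok, up->nb[1], n_ff*ggml_element_size(up)));

    // SwiGLU-style combination: silu(gate) * up
    struct ggml_tensor * out = ggml_mul(ctx, ggml_silu(ctx, gate), upv);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, out);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

    printf("out: %lld x %lld, out[0] = %f\n",
            (long long) out->ne[0], (long long) out->ne[1], ggml_get_f32_1d(out, 0));

    ggml_free(ctx);
    return 0;
}

Merging the two projections means one larger matmul per layer instead of two, which is why the block is special-cased in build_phi3 until llm_build_ffn supports the split directly, as the TODO in the diff notes.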