llama: fix missing k_cache store for rwkv6qwen2 (#11445)
Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
commit 325afb370a
parent 794fe23f29
1 changed file with 3 additions and 7 deletions
@@ -7700,17 +7700,13 @@ struct llm_build_context {
                 1
             );
 
+            struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att));
             ggml_build_forward_expand(
                 gf,
                 ggml_cpy(
                     ctx0,
-                    wkv_states,
-                    ggml_view_1d(
-                        ctx0,
-                        kv_self.v_l[il],
-                        hparams.n_embd_v_s() * n_seqs,
-                        hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il])
-                    )
+                    ggml_view_1d(ctx0, last_norm_att, n_embd * n_seqs, 0),
+                    ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il]))
                 )
             );
 
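For context on the fix: in llama.cpp's cache layout for recurrent models, each layer's k_l buffer holds the token-shift state (sized by hparams.n_embd_k_s() per sequence) and the v_l buffer holds the wkv state (sized by hparams.n_embd_v_s()). The rwkv6qwen2 graph copied wkv_states into v_l here but never wrote the last token's normalized activations (last_norm_att) back into k_l, so the token-shift state was lost between batches; the patch replaces that copy with the missing k_l store (the wkv-state copy is presumably handled elsewhere in this build path). Below is a minimal, self-contained sketch of the two flat copies involved, in plain C++ rather than ggml graph code; all sizes and values are invented for illustration, and std::copy stands in for the ggml_cpy nodes the real graph builds.

// Sketch of the split recurrent-state cache and the two per-ubatch stores.
// Hypothetical sizes; std::copy stands in for the graph's ggml_cpy nodes.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const int n_embd_k_s = 4;  // per-seq token-shift state size (k cache side)
    const int n_embd_v_s = 8;  // per-seq wkv state size (v cache side)
    const int n_cells    = 3;  // cache slots per layer
    const int n_seqs     = 2;  // sequences in the current ubatch
    const int kv_head    = 1;  // first slot assigned to this ubatch

    // One pair of flat buffers per layer; a single layer suffices here.
    std::vector<float> k_l(n_embd_k_s * n_cells, 0.0f);
    std::vector<float> v_l(n_embd_v_s * n_cells, 0.0f);

    // States the layer produced for this ubatch (dummy contents).
    std::vector<float> last_norm_att(n_embd_k_s * n_seqs, 1.0f); // token shift
    std::vector<float> wkv_states(n_embd_v_s * n_seqs, 2.0f);    // wkv state

    // Both stores must happen every ubatch. The bug was that the k-side copy
    // below was missing for rwkv6qwen2, so the token-shift state never
    // persisted across batches.
    std::copy(last_norm_att.begin(), last_norm_att.end(),
              k_l.begin() + (size_t) n_embd_k_s * kv_head); // mirrors k_l cpy
    std::copy(wkv_states.begin(), wkv_states.end(),
              v_l.begin() + (size_t) n_embd_v_s * kv_head); // mirrors v_l cpy

    printf("slot %d now holds the states for %d seq(s)\n", kv_head, n_seqs);
    return 0;
}

Note one difference from the patch itself: the ggml_view_1d offsets there are in bytes, hence the ggml_element_size() factor, while the sketch above indexes in elements.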