llama : remove unused variable
This commit is contained in:
parent
aff96920f9
commit
e04910dc48
1 changed file with 1 addition and 2 deletions
|
@@ -7718,7 +7718,6 @@ static bool llm_load_tensors(
|
|||
const int64_t d_inner = hparams.ssm_d_inner;
|
||||
const int64_t d_state = hparams.ssm_d_state;
|
||||
const int64_t n_head = hparams.ssm_dt_rank;
|
||||
const int64_t head_dim = n_embd / n_head;
|
||||
const int64_t n_group = hparams.ssm_n_group;
|
||||
const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
|
||||
|
||||
|
@@ -9287,7 +9286,7 @@ static struct ggml_tensor * llm_build_mamba2(
|
|||
const int64_t d_inner = hparams.ssm_d_inner;
|
||||
const int64_t d_state = hparams.ssm_d_state;
|
||||
const int64_t n_head = hparams.ssm_dt_rank;
|
||||
const int64_t head_dim = d_inner / n_head; // FIXME
|
||||
const int64_t head_dim = d_inner / n_head;
|
||||
const int64_t n_group = hparams.ssm_n_group;
|
||||
const int64_t n_seqs = batch.n_seqs;
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue