llama : fix grok rope type
commit abdc8ea34a
parent 81ce9df3ee
1 changed file with 1 addition and 1 deletion
@@ -13680,7 +13680,6 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
 
         // use what we call a normal RoPE, operating on pairs of consecutive head values
         case LLM_ARCH_LLAMA:
-        case LLM_ARCH_GROK:
         case LLM_ARCH_BAICHUAN:
         case LLM_ARCH_STARCODER:
         case LLM_ARCH_PLAMO:
@@ -13693,6 +13692,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
 
         // the pairs of head values are offset by n_rot/2
         case LLM_ARCH_FALCON:
+        case LLM_ARCH_GROK:
         case LLM_ARCH_PERSIMMON:
         case LLM_ARCH_BERT:
         case LLM_ARCH_NOMIC_BERT:
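For context, the two case groups correspond to the two RoPE layouts that llama_rope_type distinguishes: the "normal" layout rotates pairs of consecutive head values (x[2i], x[2i+1]), while the NEOX-style layout rotates pairs offset by n_rot/2 (x[i], x[i + n_rot/2]); this commit moves LLM_ARCH_GROK from the first group to the second. The following is a minimal, stand-alone C++ sketch of that pairing difference only; the function names rope_norm / rope_neox and the simplified theta formula are illustrative assumptions, not the actual llama.cpp kernels.

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Sketch only: rotate one head of dimension n_rot at position `pos`,
    // with angle theta_i = pos * base^(-2i/n_rot) for the i-th pair.

    // "Normal" RoPE: each rotated pair is two consecutive values (x[2i], x[2i+1]).
    static void rope_norm(std::vector<float> & x, int n_rot, int pos, float base = 10000.0f) {
        for (int i = 0; i < n_rot / 2; ++i) {
            const float theta = pos * std::pow(base, -2.0f * i / n_rot);
            const float c = std::cos(theta), s = std::sin(theta);
            const float x0 = x[2*i + 0], x1 = x[2*i + 1];
            x[2*i + 0] = x0*c - x1*s;
            x[2*i + 1] = x0*s + x1*c;
        }
    }

    // NEOX-style RoPE: the two values of a pair are offset by n_rot/2 (x[i], x[i + n_rot/2]).
    static void rope_neox(std::vector<float> & x, int n_rot, int pos, float base = 10000.0f) {
        for (int i = 0; i < n_rot / 2; ++i) {
            const float theta = pos * std::pow(base, -2.0f * i / n_rot);
            const float c = std::cos(theta), s = std::sin(theta);
            const float x0 = x[i], x1 = x[i + n_rot/2];
            x[i]           = x0*c - x1*s;
            x[i + n_rot/2] = x0*s + x1*c;
        }
    }

    int main() {
        std::vector<float> a = {1, 2, 3, 4}, b = a;
        rope_norm(a, 4, /*pos =*/ 1);
        rope_neox(b, 4, /*pos =*/ 1);
        // The two layouts rotate different index pairs, so the outputs differ
        // even for the same input; picking the wrong one breaks attention.
        printf("norm: %.3f %.3f %.3f %.3f\n", a[0], a[1], a[2], a[3]);
        printf("neox: %.3f %.3f %.3f %.3f\n", b[0], b[1], b[2], b[3]);
    }

Because the layouts are incompatible, reporting the wrong rope type for Grok would apply the rotation to the wrong value pairs at inference time, which is what this one-line move fixes.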