add 128k yarn context for Qwen

This commit is contained in:
Roberto Tomás Collins 2024-12-06 18:39:08 -05:00
parent c5ede3849f
commit b8c3607a8a

View file

@ -1989,6 +1989,15 @@ class Qwen2Model(Model):
except FileNotFoundError:
self._set_vocab_gpt2()
def set_gguf_parameters(self):
    """Write Qwen2 GGUF hyperparameters, adding YaRN rope-scaling metadata when configured."""
    super().set_gguf_parameters()
    # Hoist the sub-dict once instead of re-indexing self.hparams on every access.
    rope_scaling = self.hparams.get("rope_scaling")
    # Short-circuit keeps the original check order: presence, then "factor", then yarn type.
    if rope_scaling is not None and "factor" in rope_scaling and rope_scaling.get("type") == "yarn":
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
        self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
        # Pre-scaling training context length; raises KeyError if the config omits it,
        # matching the original behavior.
        self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
        # NOTE(review): 0.1 log-multiplier is hard-coded — confirm against the model family's
        # expected YaRN defaults rather than assuming it applies to all Qwen2 variants.
        self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1)
@Model.register("Qwen2MoeForCausalLM")
class Qwen2MoeModel(Model):