update: convert-hf-to-gguf.py cleanup for Qwen2MoeForCausalLM
parent aa8a7cd350
commit 06531cbaec
1 changed file with 2 additions and 43 deletions
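The 43 deleted lines duplicated, almost field for field, the generic hyperparameter handling that the shared `Model` base class already performs, which is what lets the override shrink to a `super().set_gguf_parameters()` call plus the Qwen2MoE-specific keys. A minimal sketch of the resulting delegation pattern (condensed from the deleted and added lines below; the base-class body is illustrative, not a verbatim copy of the real `Model` class):

```python
# Sketch of the pattern this commit moves to, assuming the usual
# convert-hf-to-gguf.py layout; the base-class body is condensed from
# the lines deleted below, not quoted from the actual Model class.
class Model:
    def set_gguf_parameters(self):
        # Generic fields shared by every architecture.
        self.gguf_writer.add_name(self.dir_model.name)
        self.gguf_writer.add_block_count(self.block_count)
        self.gguf_writer.add_embedding_length(self.find_hparam(["hidden_size", "n_embd"]))
        self.gguf_writer.add_head_count(self.find_hparam(["num_attention_heads", "n_head"]))
        # Optional fields are written only when present in config.json.
        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
            self.gguf_writer.add_context_length(n_ctx)
        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        self.gguf_writer.add_file_type(self.ftype)


class Qwen2MoeModel(Model):
    def set_gguf_parameters(self):
        super().set_gguf_parameters()  # all of the generic fields above
        # Only metadata the base class cannot derive stays in the subclass:
        # Qwen2MoE names its expert count "num_experts" and has a separate
        # per-expert feed-forward width, "moe_intermediate_size".
        if (n_experts := self.hparams.get("num_experts")) is not None:
            self.gguf_writer.add_expert_count(n_experts)
        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
```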
convert-hf-to-gguf.py
@@ -1624,54 +1624,13 @@ class Qwen2MoeModel(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2MOE
 
     def set_gguf_parameters(self):
-        self.gguf_writer.add_name(self.dir_model.name)
-        self.gguf_writer.add_block_count(self.block_count)
-
-        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
-            self.gguf_writer.add_context_length(n_ctx)
-            logger.info(f"gguf: context length = {n_ctx}")
-
-        n_embd = self.find_hparam(["hidden_size", "n_embd"])
-        self.gguf_writer.add_embedding_length(n_embd)
-        logger.info(f"gguf: embedding length = {n_embd}")
-
-        n_head = self.find_hparam(["num_attention_heads", "n_head"])
-        self.gguf_writer.add_head_count(n_head)
-        logger.info(f"gguf: head count = {n_head}")
-
-        if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
-            self.gguf_writer.add_head_count_kv(n_head_kv)
-            logger.info(f"gguf: key-value head count = {n_head_kv}")
-
-        if (rope_theta := self.hparams.get("rope_theta")) is not None:
-            self.gguf_writer.add_rope_freq_base(rope_theta)
-            logger.info(f"gguf: rope theta = {rope_theta}")
-        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
-            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
-            logger.info(f"gguf: rms norm epsilon = {f_rms_eps}")
-        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
-            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
-            logger.info(f"gguf: layer norm epsilon = {f_norm_eps}")
-
-        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
-            self.gguf_writer.add_expert_used_count(n_experts_used)
-            logger.info(f"gguf: experts used count = {n_experts_used}")
-
-        if (n_experts := self.find_hparam(["num_experts", "num_local_experts"])) is not None:
+        super().set_gguf_parameters()
+        if (n_experts := self.hparams.get("num_experts")) is not None:
             self.gguf_writer.add_expert_count(n_experts)
-
         if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
             self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
             logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
-
-        if (shared_expert_intermediate_size := self.find_hparam(["shared_expert_intermediate_size","intermediate_size", "n_inner"])) is not None:
-            self.gguf_writer.add_feed_forward_length(shared_expert_intermediate_size)
-            logger.info(f"gguf: feed forward length = {shared_expert_intermediate_size}")
-
-        self.gguf_writer.add_file_type(self.ftype)
-        logger.info(f"gguf: file type = {self.ftype}")
-
 
     _experts: list[dict[str, Tensor]] | None = None
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
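A quick way to confirm the MoE keys still land in the output file is to read them back with the gguf-py reader bundled in this repo. A rough spot check follows; the `.gguf` path is a placeholder, and the `qwen2moe.*` key names are an assumption based on gguf-py's usual `{arch}.expert_count` / `{arch}.expert_feed_forward_length` naming:

```python
# Rough spot check, assuming gguf-py's GGUFReader API; the .gguf path is
# a placeholder for a file produced by convert-hf-to-gguf.py.
from gguf import GGUFReader

reader = GGUFReader("qwen2moe-f16.gguf")
for key in ("qwen2moe.expert_count", "qwen2moe.expert_feed_forward_length"):
    field = reader.get_field(key)  # None if the key was never written
    # For scalar fields the last part of a ReaderField holds the raw value
    # (an assumption about gguf-py's internal layout).
    print(key, "->", field.parts[-1] if field else "missing")
```

Converting first with something like `python convert-hf-to-gguf.py <model_dir> --outfile qwen2moe-f16.gguf --outtype f16` should also print the retained `gguf: expert feed forward length = ...` log line.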