From 582b13c96669e899bfd18f87ced8c9d1d3194670 Mon Sep 17 00:00:00 2001
From: zhangkaihuo
Date: Thu, 11 Apr 2024 15:35:18 +0800
Subject: [PATCH] for old config

---
 convert-hf-to-gguf.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index d00481d02..861dfd643 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1097,7 +1097,8 @@ class MiniCPMModel(Model):
         self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_file_type(self.ftype)
-        self.gguf_writer.add_tie_lm_head(self.hparams["tie_lm_head"])
+        if "tie_lm_head" in self.hparams:
+            self.gguf_writer.add_tie_lm_head(self.hparams["tie_lm_head"])
 
     def set_vocab(self):
         self._set_vocab_hf()
@@ -1560,7 +1561,6 @@ class InternLM2Model(Model):
             self.gguf_writer.add_add_space_prefix(add_prefix)
 
         special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
-        print(special_vocab)
         old_eos = special_vocab.special_token_ids["eos"]
         if "chat" in os.path.basename(self.dir_model.absolute()):
             # For the chat model, we replace the eos with '<|im_end|>'.