diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 58aa50e23..6cea73f08 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -3182,7 +3182,7 @@ class ChatGLMModel(Model):
     def set_vocab_chatglm3(self):
         dir_model = self.dir_model
         hparams = self.hparams
-        tokens: list[bytearray] = []
+        tokens: list[bytes] = []
         toktypes: list[int] = []
         scores: list[float] = []
 
@@ -3331,7 +3331,7 @@ class ChatGLMModel(Model):
         special_vocab.add_to_gguf(self.gguf_writer)
 
     def set_gguf_parameters(self):
-        self.gguf_writer.add_name(self.hparams.get("_name_or_path").split("/")[1]) # THUDM/glm4-9b-chat or THUDM/chatglm3-6b
+        self.gguf_writer.add_name(self.hparams["_name_or_path"].split("/")[1]) # THUDM/glm4-9b-chat or THUDM/chatglm3-6b
         n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
         n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
         n_head_kv = self.hparams.get("multi_query_group_num", n_head)
diff --git a/gguf-py/scripts/gguf_hash.py b/gguf-py/scripts/gguf_hash.py
index 956775182..770b79a93 100755
--- a/gguf-py/scripts/gguf_hash.py
+++ b/gguf-py/scripts/gguf_hash.py
@@ -63,9 +63,9 @@ def gguf_hash(reader: GGUFReader, filename: str, disable_progress_bar) -> None:
             bar.update(sum_weights_in_tensor)
 
         sha1_layer = hashlib.sha1()
-        sha1_layer.update(tensor.data)
-        sha1.update(tensor.data)
-        uuidv5_sha1.update(tensor.data)
+        sha1_layer.update(tensor.data.data)
+        sha1.update(tensor.data.data)
+        uuidv5_sha1.update(tensor.data.data)
         print("sha1 {0} {1}:{2}".format(sha1_layer.hexdigest(), filename, tensor.name)) # noqa: NP100
 
     # Flush Hash Progress Bar