From 3be4270fc8e86a1f8e4f5e37a24994cd6ef757fb Mon Sep 17 00:00:00 2001
From: Umpire2018 <138990495+Umpire2018@users.noreply.github.com>
Date: Fri, 5 Jul 2024 14:44:29 +0000
Subject: [PATCH] fix: resolve Flake8 errors in `convert-hf-to-gguf.py`

- Fix E302 by adding two blank lines before top-level class and function
  definitions
- Fix NP100 by removing leftover debug print statements
- Fix E303 by removing the extra blank line between class methods

---
 convert-hf-to-gguf.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 8c0fa5d8e..5b85f49de 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -3060,6 +3060,7 @@ class JaisModel(Model):
         super().write_tensors()
         self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
 
+
 @Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration")
 class ChatGLMModel(Model):
     model_arch = gguf.MODEL_ARCH.CHATGLM
@@ -3077,8 +3078,6 @@ class ChatGLMModel(Model):
         assert max(tokenizer.get_vocab().values()) < vocab_size
         role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
         special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
-        print(vocab_size)
-        print(max(tokenizer.get_vocab().values()))
         for token_id in range(vocab_size):
             piece = tokenizer._convert_id_to_token(token_id)
             if token_id == 0:
@@ -3234,7 +3233,6 @@ class ChatGLMModel(Model):
         self.gguf_writer.add_add_bos_token(False)
         self.gguf_writer.add_rope_freq_base(self.hparams.get("rope_ratio", 10000))
 
-
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
 
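
Note on the NP100 fix: this patch drops the two debug prints outright. If
the values were still wanted for debugging, the usual NP100-clean route is
the script's module logger rather than print(). A minimal self-contained
sketch follows; the "hf-to-gguf" logger name and the placeholder values are
assumptions for illustration, not part of this patch:

    import logging

    logger = logging.getLogger("hf-to-gguf")  # assumed logger name
    logging.basicConfig(level=logging.DEBUG)

    vocab_size = 65024    # placeholder for hparams["padded_vocab_size"]
    max_token_id = 65023  # placeholder for max(tokenizer.get_vocab().values())

    # NP100 flags bare print() calls; logger.debug() keeps the diagnostics
    # available under a verbose flag without tripping the lint rule.
    logger.debug("vocab_size: %d", vocab_size)
    logger.debug("max token id: %d", max_token_id)

Routing diagnostics through logging also lets the converter silence them by
default and surface them only when the user asks for verbose output.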