From 6d84a42d1e72a95754f796c8f23c1951e6684386 Mon Sep 17 00:00:00 2001 From: "Zheng.Deng" <32841220+CUGfred@users.noreply.github.com> Date: Wed, 17 Apr 2024 02:10:12 +0800 Subject: [PATCH] Change lm_head skip check to a full string match and print an informative message Change the lm_head.weight check from a substring match to a full string match, and print a short message to inform users that lm_head.weight has been skipped. --- convert-hf-to-gguf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 0b4efc3e7..2c186f6f2 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -2279,8 +2279,9 @@ class GemmaModel(Model): for name, data_torch in self.get_tensors(): # lm_head is not used in llama.cpp, while autoawq will include this tensor in model # To prevent errors, skip loading lm_head.weight. - if "lm_head.weight" in name: - continue + if name == "lm_head.weight": + print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") + continue old_dtype = data_torch.dtype