From 20823530201f96faf4582c1782eb40dcd22fd827 Mon Sep 17 00:00:00 2001
From: "Zheng.Deng" <32841220+CUGfred@users.noreply.github.com>
Date: Mon, 15 Apr 2024 23:44:00 +0800
Subject: [PATCH] fix autoawq quantized gemma model convert error

Quantizing a Gemma model with autoawq leaves an lm_head.weight tensor in
model-00001-of-00002.safetensors. convert-hf-to-gguf.py cannot map this
tensor, so the conversion fails. Skipping lm_head.weight while loading
tensors prevents the error.
---
 convert-hf-to-gguf.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index b51d68307..2e518e71d 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -2262,6 +2262,11 @@ class GemmaModel(Model):
         tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
 
         for name, data_torch in self.get_tensors():
+            # lm_head is not used by llama.cpp, but autoawq includes this tensor in the model.
+            # To prevent mapping errors, skip loading lm_head.weight.
+            if "lm_head.weight" in name:
+                continue
+
             old_dtype = data_torch.dtype
 
             # convert any unsupported data types to float32
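
Note for reviewers (not part of the patch): a minimal sketch to check whether an
autoawq-quantized checkpoint actually contains lm_head.weight before running the
converter. The file path is a placeholder, and it assumes the safetensors Python
package is installed.

    # verify_lm_head.py -- hypothetical helper, not part of this patch
    from safetensors import safe_open

    # Placeholder path; point it at the quantized shard produced by autoawq.
    with safe_open("model-00001-of-00002.safetensors", framework="pt", device="cpu") as f:
        if "lm_head.weight" in f.keys():
            print("lm_head.weight present -- conversion would fail without the skip above")
        else:
            print("lm_head.weight absent -- checkpoint converts as-is")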