From df78f196127d2f429895ed3f5465bb7bd44b5341 Mon Sep 17 00:00:00 2001
From: Daniel Han
Date: Wed, 10 Jul 2024 22:02:21 -0700
Subject: [PATCH] 9B - query_pre_attn_scalar = 256 not 224

See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e

Gemma 9b should use 256 and not 224
(self.config.hidden_size // self.config.num_attention_heads)
---
 convert_hf_to_gguf.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index cf930be17..d70224a80 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2473,11 +2473,6 @@ class Gemma2Model(Model):
         )
         self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
 
-        # sanity check
-        attn_scalar = self.hparams["query_pre_attn_scalar"]
-        if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
-            raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
-
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
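
For context, here is a minimal sketch of why the deleted sanity check rejects the corrected Gemma 2 9B configuration. The hyperparameter values below (hidden_size = 3584, num_attention_heads = 16, query_pre_attn_scalar = 256) are assumed from the published Gemma 2 9B config rather than taken from this patch; under that assumption, hidden_size / num_attention_heads evaluates to 224, while the correct scalar is 256 (the head dimension), so the equality check necessarily fails on a valid config.

# Sketch only: reproduces the arithmetic behind the removed sanity check.
# Hyperparameter values are assumed from the published Gemma 2 9B config.
hparams = {
    "hidden_size": 3584,
    "num_attention_heads": 16,
    "query_pre_attn_scalar": 256,  # per the gemma_pytorch fix linked above
}

derived = hparams["hidden_size"] / hparams["num_attention_heads"]
print(derived)                             # 224.0 -- what the old check expected
print(hparams["query_pre_attn_scalar"])    # 256   -- the correct scalar (head_dim)

# The deleted check was equivalent to:
if hparams["query_pre_attn_scalar"] != derived:
    # For Gemma 2 9B this branch is taken, i.e. the old check
    # raised ValueError on a correct config -- hence its removal.
    pass

In other words, the check hard-coded the assumption that the attention scaling factor always equals n_embd / n_head, which holds for the 27B model but not for 9B, where the scalar instead matches head_dim.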