From 7dc9cbf03fd7d3b3a977872f7bae0fe6cbfad5db Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 1 Jul 2024 18:38:24 +0300
Subject: [PATCH] convert : add sanity check for query_pre_attn_scalar

---
 convert-hf-to-gguf.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 27fc9eea6..4a7f500ff 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -2371,6 +2371,11 @@ class Gemma2Model(Model):
         )
         self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
 
+        # sanity check
+        attn_scalar = self.hparams["query_pre_attn_scalar"]
+        if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
+            raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
+
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
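
Note (not part of the patch): the check asserts that Gemma-2's query_pre_attn_scalar hyperparameter equals the per-head width, hidden_size / num_attention_heads, before the model is converted. A minimal standalone sketch of the same check follows, using made-up example values rather than any real model's config.json:

    # Illustrative only: hypothetical hparams, mirroring the check added above.
    hparams = {
        "hidden_size": 4096,           # n_embd (example value)
        "num_attention_heads": 16,     # n_head (example value)
        "query_pre_attn_scalar": 256,  # expected to equal 4096 / 16
    }

    attn_scalar = hparams["query_pre_attn_scalar"]
    if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
        raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")

With these example values the check passes (4096 / 16 == 256); a config whose query_pre_attn_scalar diverges from n_embd / n_head now aborts conversion with the error above.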