Add attention and final logit softcapping.
parent 8748d8ac6f
commit 4d3f17b4ac
4 changed files with 38 additions and 2 deletions
@@ -2363,6 +2363,14 @@ class Gemma2Model(Model):
         self.gguf_writer.add_key_length(hparams["head_dim"])
         self.gguf_writer.add_value_length(hparams["head_dim"])
         self.gguf_writer.add_file_type(self.ftype)
+        self.gguf_writer.add_float32(
+            gguf.Keys.LLM.ATTN_LOGIT_SOFTCAPPING.format(arch=self.model_arch),
+            self.hparams["attn_logit_softcapping"]
+        )
+        self.gguf_writer.add_float32(
+            gguf.Keys.LLM.FINAL_LOGIT_SOFTCAPPING.format(arch=self.model_arch),
+            self.hparams["final_logit_softcapping"]
+        )
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
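For context, both hyperparameters written above control tanh-based logit softcapping as described for Gemma 2: a score is rescaled as cap * tanh(logits / cap), so it stays strictly inside (-cap, cap) instead of growing without bound. Below is a minimal sketch of that formula, assuming PyTorch and the published Gemma 2 config defaults (attn_logit_softcapping = 50.0, final_logit_softcapping = 30.0); the softcap helper is illustrative and not a function from this repository.

import torch

def softcap(logits: torch.Tensor, cap: float) -> torch.Tensor:
    # Rescale through tanh so every value is bounded to (-cap, cap).
    # Gemma 2 applies this to attention scores before the softmax
    # (attn_logit_softcapping) and to the output logits before
    # sampling (final_logit_softcapping).
    return cap * torch.tanh(logits / cap)

scores = torch.randn(4, 4) * 100.0  # unbounded attention scores
print(softcap(scores, 50.0))        # same sign and ordering, bounded to (-50, 50)

Storing the two cap values as per-architecture float32 GGUF keys, as this commit does, lets the llama.cpp inference code read them back at load time and reproduce the same capping during attention and final-logit computation.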