Fix OLMo HF to GGUF conversion (#6910)
This commit is contained in:
parent
260b7c6529
commit
b6aa670203
4 changed files with 12 additions and 2 deletions
|
@@ -314,6 +314,9 @@ class Model(ABC):
         if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
             # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
             res = "command-r"
+        if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
+            # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
+            res = "olmo"
 
         if res is None:
             logger.warning("\n")
@@ -2831,8 +2834,9 @@ class OlmoModel(Model):
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
         self.gguf_writer.add_layer_norm_eps(1e-5)
-        if "clip_qkv" in self.hparams is not None:
-            self.gguf_writer.add_clamp_kqv(self.hparams["clip_qkv"])
+        clip_qkv = self.hparams.get("clip_qkv")
+        if clip_qkv is not None:
+            self.gguf_writer.add_clamp_kqv(clip_qkv)
 
     # Same as super class, but permuting q_proj, k_proj
     # Copied from: LlamaModel
|
Loading…
Add table
Add a link
Reference in a new issue