NVIDIA uses the "LLaMAForCausalLM" string in their config.json; for example, nvidia/Llama3-ChatQA-2-8B.
This commit is contained in:
parent
822b6322de
commit
aaf7f53d46
1 changed file with 1 addition and 1 deletion
|
@ -1487,7 +1487,7 @@ class StableLMModel(Model):
|
|||
raise ValueError(f"Unprocessed norms: {norms}")
|
||||
|
||||
|
||||
@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
|
||||
@Model.register("LLaMAForCausalLM", "LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM")
|
||||
class LlamaModel(Model):
|
||||
model_arch = gguf.MODEL_ARCH.LLAMA
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue