From 4380e52abc4f94beddb2b3d0d8b79b458371cb2f Mon Sep 17 00:00:00 2001
From: akawrykow
Date: Tue, 29 Aug 2023 15:20:00 -0700
Subject: [PATCH] Update tensor map for falcon-rw

---
 gguf-py/gguf/gguf.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py
index 838a2c0f8..f3a193dc2 100644
--- a/gguf-py/gguf/gguf.py
+++ b/gguf-py/gguf/gguf.py
@@ -144,6 +144,7 @@ MODEL_TENSOR_NAMES = {
         MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
         MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
         MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
+        MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
         MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
         MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
     },
@@ -291,6 +292,7 @@ def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict:
         tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt
         tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth
+        tensor_map["transformer.h."+str(i)+".post_attention_layernorm"] = mapped_to # falcon-rw
 
         # Feed-forward up
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_UP, None)