WIP: Add support for rwkv v7

Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
Author: Molly Sophia <mollysophia379@gmail.com>
Date: 2025-01-15 20:43:23 +08:00
parent 5445300758
commit 6dcc21e7f5
14 changed files with 952 additions and 48 deletions
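For orientation: the tuples below belong to gguf-py's `TensorNameMap`, which the GGUF converter uses to match tensor names found in a checkpoint against llama.cpp's architecture-neutral `MODEL_TENSOR` ids, so RWKV v7 support starts with teaching the map the new `model.*` names alongside the v6 `rwkv.*` ones. A minimal sketch of the lookup idea, assuming a simplified stand-in (the enum subset and the `build_mapping` helper are illustrative, not gguf-py's actual API):

```python
# Illustrative sketch of the name-resolution idea behind TensorNameMap;
# not the real gguf-py implementation.
from enum import Enum, auto

class MODEL_TENSOR(Enum):
    TOKEN_EMBD = auto()
    TIME_MIX_W0 = auto()

# Non-block tensors: tuples of known checkpoint names, as in the diff below.
mappings_cfg = {
    MODEL_TENSOR.TOKEN_EMBD: (
        "rwkv.embeddings",   # rwkv v6
        "model.embeddings",  # rwkv v7
    ),
}

# Per-block tensors: "{bid}" is expanded once per layer.
block_mappings_cfg = {
    MODEL_TENSOR.TIME_MIX_W0: (
        "model.blocks.{bid}.attention.w0",  # rwkv7
    ),
}

def build_mapping(n_blocks: int) -> dict[str, MODEL_TENSOR]:
    mapping: dict[str, MODEL_TENSOR] = {}
    for tensor, names in mappings_cfg.items():
        for name in names:
            mapping[name] = tensor
    for tensor, names in block_mappings_cfg.items():
        for bid in range(n_blocks):
            for name in names:
                mapping[name.format(bid=bid)] = tensor
    return mapping

mapping = build_mapping(n_blocks=2)
assert mapping["model.blocks.1.attention.w0"] is MODEL_TENSOR.TIME_MIX_W0
```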

gguf-py/gguf/tensor_mapping.py

@@ -27,7 +27,8 @@ class TensorNameMap:
             "embedding.word_embeddings", # chatglm
             "transformer.token_embeddings", # openelm
             "shared", # t5
-            "rwkv.embeddings", # rwkv
+            "rwkv.embeddings", # rwkv v6
+            "model.embeddings", # rwkv v7
         ),

         # Token type embeddings
@@ -41,7 +42,8 @@ class TensorNameMap:
             "embeddings.LayerNorm", # bert
             "emb_ln", # nomic-bert
             "transformer.norm", # openelm
-            "rwkv.blocks.0.pre_ln", # rwkv
+            "rwkv.blocks.0.pre_ln", # rwkv v6
+            "model.pre_ln", # rwkv v7
             "backbone.norm", # wavtokenizer
         ),
@@ -81,7 +83,8 @@ class TensorNameMap:
             "encoder.final_layernorm", # chatglm
             "transformer.norm", # openelm
             "model.norm", # nemotron
-            "rwkv.ln_out", # rwkv
+            "rwkv.ln_out", # rwkv v6
+            "model.ln_out", # rwkv v7
             "backbone.final_layer_norm", # wavtokenizer
         ),
@@ -122,14 +125,16 @@ class TensorNameMap:
             "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx
             "encoder.layers.{bid}.input_layernorm", # chatglm
             "transformer.layers.{bid}.attn_norm", # openelm
-            "rwkv.blocks.{bid}.ln1", # rwkv
+            "rwkv.blocks.{bid}.ln1", # rwkv v6
+            "model.blocks.{bid}.ln1", # rwkv v7
         ),

         # Attention norm 2
         MODEL_TENSOR.ATTN_NORM_2: (
             "transformer.h.{bid}.ln_attn", # falcon40b
             "encoder.layer.{bid}.layer_norm_1", # jina-v2-code
-            "rwkv.blocks.{bid}.ln2", # rwkv
+            "rwkv.blocks.{bid}.ln2", # rwkv v6
+            "model.blocks.{bid}.ln2", # rwkv v7
         ),

         # Attention query-key-value
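The `ln1`/`ln2` entries reuse the generic attention-norm slots because an RWKV block is wired like a pre-norm transformer block: `ln1` feeds the time-mix (the attention analogue) and `ln2` feeds the channel-mix (the FFN analogue). A rough sketch of that wiring, with hypothetical `time_mix`/`channel_mix` callables (illustrative, not code from this commit):

```python
from typing import Callable
import numpy as np

def rwkv_block(x: np.ndarray,
               ln1: Callable[[np.ndarray], np.ndarray],          # ATTN_NORM
               ln2: Callable[[np.ndarray], np.ndarray],          # ATTN_NORM_2
               time_mix: Callable[[np.ndarray], np.ndarray],     # attention analogue
               channel_mix: Callable[[np.ndarray], np.ndarray],  # FFN analogue
               ) -> np.ndarray:
    x = x + time_mix(ln1(x))     # ln1 normalizes the time-mix input
    x = x + channel_mix(ln2(x))  # ln2 normalizes the channel-mix input
    return x

# Degenerate mixes leave the residual stream intact:
x = np.ones(4)
y = rwkv_block(x, ln1=lambda v: v, ln2=lambda v: v,
               time_mix=lambda v: 0 * v, channel_mix=lambda v: 0 * v)
assert np.allclose(y, x)
```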
@@ -462,14 +467,64 @@ class TensorNameMap:
             "backbone.layers.{bid}.mixer.out_proj",
         ),

+        MODEL_TENSOR.TIME_MIX_W0: (
+            "model.blocks.{bid}.attention.w0", # rwkv7
+        ),
+
         MODEL_TENSOR.TIME_MIX_W1: (
             "rwkv.blocks.{bid}.attention.time_maa_w1", # rwkv v6
             "model.layers.{bid}.self_attn.time_maa_w1", # rwkv6qwen2
+            "model.blocks.{bid}.attention.w1" # rwkv7
         ),

         MODEL_TENSOR.TIME_MIX_W2: (
             "rwkv.blocks.{bid}.attention.time_maa_w2", # rwkv v6
             "model.layers.{bid}.self_attn.time_maa_w2", # rwkv6qwen2
+            "model.blocks.{bid}.attention.w2" # rwkv7
         ),

+        MODEL_TENSOR.TIME_MIX_A0: (
+            "model.blocks.{bid}.attention.a0", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_A1: (
+            "model.blocks.{bid}.attention.a1", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_A2: (
+            "model.blocks.{bid}.attention.a2", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_V0: (
+            "model.blocks.{bid}.attention.v0", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_V1: (
+            "model.blocks.{bid}.attention.v1", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_V2: (
+            "model.blocks.{bid}.attention.v2", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_G1: (
+            "model.blocks.{bid}.attention.g1", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_G2: (
+            "model.blocks.{bid}.attention.g2", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_K_K: (
+            "model.blocks.{bid}.attention.k_k", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_K_A: (
+            "model.blocks.{bid}.attention.k_a", # rwkv7
+        ),
+
+        MODEL_TENSOR.TIME_MIX_R_K: (
+            "model.blocks.{bid}.attention.r_k", # rwkv7
+        ),
+
         MODEL_TENSOR.TIME_MIX_LERP_X: (
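The burst of new `TIME_MIX_*` entries reflects RWKV-7's low-rank parameterizations: roughly, `w0`/`w1`/`w2` produce the per-channel decay, `a0`/`a1`/`a2` the in-context learning rate, `v0`/`v1`/`v2` the value-residual mix, `g1`/`g2` the output gate, and `k_k`, `k_a`, `r_k` are per-channel modulation vectors. A hedged sketch of the common "bias plus tanh-bottlenecked low-rank update" pattern such tensor triples follow (shapes, names, and the final decay squashing are my assumptions, not taken from this commit):

```python
import numpy as np

def lora_mix(x: np.ndarray, b0: np.ndarray, b1: np.ndarray, b2: np.ndarray) -> np.ndarray:
    # Base bias plus a tanh-bottlenecked low-rank update,
    # e.g. w = w0 + tanh(x @ w1) @ w2 for the decay path.
    # (The real v7 kernels add further per-tensor nonlinearities.)
    return b0 + np.tanh(x @ b1) @ b2

d, r = 768, 64                    # assumed model width and low-rank size
rng = np.random.default_rng(0)
x  = rng.standard_normal(d)
w0 = rng.standard_normal(d)       # attention.w0
w1 = rng.standard_normal((d, r))  # attention.w1
w2 = rng.standard_normal((r, d))  # attention.w2
w = lora_mix(x, w0, w1, w2)
decay = np.exp(-np.exp(w))        # one common way to squash w into (0, 1);
                                  # v7's exact decay mapping differs slightly
assert decay.shape == (d,) and np.all((decay > 0) & (decay < 1))
```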
@@ -522,36 +577,42 @@ class TensorNameMap:
         ),

         MODEL_TENSOR.TIME_MIX_KEY: (
-            "rwkv.blocks.{bid}.attention.key", # rwkv
+            "rwkv.blocks.{bid}.attention.key", # rwkv v6
             "model.layers.{bid}.self_attn.k_proj", # rwkv6qwen2
+            "model.blocks.{bid}.attention.key", # rwkv v7
         ),

         MODEL_TENSOR.TIME_MIX_VALUE: (
-            "rwkv.blocks.{bid}.attention.value", # rwkv
+            "rwkv.blocks.{bid}.attention.value", # rwkv v6
             "model.layers.{bid}.self_attn.v_proj", # rwkv6qwen2
+            "model.blocks.{bid}.attention.value", # rwkv v7
         ),

         MODEL_TENSOR.TIME_MIX_RECEPTANCE: (
-            "rwkv.blocks.{bid}.attention.receptance", # rwkv
-            "model.layers.{bid}.self_attn.q_proj", # rwkv6qwen2
+            "rwkv.blocks.{bid}.attention.receptance", # rwkv v6
+            "model.layers.{bid}.self_attn.q_proj", # rwkv6qwen2
+            "model.blocks.{bid}.attention.receptance", # rwkv v7
         ),

         MODEL_TENSOR.TIME_MIX_GATE: (
-            "rwkv.blocks.{bid}.attention.gate", # rwkv
+            "rwkv.blocks.{bid}.attention.gate", # rwkv v6
             "model.layers.{bid}.self_attn.gate", # rwkv6qwen2
         ),

         MODEL_TENSOR.TIME_MIX_LN: (
-            "rwkv.blocks.{bid}.attention.ln_x", # rwkv
+            "rwkv.blocks.{bid}.attention.ln_x", # rwkv v6
+            "model.blocks.{bid}.attention.ln_x" # rwkv v7
         ),

         MODEL_TENSOR.TIME_MIX_OUTPUT: (
             "rwkv.blocks.{bid}.attention.output", # rwkv
             "model.layers.{bid}.self_attn.o_proj", # rwkv6qwen2
+            "model.blocks.{bid}.attention.output", # rwkv v7
         ),

         MODEL_TENSOR.CHANNEL_MIX_LERP_K: (
             "rwkv.blocks.{bid}.feed_forward.time_maa_k", # rwkv v6
+            "model.blocks.{bid}.feed_forward.x_k", # rwkv v7
         ),

         MODEL_TENSOR.CHANNEL_MIX_LERP_R: (
@@ -559,7 +620,8 @@ class TensorNameMap:
         ),

         MODEL_TENSOR.CHANNEL_MIX_KEY: (
-            "rwkv.blocks.{bid}.feed_forward.key", # rwkv
+            "rwkv.blocks.{bid}.feed_forward.key", # rwkv v6
+            "model.blocks.{bid}.feed_forward.key", # rwkv v7
         ),

         MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE: (
@@ -567,7 +629,8 @@ class TensorNameMap:
         ),

         MODEL_TENSOR.CHANNEL_MIX_VALUE: (
-            "rwkv.blocks.{bid}.feed_forward.value", # rwkv
+            "rwkv.blocks.{bid}.feed_forward.value", # rwkv v6
+            "model.blocks.{bid}.feed_forward.value", # rwkv v7
         ),

         MODEL_TENSOR.ATTN_Q_A: (
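On the channel-mix side, v7 renames the token-shift weight from `time_maa_k` to `x_k`, and no v7 names appear under `CHANNEL_MIX_LERP_R`/`CHANNEL_MIX_RECEPTANCE`, consistent with the RWKV-7 feed-forward dropping its receptance gate. A rough single-token sketch of such a squared-ReLU channel mix (tensor roles inferred from their names; not code from this commit):

```python
import numpy as np

def channel_mix_v7(x, x_prev, x_k, w_key, w_value):
    # Token shift: per-channel lerp between this token and the previous one,
    # controlled by x_k (model.blocks.{bid}.feed_forward.x_k above).
    k = x + (x_prev - x) * x_k
    # Squared-ReLU MLP; note there is no receptance/gate path here, matching
    # the absence of v7 entries under the receptance mappings above.
    a = np.maximum(w_key @ k, 0.0) ** 2
    return w_value @ a

d, hidden = 8, 32
rng = np.random.default_rng(0)
out = channel_mix_v7(
    x=rng.standard_normal(d),
    x_prev=rng.standard_normal(d),
    x_k=rng.random(d),                         # feed_forward.x_k
    w_key=rng.standard_normal((hidden, d)),    # feed_forward.key
    w_value=rng.standard_normal((d, hidden)),  # feed_forward.value
)
assert out.shape == (d,)
```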