llama : support InternLM2 (#5184)

* support InternLM2 inference
* add add_space_prefix KV pair
Guoteng, 2024-02-01 17:19:51 +08:00, committed by GitHub
parent 1cfb5372cf
commit ce32060198
5 changed files with 387 additions and 5 deletions
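
Only the tensor-name portion of the change is reproduced below. The add_space_prefix KV pair mentioned in the commit message records whether the model's SentencePiece tokenizer prepends a dummy space, so inference can reproduce the same tokenization. A minimal sketch of that idea, assuming the GGUF key is named tokenizer.ggml.add_space_prefix and using a plain dict as a stand-in for the GGUF metadata writer (the helper name is illustrative, not the convert-script API):

# Sketch: read the SentencePiece model shipped with an InternLM2 checkpoint and
# report whether its normalizer adds a dummy leading space. The key name and the
# plain dict below are illustrative stand-ins for the GGUF metadata writer.
from sentencepiece import sentencepiece_model_pb2 as sp_model

def tokenizer_kv_pairs(tokenizer_model_path: str) -> dict:
    proto = sp_model.ModelProto()
    with open(tokenizer_model_path, "rb") as f:
        proto.ParseFromString(f.read())
    # normalizer_spec.add_dummy_prefix is the flag SentencePiece uses to decide
    # whether a space is prepended before encoding.
    return {"tokenizer.ggml.add_space_prefix": bool(proto.normalizer_spec.add_dummy_prefix)}

# e.g. tokenizer_kv_pairs("tokenizer.model")

During conversion such a flag would be stored alongside the other tokenizer KVs and read back by the runtime before tokenizing prompts.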

gguf-py/gguf/tensor_mapping.py
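
The hunks below teach gguf-py's TensorNameMap the tensor names used in InternLM2 checkpoints, mapping them onto the same GGUF tensors as the other llama-style architectures. As a rough illustration of how such {bid}-templated patterns resolve a checkpoint name to a GGUF name, here is a self-contained sketch; the table is only a small excerpt and resolve_name is a hypothetical helper, not the gguf-py API (the real class pre-expands the templates for every block index and also tries .weight/.bias suffixes):

# Sketch: resolve an InternLM2 checkpoint tensor name to its GGUF counterpart
# using {bid}-templated patterns like the ones added in this diff. The GGUF-side
# names follow the usual token_embd / blk.{bid}.* convention.
from __future__ import annotations

INTERNLM2_TO_GGUF = {
    "model.tok_embeddings":               "token_embd",
    "model.layers.{bid}.attention_norm":  "blk.{bid}.attn_norm",
    "model.layers.{bid}.attention.wq":    "blk.{bid}.attn_q",
    "model.layers.{bid}.attention.wo":    "blk.{bid}.attn_output",
    "model.layers.{bid}.feed_forward.w2": "blk.{bid}.ffn_down",
    "output":                             "output",
}

def resolve_name(name: str, n_blocks: int) -> str | None:
    """Return the GGUF tensor name for a checkpoint tensor, or None if unmapped."""
    for src_tmpl, dst_tmpl in INTERNLM2_TO_GGUF.items():
        for bid in range(n_blocks):
            if name == src_tmpl.format(bid=bid):
                return dst_tmpl.format(bid=bid)
    return None

assert resolve_name("model.layers.3.attention.wq", n_blocks=32) == "blk.3.attn_q"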

@@ -19,6 +19,7 @@ class TensorNameMap:
             "language_model.embedding.word_embeddings", # persimmon
             "wte", # gpt2
             "transformer.embd.wte", # phi2
+            "model.tok_embeddings", # internlm2
         ),

         # Token type embeddings
@@ -42,7 +43,7 @@ class TensorNameMap:
         MODEL_TENSOR.OUTPUT: (
             "embed_out", # gptneox
             "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen
-            "output", # llama-pth bloom
+            "output", # llama-pth bloom internlm2
             "word_embeddings_for_head", # persimmon
             "lm_head.linear", # phi2
         ),
@@ -51,7 +52,7 @@ class TensorNameMap:
         MODEL_TENSOR.OUTPUT_NORM: (
             "gpt_neox.final_layer_norm", # gptneox
             "transformer.ln_f", # gpt2 gpt-j falcon
-            "model.norm", # llama-hf baichuan
+            "model.norm", # llama-hf baichuan internlm2
             "norm", # llama-pth
             "embeddings.LayerNorm", # bert
             "transformer.norm_f", # mpt
@@ -84,6 +85,7 @@ class TensorNameMap:
             "h.{bid}.ln_1", # gpt2
             "transformer.h.{bid}.ln", # phi2
             "model.layers.layers.{bid}.norm", # plamo
+            "model.layers.{bid}.attention_norm", # internlm2
         ),

         # Attention norm 2
@@ -111,6 +113,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.self.query", # bert
             "transformer.h.{bid}.attn.q_proj", # gpt-j
             "model.layers.layers.{bid}.self_attn.q_proj", # plamo
+            "model.layers.{bid}.attention.wq" # internlm2
         ),

         # Attention key
@@ -120,6 +123,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.self.key", # bert
             "transformer.h.{bid}.attn.k_proj", # gpt-j
             "model.layers.layers.{bid}.self_attn.k_proj", # plamo
+            "model.layers.{bid}.attention.wk" # internlm2
         ),

         # Attention value
@@ -129,6 +133,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.self.value", # bert
             "transformer.h.{bid}.attn.v_proj", # gpt-j
             "model.layers.layers.{bid}.self_attn.v_proj", # plamo
+            "model.layers.{bid}.attention.wv" # internlm2
         ),

         # Attention output
@@ -147,6 +152,7 @@ class TensorNameMap:
             "h.{bid}.attn.c_proj", # gpt2
             "transformer.h.{bid}.mixer.out_proj", # phi2
             "model.layers.layers.{bid}.self_attn.o_proj", # plamo
+            "model.layers.{bid}.attention.wo", # internlm2
         ),

         # Rotary embeddings
@@ -169,6 +175,7 @@ class TensorNameMap:
             "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
             "model.layers.{bid}.ln2", # yi
             "h.{bid}.ln_2", # gpt2
+            "model.layers.{bid}.ffn_norm", # internlm2
         ),

         MODEL_TENSOR.FFN_GATE_INP: (
@@ -194,6 +201,7 @@ class TensorNameMap:
             "transformer.h.{bid}.mlp.fc1", # phi2
             "model.layers.{bid}.mlp.fc1", # phi2
             "model.layers.layers.{bid}.mlp.up_proj", # plamo
+            "model.layers.{bid}.feed_forward.w3", # internlm2
         ),

         MODEL_TENSOR.FFN_UP_EXP: (
@@ -212,6 +220,7 @@ class TensorNameMap:
             "layers.{bid}.feed_forward.w1", # llama-pth
             "transformer.h.{bid}.mlp.w2", # qwen
             "model.layers.layers.{bid}.mlp.gate_proj", # plamo
+            "model.layers.{bid}.feed_forward.w1", # internlm2
         ),

         MODEL_TENSOR.FFN_GATE_EXP: (
@@ -236,6 +245,7 @@ class TensorNameMap:
             "transformer.h.{bid}.mlp.fc2", # phi2
             "model.layers.{bid}.mlp.fc2", # phi2
             "model.layers.layers.{bid}.mlp.down_proj", # plamo
+            "model.layers.{bid}.feed_forward.w2", # internlm2
         ),

         MODEL_TENSOR.FFN_DOWN_EXP: (