Merge branch 'master' into openelm
commit 51b2577dd4

628 changed files with 205721 additions and 125563 deletions
@@ -33,17 +33,25 @@ class Keys:
         FILE_TYPE = "general.file_type"
 
     class LLM:
         VOCAB_SIZE = "{arch}.vocab_size"
         CONTEXT_LENGTH = "{arch}.context_length"
         EMBEDDING_LENGTH = "{arch}.embedding_length"
         BLOCK_COUNT = "{arch}.block_count"
+        LEADING_DENSE_BLOCK_COUNT = "{arch}.leading_dense_block_count"
         FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
+        EXPERT_FEED_FORWARD_LENGTH = "{arch}.expert_feed_forward_length"
+        EXPERT_SHARED_FEED_FORWARD_LENGTH = "{arch}.expert_shared_feed_forward_length"
         USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
         TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
         EXPERT_COUNT = "{arch}.expert_count"
         EXPERT_USED_COUNT = "{arch}.expert_used_count"
+        EXPERT_SHARED_COUNT = "{arch}.expert_shared_count"
+        EXPERT_WEIGHTS_SCALE = "{arch}.expert_weights_scale"
         POOLING_TYPE = "{arch}.pooling_type"
         LOGIT_SCALE = "{arch}.logit_scale"
+        DECODER_START_TOKEN_ID = "{arch}.decoder_start_token_id"
+        ATTN_LOGIT_SOFTCAPPING = "{arch}.attn_logit_softcapping"
+        FINAL_LOGIT_SOFTCAPPING = "{arch}.final_logit_softcapping"
 
     class Attention:
         HEAD_COUNT = "{arch}.attention.head_count"
@@ -55,14 +63,24 @@ class Keys:
         LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon"
         LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
         CAUSAL = "{arch}.attention.causal"
+        Q_LORA_RANK = "{arch}.attention.q_lora_rank"
+        KV_LORA_RANK = "{arch}.attention.kv_lora_rank"
+        REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count"
 
     class Rope:
         DIMENSION_COUNT = "{arch}.rope.dimension_count"
         FREQ_BASE = "{arch}.rope.freq_base"
         SCALING_TYPE = "{arch}.rope.scaling.type"
         SCALING_FACTOR = "{arch}.rope.scaling.factor"
+        SCALING_ATTN_FACTOR = "{arch}.rope.scaling.attn_factor"
         SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length"
         SCALING_FINETUNED = "{arch}.rope.scaling.finetuned"
+        SCALING_YARN_LOG_MUL = "{arch}.rope.scaling.yarn_log_multiplier"
+
+    class Split:
+        LLM_KV_SPLIT_NO = "split.no"
+        LLM_KV_SPLIT_COUNT = "split.count"
+        LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count"
 
     class SSM:
         CONV_KERNEL = "{arch}.ssm.conv_kernel"
@@ -71,33 +89,35 @@ class Keys:
         TIME_STEP_RANK = "{arch}.ssm.time_step_rank"
 
     class Tokenizer:
         MODEL = "tokenizer.ggml.model"
         PRE = "tokenizer.ggml.pre"
         LIST = "tokenizer.ggml.tokens"
         TOKEN_TYPE = "tokenizer.ggml.token_type"
         TOKEN_TYPE_COUNT = "tokenizer.ggml.token_type_count"  # for BERT-style token types
         SCORES = "tokenizer.ggml.scores"
         MERGES = "tokenizer.ggml.merges"
         BOS_ID = "tokenizer.ggml.bos_token_id"
         EOS_ID = "tokenizer.ggml.eos_token_id"
         UNK_ID = "tokenizer.ggml.unknown_token_id"
         SEP_ID = "tokenizer.ggml.seperator_token_id"
         PAD_ID = "tokenizer.ggml.padding_token_id"
         CLS_ID = "tokenizer.ggml.cls_token_id"
         MASK_ID = "tokenizer.ggml.mask_token_id"
         ADD_BOS = "tokenizer.ggml.add_bos_token"
         ADD_EOS = "tokenizer.ggml.add_eos_token"
         ADD_PREFIX = "tokenizer.ggml.add_space_prefix"
+        REMOVE_EXTRA_WS = "tokenizer.ggml.remove_extra_whitespaces"
+        PRECOMPILED_CHARSMAP = "tokenizer.ggml.precompiled_charsmap"
         HF_JSON = "tokenizer.huggingface.json"
         RWKV = "tokenizer.rwkv.world"
         CHAT_TEMPLATE = "tokenizer.chat_template"
         CHAT_TEMPLATE_N = "tokenizer.chat_template.{name}"
         CHAT_TEMPLATES = "tokenizer.chat_templates"
         # FIM/Infill special tokens constants
         PREFIX_ID = "tokenizer.ggml.prefix_token_id"
         SUFFIX_ID = "tokenizer.ggml.suffix_token_id"
         MIDDLE_ID = "tokenizer.ggml.middle_token_id"
         EOT_ID = "tokenizer.ggml.eot_token_id"
 
 
 #
@@ -106,82 +126,128 @@ class Keys:
 
 
 class MODEL_ARCH(IntEnum):
     LLAMA = auto()
     FALCON = auto()
     BAICHUAN = auto()
     GROK = auto()
     GPT2 = auto()
     GPTJ = auto()
     GPTNEOX = auto()
     MPT = auto()
     STARCODER = auto()
-    PERSIMMON = auto()
     REFACT = auto()
     BERT = auto()
     NOMIC_BERT = auto()
+    JINA_BERT_V2 = auto()
     BLOOM = auto()
     STABLELM = auto()
     QWEN = auto()
     QWEN2 = auto()
     QWEN2MOE = auto()
     PHI2 = auto()
     PHI3 = auto()
     PLAMO = auto()
     CODESHELL = auto()
     ORION = auto()
     INTERNLM2 = auto()
     MINICPM = auto()
     GEMMA = auto()
+    GEMMA2 = auto()
     STARCODER2 = auto()
     MAMBA = auto()
     XVERSE = auto()
     COMMAND_R = auto()
     DBRX = auto()
     OLMO = auto()
     OPENELM = auto()
+    ARCTIC = auto()
+    DEEPSEEK2 = auto()
+    BITNET = auto()
+    T5 = auto()
 
 
 class MODEL_TENSOR(IntEnum):
     TOKEN_EMBD = auto()
     TOKEN_EMBD_NORM = auto()
     TOKEN_TYPES = auto()
     POS_EMBD = auto()
     OUTPUT = auto()
     OUTPUT_NORM = auto()
     ROPE_FREQS = auto()
+    ROPE_FACTORS_LONG = auto()
+    ROPE_FACTORS_SHORT = auto()
     ATTN_Q = auto()
     ATTN_K = auto()
     ATTN_V = auto()
     ATTN_QKV = auto()
     ATTN_OUT = auto()
     ATTN_NORM = auto()
     ATTN_NORM_2 = auto()
     ATTN_OUT_NORM = auto()
+    ATTN_POST_NORM = auto()
     ATTN_ROT_EMBD = auto()
     FFN_GATE_INP = auto()
     FFN_GATE_INP_SHEXP = auto()
     FFN_NORM = auto()
+    FFN_PRE_NORM = auto()
+    FFN_POST_NORM = auto()
     FFN_GATE = auto()
     FFN_DOWN = auto()
     FFN_UP = auto()
     FFN_ACT = auto()
+    FFN_NORM_EXP = auto()
     FFN_GATE_EXP = auto()
     FFN_DOWN_EXP = auto()
     FFN_UP_EXP = auto()
     FFN_GATE_SHEXP = auto()
     FFN_DOWN_SHEXP = auto()
     FFN_UP_SHEXP = auto()
     ATTN_Q_NORM = auto()
     ATTN_K_NORM = auto()
     LAYER_OUT_NORM = auto()
     SSM_IN = auto()
     SSM_CONV1D = auto()
     SSM_X = auto()
     SSM_DT = auto()
     SSM_A = auto()
     SSM_D = auto()
     SSM_OUT = auto()
+    ATTN_Q_A = auto()
+    ATTN_Q_B = auto()
+    ATTN_KV_A_MQA = auto()
+    ATTN_KV_B = auto()
+    ATTN_Q_A_NORM = auto()
+    ATTN_KV_A_NORM = auto()
+    FFN_SUB_NORM = auto()
+    ATTN_SUB_NORM = auto()
+    DEC_ATTN_NORM = auto()
+    DEC_ATTN_Q = auto()
+    DEC_ATTN_K = auto()
+    DEC_ATTN_V = auto()
+    DEC_ATTN_OUT = auto()
+    DEC_ATTN_REL_B = auto()
+    DEC_CROSS_ATTN_NORM = auto()
+    DEC_CROSS_ATTN_Q = auto()
+    DEC_CROSS_ATTN_K = auto()
+    DEC_CROSS_ATTN_V = auto()
+    DEC_CROSS_ATTN_OUT = auto()
+    DEC_CROSS_ATTN_REL_B = auto()
+    DEC_FFN_NORM = auto()
+    DEC_FFN_GATE = auto()
+    DEC_FFN_DOWN = auto()
+    DEC_FFN_UP = auto()
+    DEC_OUTPUT_NORM = auto()
+    ENC_ATTN_NORM = auto()
+    ENC_ATTN_Q = auto()
+    ENC_ATTN_K = auto()
+    ENC_ATTN_V = auto()
+    ENC_ATTN_OUT = auto()
+    ENC_ATTN_REL_B = auto()
+    ENC_FFN_NORM = auto()
+    ENC_FFN_GATE = auto()
+    ENC_FFN_DOWN = auto()
+    ENC_FFN_UP = auto()
+    ENC_OUTPUT_NORM = auto()
 
 
 MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
@@ -194,7 +260,6 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.GPTNEOX: "gptneox",
     MODEL_ARCH.MPT: "mpt",
     MODEL_ARCH.STARCODER: "starcoder",
-    MODEL_ARCH.PERSIMMON: "persimmon",
     MODEL_ARCH.REFACT: "refact",
     MODEL_ARCH.BERT: "bert",
     MODEL_ARCH.NOMIC_BERT: "nomic-bert",
@@ -212,6 +277,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.INTERNLM2: "internlm2",
     MODEL_ARCH.MINICPM: "minicpm",
     MODEL_ARCH.GEMMA: "gemma",
+    MODEL_ARCH.GEMMA2: "gemma2",
     MODEL_ARCH.STARCODER2: "starcoder2",
     MODEL_ARCH.MAMBA: "mamba",
     MODEL_ARCH.XVERSE: "xverse",
@ -219,48 +285,94 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
|
|||
MODEL_ARCH.DBRX: "dbrx",
|
||||
MODEL_ARCH.OLMO: "olmo",
|
||||
MODEL_ARCH.OPENELM: "openelm",
|
||||
MODEL_ARCH.ARCTIC: "arctic",
|
||||
MODEL_ARCH.DEEPSEEK2: "deepseek2",
|
||||
MODEL_ARCH.BITNET: "bitnet",
|
||||
MODEL_ARCH.T5: "t5",
|
||||
}
|
||||
|
||||
TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
|
||||
MODEL_TENSOR.TOKEN_EMBD: "token_embd",
|
||||
MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm",
|
||||
MODEL_TENSOR.TOKEN_TYPES: "token_types",
|
||||
MODEL_TENSOR.POS_EMBD: "position_embd",
|
||||
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
||||
MODEL_TENSOR.OUTPUT: "output",
|
||||
MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
|
||||
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
||||
MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
|
||||
MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
|
||||
MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
|
||||
MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
|
||||
MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
|
||||
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
||||
MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
|
||||
MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm",
|
||||
MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm",
|
||||
MODEL_TENSOR.ATTN_OUT_NORM: "blk.{bid}.attn_output_norm",
|
||||
MODEL_TENSOR.FFN_GATE_INP: "blk.{bid}.ffn_gate_inp",
|
||||
MODEL_TENSOR.FFN_GATE_INP_SHEXP: "blk.{bid}.ffn_gate_inp_shexp",
|
||||
MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
|
||||
MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
|
||||
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
||||
MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
|
||||
MODEL_TENSOR.FFN_GATE_SHEXP: "blk.{bid}.ffn_gate_shexp",
|
||||
MODEL_TENSOR.FFN_DOWN_SHEXP: "blk.{bid}.ffn_down_shexp",
|
||||
MODEL_TENSOR.FFN_UP_SHEXP: "blk.{bid}.ffn_up_shexp",
|
||||
MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn",
|
||||
MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate_exps",
|
||||
MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down_exps",
|
||||
MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps",
|
||||
MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm",
|
||||
MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in",
|
||||
MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d",
|
||||
MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x",
|
||||
MODEL_TENSOR.SSM_DT: "blk.{bid}.ssm_dt",
|
||||
MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a",
|
||||
MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d",
|
||||
MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out",
|
||||
MODEL_TENSOR.TOKEN_EMBD: "token_embd",
|
||||
MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm",
|
||||
MODEL_TENSOR.TOKEN_TYPES: "token_types",
|
||||
MODEL_TENSOR.POS_EMBD: "position_embd",
|
||||
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
||||
MODEL_TENSOR.OUTPUT: "output",
|
||||
MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
|
||||
MODEL_TENSOR.ROPE_FACTORS_LONG: "rope_factors_long",
|
||||
MODEL_TENSOR.ROPE_FACTORS_SHORT: "rope_factors_short",
|
||||
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
||||
MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
|
||||
MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
|
||||
MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
|
||||
MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
|
||||
MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
|
||||
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
||||
MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
|
||||
MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm",
|
||||
MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm",
|
||||
MODEL_TENSOR.ATTN_OUT_NORM: "blk.{bid}.attn_output_norm",
|
||||
MODEL_TENSOR.ATTN_POST_NORM: "blk.{bid}.post_attention_norm",
|
||||
MODEL_TENSOR.FFN_GATE_INP: "blk.{bid}.ffn_gate_inp",
|
||||
MODEL_TENSOR.FFN_GATE_INP_SHEXP: "blk.{bid}.ffn_gate_inp_shexp",
|
||||
MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
|
||||
MODEL_TENSOR.FFN_PRE_NORM: "blk.{bid}.ffn_norm",
|
||||
MODEL_TENSOR.FFN_POST_NORM: "blk.{bid}.post_ffw_norm",
|
||||
MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
|
||||
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
||||
MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
|
||||
MODEL_TENSOR.FFN_GATE_SHEXP: "blk.{bid}.ffn_gate_shexp",
|
||||
MODEL_TENSOR.FFN_DOWN_SHEXP: "blk.{bid}.ffn_down_shexp",
|
||||
MODEL_TENSOR.FFN_UP_SHEXP: "blk.{bid}.ffn_up_shexp",
|
||||
MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn",
|
||||
MODEL_TENSOR.FFN_NORM_EXP: "blk.{bid}.ffn_norm_exps",
|
||||
MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate_exps",
|
||||
MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down_exps",
|
||||
MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps",
|
||||
MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm",
|
||||
MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in",
|
||||
MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d",
|
||||
MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x",
|
||||
MODEL_TENSOR.SSM_DT: "blk.{bid}.ssm_dt",
|
||||
MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a",
|
||||
MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d",
|
||||
MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out",
|
||||
MODEL_TENSOR.ATTN_Q_A: "blk.{bid}.attn_q_a",
|
||||
MODEL_TENSOR.ATTN_Q_B: "blk.{bid}.attn_q_b",
|
||||
MODEL_TENSOR.ATTN_KV_A_MQA: "blk.{bid}.attn_kv_a_mqa",
|
||||
MODEL_TENSOR.ATTN_KV_B: "blk.{bid}.attn_kv_b",
|
||||
MODEL_TENSOR.ATTN_Q_A_NORM: "blk.{bid}.attn_q_a_norm",
|
||||
MODEL_TENSOR.ATTN_KV_A_NORM: "blk.{bid}.attn_kv_a_norm",
|
||||
MODEL_TENSOR.ATTN_SUB_NORM: "blk.{bid}.attn_sub_norm",
|
||||
MODEL_TENSOR.FFN_SUB_NORM: "blk.{bid}.ffn_sub_norm",
|
||||
MODEL_TENSOR.DEC_ATTN_NORM: "dec.blk.{bid}.attn_norm",
|
||||
MODEL_TENSOR.DEC_ATTN_Q: "dec.blk.{bid}.attn_q",
|
||||
MODEL_TENSOR.DEC_ATTN_K: "dec.blk.{bid}.attn_k",
|
||||
MODEL_TENSOR.DEC_ATTN_V: "dec.blk.{bid}.attn_v",
|
||||
MODEL_TENSOR.DEC_ATTN_OUT: "dec.blk.{bid}.attn_o",
|
||||
MODEL_TENSOR.DEC_ATTN_REL_B: "dec.blk.{bid}.attn_rel_b",
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_NORM: "dec.blk.{bid}.cross_attn_norm",
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_Q: "dec.blk.{bid}.cross_attn_q",
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_K: "dec.blk.{bid}.cross_attn_k",
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_V: "dec.blk.{bid}.cross_attn_v",
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_OUT: "dec.blk.{bid}.cross_attn_o",
|
||||
MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: "dec.blk.{bid}.cross_attn_rel_b",
|
||||
MODEL_TENSOR.DEC_FFN_NORM: "dec.blk.{bid}.ffn_norm",
|
||||
MODEL_TENSOR.DEC_FFN_GATE: "dec.blk.{bid}.ffn_gate",
|
||||
MODEL_TENSOR.DEC_FFN_DOWN: "dec.blk.{bid}.ffn_down",
|
||||
MODEL_TENSOR.DEC_FFN_UP: "dec.blk.{bid}.ffn_up",
|
||||
MODEL_TENSOR.DEC_OUTPUT_NORM: "dec.output_norm",
|
||||
MODEL_TENSOR.ENC_ATTN_NORM: "enc.blk.{bid}.attn_norm",
|
||||
MODEL_TENSOR.ENC_ATTN_Q: "enc.blk.{bid}.attn_q",
|
||||
MODEL_TENSOR.ENC_ATTN_K: "enc.blk.{bid}.attn_k",
|
||||
MODEL_TENSOR.ENC_ATTN_V: "enc.blk.{bid}.attn_v",
|
||||
MODEL_TENSOR.ENC_ATTN_OUT: "enc.blk.{bid}.attn_o",
|
||||
MODEL_TENSOR.ENC_ATTN_REL_B: "enc.blk.{bid}.attn_rel_b",
|
||||
MODEL_TENSOR.ENC_FFN_NORM: "enc.blk.{bid}.ffn_norm",
|
||||
MODEL_TENSOR.ENC_FFN_GATE: "enc.blk.{bid}.ffn_gate",
|
||||
MODEL_TENSOR.ENC_FFN_DOWN: "enc.blk.{bid}.ffn_down",
|
||||
MODEL_TENSOR.ENC_FFN_UP: "enc.blk.{bid}.ffn_up",
|
||||
MODEL_TENSOR.ENC_OUTPUT_NORM: "enc.output_norm",
|
||||
}
|
||||
|
||||
MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
|
||||
|
@@ -389,6 +501,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.TOKEN_EMBD_NORM,
         MODEL_TENSOR.TOKEN_TYPES,
+        MODEL_TENSOR.ATTN_NORM_2,
         MODEL_TENSOR.ATTN_OUT_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_Q_NORM,
@@ -428,20 +541,6 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
     ],
-    MODEL_ARCH.PERSIMMON: [
-        MODEL_TENSOR.TOKEN_EMBD,
-        MODEL_TENSOR.OUTPUT,
-        MODEL_TENSOR.OUTPUT_NORM,
-        MODEL_TENSOR.ATTN_NORM,
-        MODEL_TENSOR.ATTN_QKV,
-        MODEL_TENSOR.ATTN_OUT,
-        MODEL_TENSOR.FFN_NORM,
-        MODEL_TENSOR.FFN_DOWN,
-        MODEL_TENSOR.FFN_UP,
-        MODEL_TENSOR.ATTN_Q_NORM,
-        MODEL_TENSOR.ATTN_K_NORM,
-        MODEL_TENSOR.ATTN_ROT_EMBD,
-    ],
     MODEL_ARCH.REFACT: [
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
@ -633,6 +732,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
|
|||
],
|
||||
MODEL_ARCH.MINICPM: [
|
||||
MODEL_TENSOR.TOKEN_EMBD,
|
||||
MODEL_TENSOR.OUTPUT,
|
||||
MODEL_TENSOR.OUTPUT_NORM,
|
||||
MODEL_TENSOR.ROPE_FREQS,
|
||||
MODEL_TENSOR.ATTN_NORM,
|
||||
|
@@ -663,6 +763,21 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_UP,
         MODEL_TENSOR.FFN_NORM,
     ],
+    MODEL_ARCH.GEMMA2: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_POST_NORM,
+        MODEL_TENSOR.FFN_PRE_NORM,
+        MODEL_TENSOR.FFN_POST_NORM,
+    ],
     MODEL_ARCH.STARCODER2: [
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
@@ -757,6 +872,101 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_UP,
         MODEL_TENSOR.FFN_DOWN,
     ],
+    MODEL_ARCH.ARCTIC: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+        MODEL_TENSOR.FFN_GATE_INP,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.FFN_NORM_EXP,
+        MODEL_TENSOR.FFN_GATE_EXP,
+        MODEL_TENSOR.FFN_DOWN_EXP,
+        MODEL_TENSOR.FFN_UP_EXP,
+    ],
+    MODEL_ARCH.DEEPSEEK2: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_Q_A,
+        MODEL_TENSOR.ATTN_Q_B,
+        MODEL_TENSOR.ATTN_KV_A_MQA,
+        MODEL_TENSOR.ATTN_KV_B,
+        MODEL_TENSOR.ATTN_Q_A_NORM,
+        MODEL_TENSOR.ATTN_KV_A_NORM,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+        MODEL_TENSOR.FFN_GATE_INP,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.FFN_GATE_EXP,
+        MODEL_TENSOR.FFN_DOWN_EXP,
+        MODEL_TENSOR.FFN_UP_EXP,
+        MODEL_TENSOR.FFN_GATE_SHEXP,
+        MODEL_TENSOR.FFN_DOWN_SHEXP,
+        MODEL_TENSOR.FFN_UP_SHEXP,
+    ],
+    MODEL_ARCH.BITNET: [
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.ATTN_SUB_NORM,
+        MODEL_TENSOR.FFN_SUB_NORM,
+    ],
+    MODEL_ARCH.T5: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.DEC_ATTN_NORM,
+        MODEL_TENSOR.DEC_ATTN_Q,
+        MODEL_TENSOR.DEC_ATTN_K,
+        MODEL_TENSOR.DEC_ATTN_V,
+        MODEL_TENSOR.DEC_ATTN_OUT,
+        MODEL_TENSOR.DEC_ATTN_REL_B,
+        MODEL_TENSOR.DEC_CROSS_ATTN_NORM,
+        MODEL_TENSOR.DEC_CROSS_ATTN_Q,
+        MODEL_TENSOR.DEC_CROSS_ATTN_K,
+        MODEL_TENSOR.DEC_CROSS_ATTN_V,
+        MODEL_TENSOR.DEC_CROSS_ATTN_OUT,
+        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B,
+        MODEL_TENSOR.DEC_FFN_NORM,
+        MODEL_TENSOR.DEC_FFN_GATE,
+        MODEL_TENSOR.DEC_FFN_DOWN,
+        MODEL_TENSOR.DEC_FFN_UP,
+        MODEL_TENSOR.DEC_OUTPUT_NORM,
+        MODEL_TENSOR.ENC_ATTN_NORM,
+        MODEL_TENSOR.ENC_ATTN_Q,
+        MODEL_TENSOR.ENC_ATTN_K,
+        MODEL_TENSOR.ENC_ATTN_V,
+        MODEL_TENSOR.ENC_ATTN_OUT,
+        MODEL_TENSOR.ENC_ATTN_REL_B,
+        MODEL_TENSOR.ENC_FFN_NORM,
+        MODEL_TENSOR.ENC_FFN_GATE,
+        MODEL_TENSOR.ENC_FFN_DOWN,
+        MODEL_TENSOR.ENC_FFN_UP,
+        MODEL_TENSOR.ENC_OUTPUT_NORM,
+    ],
     # TODO
 }
 
@@ -770,9 +980,6 @@ MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_ROT_EMBD,
     ],
-    MODEL_ARCH.PERSIMMON: [
-        MODEL_TENSOR.ROPE_FREQS,
-    ],
     MODEL_ARCH.QWEN: [
         MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_ROT_EMBD,
@@ -793,6 +1000,10 @@ MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_ROT_EMBD,
     ],
+    MODEL_ARCH.DEEPSEEK2: [
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+    ],
 }
 
 #
@@ -933,9 +1144,8 @@ class GGUFValueType(IntEnum):
         raise ValueError(f"Unknown type: {type(val)}")
 
 
-# Note: Does not support GGML_QKK_64
 QK_K = 256
 # Items here are (block size, type size)
 GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
     GGMLQuantizationType.F32: (1, 4),
     GGMLQuantizationType.F16: (1, 2),
|
|
|
@ -12,6 +12,8 @@ from typing import Any, Literal, NamedTuple, TypeVar, Union
|
|||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
|
||||
from .quants import quant_shape_to_byte_shape
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
@ -67,6 +69,7 @@ class GGUFReader:
|
|||
# I - same as host, S - swapped
|
||||
byte_order: Literal['I'] | Literal['S'] = 'I'
|
||||
alignment: int = GGUF_DEFAULT_ALIGNMENT
|
||||
data_offset: int
|
||||
|
||||
# Note: Internal helper, API may change.
|
||||
gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = {
|
||||
|
@ -86,9 +89,13 @@ class GGUFReader:
|
|||
def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'):
|
||||
self.data = np.memmap(path, mode = mode)
|
||||
offs = 0
|
||||
|
||||
# Check for GGUF magic
|
||||
if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC:
|
||||
raise ValueError('GGUF magic invalid')
|
||||
offs += 4
|
||||
|
||||
# Check GGUF version
|
||||
temp_version = self._get(offs, np.uint32)
|
||||
if temp_version[0] & 65535 == 0:
|
||||
# If we get 0 here that means it's (probably) a GGUF file created for
|
||||
|
@ -101,12 +108,16 @@ class GGUFReader:
|
|||
self.fields: OrderedDict[str, ReaderField] = OrderedDict()
|
||||
self.tensors: list[ReaderTensor] = []
|
||||
offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32]))
|
||||
|
||||
# Check tensor count and kv count
|
||||
temp_counts = self._get(offs, np.uint64, 2)
|
||||
offs += self._push_field(ReaderField(offs, 'GGUF.tensor_count', [temp_counts[:1]], [0], [GGUFValueType.UINT64]))
|
||||
offs += self._push_field(ReaderField(offs, 'GGUF.kv_count', [temp_counts[1:]], [0], [GGUFValueType.UINT64]))
|
||||
tensor_count, kv_count = temp_counts
|
||||
offs = self._build_fields(offs, kv_count)
|
||||
offs, tensors_fields = self._build_tensors_fields(offs, tensor_count)
|
||||
|
||||
# Build Tensor Info Fields
|
||||
offs, tensors_fields = self._build_tensor_info(offs, tensor_count)
|
||||
new_align = self.fields.get('general.alignment')
|
||||
if new_align is not None:
|
||||
if new_align.types != [GGUFValueType.UINT32]:
|
||||
|
@ -115,6 +126,7 @@ class GGUFReader:
|
|||
padding = offs % self.alignment
|
||||
if padding != 0:
|
||||
offs += self.alignment - padding
|
||||
self.data_offset = offs
|
||||
self._build_tensors(offs, tensors_fields)
|
||||
|
||||
_DT = TypeVar('_DT', bound = npt.DTypeLike)
|
||||
|
@ -191,18 +203,29 @@ class GGUFReader:
|
|||
# We can't deal with this one.
|
||||
raise ValueError('Unknown/unhandled field type {gtype}')
|
||||
|
||||
def _get_tensor(self, orig_offs: int) -> ReaderField:
|
||||
def _get_tensor_info_field(self, orig_offs: int) -> ReaderField:
|
||||
offs = orig_offs
|
||||
|
||||
# Get Tensor Name
|
||||
name_len, name_data = self._get_str(offs)
|
||||
offs += int(name_len.nbytes + name_data.nbytes)
|
||||
|
||||
# Get Tensor Dimensions Count
|
||||
n_dims = self._get(offs, np.uint32)
|
||||
offs += int(n_dims.nbytes)
|
||||
|
||||
# Get Tensor Dimension Array
|
||||
dims = self._get(offs, np.uint64, n_dims[0])
|
||||
offs += int(dims.nbytes)
|
||||
|
||||
# Get Tensor Encoding Scheme Type
|
||||
raw_dtype = self._get(offs, np.uint32)
|
||||
offs += int(raw_dtype.nbytes)
|
||||
|
||||
# Get Tensor Offset
|
||||
offset_tensor = self._get(offs, np.uint64)
|
||||
offs += int(offset_tensor.nbytes)
|
||||
|
||||
return ReaderField(
|
||||
orig_offs,
|
||||
str(bytes(name_data), encoding = 'utf-8'),
|
||||
|
@ -231,10 +254,10 @@ class GGUFReader:
|
|||
offs += field_size
|
||||
return offs
|
||||
|
||||
def _build_tensors_fields(self, offs: int, count: int) -> tuple[int, list[ReaderField]]:
|
||||
def _build_tensor_info(self, offs: int, count: int) -> tuple[int, list[ReaderField]]:
|
||||
tensor_fields = []
|
||||
for _ in range(count):
|
||||
field = self._get_tensor(offs)
|
||||
field = self._get_tensor_info_field(offs)
|
||||
offs += sum(int(part.nbytes) for part in field.parts)
|
||||
tensor_fields.append(field)
|
||||
return offs, tensor_fields
|
||||
|
@ -251,6 +274,7 @@ class GGUFReader:
|
|||
tensor_names.add(tensor_name)
|
||||
ggml_type = GGMLQuantizationType(raw_dtype[0])
|
||||
n_elems = int(np.prod(dims))
|
||||
np_dims = tuple(reversed(dims.tolist()))
|
||||
block_size, type_size = GGML_QUANT_SIZES[ggml_type]
|
||||
n_bytes = n_elems * type_size // block_size
|
||||
data_offs = int(start_offs + offset_tensor[0])
|
||||
|
@ -279,6 +303,7 @@ class GGUFReader:
|
|||
else:
|
||||
item_count = n_bytes
|
||||
item_type = np.uint8
|
||||
np_dims = quant_shape_to_byte_shape(np_dims, ggml_type)
|
||||
tensors.append(ReaderTensor(
|
||||
name = tensor_name,
|
||||
tensor_type = ggml_type,
|
||||
|
@ -286,7 +311,7 @@ class GGUFReader:
|
|||
n_elements = n_elems,
|
||||
n_bytes = n_bytes,
|
||||
data_offset = data_offs,
|
||||
data = self._get(data_offs, item_type, item_count),
|
||||
data = self._get(data_offs, item_type, item_count).reshape(np_dims),
|
||||
field = field,
|
||||
))
|
||||
self.tensors = tensors
|
||||
|
|
|
@ -5,7 +5,9 @@ import os
|
|||
import shutil
|
||||
import struct
|
||||
import tempfile
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum, auto
|
||||
from pathlib import Path
|
||||
from io import BufferedWriter
|
||||
from typing import IO, Any, Sequence, Mapping
|
||||
from string import ascii_letters, digits
|
||||
|
@ -13,7 +15,6 @@ from string import ascii_letters, digits
|
|||
import numpy as np
|
||||
|
||||
from .constants import (
|
||||
GGML_QUANT_SIZES,
|
||||
GGUF_DEFAULT_ALIGNMENT,
|
||||
GGUF_MAGIC,
|
||||
GGUF_VERSION,
|
||||
|
@ -26,20 +27,44 @@ from .constants import (
|
|||
TokenType,
|
||||
)
|
||||
|
||||
from .quants import quant_shape_from_byte_shape
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
SHARD_NAME_FORMAT = "{:s}-{:05d}-of-{:05d}.gguf"
|
||||
|
||||
|
||||
@dataclass
|
||||
class TensorInfo:
|
||||
shape: Sequence[int]
|
||||
dtype: GGMLQuantizationType
|
||||
nbytes: int
|
||||
tensor: np.ndarray[Any, Any] | None = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class GGUFValue:
|
||||
value: Any
|
||||
type: GGUFValueType
|
||||
|
||||
|
||||
class WriterState(Enum):
|
||||
NO_FILE = auto()
|
||||
EMPTY = auto()
|
||||
HEADER = auto()
|
||||
KV_DATA = auto()
|
||||
TI_DATA = auto()
|
||||
WEIGHTS = auto()
|
||||
|
||||
|
||||
class GGUFWriter:
|
||||
fout: BufferedWriter
|
||||
fout: list[BufferedWriter] | None
|
||||
path: Path | None
|
||||
temp_file: tempfile.SpooledTemporaryFile[bytes] | None
|
||||
tensors: list[np.ndarray[Any, Any]]
|
||||
tensors: list[dict[str, TensorInfo]]
|
||||
kv_data: list[dict[str, GGUFValue]]
|
||||
state: WriterState
|
||||
_simple_value_packing = {
|
||||
GGUFValueType.UINT8: "B",
|
||||
GGUFValueType.INT8: "b",
|
||||
|
@ -55,141 +80,188 @@ class GGUFWriter:
|
|||
}
|
||||
|
||||
def __init__(
|
||||
self, path: os.PathLike[str] | str, arch: str, use_temp_file: bool = True,
|
||||
endianess: GGUFEndian = GGUFEndian.LITTLE,
|
||||
self, path: os.PathLike[str] | str | None, arch: str, use_temp_file: bool = False, endianess: GGUFEndian = GGUFEndian.LITTLE,
|
||||
split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False
|
||||
):
|
||||
self.fout = open(path, "wb")
|
||||
self.fout = None
|
||||
self.path = Path(path) if path else None
|
||||
self.arch = arch
|
||||
self.endianess = endianess
|
||||
self.offset_tensor = 0
|
||||
self.data_alignment = GGUF_DEFAULT_ALIGNMENT
|
||||
self.kv_data = bytearray()
|
||||
self.kv_data_count = 0
|
||||
self.ti_data = bytearray()
|
||||
self.ti_data_count = 0
|
||||
self.ti_names = set()
|
||||
self.use_temp_file = use_temp_file
|
||||
self.temp_file = None
|
||||
self.tensors = []
|
||||
self.tensors = [{}]
|
||||
self.kv_data = [{}]
|
||||
self.split_max_tensors = split_max_tensors
|
||||
self.split_max_size = split_max_size
|
||||
self.dry_run = dry_run
|
||||
self.small_first_shard = small_first_shard
|
||||
logger.info("gguf: This GGUF file is for {0} Endian only".format(
|
||||
"Big" if self.endianess == GGUFEndian.BIG else "Little",
|
||||
))
|
||||
self.state = WriterState.EMPTY
|
||||
self.state = WriterState.NO_FILE
|
||||
|
||||
if self.small_first_shard:
|
||||
self.tensors.append({})
|
||||
|
||||
self.add_architecture()
|
||||
|
||||
def write_header_to_file(self) -> None:
|
||||
def format_shard_names(self, path: Path) -> list[Path]:
|
||||
if len(self.tensors) == 1:
|
||||
return [path]
|
||||
return [path.with_name(SHARD_NAME_FORMAT.format(path.stem, i + 1, len(self.tensors))) for i in range(len(self.tensors))]
|
||||
|
||||
def open_output_file(self, path: Path | None = None) -> None:
|
||||
if self.state is WriterState.EMPTY and self.fout is not None and (path is None or path == self.path):
|
||||
# allow calling this multiple times as long as the path is the same
|
||||
return
|
||||
if self.state is not WriterState.NO_FILE:
|
||||
raise ValueError(f'Expected output file to be not yet opened, got {self.state}')
|
||||
|
||||
if path is not None:
|
||||
self.path = path
|
||||
|
||||
if self.path is not None:
|
||||
filenames = self.print_plan()
|
||||
self.fout = [open(filename, "wb") for filename in filenames]
|
||||
self.state = WriterState.EMPTY
|
||||
|
||||
def print_plan(self) -> list[Path]:
|
||||
logger.info("Writing the following files:")
|
||||
assert self.path is not None
|
||||
filenames = self.format_shard_names(self.path)
|
||||
assert len(filenames) == len(self.tensors)
|
||||
for name, tensors in zip(filenames, self.tensors):
|
||||
logger.info(f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(sum(ti.nbytes for ti in tensors.values()))}")
|
||||
|
||||
if self.dry_run:
|
||||
logger.info("Dry run, not writing files")
|
||||
exit()
|
||||
|
||||
return filenames
|
||||
|
||||
def add_shard_kv_data(self) -> None:
|
||||
if len(self.tensors) == 1:
|
||||
return
|
||||
|
||||
total_tensors = sum(len(t) for t in self.tensors)
|
||||
assert self.fout is not None
|
||||
total_splits = len(self.fout)
|
||||
self.kv_data.extend({} for _ in range(len(self.kv_data), total_splits))
|
||||
for i, kv_data in enumerate(self.kv_data):
|
||||
kv_data[Keys.Split.LLM_KV_SPLIT_NO] = GGUFValue(i, GGUFValueType.UINT16)
|
||||
kv_data[Keys.Split.LLM_KV_SPLIT_COUNT] = GGUFValue(total_splits, GGUFValueType.UINT16)
|
||||
kv_data[Keys.Split.LLM_KV_SPLIT_TENSORS_COUNT] = GGUFValue(total_tensors, GGUFValueType.INT32)
|
||||
|
||||
def write_header_to_file(self, path: Path | None = None) -> None:
|
||||
if len(self.tensors) == 1 and (self.split_max_tensors != 0 or self.split_max_size != 0):
|
||||
logger.warning("Model fails split requirements, not splitting")
|
||||
|
||||
self.open_output_file(path)
|
||||
|
||||
if self.state is not WriterState.EMPTY:
|
||||
raise ValueError(f'Expected output file to be empty, got {self.state}')
|
||||
|
||||
self._write_packed("<I", GGUF_MAGIC, skip_pack_prefix = True)
|
||||
self._write_packed("I", GGUF_VERSION)
|
||||
self._write_packed("Q", self.ti_data_count)
|
||||
self._write_packed("Q", self.kv_data_count)
|
||||
self.flush()
|
||||
assert self.fout is not None
|
||||
assert len(self.fout) == len(self.tensors)
|
||||
assert len(self.kv_data) == 1
|
||||
|
||||
self.add_shard_kv_data()
|
||||
|
||||
for fout, tensors, kv_data in zip(self.fout, self.tensors, self.kv_data):
|
||||
fout.write(self._pack("<I", GGUF_MAGIC, skip_pack_prefix = True))
|
||||
fout.write(self._pack("I", GGUF_VERSION))
|
||||
fout.write(self._pack("Q", len(tensors)))
|
||||
fout.write(self._pack("Q", len(kv_data)))
|
||||
fout.flush()
|
||||
self.state = WriterState.HEADER
|
||||
|
||||
def write_kv_data_to_file(self) -> None:
|
||||
if self.state is not WriterState.HEADER:
|
||||
raise ValueError(f'Expected output file to contain the header, got {self.state}')
|
||||
assert self.fout is not None
|
||||
|
||||
for fout, kv_data in zip(self.fout, self.kv_data):
|
||||
kv_bytes = bytearray()
|
||||
|
||||
for key, val in kv_data.items():
|
||||
kv_bytes += self._pack_val(key, GGUFValueType.STRING, add_vtype=False)
|
||||
kv_bytes += self._pack_val(val.value, val.type, add_vtype=True)
|
||||
|
||||
fout.write(kv_bytes)
|
||||
|
||||
self.fout.write(self.kv_data)
|
||||
self.flush()
|
||||
self.state = WriterState.KV_DATA
|
||||
|
||||
def write_ti_data_to_file(self) -> None:
|
||||
if self.state is not WriterState.KV_DATA:
|
||||
raise ValueError(f'Expected output file to contain KV data, got {self.state}')
|
||||
assert self.fout is not None
|
||||
|
||||
self.fout.write(self.ti_data)
|
||||
self.flush()
|
||||
for fout, tensors in zip(self.fout, self.tensors):
|
||||
ti_data = bytearray()
|
||||
offset_tensor = 0
|
||||
|
||||
for name, ti in tensors.items():
|
||||
ti_data += self._pack_val(name, GGUFValueType.STRING, add_vtype=False)
|
||||
n_dims = len(ti.shape)
|
||||
ti_data += self._pack("I", n_dims)
|
||||
for j in range(n_dims):
|
||||
ti_data += self._pack("Q", ti.shape[n_dims - 1 - j])
|
||||
ti_data += self._pack("I", ti.dtype)
|
||||
ti_data += self._pack("Q", offset_tensor)
|
||||
offset_tensor += GGUFWriter.ggml_pad(ti.nbytes, self.data_alignment)
|
||||
|
||||
fout.write(ti_data)
|
||||
fout.flush()
|
||||
self.state = WriterState.TI_DATA
|
||||
|
||||
def add_key(self, key: str) -> None:
|
||||
self.add_val(key, GGUFValueType.STRING, add_vtype=False)
|
||||
def add_key_value(self, key: str, val: Any, vtype: GGUFValueType) -> None:
|
||||
if any(key in kv_data for kv_data in self.kv_data):
|
||||
raise ValueError(f'Duplicated key name {key!r}')
|
||||
|
||||
self.kv_data[0][key] = GGUFValue(value=val, type=vtype)
|
||||
|
||||
def add_uint8(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.UINT8)
|
||||
self.add_key_value(key,val, GGUFValueType.UINT8)
|
||||
|
||||
def add_int8(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.INT8)
|
||||
self.add_key_value(key, val, GGUFValueType.INT8)
|
||||
|
||||
def add_uint16(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.UINT16)
|
||||
self.add_key_value(key, val, GGUFValueType.UINT16)
|
||||
|
||||
def add_int16(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.INT16)
|
||||
self.add_key_value(key, val, GGUFValueType.INT16)
|
||||
|
||||
def add_uint32(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.UINT32)
|
||||
self.add_key_value(key, val, GGUFValueType.UINT32)
|
||||
|
||||
def add_int32(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.INT32)
|
||||
self.add_key_value(key, val, GGUFValueType.INT32)
|
||||
|
||||
def add_float32(self, key: str, val: float) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.FLOAT32)
|
||||
self.add_key_value(key, val, GGUFValueType.FLOAT32)
|
||||
|
||||
def add_uint64(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.UINT64)
|
||||
self.add_key_value(key, val, GGUFValueType.UINT64)
|
||||
|
||||
def add_int64(self, key: str, val: int) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.INT64)
|
||||
self.add_key_value(key, val, GGUFValueType.INT64)
|
||||
|
||||
def add_float64(self, key: str, val: float) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.FLOAT64)
|
||||
self.add_key_value(key, val, GGUFValueType.FLOAT64)
|
||||
|
||||
def add_bool(self, key: str, val: bool) -> None:
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.BOOL)
|
||||
self.add_key_value(key, val, GGUFValueType.BOOL)
|
||||
|
||||
def add_string(self, key: str, val: str) -> None:
|
||||
if not val:
|
||||
return
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.STRING)
|
||||
self.add_key_value(key, val, GGUFValueType.STRING)
|
||||
|
||||
def add_array(self, key: str, val: Sequence[Any]) -> None:
|
||||
if not isinstance(val, Sequence):
|
||||
raise ValueError("Value must be a sequence for array type")
|
||||
|
||||
self.add_key(key)
|
||||
self.add_val(val, GGUFValueType.ARRAY)
|
||||
|
||||
def add_val(self, val: Any, vtype: GGUFValueType | None = None, add_vtype: bool = True) -> None:
|
||||
if vtype is None:
|
||||
vtype = GGUFValueType.get_type(val)
|
||||
|
||||
if add_vtype:
|
||||
self.kv_data += self._pack("I", vtype)
|
||||
self.kv_data_count += 1
|
||||
|
||||
pack_fmt = self._simple_value_packing.get(vtype)
|
||||
if pack_fmt is not None:
|
||||
self.kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL)
|
||||
elif vtype == GGUFValueType.STRING:
|
||||
encoded_val = val.encode("utf-8") if isinstance(val, str) else val
|
||||
self.kv_data += self._pack("Q", len(encoded_val))
|
||||
self.kv_data += encoded_val
|
||||
elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val:
|
||||
ltype = GGUFValueType.get_type(val[0])
|
||||
if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]):
|
||||
raise ValueError("All items in a GGUF array should be of the same type")
|
||||
self.kv_data += self._pack("I", ltype)
|
||||
self.kv_data += self._pack("Q", len(val))
|
||||
for item in val:
|
||||
self.add_val(item, add_vtype=False)
|
||||
else:
|
||||
raise ValueError("Invalid GGUF metadata value type or value")
|
||||
self.add_key_value(key, val, GGUFValueType.ARRAY)
|
||||
|
||||
@staticmethod
|
||||
def ggml_pad(x: int, n: int) -> int:
|
||||
|
@ -199,16 +271,12 @@ class GGUFWriter:
|
|||
self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype,
|
||||
tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None,
|
||||
) -> None:
|
||||
if self.state is not WriterState.EMPTY:
|
||||
raise ValueError(f'Expected output file to be empty, got {self.state}')
|
||||
if self.state is not WriterState.NO_FILE:
|
||||
raise ValueError(f'Expected output file to be not yet opened, got {self.state}')
|
||||
|
||||
if name in self.ti_names:
|
||||
raise ValueError(f'Duplicated tensor name {name}')
|
||||
self.ti_names.add(name)
|
||||
if any(name in tensors for tensors in self.tensors):
|
||||
raise ValueError(f'Duplicated tensor name {name!r}')
|
||||
|
||||
encoded_name = name.encode("utf-8")
|
||||
self.ti_data += self._pack("Q", len(encoded_name))
|
||||
self.ti_data += encoded_name
|
||||
if raw_dtype is None:
|
||||
if tensor_dtype == np.float16:
|
||||
dtype = GGMLQuantizationType.F16
|
||||
|
@ -229,18 +297,20 @@ class GGUFWriter:
|
|||
else:
|
||||
dtype = raw_dtype
|
||||
if tensor_dtype == np.uint8:
|
||||
block_size, type_size = GGML_QUANT_SIZES[raw_dtype]
|
||||
if tensor_shape[-1] % type_size != 0:
|
||||
raise ValueError(f"Quantized tensor row size ({tensor_shape[-1]}) is not a multiple of {dtype.name} type size ({type_size})")
|
||||
tensor_shape = tuple(tensor_shape[:-1]) + (tensor_shape[-1] // type_size * block_size,)
|
||||
n_dims = len(tensor_shape)
|
||||
self.ti_data += self._pack("I", n_dims)
|
||||
for i in range(n_dims):
|
||||
self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i])
|
||||
self.ti_data += self._pack("I", dtype)
|
||||
self.ti_data += self._pack("Q", self.offset_tensor)
|
||||
self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment)
|
||||
self.ti_data_count += 1
|
||||
tensor_shape = quant_shape_from_byte_shape(tensor_shape, raw_dtype)
|
||||
|
||||
# make sure there is at least one tensor before splitting
|
||||
if len(self.tensors[-1]) > 0:
|
||||
if ( # split when over tensor limit
|
||||
self.split_max_tensors != 0
|
||||
and len(self.tensors[-1]) >= self.split_max_tensors
|
||||
) or ( # split when over size limit
|
||||
self.split_max_size != 0
|
||||
and sum(ti.nbytes for ti in self.tensors[-1].values()) + tensor_nbytes > self.split_max_size
|
||||
):
|
||||
self.tensors.append({})
|
||||
|
||||
self.tensors[-1][name] = TensorInfo(shape=tensor_shape, dtype=dtype, nbytes=tensor_nbytes)
|
||||
|
||||
def add_tensor(
|
||||
self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None,
|
||||
|
@ -254,10 +324,10 @@ class GGUFWriter:
|
|||
self.temp_file = fp
|
||||
|
||||
shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape
|
||||
self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype)
|
||||
self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype=raw_dtype)
|
||||
|
||||
if self.temp_file is None:
|
||||
self.tensors.append(tensor)
|
||||
self.tensors[-1][name].tensor = tensor
|
||||
return
|
||||
|
||||
tensor.tofile(self.temp_file)
|
||||
|
@ -269,59 +339,90 @@ class GGUFWriter:
|
|||
fp.write(bytes([0] * pad))
|
||||
|
||||
def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
|
||||
if self.state is not WriterState.TI_DATA:
|
||||
raise ValueError(f'Expected output file to contain tensor info, got {self.state}')
|
||||
if self.state is not WriterState.TI_DATA and self.state is not WriterState.WEIGHTS:
|
||||
raise ValueError(f'Expected output file to contain tensor info or weights, got {self.state}')
|
||||
assert self.fout is not None
|
||||
|
||||
if self.endianess == GGUFEndian.BIG:
|
||||
tensor.byteswap(inplace=True)
|
||||
self.write_padding(self.fout, self.fout.tell())
|
||||
tensor.tofile(self.fout)
|
||||
self.write_padding(self.fout, tensor.nbytes)
|
||||
|
||||
file_id = -1
|
||||
for i, tensors in enumerate(self.tensors):
|
||||
if len(tensors) > 0:
|
||||
file_id = i
|
||||
break
|
||||
|
||||
fout = self.fout[file_id]
|
||||
|
||||
# pop the first tensor info
|
||||
# TODO: cleaner way to get the first key
|
||||
first_tensor_name = [name for name, _ in zip(self.tensors[file_id].keys(), range(1))][0]
|
||||
ti = self.tensors[file_id].pop(first_tensor_name)
|
||||
assert ti.nbytes == tensor.nbytes
|
||||
|
||||
self.write_padding(fout, fout.tell())
|
||||
tensor.tofile(fout)
|
||||
self.write_padding(fout, tensor.nbytes)
|
||||
|
||||
self.state = WriterState.WEIGHTS
|
||||
|
||||
def write_tensors_to_file(self, *, progress: bool = False) -> None:
|
||||
self.write_ti_data_to_file()
|
||||
|
||||
self.write_padding(self.fout, self.fout.tell())
|
||||
assert self.fout is not None
|
||||
|
||||
for fout in self.fout:
|
||||
self.write_padding(fout, fout.tell())
|
||||
|
||||
if self.temp_file is None:
|
||||
self.tensors.reverse() # to pop from the "beginning" in constant time
|
||||
shard_bar = None
|
||||
bar = None
|
||||
|
||||
if progress:
|
||||
from tqdm import tqdm
|
||||
|
||||
total_bytes = sum(t.nbytes for t in self.tensors)
|
||||
total_bytes = sum(ti.nbytes for t in self.tensors for ti in t.values())
|
||||
|
||||
if len(self.fout) > 1:
|
||||
shard_bar = tqdm(desc=f"Shard (0/{len(self.fout)})", total=None, unit="byte", unit_scale=True)
|
||||
bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True)
|
||||
|
||||
while True:
|
||||
try:
|
||||
tensor = self.tensors.pop()
|
||||
except IndexError:
|
||||
break
|
||||
tensor.tofile(self.fout)
|
||||
bar.update(tensor.nbytes)
|
||||
self.write_padding(self.fout, tensor.nbytes)
|
||||
return
|
||||
while True:
|
||||
try:
|
||||
tensor = self.tensors.pop()
|
||||
except IndexError:
|
||||
break
|
||||
tensor.tofile(self.fout)
|
||||
self.write_padding(self.fout, tensor.nbytes)
|
||||
return
|
||||
for i, (fout, tensors) in enumerate(zip(self.fout, self.tensors)):
|
||||
if shard_bar is not None:
|
||||
shard_bar.set_description(f"Shard ({i + 1}/{len(self.fout)})")
|
||||
total = sum(ti.nbytes for ti in tensors.values())
|
||||
shard_bar.reset(total=(total if total > 0 else None))
|
||||
|
||||
self.temp_file.seek(0)
|
||||
# relying on the fact that Python dicts preserve insertion order (since 3.7)
|
||||
for ti in tensors.values():
|
||||
assert ti.tensor is not None # can only iterate once over the tensors
|
||||
assert ti.tensor.nbytes == ti.nbytes
|
||||
ti.tensor.tofile(fout)
|
||||
if shard_bar is not None:
|
||||
shard_bar.update(ti.nbytes)
|
||||
if bar is not None:
|
||||
bar.update(ti.nbytes)
|
||||
self.write_padding(fout, ti.nbytes)
|
||||
ti.tensor = None
|
||||
else:
|
||||
self.temp_file.seek(0)
|
||||
|
||||
shutil.copyfileobj(self.temp_file, self.fout)
|
||||
self.flush()
|
||||
self.temp_file.close()
|
||||
shutil.copyfileobj(self.temp_file, self.fout[0 if not self.small_first_shard else 1])
|
||||
self.flush()
|
||||
self.temp_file.close()
|
||||
|
||||
self.state = WriterState.WEIGHTS
|
||||
|
||||
def flush(self) -> None:
|
||||
self.fout.flush()
|
||||
assert self.fout is not None
|
||||
for fout in self.fout:
|
||||
fout.flush()
|
||||
|
||||
def close(self) -> None:
|
||||
self.fout.close()
|
||||
if self.fout is not None:
|
||||
for fout in self.fout:
|
||||
fout.close()
|
||||
self.fout = None
|
||||
|
||||
def add_architecture(self) -> None:
|
||||
self.add_string(Keys.General.ARCHITECTURE, self.arch)
|
||||
|
@ -376,12 +477,24 @@ class GGUFWriter:
|
|||
def add_block_count(self, length: int) -> None:
|
||||
self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length)
|
||||
|
||||
def add_leading_dense_block_count(self, length: int) -> None:
|
||||
self.add_uint32(Keys.LLM.LEADING_DENSE_BLOCK_COUNT.format(arch=self.arch), length)
|
||||
|
||||
def add_feed_forward_length(self, length: int) -> None:
|
||||
self.add_uint32(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length)
|
||||
|
||||
def add_expert_feed_forward_length(self, length: int) -> None:
|
||||
self.add_uint32(Keys.LLM.EXPERT_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
|
||||
|
||||
def add_expert_shared_feed_forward_length(self, length: int) -> None:
|
||||
self.add_uint32(Keys.LLM.EXPERT_SHARED_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
|
||||
|
||||
def add_parallel_residual(self, use: bool) -> None:
|
||||
self.add_bool(Keys.LLM.USE_PARALLEL_RESIDUAL.format(arch=self.arch), use)
|
||||
|
||||
def add_decoder_start_token_id(self, id: int) -> None:
|
||||
self.add_uint32(Keys.LLM.DECODER_START_TOKEN_ID.format(arch=self.arch), id)
|
||||
|
||||
def add_head_count(self, count: int) -> None:
|
||||
self.add_uint32(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count)
|
||||
|
||||
|
@ -403,12 +516,24 @@ class GGUFWriter:
|
|||
def add_logit_scale(self, value: float) -> None:
|
||||
self.add_float32(Keys.LLM.LOGIT_SCALE.format(arch=self.arch), value)
|
||||
|
||||
def add_attn_logit_softcapping(self, value: float) -> None:
|
||||
self.add_float32(Keys.LLM.ATTN_LOGIT_SOFTCAPPING.format(arch=self.arch), value)
|
||||
|
||||
def add_final_logit_softcapping(self, value: float) -> None:
|
||||
self.add_float32(Keys.LLM.FINAL_LOGIT_SOFTCAPPING.format(arch=self.arch), value)
|
||||
|
||||
def add_expert_count(self, count: int) -> None:
|
||||
self.add_uint32(Keys.LLM.EXPERT_COUNT.format(arch=self.arch), count)
|
||||
|
||||
def add_expert_used_count(self, count: int) -> None:
|
||||
self.add_uint32(Keys.LLM.EXPERT_USED_COUNT.format(arch=self.arch), count)
|
||||
|
||||
def add_expert_shared_count(self, count: int) -> None:
|
||||
self.add_uint32(Keys.LLM.EXPERT_SHARED_COUNT.format(arch=self.arch), count)
|
||||
|
||||
def add_expert_weights_scale(self, value: float) -> None:
|
||||
self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value)
|
||||
|
||||
def add_layer_norm_eps(self, value: float) -> None:
|
||||
self.add_float32(Keys.Attention.LAYERNORM_EPS.format(arch=self.arch), value)
|
||||
|
||||
|
@ -418,6 +543,15 @@ class GGUFWriter:
|
|||
def add_causal_attention(self, value: bool) -> None:
|
||||
self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value)
|
||||
|
||||
def add_q_lora_rank(self, length: int) -> None:
|
||||
self.add_uint32(Keys.Attention.Q_LORA_RANK.format(arch=self.arch), length)
|
||||
|
||||
def add_kv_lora_rank(self, length: int) -> None:
|
||||
self.add_uint32(Keys.Attention.KV_LORA_RANK.format(arch=self.arch), length)
|
||||
|
||||
def add_relative_attn_buckets_count(self, value: int) -> None:
|
||||
self.add_uint32(Keys.Attention.REL_BUCKETS_COUNT.format(arch=self.arch), value)
|
||||
|
||||
def add_pooling_type(self, value: PoolingType) -> None:
|
||||
self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value.value)
|
||||
|
||||
|
@ -433,12 +567,18 @@ class GGUFWriter:
|
|||
def add_rope_scaling_factor(self, value: float) -> None:
|
||||
self.add_float32(Keys.Rope.SCALING_FACTOR.format(arch=self.arch), value)
|
||||
|
||||
def add_rope_scaling_attn_factors(self, value: float) -> None:
|
||||
self.add_float32(Keys.Rope.SCALING_ATTN_FACTOR.format(arch=self.arch), value)
|
||||
|
||||
def add_rope_scaling_orig_ctx_len(self, value: int) -> None:
|
||||
self.add_uint32(Keys.Rope.SCALING_ORIG_CTX_LEN.format(arch=self.arch), value)
|
||||
|
||||
def add_rope_scaling_finetuned(self, value: bool) -> None:
|
||||
self.add_bool(Keys.Rope.SCALING_FINETUNED.format(arch=self.arch), value)
|
||||
|
||||
def add_rope_scaling_yarn_log_mul(self, value: float) -> None:
|
||||
self.add_float32(Keys.Rope.SCALING_YARN_LOG_MUL.format(arch=self.arch), value)
|
||||
|
||||
def add_ssm_conv_kernel(self, value: int) -> None:
|
||||
self.add_uint32(Keys.SSM.CONV_KERNEL.format(arch=self.arch), value)
|
||||
|
||||
|
@ -502,6 +642,12 @@ class GGUFWriter:
|
|||
def add_add_space_prefix(self, value: bool) -> None:
|
||||
self.add_bool(Keys.Tokenizer.ADD_PREFIX, value)
|
||||
|
||||
def add_remove_extra_whitespaces(self, value: bool) -> None:
|
||||
self.add_bool(Keys.Tokenizer.REMOVE_EXTRA_WS, value)
|
||||
|
||||
def add_precompiled_charsmap(self, charsmap: Sequence[bytes]) -> None:
|
||||
self.add_array(Keys.Tokenizer.PRECOMPILED_CHARSMAP, charsmap)
|
||||
|
||||
def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None:
|
||||
if not isinstance(value, str):
|
||||
template_default = None
|
||||
|
@@ -549,5 +695,42 @@ class GGUFWriter:
        pack_prefix = '<' if self.endianess == GGUFEndian.LITTLE else '>'
        return struct.pack(f'{pack_prefix}{fmt}', value)

    def _write_packed(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> None:
        self.fout.write(self._pack(fmt, value, skip_pack_prefix))

    def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes:
        kv_data = bytearray()

        if add_vtype:
            kv_data += self._pack("I", vtype)

        pack_fmt = self._simple_value_packing.get(vtype)
        if pack_fmt is not None:
            kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL)
        elif vtype == GGUFValueType.STRING:
            encoded_val = val.encode("utf-8") if isinstance(val, str) else val
            kv_data += self._pack("Q", len(encoded_val))
            kv_data += encoded_val
        elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val:
            if isinstance(val, bytes):
                ltype = GGUFValueType.UINT8
            else:
                ltype = GGUFValueType.get_type(val[0])
            if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]):
                raise ValueError("All items in a GGUF array should be of the same type")
            kv_data += self._pack("I", ltype)
            kv_data += self._pack("Q", len(val))
            for item in val:
                kv_data += self._pack_val(item, ltype, add_vtype=False)
        else:
            raise ValueError("Invalid GGUF metadata value type or value")

        return kv_data

    @staticmethod
    def format_n_bytes_to_str(num: int) -> str:
        if num == 0:
            return "negligible - metadata only"
        fnum = float(num)
        for unit in ("", "K", "M", "G"):
            if abs(fnum) < 1000.0:
                return f"{fnum:3.1f}{unit}"
            fnum /= 1000.0
        return f"{fnum:.1f}T - over 1TB, split recommended"
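# Illustrative sketch (not part of the diff): what the helpers above produce. The byte
# layout shown for a GGUF string value follows _pack_val as written here (uint64 length
# followed by UTF-8 bytes); the surrounding writer object is assumed.
import struct

encoded = "llama".encode("utf-8")
kv_bytes = struct.pack("<Q", len(encoded)) + encoded  # little-endian, as with GGUFEndian.LITTLE
assert kv_bytes == b"\x05\x00\x00\x00\x00\x00\x00\x00llama"

# GGUFWriter.format_n_bytes_to_str(0)          -> "negligible - metadata only"
# GGUFWriter.format_n_bytes_to_str(7_000_000)  -> "7.0M"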
@@ -1,5 +1,5 @@
from __future__ import annotations
from typing import Callable
from typing import Callable, Sequence

from numpy.typing import DTypeLike

@@ -9,6 +9,20 @@ from .lazy import LazyNumpyTensor
import numpy as np


def quant_shape_to_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    if shape[-1] % block_size != 0:
        raise ValueError(f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})")
    return (*shape[:-1], shape[-1] // block_size * type_size)


def quant_shape_from_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    if shape[-1] % type_size != 0:
        raise ValueError(f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})")
    return (*shape[:-1], shape[-1] // type_size * block_size)
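# Worked example (illustration only): round-tripping a logical shape through the two
# helpers above for Q4_0, which packs blocks of 32 weights into 18 bytes per block
# according to GGML_QUANT_SIZES; the import path and the table values are assumptions.
from gguf.constants import GGMLQuantizationType
from gguf.quants import quant_shape_to_byte_shape, quant_shape_from_byte_shape

byte_shape = quant_shape_to_byte_shape((4096, 4096), GGMLQuantizationType.Q4_0)
assert byte_shape == (4096, 4096 // 32 * 18)  # (4096, 2304)
assert quant_shape_from_byte_shape(byte_shape, GGMLQuantizationType.Q4_0) == (4096, 4096)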
# same as ggml_compute_fp32_to_bf16 in ggml-impl.h
def __compute_fp32_to_bf16(n: np.ndarray) -> np.ndarray:
    n = n.astype(np.float32, copy=False).view(np.int32)
@@ -25,6 +25,7 @@ class TensorNameMap:
            "backbone.embeddings", # mamba-hf
            "transformer.in_out_embed", # Grok
            "transformer.token_embeddings", # openelm
            "shared", # t5
        ),

        # Token type embeddings

@@ -106,6 +107,7 @@ class TensorNameMap:
        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn", # falcon40b
            "encoder.layer.{bid}.layer_norm_1", # jina-v2-code
        ),

        # Attention query-key-value

@@ -191,6 +193,10 @@ class TensorNameMap:
            "transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx
        ),

        MODEL_TENSOR.ATTN_POST_NORM: (
            "model.layers.{bid}.post_attention_layernorm", # gemma2
        ),

        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
@@ -215,6 +221,16 @@ class TensorNameMap:
            "transformer.layers.{bid}.ffn_norm", # openelm
        ),

        # Pre feed-forward norm
        MODEL_TENSOR.FFN_PRE_NORM: (
            "model.layers.{bid}.pre_feedforward_layernorm", # gemma2
        ),

        # Post feed-forward norm
        MODEL_TENSOR.FFN_POST_NORM: (
            "model.layers.{bid}.post_feedforward_layernorm", # gemma2
        ),

        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate", # mixtral
            "model.layers.{bid}.block_sparse_moe.gate", # mixtral
@@ -252,6 +268,7 @@ class TensorNameMap:
            "model.layers.{bid}.mlp.c_fc", # starcoder2
            "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_1", # openelm
            "model.layers.{bid}.residual_mlp.w3", # arctic
        ),

        MODEL_TENSOR.FFN_UP_EXP: (

@@ -263,6 +280,7 @@ class TensorNameMap:

        MODEL_TENSOR.FFN_UP_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2
        ),

        # AWQ-activation gate

@@ -280,6 +298,7 @@ class TensorNameMap:
            "encoder.layers.{bid}.mlp.fc12", # nomic-bert
            "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
            "transformer.h.{bid}.mlp.linear_1", # refact
            "model.layers.{bid}.residual_mlp.w1", # arctic
        ),

        MODEL_TENSOR.FFN_GATE_EXP: (

@@ -291,6 +310,7 @@ class TensorNameMap:

        MODEL_TENSOR.FFN_GATE_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2
        ),

        # Feed-forward down

@@ -315,6 +335,8 @@ class TensorNameMap:
            "model.layers.{bid}.mlp.c_proj", # starcoder2
            "encoder.layer.{bid}.mlp.wo", # jina-bert-v2
            "transformer.layers.{bid}.ffn.proj_2", # openelm
            "model.layers.{bid}.residual_mlp.w2", # arctic
            "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
        ),

        MODEL_TENSOR.FFN_DOWN_EXP: (

@@ -326,6 +348,7 @@ class TensorNameMap:

        MODEL_TENSOR.FFN_DOWN_SHEXP: (
            "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe
            "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_NORM: (

@@ -355,6 +378,7 @@ class TensorNameMap:
            "encoder.layers.{bid}.norm2", # nomic-bert
            "transformer.decoder_layer.{bid}.rms_norm_3", # Grok
            "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
            "encoder.layer.{bid}.layer_norm_2" # jina-v2-code
        ),

        MODEL_TENSOR.SSM_IN: (
@@ -391,6 +415,164 @@ class TensorNameMap:
            "model.layers.{bid}.out_proj",
            "backbone.layers.{bid}.mixer.out_proj",
        ),

        MODEL_TENSOR.ATTN_Q_A: (
            "model.layers.{bid}.self_attn.q_a_proj", # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_B: (
            "model.layers.{bid}.self_attn.q_b_proj", # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_A_MQA: (
            "model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_B: (
            "model.layers.{bid}.self_attn.kv_b_proj", # deepseek2
        ),

        MODEL_TENSOR.ATTN_Q_A_NORM: (
            "model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2
        ),

        MODEL_TENSOR.ATTN_KV_A_NORM: (
            "model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2
        ),

        MODEL_TENSOR.ATTN_SUB_NORM: (
            "model.layers.{bid}.self_attn.inner_attn_ln", # bitnet
        ),

        MODEL_TENSOR.FFN_SUB_NORM: (
            "model.layers.{bid}.mlp.ffn_layernorm", # bitnet
        ),

        MODEL_TENSOR.DEC_ATTN_NORM: (
            "decoder.block.{bid}.layer.0.layer_norm", # t5
        ),

        MODEL_TENSOR.DEC_ATTN_Q: (
            "decoder.block.{bid}.layer.0.SelfAttention.q", # t5
        ),

        MODEL_TENSOR.DEC_ATTN_K: (
            "decoder.block.{bid}.layer.0.SelfAttention.k", # t5
        ),

        MODEL_TENSOR.DEC_ATTN_V: (
            "decoder.block.{bid}.layer.0.SelfAttention.v", # t5
        ),

        MODEL_TENSOR.DEC_ATTN_OUT: (
            "decoder.block.{bid}.layer.0.SelfAttention.o", # t5
        ),

        MODEL_TENSOR.DEC_ATTN_REL_B: (
            "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
            "decoder.block.{bid}.layer.1.layer_norm", # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
            "decoder.block.{bid}.layer.1.EncDecAttention.q", # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_K: (
            "decoder.block.{bid}.layer.1.EncDecAttention.k", # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_V: (
            "decoder.block.{bid}.layer.1.EncDecAttention.v", # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
            "decoder.block.{bid}.layer.1.EncDecAttention.o", # t5
        ),

        MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
            "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5
        ),

        MODEL_TENSOR.DEC_FFN_NORM: (
            "decoder.block.{bid}.layer.2.layer_norm", # t5
        ),

        MODEL_TENSOR.DEC_FFN_GATE: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5
        ),

        MODEL_TENSOR.DEC_FFN_UP: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5
            "decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5
        ),

        MODEL_TENSOR.DEC_FFN_DOWN: (
            "decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5
        ),

        MODEL_TENSOR.DEC_OUTPUT_NORM: (
            "decoder.final_layer_norm", # t5
        ),

        MODEL_TENSOR.ENC_ATTN_NORM: (
            "encoder.block.{bid}.layer.0.layer_norm", # t5
        ),

        MODEL_TENSOR.ENC_ATTN_Q: (
            "encoder.block.{bid}.layer.0.SelfAttention.q", # t5
        ),

        MODEL_TENSOR.ENC_ATTN_K: (
            "encoder.block.{bid}.layer.0.SelfAttention.k", # t5
        ),

        MODEL_TENSOR.ENC_ATTN_V: (
            "encoder.block.{bid}.layer.0.SelfAttention.v", # t5
        ),

        MODEL_TENSOR.ENC_ATTN_OUT: (
            "encoder.block.{bid}.layer.0.SelfAttention.o", # t5
        ),

        MODEL_TENSOR.ENC_ATTN_REL_B: (
            "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
        ),

        MODEL_TENSOR.ENC_FFN_NORM: (
            "encoder.block.{bid}.layer.1.layer_norm", # t5
        ),

        MODEL_TENSOR.ENC_FFN_GATE: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5
        ),

        MODEL_TENSOR.ENC_FFN_UP: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5
            "encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5
        ),

        MODEL_TENSOR.ENC_FFN_DOWN: (
            "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5
        ),

        MODEL_TENSOR.ENC_OUTPUT_NORM: (
            "encoder.final_layer_norm", # t5
        ),
    }

    # architecture-specific block mappings
    arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
        MODEL_ARCH.ARCTIC: {
            MODEL_TENSOR.FFN_NORM: (
                "model.layers.{bid}.residual_layernorm",
            ),
            MODEL_TENSOR.FFN_NORM_EXP: (
                "model.layers.{bid}.post_attention_layernorm",
            ),
        },
    }

    mapping: dict[str, tuple[MODEL_TENSOR, str]]
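# Illustration (not part of the diff): the templated Hugging Face names added above are
# expanded per block when TensorNameMap is built, so a lookup for block 3 of a
# DeepSeek-V2 checkpoint proceeds roughly like this; the GGUF-side tensor name quoted in
# the comment is an assumption about TENSOR_NAMES, not taken from this hunk.
hf_key = "model.layers.{bid}.self_attn.kv_a_proj_with_mqa".format(bid=3)
# hf_key == "model.layers.3.self_attn.kv_a_proj_with_mqa"
# -> resolves to MODEL_TENSOR.ATTN_KV_A_MQA and is written out under the matching
#    GGUF tensor name (something like "blk.3.attn_kv_a_mqa")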
@@ -404,12 +586,14 @@ class TensorNameMap:
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        if arch in self.arch_block_mappings_cfg:
            self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
        for bid in range(n_blocks):
            for tensor, keys in self.block_mappings_cfg.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue
                # TODO: make this configurable
                n_experts = 60
                n_experts = 160
                for xid in range(n_experts):
                    tensor_name = TENSOR_NAMES[tensor].format(bid = bid, xid = xid)
                    self.mapping[tensor_name] = (tensor, tensor_name)
@@ -1,10 +1,15 @@
from __future__ import annotations

import re
import logging
import json
import os
from pathlib import Path
from typing import Any, Callable, Sequence, Mapping, Iterable
from typing import Any, Callable, Sequence, Mapping, Iterable, Protocol, ClassVar, runtime_checkable

from sentencepiece import SentencePieceProcessor

import gguf

from .gguf_writer import GGUFWriter
@@ -163,3 +168,298 @@ class SpecialVocab:
        for typ in self.special_token_types:
            self._set_special_token(typ, config.get(f'{typ}_token_id'))
        return True


@runtime_checkable
class BaseVocab(Protocol):
    tokenizer_model: ClassVar[str]
    name: ClassVar[str]


@runtime_checkable
class Vocab(BaseVocab, Protocol):
    vocab_size: int
    added_tokens_dict: dict[str, int]
    added_tokens_list: list[str]
    fname_tokenizer: Path

    def __init__(self, base_path: Path): ...
    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...


class NoVocab(BaseVocab):
    tokenizer_model = "no_vocab"
    name = "no_vocab"

    def __repr__(self) -> str:
        return "<NoVocab for a model without integrated vocabulary>"
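# Sketch (not part of the diff): BaseVocab is a runtime_checkable Protocol, so conformance
# is structural rather than by inheritance; NoVocab above satisfies it simply by exposing
# the two class attributes.
assert isinstance(NoVocab(), BaseVocab)
assert not isinstance(object(), BaseVocab)  # lacks tokenizer_model / name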
class BpeVocab(Vocab):
    tokenizer_model = "gpt2"
    name = "bpe"

    def __init__(self, base_path: Path):
        added_tokens: dict[str, int] = {}

        if (fname_tokenizer := base_path / 'vocab.json').exists():
            # "slow" tokenizer
            with open(fname_tokenizer, encoding="utf-8") as f:
                self.vocab = json.load(f)

            try:
                # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
                with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
                    added_tokens = json.load(f)
            except FileNotFoundError:
                pass
        else:
            # "fast" tokenizer
            fname_tokenizer = base_path / 'tokenizer.json'

            # if this fails, FileNotFoundError propagates to caller
            with open(fname_tokenizer, encoding="utf-8") as f:
                tokenizer_json = json.load(f)

            tokenizer_model: dict[str, Any] = tokenizer_json['model']
            if (
                tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
                or tokenizer_json['decoder']['type'] != 'ByteLevel'
            ):
                raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')

            self.vocab = tokenizer_model["vocab"]

            if (added := tokenizer_json.get('added_tokens')) is not None:
                # Added tokens here can be duplicates of the main vocabulary.
                added_tokens = {item['content']: item['id']
                                for item in added
                                if item['content'] not in self.vocab}

        vocab_size = len(self.vocab)
        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            expected_end_id = vocab_size + len(actual_ids) - 1
            raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
                             f"{vocab_size} - {expected_end_id}; got {actual_ids}")

        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_dict = added_tokens
        self.added_tokens_list = [text for (text, idx) in items]
        self.vocab_size_base = vocab_size
        self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer = fname_tokenizer

    def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}

        for i, _ in enumerate(self.vocab):
            yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.CONTROL

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.bpe_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
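# Usage sketch (illustration only; the model directory is hypothetical): load a GPT-2 style
# BPE tokenizer from a Hugging Face checkpoint directory and walk its tokens.
from pathlib import Path

vocab = BpeVocab(Path("models/my-gpt2-style-model"))
print(vocab)  # <BpeVocab with N base tokens and M added tokens>
for text, score, toktype in vocab.all_tokens():
    pass  # each entry is (utf-8 bytes, score, gguf.TokenType)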
class SentencePieceVocab(Vocab):
    tokenizer_model = "llama"
    name = "spm"

    def __init__(self, base_path: Path):
        added_tokens: dict[str, int] = {}
        if (fname_tokenizer := base_path / 'tokenizer.model').exists():
            # normal location
            try:
                with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
                    added_tokens = json.load(f)
            except FileNotFoundError:
                pass
        elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
            # not found in alternate location either
            raise FileNotFoundError('Cannot find tokenizer.model')

        self.sentencepiece_tokenizer = SentencePieceProcessor()
        self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
        vocab_size = self.sentencepiece_tokenizer.vocab_size()

        new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
        expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
        actual_new_ids = sorted(new_tokens.keys())

        if expected_new_ids != actual_new_ids:
            raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")

        # Token pieces that were added to the base vocabulary.
        self.added_tokens_dict = added_tokens
        self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
        self.vocab_size_base = vocab_size
        self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer = fname_tokenizer

    def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        tokenizer = self.sentencepiece_tokenizer
        for i in range(tokenizer.vocab_size()):
            piece = tokenizer.IdToPiece(i)
            text = piece.encode("utf-8")
            score: float = tokenizer.GetScore(i)

            toktype = gguf.TokenType.NORMAL
            if tokenizer.IsUnknown(i):
                toktype = gguf.TokenType.UNKNOWN
            if tokenizer.IsControl(i):
                toktype = gguf.TokenType.CONTROL

            # NOTE: I think added_tokens are user defined.
            # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
            # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED

            if tokenizer.IsUnused(i):
                toktype = gguf.TokenType.UNUSED
            if tokenizer.IsByte(i):
                toktype = gguf.TokenType.BYTE

            yield text, score, toktype

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
class LlamaHfVocab(Vocab):
    tokenizer_model = "llama"
    name = "hfft"

    def __init__(self, base_path: Path):
        fname_tokenizer = base_path / 'tokenizer.json'
        # if this fails, FileNotFoundError propagates to caller
        with open(fname_tokenizer, encoding='utf-8') as f:
            tokenizer_json = json.load(f)

        # pre-check so we know if we need transformers
        tokenizer_model: dict[str, Any] = tokenizer_json['model']
        is_llama3 = (
            tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
            and not tokenizer_model.get('byte_fallback', True)
        )
        if is_llama3:
            raise TypeError('Llama 3 must be converted with BpeVocab')

        if not is_llama3 and (
            tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
            or tokenizer_json['decoder']['type'] != 'Sequence'
        ):
            raise FileNotFoundError('Cannot find Llama BPE tokenizer')

        try:
            from transformers import AutoTokenizer
        except ImportError as e:
            raise ImportError(
                "To use LlamaHfVocab, please install the `transformers` package. "
                "You can install it with `pip install transformers`."
            ) from e

        # Allow the tokenizer to default to slow or fast versions.
        # Explicitly set tokenizer to use local paths.
        self.tokenizer = AutoTokenizer.from_pretrained(
            base_path,
            cache_dir=base_path,
            local_files_only=True,
        )
        assert self.tokenizer.is_fast # assume tokenizer.json is used

        # Initialize lists and dictionaries for added tokens
        self.added_tokens_list = []
        self.added_tokens_dict = dict()
        self.added_tokens_ids = set()

        # Process added tokens
        for tok, tokidx in sorted(
            self.tokenizer.get_added_vocab().items(), key=lambda x: x[1]
        ):
            # Only consider added tokens that are not in the base vocabulary
            if tokidx >= self.tokenizer.vocab_size:
                self.added_tokens_list.append(tok)
                self.added_tokens_dict[tok] = tokidx
                self.added_tokens_ids.add(tokidx)

        # Store special tokens and their IDs
        self.specials = {
            tok: self.tokenizer.get_vocab()[tok]
            for tok in self.tokenizer.all_special_tokens
        }
        self.special_ids = set(self.tokenizer.all_special_ids)

        # Set vocabulary sizes
        self.vocab_size_base = self.tokenizer.vocab_size
        self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)

        self.fname_tokenizer = fname_tokenizer

    def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        reverse_vocab = {
            id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()
        }

        for token_id in range(self.vocab_size_base):
            # Skip processing added tokens here
            if token_id in self.added_tokens_ids:
                continue

            # Convert token text to bytes
            token_text = reverse_vocab[token_id].encode("utf-8")

            # Yield token text, score, and type
            yield token_text, self.get_token_score(token_id), self.get_token_type(
                token_id, token_text, self.special_ids # Reuse already stored special IDs
            )

    def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
        # Special case for byte tokens
        if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
            return gguf.TokenType.BYTE

        # Determine token type based on whether it's a special token
        return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL

    def get_token_score(self, token_id: int) -> float:
        # Placeholder for actual logic to determine the token's score
        # This needs to be implemented based on specific requirements
        return -1000.0 # Default score

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            if text in self.specials:
                toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
                score = self.get_token_score(self.specials[text])
            else:
                toktype = gguf.TokenType.USER_DEFINED
                score = -1000.0

            yield text.encode("utf-8"), score, toktype

    def has_newline_token(self):
        return "<0x0A>" in self.tokenizer.vocab or "\n" in self.tokenizer.vocab

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.hf_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<LlamaHfVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
@@ -14,7 +14,7 @@ import numpy as np
if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():
    sys.path.insert(0, str(Path(__file__).parent.parent))

from gguf import GGUFReader, GGUFValueType # noqa: E402
from gguf import GGUFReader, GGUFValueType, ReaderTensor # noqa: E402

logger = logging.getLogger("gguf-dump")
@@ -101,25 +101,318 @@ def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None:
    json.dump(result, sys.stdout)


def markdown_table_with_alignment_support(header_map: list[dict[str, str]], data: list[dict[str, Any]]):
    # JSON to Markdown table formatting: https://stackoverflow.com/a/72983854/2850957

    # Alignment Utility Function
    def strAlign(padding: int, alignMode: str | None, strVal: str):
        if alignMode == 'center':
            return strVal.center(padding)
        elif alignMode == 'right':
            return strVal.rjust(padding - 1) + ' '
        elif alignMode == 'left':
            return ' ' + strVal.ljust(padding - 1)
        else: # default left
            return ' ' + strVal.ljust(padding - 1)

    def dashAlign(padding: int, alignMode: str | None):
        if alignMode == 'center':
            return ':' + '-' * (padding - 2) + ':'
        elif alignMode == 'right':
            return '-' * (padding - 1) + ':'
        elif alignMode == 'left':
            return ':' + '-' * (padding - 1)
        else: # default left
            return '-' * (padding)

    # Calculate Padding For Each Column Based On Header and Data Length
    rowsPadding = {}
    for index, columnEntry in enumerate(header_map):
        padCount = max([len(str(v)) for d in data for k, v in d.items() if k == columnEntry['key_name']], default=0) + 2
        headerPadCount = len(columnEntry['header_name']) + 2
        rowsPadding[index] = headerPadCount if padCount <= headerPadCount else padCount

    # Render Markdown Header
    rows = []
    rows.append('|'.join(strAlign(rowsPadding[index], columnEntry.get('align'), str(columnEntry['header_name'])) for index, columnEntry in enumerate(header_map)))
    rows.append('|'.join(dashAlign(rowsPadding[index], columnEntry.get('align')) for index, columnEntry in enumerate(header_map)))

    # Render Tabular Data
    for item in data:
        rows.append('|'.join(strAlign(rowsPadding[index], columnEntry.get('align'), str(item[columnEntry['key_name']])) for index, columnEntry in enumerate(header_map)))

    # Convert Tabular String Rows Into String
    tableString = ""
    for row in rows:
        tableString += f'|{row}|\n'

    return tableString
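# Usage sketch (illustration only; the toy rows are made up): render a small GitHub-style
# markdown table with one right-aligned column using the helper above.
header_map = [
    {'key_name': 'name',  'header_name': 'Name',  'align': 'left'},
    {'key_name': 'count', 'header_name': 'Count', 'align': 'right'},
]
data = [{'name': 'token_embd.weight', 'count': 32000}, {'name': 'output.weight', 'count': 32000}]
print(markdown_table_with_alignment_support(header_map, data))
# prints a pipe-delimited table whose second row carries the alignment markers (e.g. "----:")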
def element_count_rounded_notation(count: int) -> str:
    if count > 1e15 :
        # Quadrillion
        scaled_amount = count * 1e-15
        scale_suffix = "Q"
    elif count > 1e12 :
        # Trillions
        scaled_amount = count * 1e-12
        scale_suffix = "T"
    elif count > 1e9 :
        # Billions
        scaled_amount = count * 1e-9
        scale_suffix = "B"
    elif count > 1e6 :
        # Millions
        scaled_amount = count * 1e-6
        scale_suffix = "M"
    elif count > 1e3 :
        # Thousands
        scaled_amount = count * 1e-3
        scale_suffix = "K"
    else:
        # Under Thousands
        scaled_amount = count
        scale_suffix = ""
    return f"{'~' if count > 1e3 else ''}{round(scaled_amount)}{scale_suffix}"
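# Quick check (illustration only) of the rounding helper above.
assert element_count_rounded_notation(512) == "512"
assert element_count_rounded_notation(7_241_732_096) == "~7B"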
def translate_tensor_name(name):
    words = name.split(".")

    # Source: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#standardized-tensor-names
    abbreviation_dictionary = {
        'token_embd': 'Token embedding',
        'pos_embd': 'Position embedding',
        'output_norm': 'Output normalization',
        'output': 'Output',
        'attn_norm': 'Attention normalization',
        'attn_norm_2': 'Attention normalization',
        'attn_qkv': 'Attention query-key-value',
        'attn_q': 'Attention query',
        'attn_k': 'Attention key',
        'attn_v': 'Attention value',
        'attn_output': 'Attention output',
        'ffn_norm': 'Feed-forward network normalization',
        'ffn_up': 'Feed-forward network "up"',
        'ffn_gate': 'Feed-forward network "gate"',
        'ffn_down': 'Feed-forward network "down"',
        'ffn_gate_inp': 'Expert-routing layer for the Feed-forward network in Mixture of Expert models',
        'ffn_gate_exp': 'Feed-forward network "gate" layer per expert in Mixture of Expert models',
        'ffn_down_exp': 'Feed-forward network "down" layer per expert in Mixture of Expert models',
        'ffn_up_exp': 'Feed-forward network "up" layer per expert in Mixture of Expert models',
        'ssm_in': 'State space model input projections',
        'ssm_conv1d': 'State space model rolling/shift',
        'ssm_x': 'State space model selective parametrization',
        'ssm_a': 'State space model state compression',
        'ssm_d': 'State space model skip connection',
        'ssm_dt': 'State space model time step',
        'ssm_out': 'State space model output projection',
        'blk': 'Block',
        'enc': 'Encoder',
        'dec': 'Decoder',
    }

    expanded_words = []
    for word in words:
        word_norm = word.strip().lower()
        if word_norm in abbreviation_dictionary:
            expanded_words.append(abbreviation_dictionary[word_norm].title())
        else:
            expanded_words.append(word.title())

    return ' '.join(expanded_words)
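# Quick check (illustration only) of the name expansion above.
assert translate_tensor_name("blk.0.attn_q.weight") == "Block 0 Attention Query Weight"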
def dump_markdown_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
    host_endian, file_endian = get_file_host_endian(reader)
    markdown_content = ""
    markdown_content += f'# {args.model} - GGUF Internal File Dump\n\n'
    markdown_content += f'- Endian: {file_endian} endian\n'
    markdown_content += '\n'
    markdown_content += '## Key Value Metadata Store\n\n'
    markdown_content += f'There are {len(reader.fields)} key-value pairs in this file\n'
    markdown_content += '\n'

    kv_dump_table: list[dict[str, str | int]] = []
    for n, field in enumerate(reader.fields.values(), 1):
        if not field.types:
            pretty_type = 'N/A'
        elif field.types[0] == GGUFValueType.ARRAY:
            nest_count = len(field.types) - 1
            pretty_type = '[' * nest_count + str(field.types[-1].name) + ']' * nest_count
        else:
            pretty_type = str(field.types[-1].name)

        total_elements = len(field.data)
        value = ""
        if len(field.types) == 1:
            curr_type = field.types[0]
            if curr_type == GGUFValueType.STRING:
                value = repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60])
            elif curr_type in reader.gguf_scalar_to_np:
                value = str(field.parts[-1][0])
        else:
            if field.types[0] == GGUFValueType.ARRAY:
                curr_type = field.types[1]
                if curr_type == GGUFValueType.STRING:
                    render_element = min(5, total_elements)
                    for element_pos in range(render_element):
                        value += repr(str(bytes(field.parts[-1 - element_pos]), encoding='utf-8')[:5]) + (", " if total_elements > 1 else "")
                elif curr_type in reader.gguf_scalar_to_np:
                    render_element = min(7, total_elements)
                    for element_pos in range(render_element):
                        value += str(field.parts[-1 - element_pos][0]) + (", " if total_elements > 1 else "")
                value = f'[ {value}{" ..." if total_elements > 1 else ""} ]'
        kv_dump_table.append({"n":n, "pretty_type":pretty_type, "total_elements":total_elements, "field_name":field.name, "value":value})

    kv_dump_table_header_map = [
        {'key_name':'n', 'header_name':'POS', 'align':'right'},
        {'key_name':'pretty_type', 'header_name':'TYPE', 'align':'left'},
        {'key_name':'total_elements', 'header_name':'Count', 'align':'right'},
        {'key_name':'field_name', 'header_name':'Key', 'align':'left'},
        {'key_name':'value', 'header_name':'Value', 'align':'left'},
    ]

    markdown_content += markdown_table_with_alignment_support(kv_dump_table_header_map, kv_dump_table)

    markdown_content += "\n"

    if not args.no_tensors:
        # Group tensors by their prefix and maintain order
        tensor_prefix_order: list[str] = []
        tensor_name_to_key: dict[str, int] = {}
        tensor_groups: dict[str, list[ReaderTensor]] = {}
        total_elements = sum(tensor.n_elements for tensor in reader.tensors)

        # Parsing Tensors Record
        for key, tensor in enumerate(reader.tensors):
            tensor_components = tensor.name.split('.')

            # Classify Tensor Group
            tensor_group_name = "base"
            if tensor_components[0] == 'blk':
                tensor_group_name = f"{tensor_components[0]}.{tensor_components[1]}"
            elif tensor_components[0] in ['enc', 'dec'] and tensor_components[1] == 'blk':
                tensor_group_name = f"{tensor_components[0]}.{tensor_components[1]}.{tensor_components[2]}"
            elif tensor_components[0] in ['enc', 'dec']:
                tensor_group_name = f"{tensor_components[0]}"

            # Check if new Tensor Group
            if tensor_group_name not in tensor_groups:
                tensor_groups[tensor_group_name] = []
                tensor_prefix_order.append(tensor_group_name)

            # Record Tensor and Tensor Position
            tensor_groups[tensor_group_name].append(tensor)
            tensor_name_to_key[tensor.name] = key

        # Tensors Mapping Dump
        markdown_content += f'## Tensors Overview {element_count_rounded_notation(total_elements)} Elements\n\n'
        markdown_content += f'Total number of elements in all tensors: {total_elements} Elements\n'
        markdown_content += '\n'

        for group in tensor_prefix_order:
            tensors = tensor_groups[group]
            group_elements = sum(tensor.n_elements for tensor in tensors)
            markdown_content += f"- [{translate_tensor_name(group)} Tensor Group - {element_count_rounded_notation(group_elements)} Elements](#{group.replace('.', '_')})\n"

        markdown_content += "\n"

        markdown_content += "### Tensor Data Offset\n"
        markdown_content += '\n'
        markdown_content += 'This table contains the offset and data segment relative to start of file\n'
        markdown_content += '\n'

        tensor_mapping_table: list[dict[str, str | int]] = []
        for key, tensor in enumerate(reader.tensors):
            data_offset_pretty = '{0:#16x}'.format(tensor.data_offset)
            data_size_pretty = '{0:#16x}'.format(tensor.n_bytes)
            tensor_mapping_table.append({"t_id":key, "layer_name":tensor.name, "data_offset":data_offset_pretty, "data_size":data_size_pretty})

        tensors_mapping_table_header_map = [
            {'key_name':'t_id', 'header_name':'T_ID', 'align':'right'},
            {'key_name':'layer_name', 'header_name':'Tensor Layer Name', 'align':'left'},
            {'key_name':'data_offset', 'header_name':'Data Offset (B)', 'align':'right'},
            {'key_name':'data_size', 'header_name':'Data Size (B)', 'align':'right'},
        ]

        markdown_content += markdown_table_with_alignment_support(tensors_mapping_table_header_map, tensor_mapping_table)
        markdown_content += "\n"

        for group in tensor_prefix_order:
            tensors = tensor_groups[group]
            group_elements = sum(tensor.n_elements for tensor in tensors)
            group_percentage = group_elements / total_elements * 100
            markdown_content += f"### <a name=\"{group.replace('.', '_')}\">{translate_tensor_name(group)} Tensor Group : {element_count_rounded_notation(group_elements)} Elements</a>\n\n"

            # Precalculate column sizing for visual consistency
            prettify_element_est_count_size: int = 1
            prettify_element_count_size: int = 1
            prettify_dimension_max_widths: dict[int, int] = {}
            for tensor in tensors:
                prettify_element_est_count_size = max(prettify_element_est_count_size, len(str(element_count_rounded_notation(tensor.n_elements))))
                prettify_element_count_size = max(prettify_element_count_size, len(str(tensor.n_elements)))
                for i, dimension_size in enumerate(list(tensor.shape) + [1] * (4 - len(tensor.shape))):
                    prettify_dimension_max_widths[i] = max(prettify_dimension_max_widths.get(i,1), len(str(dimension_size)))

            # Generate Tensor Layer Table Content
            tensor_dump_table: list[dict[str, str | int]] = []
            for tensor in tensors:
                human_friendly_name = translate_tensor_name(tensor.name.replace(".weight", ".(W)").replace(".bias", ".(B)"))
                pretty_dimension = ' x '.join(f'{str(d):>{prettify_dimension_max_widths[i]}}' for i, d in enumerate(list(tensor.shape) + [1] * (4 - len(tensor.shape))))
                element_count_est = f"({element_count_rounded_notation(tensor.n_elements):>{prettify_element_est_count_size}})"
                element_count_string = f"{element_count_est} {tensor.n_elements:>{prettify_element_count_size}}"
                type_name_string = f"{tensor.tensor_type.name}"
                tensor_dump_table.append({"t_id":tensor_name_to_key[tensor.name], "layer_name":tensor.name, "human_layer_name":human_friendly_name, "element_count":element_count_string, "pretty_dimension":pretty_dimension, "tensor_type":type_name_string})

            tensor_dump_table_header_map = [
                {'key_name':'t_id', 'header_name':'T_ID', 'align':'right'},
                {'key_name':'layer_name', 'header_name':'Tensor Layer Name', 'align':'left'},
                {'key_name':'human_layer_name', 'header_name':'Human Friendly Tensor Layer Name', 'align':'left'},
                {'key_name':'element_count', 'header_name':'Elements', 'align':'left'},
                {'key_name':'pretty_dimension', 'header_name':'Shape', 'align':'left'},
                {'key_name':'tensor_type', 'header_name':'Type', 'align':'left'},
            ]

            markdown_content += markdown_table_with_alignment_support(tensor_dump_table_header_map, tensor_dump_table)

            markdown_content += "\n"
            markdown_content += f"- Total elements in {group}: ({element_count_rounded_notation(group_elements):>4}) {group_elements}\n"
            markdown_content += f"- Percentage of total elements: {group_percentage:.2f}%\n"
            markdown_content += "\n\n"

    print(markdown_content) # noqa: NP100
def main() -> None:
    parser = argparse.ArgumentParser(description="Dump GGUF file metadata")
    parser.add_argument("model", type=str, help="GGUF format model filename")
    parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata")
    parser.add_argument("--json", action="store_true", help="Produce JSON output")
    parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)")
    parser.add_argument("--data-offset", action="store_true", help="Start of data offset")
    parser.add_argument("--data-alignment", action="store_true", help="Data alignment applied globally to data field")
    parser.add_argument("--markdown", action="store_true", help="Produce markdown output")
    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")

    args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    if not args.json:
    if not args.json and not args.markdown and not args.data_offset and not args.data_alignment:
        logger.info(f'* Loading: {args.model}')

    reader = GGUFReader(args.model, 'r')

    if args.json:
        dump_metadata_json(reader, args)
    elif args.markdown:
        dump_markdown_metadata(reader, args)
    elif args.data_offset:
        print(reader.data_offset) # noqa: NP100
    elif args.data_alignment:
        print(reader.alignment) # noqa: NP100
    else:
        dump_metadata(reader, args)
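# Usage sketch (illustration only; the file name is hypothetical): the same reader the dump
# tool drives above can be used directly from Python.
from gguf import GGUFReader

reader = GGUFReader("model.gguf", 'r')
print(len(reader.fields), "KV pairs,", len(reader.tensors), "tensors")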
@@ -101,8 +101,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new
        logger.debug(f'Copying {field.name}')

        if val.value is not None:
            writer.add_key(field.name)
            writer.add_val(val.value, val.type)
            writer.add_key_value(field.name, val.value, val.type)

    if gguf.Keys.Tokenizer.CHAT_TEMPLATE in new_metadata:
        logger.debug('Adding chat template(s)')
@@ -111,16 +110,13 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new

    for key, val in new_metadata.items():
        logger.debug(f'Adding {key}: "{val.value}" {val.description}')
        writer.add_key(key)
        writer.add_val(val.value, val.type)
        writer.add_key_value(key, val.value, val.type)

    total_bytes = 0

    for tensor in reader.tensors:
        total_bytes += tensor.n_bytes
        # Dimensions are written in reverse order, so flip them first
        shape = np.flipud(tensor.shape).tolist()
        writer.add_tensor_info(tensor.name, shape, tensor.data.dtype, tensor.data.nbytes, tensor.tensor_type)
        writer.add_tensor_info(tensor.name, tensor.data.shape, tensor.data.dtype, tensor.data.nbytes, tensor.tensor_type)

    bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True)
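# Sketch (illustration only): the diff replaces the two-step add_key()/add_val() calls with
# the single add_key_value() helper; a caller-side usage would look roughly like this, with
# the key and value chosen purely for demonstration.
writer.add_key_value("general.description", "my edited model", gguf.GGUFValueType.STRING)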
@@ -146,6 +142,7 @@ def main() -> None:
    parser.add_argument("--general-description", type=str, help="The models general.description", metavar='"Description ..."')
    parser.add_argument("--chat-template", type=str, help="Chat template string (or JSON string containing templates)", metavar='"{% ... %} ..."')
    parser.add_argument("--chat-template-config", type=Path, help="Config file containing chat template(s)", metavar='tokenizer_config.json')
    parser.add_argument("--pre-tokenizer", type=str, help="The models tokenizer.ggml.pre", metavar='"pre tokenizer"')
    parser.add_argument("--remove-metadata", action="append", type=str, help="Remove metadata (by key name) from output model", metavar='general.url')
    parser.add_argument("--special-token", action="append", type=str, help="Special token by value", nargs=2, metavar=(' | '.join(token_names.keys()), '"<token>"'))
    parser.add_argument("--special-token-by-id", action="append", type=str, help="Special token by id", nargs=2, metavar=(' | '.join(token_names.keys()), '0'))

@@ -174,6 +171,9 @@ def main() -> None:
    if template:
        new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE] = MetadataDetails(gguf.GGUFValueType.STRING, template)

    if args.pre_tokenizer:
        new_metadata[gguf.Keys.Tokenizer.PRE] = MetadataDetails(gguf.GGUFValueType.STRING, args.pre_tokenizer)

    if remove_metadata:
        logger.warning('*** Warning *** Warning *** Warning **')
        logger.warning('* Most metadata is required for a fully functional GGUF file,')