feat: Add qwen pattern and tokenizer
commit b8f8a96ff1
parent 9d2fcd0db5

2 changed files with 10 additions and 0 deletions
@@ -4389,6 +4389,9 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "command-r") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
+            } else if (
+                    tokenizer_pre == "qwen") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
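Note: the string compared against tokenizer_pre is read from the model's GGUF metadata at load time. Below is a minimal, hypothetical sketch (not part of this commit) of how one might check which pre-tokenizer name a given .gguf file advertises; it assumes ggml's gguf API (historically declared in ggml.h) and that the name is stored under the "tokenizer.ggml.pre" key, which is how llama.cpp model files carry it.

    // check_pre.cpp -- illustrative only; assumes ggml's gguf API and the
    // "tokenizer.ggml.pre" metadata key used by llama.cpp model files.
    #include <cstdio>
    #include "ggml.h"

    int main(int argc, char ** argv) {
        if (argc < 2) { fprintf(stderr, "usage: %s model.gguf\n", argv[0]); return 1; }

        struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ NULL };
        struct gguf_context * ctx = gguf_init_from_file(argv[1], params);
        if (!ctx) { fprintf(stderr, "failed to read %s\n", argv[1]); return 1; }

        const int key_id = gguf_find_key(ctx, "tokenizer.ggml.pre");
        if (key_id >= 0) {
            // a value of "qwen" selects the new LLAMA_VOCAB_PRE_TYPE_QWEN branch above
            printf("tokenizer.ggml.pre = %s\n", gguf_get_val_str(ctx, key_id));
        } else {
            printf("no tokenizer.ggml.pre key found\n");
        }

        gguf_free(ctx);
        return 0;
    }

If a conversion script writes that key as "qwen", loading falls through to the new else-if above; any unrecognized name still hits the runtime_error branch.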
@@ -12252,6 +12255,12 @@ struct llm_tokenizer_bpe {
                         "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                     });
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_QWEN:
+                word_collection = unicode_regex_split(text, {
+                    // original regex from tokenization_qwen.py
+                    "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+                });
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 word_collection = unicode_regex_split(text, {
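The added pattern mirrors the pre-tokenization regex from Qwen's tokenization_qwen.py. The following small driver (illustrative only, not part of this commit) feeds a sample string through llama.cpp's internal unicode_regex_split helper with that pattern; how faithfully every construct (e.g. the inline (?i:) group) is handled depends on that helper's regex fallback, so treat the output as indicative.

    // qwen_split_demo.cpp -- illustrative only; unicode_regex_split is the
    // internal llama.cpp helper (unicode.h) that the switch above already calls.
    #include <cstdio>
    #include <string>
    #include <vector>

    #include "unicode.h"

    int main() {
        const std::string text = "Hello, world! 123";

        // identical to the LLAMA_VOCAB_PRE_TYPE_QWEN pattern added above
        const std::vector<std::string> words = unicode_regex_split(text, {
            "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
        });

        for (const std::string & w : words) {
            printf("|%s|\n", w.c_str());
        }
        return 0;
    }

One property worth noting: unlike the default pattern above, which keeps digit runs together via " ?\\p{N}+", the Qwen pattern matches digits with a bare "\\p{N}", so a run like "123" is split one digit at a time.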
llama.h (1 addition)
@@ -81,6 +81,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_GPT2      = 7,
         LLAMA_VOCAB_PRE_TYPE_REFACT    = 8,
         LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
+        LLAMA_VOCAB_PRE_TYPE_QWEN      = 11,
     };

     // note: these values should be synchronized with ggml_rope