diff --git a/llama.cpp b/llama.cpp
index f67cb7e23..e2c389acd 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4596,7 +4596,10 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "smaug-bpe") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
+            } else if (
+                tokenizer_pre == "poro-chat") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
         } else {
@@ -12580,6 +12583,11 @@ struct llm_tokenizer_bpe {
                         "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
                     });
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_PORO:
+                word_collection = unicode_regex_split(text, {
+                    " ?[^(\\s|.,!?…。,、।۔،)]+",
+                });
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 word_collection = unicode_regex_split(text, {
diff --git a/llama.h b/llama.h
index 3e4474bb9..f84162f58 100644
--- a/llama.h
+++ b/llama.h
@@ -86,6 +86,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_OLMO        = 12,
         LLAMA_VOCAB_PRE_TYPE_DBRX        = 13,
         LLAMA_VOCAB_PRE_TYPE_SMAUG       = 14,
+        LLAMA_VOCAB_PRE_TYPE_PORO        = 15,
     };
 
     // note: these values should be synchronized with ggml_rope