Compare commits


14 commits

All commits authored by Francis Couture-Harpin.

59ce85318a  test-tokenizer-random : reduce potential conflicts with #8379  (2024-07-13 01:56:05 -04:00)

    * test-tokenizer-random : add a failing edge case for falcon

1caa20fc7a  convert_hf : reduce usages of UNKNOWN for InternLM2  (2024-07-10 17:33:04 -04:00)

    This makes the changes from #8321 more consistent with the other
    changes made here.

afa6119850  Merge branch 'master' into compilade/fix-mpt-pretok  (2024-07-10 15:32:04 -04:00)

98edea60bc  llama : add UNKNOWN tokens in the special tokens cache  (2024-07-08 21:23:19 -04:00)

d4df785868  convert_hf : reduce usages of the UNKNOWN token type  (2024-07-08 21:09:52 -04:00)

d6fe269ced  llama : fix command-r detokenization  (2024-07-08 18:13:16 -04:00)

31a1b0eeaa  llama : fix Viking pre-tokenizer regex  (2024-07-08 16:34:39 -04:00)

    The order was previously wrong, which caused errors in some tests.

f9d42c598b  convert_hf : identify more added control tokens for SPM tokenizers  (2024-07-07 23:28:38 -04:00)

    This makes Gemma and Gemma-2 tokenize pretty much EVERYTHING correctly,
    including HTML tags and consecutive spaces, but it unfortunately
    requires model re-conversion.

    There seems to be a weird behavior of the HF tokenizer for Gemma: it
    prefers the 16-space token over longer space tokens, while the
    SentencePiece tokenizer does not do this. (The implementation in
    llama.cpp has the same behavior as SentencePiece.) A sketch for
    observing this follows the commit list.

    * llama : fix wrong pre-tokenization of byte tokens

6e351e0425  convert_hf : identify which user-defined tokens are control tokens  (2024-07-07 16:59:07 -04:00)

    Only used in _set_vocab_gpt2() for now.

56df1fcdcb  llama : fix detection of control-like user-defined tokens  (2024-07-07 16:13:35 -04:00)

6b961e3d24  Merge branch 'master' into compilade/fix-mpt-pretok  (2024-07-07 15:33:20 -04:00)

d5d30b20c3  llama : pre-tokenize non-special user-defined tokens first  (2024-07-07 15:32:42 -04:00)

ac0f33c920  Merge branch 'master' into compilade/fix-mpt-pretok  (2024-07-07 11:36:17 -04:00)

db2ffd519d  llama : fix mpt and olmo pre-tokenizer  (2024-06-30 14:34:55 -04:00)
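
A minimal sketch for reproducing the space-token discrepancy described in
f9d42c598b. This is illustrative only, not part of the PR: it assumes a local
Gemma tokenizer directory containing both the HF files and tokenizer.model
(the path is a placeholder), and the exact pieces printed depend on the
tokenizer version.

    # hypothetical repro of the 16-space behavior; not part of this PR
    from transformers import AutoTokenizer
    from sentencepiece import SentencePieceProcessor

    text = " " * 18  # longer than the longest (16-space) space token

    hf = AutoTokenizer.from_pretrained("path/to/gemma")  # placeholder path
    sp = SentencePieceProcessor(model_file="path/to/gemma/tokenizer.model")

    print(hf.tokenize(text))              # HF reportedly prefers the 16-space token
    print(sp.encode(text, out_type=str))  # SentencePiece segments the run differently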
4 changed files with 91 additions and 61 deletions

convert_hf_to_gguf.py

@@ -373,6 +373,29 @@ class Model:
         except KeyError:
             raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
 
+    def does_token_look_special(self, token: str | bytes) -> bool:
+        if isinstance(token, (bytes, bytearray)):
+            token_text = token.decode(encoding="utf-8")
+        elif isinstance(token, memoryview):
+            token_text = token.tobytes().decode(encoding="utf-8")
+        else:
+            token_text = token
+
+        # Some models mark some added tokens which ought to be control tokens as not special.
+        # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
+        seems_special = token_text in (
+            "<pad>",  # deepseek-coder
+            "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
+        )
+
+        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
+        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder
+
+        # TODO: should these be marked as UNUSED instead? (maybe not)
+        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}
+
+        return seems_special
+
     # used for GPT-2 BPE and WordPiece vocabs
     def get_vocab_base(self) -> tuple[list[str], list[int], str]:
         tokens: list[str] = []
@@ -391,16 +414,18 @@ class Model:
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
-            elif reverse_vocab[i] in added_vocab:
-                tokens.append(reverse_vocab[i])
-                if tokenizer.added_tokens_decoder[i].special:
-                    toktypes.append(gguf.TokenType.CONTROL)
-                else:
-                    toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             else:
-                tokens.append(reverse_vocab[i])
-                toktypes.append(gguf.TokenType.NORMAL)
+                token: str = reverse_vocab[i]
+                if token in added_vocab:
+                    if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
+                        toktypes.append(gguf.TokenType.CONTROL)
+                    else:
+                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
+                        toktypes.append(gguf.TokenType.USER_DEFINED)
+                else:
+                    toktypes.append(gguf.TokenType.NORMAL)
+                tokens.append(token)
 
         return tokens, toktypes, tokpre
@@ -559,7 +584,7 @@ class Model:
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             elif reverse_vocab[i] in added_vocab:
                 tokens.append(reverse_vocab[i])
                 toktypes.append(gguf.TokenType.CONTROL)
@@ -609,7 +634,7 @@ class Model:
         tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
         scores: list[float] = [-10000.0] * vocab_size
-        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
 
         for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.IdToPiece(token_id)
@@ -644,6 +669,25 @@ class Model:
                     scores[token_id] = -1000.0
                     toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
+                for token_id, token_data in added_tokens_decoder.items():
+                    token_id = int(token_id)
+                    token: str = token_data["content"]
+                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
+                        assert tokens[token_id] == token.encode("utf-8")
+                    if token_data.get("special") or self.does_token_look_special(token):
+                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
+                    else:
+                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
+                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
+                    scores[token_id] = -1000.0
+                    tokens[token_id] = token.encode("utf-8")
+
         if vocab_size > len(tokens):
             pad_count = vocab_size - len(tokens)
             logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
@@ -1267,7 +1311,7 @@ class StableLMModel(Model):
         if (self.dir_model / "tokenizer.json").is_file():
             self._set_vocab_gpt2()
         else:
-            # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
+            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
             self._set_vocab_qwen()
 
     def set_gguf_parameters(self):
@@ -1579,7 +1623,6 @@ class DbrxModel(Model):
         self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
 
         self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
-        self.gguf_writer.add_file_type(self.ftype)
 
         self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
         self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
@@ -1873,7 +1916,7 @@ class Phi3MiniModel(Model):
         tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
         scores: list[float] = [-10000.0] * vocab_size
-        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
 
         for token_id in range(tokenizer.vocab_size()):
@@ -1918,7 +1961,7 @@ class Phi3MiniModel(Model):
             for token_id, foken_data in added_tokens_decoder.items():
                 token_id = int(token_id)
                 token = foken_data["content"].encode("utf-8")
-                if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+                if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                     assert tokens[token_id] == token
                 tokens[token_id] = token
                 scores[token_id] = -1000.0
@@ -1934,7 +1977,7 @@ class Phi3MiniModel(Model):
             for foken_data in added_tokens:
                 token_id = int(foken_data["id"])
                 token = foken_data["content"].encode("utf-8")
-                if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+                if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                     assert tokens[token_id] == token
                 tokens[token_id] = token
                 scores[token_id] = -1000.0
@@ -2146,7 +2189,7 @@ class InternLM2Model(Model):
                 toktype = SentencePieceTokenTypes.BYTE
             # take care of ununsed raw token
             if piece.startswith('[UNUSED'):
-                toktype = SentencePieceTokenTypes.UNKNOWN
+                toktype = SentencePieceTokenTypes.UNUSED
 
             tokens.append(text)
             scores.append(score)
@@ -2176,7 +2219,7 @@ class InternLM2Model(Model):
                     if token == chat_eos_token:
                         chat_eos_token_id = token_id
                     token = token.encode("utf-8")
-                    if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                         assert(tokens[token_id] == token)
                     tokens[token_id] = token
                     scores[token_id] = -1000.0
@@ -2195,7 +2238,7 @@ class InternLM2Model(Model):
                     if token == chat_eos_token:
                         chat_eos_token_id = token_id
                     token = token.encode("utf-8")
-                    if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+                    if toktypes[token_id] != SentencePieceTokenTypes.UNUSED:
                         assert(tokens[token_id] == token)
                     tokens[token_id] = token
                     scores[token_id] = -1000.0
@@ -2435,19 +2478,7 @@ class Gemma2Model(Model):
     model_arch = gguf.MODEL_ARCH.GEMMA2
 
     def set_vocab(self):
-        tokens, scores, toktypes = self._create_vocab_sentencepiece()
-        # hack: This is required so that we can properly use start/end-of-turn for chat template
-        for i in range(108):
-            # including <unusedX>, <start_of_turn>, <end_of_turn>
-            toktypes[i] = SentencePieceTokenTypes.CONTROL
-        self.gguf_writer.add_tokenizer_model("llama")
-        self.gguf_writer.add_tokenizer_pre("default")
-        self.gguf_writer.add_token_list(tokens)
-        self.gguf_writer.add_token_scores(scores)
-        self.gguf_writer.add_token_types(toktypes)
-
-        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
-        special_vocab.add_to_gguf(self.gguf_writer)
+        self._set_vocab_sentencepiece()
 
         self.gguf_writer.add_add_space_prefix(False)
@@ -2771,7 +2802,7 @@ class ArcticModel(Model):
         tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
         scores: list[float] = [-10000.0] * vocab_size
-        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
 
         for token_id in range(tokenizer.vocab_size()):
@@ -3026,7 +3057,7 @@ class T5Model(Model):
         tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
         scores: list[float] = [-10000.0] * vocab_size
-        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
 
         for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.IdToPiece(token_id)
@@ -3244,15 +3275,14 @@ class ChatGLMModel(Model):
             if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                 score = tokenizer.tokenizer.sp_model.get_score(token_id)
 
-            if len(piece) == 0:
-                text = f"[PAD{token_id}]".encode("utf-8")
-
             if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                 if piece in special_tokens:
-                    # show special tokens in prompt
-                    toktype = SentencePieceTokenTypes.USER_DEFINED
+                    toktype = SentencePieceTokenTypes.CONTROL
+                elif len(piece) == 0:
+                    text = f"[PAD{token_id}]".encode("utf-8")
+                    toktype = SentencePieceTokenTypes.UNUSED
                 else:
-                    toktype = SentencePieceTokenTypes.UNKNOWN
+                    toktype = SentencePieceTokenTypes.USER_DEFINED
             tokens.append(text)
             scores.append(score)
             toktypes.append(toktype)
@@ -3341,7 +3371,7 @@ class ChatGLMModel(Model):
         for i in range(vocab_size):
            if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             elif reverse_vocab[i] in added_vocab:
                 tokens.append(reverse_vocab[i])
                 if tokenizer.added_tokens_decoder[i].special:
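
Aside: the "pre-normalize user-defined spaces" lines above rewrite U+2581,
the SentencePiece space marker, to a plain space. A standalone illustration
using the same byte literal as the diff:

    piece = "\u2581\u2581hello"                # "▁▁hello", as a piece is stored in the vocab
    marker = b"\xe2\x96\x81".decode("utf-8")   # UTF-8 bytes of U+2581
    assert marker == "\u2581"
    print(repr(piece.replace(marker, " ")))    # '  hello'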

src/llama.cpp

@@ -5419,6 +5419,7 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "command-r") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                 tokenizer_pre == "qwen2") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
@@ -5652,7 +5653,7 @@ static void llm_load_vocab(
     // build special tokens cache
     {
         for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
-            if (!(vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL)) {
+            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
                 vocab.cache_special_tokens.push_back(id);
             }
         }
@@ -15418,17 +15419,6 @@ struct llm_tokenizer_bpe {
                     "[0-9][0-9][0-9]",
                 };
                 break;
-            case LLAMA_VOCAB_PRE_TYPE_MPT:
-                // TODO: MPT pre-tokenization regexes are unknown
-                //       the following are close, but not exact. run the following:
-                //       ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf
-                GGML_ASSERT("MPT pre-tokenization regexes are unknown - fixes needed");
-                regex_exprs = {
-                    "\\s?\\p{L}+",
-                    "\\s?\\p{P}+",
-                    "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                };
-                break;
             case LLAMA_VOCAB_PRE_TYPE_STARCODER:
             case LLAMA_VOCAB_PRE_TYPE_REFACT:
             case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
@@ -15438,6 +15428,7 @@ struct llm_tokenizer_bpe {
                 };
                 break;
             case LLAMA_VOCAB_PRE_TYPE_GPT2:
+            case LLAMA_VOCAB_PRE_TYPE_MPT:
             case LLAMA_VOCAB_PRE_TYPE_OLMO:
             case LLAMA_VOCAB_PRE_TYPE_JAIS:
                 regex_exprs = {
@@ -15464,8 +15455,8 @@ struct llm_tokenizer_bpe {
                 break;
             case LLAMA_VOCAB_PRE_TYPE_VIKING:
                 regex_exprs = {
+                    "\\p{N}",
                     " ?[^(\\s|.,!?…。,、।۔،)]+",
-                    "\\p{N}",
                 };
                 break;
             default:
@@ -16185,12 +16176,20 @@ struct fragment_buffer_variant {
 
 // #define PRETOKENIZERDEBUG
 
-static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
+static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
     // for each special token
     for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
         const auto & data = vocab.id_to_token[special_id];
         const auto & special_token = data.text;
 
+        if (!parse_special && (data.attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN))) {
+            // Ignore control and unknown tokens when parse_special == false
+            continue;
+            // User-defined tokens are still pre-tokenized before everything else
+            // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
+            // This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
+        }
+
         // for each text fragment
         std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
         while (it != buffer.end()) {
@@ -16303,7 +16302,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
 
     if (!raw_text.empty()) {
         fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
-        if (parse_special) tokenizer_st_partition(vocab, fragment_buffer);
+        tokenizer_st_partition(vocab, fragment_buffer, parse_special);
     }
 
     switch (vocab.type) {
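
Aside: a toy Python re-implementation of the partitioning rule introduced
above, for illustration only (the real logic lives in tokenizer_st_partition).
Control and unknown tokens split the text only when parse_special is true,
while user-defined tokens are always pre-tokenized first:

    def partition(text: str, specials: dict[str, str], parse_special: bool) -> list[str]:
        fragments = [text]
        for tok, attr in specials.items():
            if not parse_special and attr in ("control", "unknown"):
                continue  # mirrors the new `continue` in tokenizer_st_partition
            out: list[str] = []
            for frag in fragments:
                if frag in specials:  # already extracted as a special token
                    out.append(frag)
                    continue
                parts = frag.split(tok)
                for i, part in enumerate(parts):
                    if part:
                        out.append(part)
                    if i < len(parts) - 1:
                        out.append(tok)
            fragments = out
        return fragments

    specials = {"<s>": "control", "<mask>": "user_defined"}
    print(partition("<s>one <mask> two", specials, parse_special=False))
    # ['<s>one ', '<mask>', ' two'] -- the user-defined "<mask>" is still
    # split out, but the control token "<s>" stays embedded in the raw text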

tests/test-tokenizer-0.cpp

@@ -195,7 +195,7 @@ int main(int argc, char **argv) {
     const bool add_special = false;
 
     for (const auto & test_kv : k_tests) {
-        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, true);
+        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, false);
 
         printf("\n");
         printf("src: '%s'\n", test_kv.first.c_str());
@@ -253,7 +253,7 @@ int main(int argc, char **argv) {
     {
         const auto t_start = ggml_time_us();
 
-        res = llama_tokenize(ctx, text, add_special, true);
+        res = llama_tokenize(ctx, text, add_special, false);
 
         const auto t_end = ggml_time_us();

tests/test-tokenizer-random.py

@@ -20,7 +20,7 @@ from typing import Any, Iterator, cast
 from typing_extensions import Buffer
 
 import cffi
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, PreTrainedTokenizer
 
 
 logger = logging.getLogger("test-tokenizer-random")
@@ -129,7 +129,7 @@ class Tokenizer:
 class TokenizerGroundtruth (Tokenizer):
 
     def __init__(self, dir_tokenizer: str):
-        self.model = AutoTokenizer.from_pretrained(dir_tokenizer)
+        self.model: PreTrainedTokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
         # guess BOS and EOS
         ids = self.encode("a")
         assert 1 <= len(ids) <= 3
@@ -143,7 +143,7 @@ class TokenizerGroundtruth (Tokenizer):
         self.vocab = list(sorted(self.vocab))
         # tokens and lists
         self.special_tokens = list(self.model.all_special_tokens)
-        self.added_tokens = list(self.model.added_tokens_encoder)
+        self.added_tokens = self.model.batch_decode(self.model.added_tokens_encoder.values(), skip_special_tokens=False)
         self.bos_token = self.model.bos_token
         self.eos_token = self.model.eos_token
@@ -232,6 +232,7 @@ def generator_custom_text_edge_cases() -> Iterator[str]:
         'a\na',            # bert fail
         '"`',              # falcon
         ' \u2e4e',         # falcon
+        '\n\x0b ',         # falcon
         'a\xa0\xa0\x00b',  # jina-v2-es
         'one <mask>',      # jina-v2-es  <mask> lstrip=true
         'a </s> b',        # rstrip phi-3
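
Aside: the added_tokens change above swaps the raw keys of
added_tokens_encoder for their decoded forms, presumably so the reference
strings match what the tokenizer's decoder actually produces. A hypothetical
way to inspect the difference, assuming any local HF tokenizer with added
tokens (the path is a placeholder):

    from transformers import AutoTokenizer

    model = AutoTokenizer.from_pretrained("path/to/tokenizer")  # placeholder
    raw = list(model.added_tokens_encoder)  # old approach: raw dict keys
    dec = model.batch_decode(model.added_tokens_encoder.values(),
                             skip_special_tokens=False)  # new approach: decoded forms
    print([(r, d) for r, d in zip(raw, dec) if r != d])  # tokens whose decoded form differs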