From e8b3955346cf70af8b2be312d047cdc82e5c40f9 Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Tue, 9 Jul 2024 00:55:54 +0200
Subject: [PATCH 1/7] Fix pyparse problems: gcc inline functions

---
 tests/test-tokenizer-random.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index 48cab8a1e..11baf6989 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -36,7 +36,7 @@ class LibLlama:
         self.lib.llama_backend_init()
 
     def _load_libllama_cffi(self, path_llama_h: str, path_includes: list[str], path_libllama: str):
-        cmd = ["gcc", "-E", "-P", "-D__restrict=", "-D__attribute__(x)=", "-D__asm__(x)="]
+        cmd = ["gcc", "-O0", "-fno-inline", "-E", "-P", "-D__restrict=", "-D__attribute__(x)=", "-D__asm__(x)="]
         cmd += ["-I" + path for path in path_includes] + [path_llama_h]
         res = subprocess.run(cmd, stdout=subprocess.PIPE)
         assert (res.returncode == 0)

From 9307c3fd46470658d8432b8de193c66738c92fd3 Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Tue, 9 Jul 2024 00:59:29 +0200
Subject: [PATCH 2/7] Test l/r-strip for more than 4 spaces

---
 tests/test-tokenizer-random.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index 11baf6989..5b31cfc9c 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -273,7 +273,7 @@ def generator_apostrophe() -> Iterator[str]:
 
 
 def generator_added_lr_strip(tokenizer: TokenizerGroundtruth) -> Iterator[str]:
-    WHITESPACES = ["", " ", "  ", "\n", "\r\n", "\n\n", "\t", "\t\t"]
+    WHITESPACES = ["", " ", "  ", "\n", "\r\n", "\n\n", "\t", "\t\t", "        "]
     all_tokens = list(sorted(set(tokenizer.special_tokens + tokenizer.added_tokens)))
     for token in all_tokens:
         for lstrip in WHITESPACES:

From a943b424169b051d37f219043c10b17d6efa7a44 Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Tue, 9 Jul 2024 01:02:44 +0200
Subject: [PATCH 3/7] Improve mismatch range localization

---
 tests/test-tokenizer-random.py | 56 ++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 20 deletions(-)

diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index 5b31cfc9c..9c261fb6a 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -404,14 +404,6 @@ def generator_random_vocab_words(tokenizer: TokenizerGroundtruth, iterations=100
 
 def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLlamaCpp, generator: Iterator[str]):
 
-    def find_first_mismatch(ids1: list[int], ids2: list[int]):
-        for i, (a, b) in enumerate(zip(ids1, ids2)):
-            if a != b:
-                return i
-        if len(ids1) == len(ids2):
-            return -1
-        return min(len(ids1), len(ids2))
-
     def check_detokenizer(text: str, text1: str, text2: str) -> bool:
         if text1 == text2:  # equal to TokenizerGroundtruth?
             return True
@@ -431,6 +423,7 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
     t_start = time.perf_counter()
     encode_errors = 0
     decode_errors = 0
+    total_tests = 0
     MAX_ERRORS = 10
 
     logger.info("%s: %s" % (generator.__name__, "ini"))
@@ -450,21 +443,44 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
         t_encode2 += t2 - t1
         t_decode1 += t3 - t2
         t_decode2 += t4 - t3
-        if encode_errors < MAX_ERRORS and ids1 != ids2:
-            i = find_first_mismatch(ids1, ids2)
-            ids1 = list(ids1)[max(0, i - 2) : i + 5 + 1]
-            ids2 = list(ids2)[max(0, i - 2) : i + 5 + 1]
+        # compare
+        encode_ok = ids1 == ids2
+        decode_ok = check_detokenizer(text, text1, text2)
+        encode_errors += not encode_ok
+        decode_errors += not decode_ok
+        total_tests += 1
+        if (encode_errors < MAX_ERRORS and not encode_ok) or (decode_errors < MAX_ERRORS and not decode_ok):
+            def _compare(text: str):
+                ids1 = tokenizer1.encode(text)
+                ids2 = tokenizer2.encode(text)
+                text1 = tokenizer1.decode(ids1)
+                text2 = tokenizer2.decode(ids1)
+                encode_ok = ids1 == ids2
+                decode_ok = check_detokenizer(text, text1, text2)
+                ok = encode_ok and decode_ok
+                return ok, ids1, ids2, text1, text2
+            a, b = 0, len(text)
+            for step in [64, 32, 16, 8, 4, 2, 1]:
+                while a < b:
+                    t = max(a, b - step)
+                    if _compare(text[a : t])[0]:
+                        break
+                    b = t
+            for step in [64, 32, 16, 8, 4, 2, 1]:
+                while a < b:
+                    t = min(a + step, b)
+                    if _compare(text[t : b])[0]:
+                        break
+                    a = t
+            ok, ids1, ids2, text1, text2 = _compare(text[a : b])
+            assert a <= b and not ok
+            logger.error(" Text:" + repr(text[a : b]))
+            logger.error(" " + " ".join(repr(x) + ":" + hex(ord(x)) for x in text[a : b]))
             logger.error(" Expected: " + str(ids1))
             logger.error(" Result:   " + str(ids2))
-            encode_errors += 1
+            logger.error(" Expected: " + " ".join(repr(x) + ":" + hex(ord(x)) for x in text1))
+            logger.error(" Result:   " + " ".join(repr(x) + ":" + hex(ord(x)) for x in text2))
            logger.error(f" {encode_errors=}")
-        if decode_errors < MAX_ERRORS and not check_detokenizer(text, text1, text2):
-            i = find_first_mismatch(text1, text2)
-            text1 = list(text1[max(0, i - 2) : i + 5 + 1])
-            text2 = list(text2[max(0, i - 2) : i + 5 + 1])
-            logger.error(" Expected: " + " ".join(hex(ord(x)) for x in text1))
-            logger.error(" Result:   " + " ".join(hex(ord(x)) for x in text2))
-            decode_errors += 1
             logger.error(f" {decode_errors=}")
         if encode_errors >= MAX_ERRORS and decode_errors >= MAX_ERRORS:
             logger.error(f" EXIT: {encode_errors=} {decode_errors=}")

From dec64ef7930f2ff69d9c67afe2c8450b6aaf917d Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Tue, 9 Jul 2024 01:04:22 +0200
Subject: [PATCH 4/7] Compare vocabs

---
 tests/test-tokenizer-random.py | 81 +++++++++++++++++++++++++++++-----
 1 file changed, 70 insertions(+), 11 deletions(-)

diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index 9c261fb6a..e7fed3fa3 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -112,9 +112,25 @@ class LibLlamaModel:
         num = self.lib.llama_detokenize(self.model, self.token_ids, len(ids), self.text_buff, len(self.text_buff), remove_special, unparse_special)
         return str(self.ffi.buffer(self.text_buff, num), encoding="utf-8", errors="replace")  # replace errors with '\uFFFD'
 
+    def get_vocab(self, detokenize=False) -> list[str]:
+        vocab: list[str] = []
+        num_tokens = self.lib.llama_n_vocab(self.model)
+        for id in range(num_tokens):
+            if detokenize:
+                text = self.detokenize([id], remove_special=False, unparse_special=True)
+            else:
+                text = self.lib.llama_token_get_text(self.model, id)
+                text = self.ffi.string(text)
+                text = str(text, encoding="utf-8", errors="replace")  # replace errors with '\uFFFD'
+            vocab.append(text)
+        return vocab
+
 
 class Tokenizer:
 
+    def get_vocab(self, detokenize=False) -> list[str]:
+        raise NotImplementedError
+
     def encode(self, text: str) -> list[int]:
         raise NotImplementedError
 
@@ -125,7 +141,7 @@ class Tokenizer:
 class TokenizerGroundtruth (Tokenizer):
 
     def __init__(self, dir_tokenizer: str):
-        self.model = AutoTokenizer.from_pretrained(dir_tokenizer)
+        self.model = AutoTokenizer.from_pretrained(dir_tokenizer, trust_remote_code=False)
         # guess BOS and EOS
         ids = self.encode("a")
         assert 1 <= len(ids) <= 3
@@ -134,15 +150,24 @@ class TokenizerGroundtruth (Tokenizer):
         self.add_bos_token = getattr(self.model, "add_bos_token", add_bos_token)
         self.add_eos_token = getattr(self.model, "add_eos_token", add_eos_token)
         # build vocab
-        tokens = list(self.model.get_vocab().values())
-        self.vocab = self.model.batch_decode(tokens, skip_special_tokens=True)
-        self.vocab = list(sorted(self.vocab))
+        self.vocab = self.get_vocab(detokenize=True)
         # tokens and lists
         self.special_tokens = list(self.model.all_special_tokens)
         self.added_tokens = list(self.model.added_tokens_encoder)
         self.bos_token = self.model.bos_token
         self.eos_token = self.model.eos_token
 
+    def get_vocab(self, detokenize=False) -> list[str]:
+        max_token_id = max(self.model.get_vocab().values())
+        if detokenize:
+            ids = list(range(max_token_id + 1))
+            vocab = self.model.batch_decode(ids, skip_special_tokens=False)
+        else:
+            vocab = [None] * (max_token_id + 1)
+            for text, id in self.model.get_vocab().items():
+                vocab[id] = text
+        return vocab
+
     def encode(self, text: str) -> list[int]:
         return self.model.encode(text, add_special_tokens=True)
 
@@ -159,6 +184,9 @@ class TokenizerLlamaCpp (Tokenizer):
         self.libllama = LibLlama()
         self.model = LibLlamaModel(self.libllama, vocab_file, mparams=dict(vocab_only=True), cparams=dict(n_ctx=4096))
 
+    def get_vocab(self, detokenize=False) -> list[str]:
+        return self.model.get_vocab(detokenize)
+
     def encode(self, text: str) -> list[int]:
         return self.model.tokenize(text, add_special=True, parse_special=True)
 
@@ -491,6 +519,34 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
     logger.info(f"{generator.__name__}: end, {t_encode1=:.3f} {t_encode2=:.3f} {t_decode1=:.3f} {t_decode2=:.3f} {t_total=:.3f}")
 
 
+def compare_vocabs(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLlamaCpp):
+
+    MAX_PRINT_ERRORS = 10
+
+    logger.info("compare_vocabs: ini")
+
+    t_start = time.perf_counter()
+
+    for detokenize in (False, True):
+        vocab1 = tokenizer1.get_vocab(detokenize)
+        vocab2 = tokenizer2.get_vocab(detokenize)
+        if vocab1 != vocab2:
+            num_errors = 0
+            for i in range(max(len(vocab1), len(vocab2))):
+                text1 = vocab1[i] if i < len(vocab1) else ""
+                text2 = vocab2[i] if i < len(vocab2) else ""
+                is_unused = text1.startswith("[UNUSED_TOKEN_")  # AutoTokenizer adds more unused tokens than SentencePiece ?
+                if text1 != text2 and is_unused and text2:
+                    num_errors += 1
+                    if num_errors < MAX_PRINT_ERRORS:
+                        logger.error(f" {detokenize=} id={i} expected={repr(text1)} result={repr(text2)}")
+            if num_errors:
+                logger.error(f" {num_errors=}")
+
+    t_total = time.perf_counter() - t_start
+    logger.info(f"compare_vocabs: end, {t_total=:.3f}")
+
+
 def main(argv: list[str] = None):
     parser = argparse.ArgumentParser()
     parser.add_argument("vocab_file", help="path to vocab 'gguf' file")
@@ -504,13 +560,16 @@ def main(argv: list[str] = None):
     tokenizer1 = TokenizerGroundtruth(args.dir_tokenizer)
     tokenizer2 = TokenizerLlamaCpp(args.vocab_file)
 
-    # compare_tokenizers(tokenizer1, tokenizer2, generator_custom_text())
-    # compare_tokenizers(tokenizer1, tokenizer2, generator_custom_text_edge_cases())
-    compare_tokenizers(tokenizer1, tokenizer2, generator_ascii_lr_strip())
-    compare_tokenizers(tokenizer1, tokenizer2, generator_apostrophe())
-    compare_tokenizers(tokenizer1, tokenizer2, generator_unicodes())
-    compare_tokenizers(tokenizer1, tokenizer2, generator_vocab_words(tokenizer1))
-    compare_tokenizers(tokenizer1, tokenizer2, generator_added_lr_strip(tokenizer1))
+    compare_vocabs(tokenizer1, tokenizer2)
+
+    compare_tokenizers(tokenizer1, tokenizer2, generator_custom_text())
+    compare_tokenizers(tokenizer1, tokenizer2, generator_custom_text_edge_cases())
+    # compare_tokenizers(tokenizer1, tokenizer2, generator_representative(tokenizer1))
+    # compare_tokenizers(tokenizer1, tokenizer2, generator_ascii_lr_strip())
+    # compare_tokenizers(tokenizer1, tokenizer2, generator_apostrophe())
+    # compare_tokenizers(tokenizer1, tokenizer2, generator_unicodes())
+    # compare_tokenizers(tokenizer1, tokenizer2, generator_vocab_words(tokenizer1))
+    # compare_tokenizers(tokenizer1, tokenizer2, generator_added_lr_strip(tokenizer1))
     # compare_tokenizers(tokenizer1, tokenizer2, generator_random_added_tokens(tokenizer1, 10_000))
     # compare_tokenizers(tokenizer1, tokenizer2, generator_random_chars(10_000))
     # compare_tokenizers(tokenizer1, tokenizer2, generator_random_unicodes(10_000))

From c184db74b31f5d898161a0b48366f24fe1d429e7 Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Tue, 9 Jul 2024 01:28:56 +0200
Subject: [PATCH 5/7] Options to manage token text decoding errors

Some models ('jais' and 'command-r') copy the original utf8 on error.
Others ('deepseek') seem to use the replacement character 0xFFFD.
---
 src/llama.cpp | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 2b9ace285..f61f60907 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -21084,7 +21084,8 @@ int32_t llama_tokenize(
     return res.size();
 }
 
-static std::string llama_decode_text(const std::string & text) {
+// errors: 'c': copy, 'i': ignore, 'r': replace 0xFFFD, 'v': verbose
+static std::string llama_decode_text(const std::string & text, const char errors = 'v') {
     std::string decoded_text;
 
     const auto cpts = unicode_cpts_from_utf8(text);
@@ -21093,11 +21094,21 @@ static std::string llama_decode_text(const std::string & text) {
         try {
             decoded_text += unicode_utf8_to_byte(utf8);
         } catch (const std::out_of_range & /*e*/) {
-            decoded_text += "[UNK_BYTE_0x";
-            for (const auto c : utf8) {
-                decoded_text += format("%02x", (uint8_t) c);
+            switch (errors) {
+                case 'c':
+                    decoded_text += utf8;  // copy original
+                    break;
+                case 'r':
+                    decoded_text += "\xEF\xBF\xBD";  // 0xFFFD REPLACEMENT CHARACTER
+                    break;
+                case 'v':
+                    decoded_text += format("[UNK_BYTE_0x%02X]", cpt);
+                    break;
+                case 'i':
+                default:
+                    // ignore
+                    break;
             }
-            decoded_text += text + "]";
         }
     }
 
@@ -21163,7 +21174,7 @@ int32_t llama_token_to_piece(const struct llama_model * model, llama_token token
             if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
                 return _try_copy(token_text.data(), token_text.size());
             } else if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
-                std::string result = llama_decode_text(token_text);
+                std::string result = llama_decode_text(token_text, 'c');  // copy on error //TODO: use a tokenizer variable
                 return _try_copy(result.data(), result.size());
             }
             break;

From 3eb1900e5cc362ebbfcfad940da67eb6a5eaa042 Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Wed, 10 Jul 2024 00:46:19 +0200
Subject: [PATCH 6/7] Skip literal UNUSED token checks

---
 tests/test-tokenizer-random.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index e7fed3fa3..ee79d7c27 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -533,10 +533,20 @@ def compare_vocabs(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLlamaC
         if vocab1 != vocab2:
             num_errors = 0
             for i in range(max(len(vocab1), len(vocab2))):
-                text1 = vocab1[i] if i < len(vocab1) else ""
-                text2 = vocab2[i] if i < len(vocab2) else ""
-                is_unused = text1.startswith("[UNUSED_TOKEN_")  # AutoTokenizer adds more unused tokens than SentencePiece ?
-                if text1 != text2 and is_unused and text2:
+                text1 = vocab1[i] if i < len(vocab1) else None
+                text2 = vocab2[i] if i < len(vocab2) else None
+                if True:  #WIP: SentencePiece adds more unused tokens than AutoTokenizer ?
+                    if text1 is None:
+                        if not text2 or text2.startswith('[PAD'):  # is unused ?  #TODO: use toktypes
+                            text2 = None
+                    else:
+                        #TODO: is "UNUSED_TOKEN_" valid for all models ?
+                        text1 = text1.replace("[UNUSED_TOKEN_", "[PAD")
+                    #if text1 is None or text1.startswith("[UNUSED_TOKEN_"):  # is unused ?
+                    #    text1 = ""
+                    #if text2 is None or text2.startswith('[PAD'):  # is unused ?
+                    #    text2 = ""
+                if text1 != text2:
                     num_errors += 1
                     if num_errors < MAX_PRINT_ERRORS:
                         logger.error(f" {detokenize=} id={i} expected={repr(text1)} result={repr(text2)}")

From c4956e4a05ff0e9d94bd7d71e651f13ba1623614 Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Thu, 11 Jul 2024 19:50:48 +0200
Subject: [PATCH 7/7] update test: fix special and added token lists

---
 tests/test-tokenizer-random.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index ee79d7c27..440c3c2c2 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -152,8 +152,8 @@ class TokenizerGroundtruth (Tokenizer):
         # build vocab
         self.vocab = self.get_vocab(detokenize=True)
         # tokens and lists
-        self.special_tokens = list(self.model.all_special_tokens)
-        self.added_tokens = list(self.model.added_tokens_encoder)
+        self.special_tokens = [self.vocab[i] for i in sorted(self.model.all_special_ids)]
+        self.added_tokens = [self.vocab[i] for i in sorted(self.model.added_tokens_encoder.values())]
         self.bos_token = self.model.bos_token
         self.eos_token = self.model.eos_token
 
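
Note on PATCH 3/7 (illustration only, not part of any patch): the new mismatch localization shrinks the failing text window first from the right and then from the left, using decreasing step sizes, and stops as soon as a further cut would make the comparison pass again. The sketch below runs the same loop in isolation, assuming a toy is_ok() predicate in place of the real encode/decode comparison; is_ok() and localize_mismatch() are illustrative names, not code from the patch.

# Standalone sketch of the range-localization loop from PATCH 3/7.
# is_ok() is a toy stand-in for the real tokenizer comparison: a substring
# "passes" here unless it contains the character 'X'.

def is_ok(text: str) -> bool:
    return "X" not in text


def localize_mismatch(text: str) -> tuple[int, int]:
    assert not is_ok(text)  # the full text is expected to fail
    a, b = 0, len(text)
    # shrink from the right while the prefix text[a:b] still fails
    for step in [64, 32, 16, 8, 4, 2, 1]:
        while a < b:
            t = max(a, b - step)
            if is_ok(text[a:t]):  # cutting this much loses the failure, stop
                break
            b = t
    # then shrink from the left while the suffix text[a:b] still fails
    for step in [64, 32, 16, 8, 4, 2, 1]:
        while a < b:
            t = min(a + step, b)
            if is_ok(text[t:b]):
                break
            a = t
    return a, b


if __name__ == "__main__":
    a, b = localize_mismatch("abcdefXghij")
    print(a, b)  # prints 6 7, isolating the single failing character 'X'

The decreasing steps keep the number of re-tokenizations small for long inputs while still converging on a window that cannot be shrunk by even one character.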
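
Note on PATCH 5/7 (illustration only, not part of any patch): the four 'errors' policies differ only in what gets appended when a token's UTF-8 chunk has no byte mapping. The rough Python sketch below mirrors that switch, assuming a hypothetical byte_for() lookup in place of unicode_utf8_to_byte(); none of these names are the llama.cpp API.

# Rough Python illustration of the error policies added to llama_decode_text().
# byte_for() is a hypothetical stand-in for the real byte lookup: it "fails"
# for anything outside plain ASCII, the way std::out_of_range is thrown in C++.

def byte_for(chunk: str) -> str:
    if len(chunk) == 1 and ord(chunk) < 0x80:
        return chunk
    raise KeyError(chunk)  # no byte mapping for this chunk


def decode_text(chunks: list[str], errors: str = "v") -> str:
    out = []
    for chunk in chunks:
        try:
            out.append(byte_for(chunk))
        except KeyError:
            if errors == "c":    # copy the original chunk
                out.append(chunk)
            elif errors == "r":  # U+FFFD REPLACEMENT CHARACTER
                out.append("\ufffd")
            elif errors == "v":  # verbose marker with the code point
                out.append(f"[UNK_BYTE_0x{ord(chunk[0]):02X}]")
            else:                # 'i' (and anything else): ignore
                pass
    return "".join(out)


if __name__ == "__main__":
    chunks = ["a", "\u00e9", "b"]
    for mode in "cirv":
        print(mode, repr(decode_text(chunks, mode)))

Running it prints one line per mode, which makes the difference between copy, ignore, replace, and verbose easy to compare against the behaviour described in the commit message.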