tests : write a Python tokenizer test (wip)

Georgi Gerganov 2023-08-26 15:55:23 +03:00
parent a2ca4e9de9
commit 5cad62bce4
2 changed files with 39 additions and 19 deletions

tests/test-tokenizer-0.cpp

@@ -27,6 +27,8 @@ static const std::map<std::string, std::vector<llama_token>> & k_tests() {
         { "Hello World",            { 1, 15043, 2787, }, },
         { " Hello World",           { 1, 29871, 15043, 2787, }, },
         { " Hello World!",          { 1, 29871, 15043, 2787, 29991, }, },
+        { "Hello, world!",          { 1, 15043, 29892, 3186, 29991, }, },
+        { " Hello, world!",         { 1, 29871, 15043, 29892, 3186, 29991, }, },
         { " this is 🦙.cpp",        { 1, 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
         { "w048 7tuijk dsdfhu",     { 1, 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
         { "нещо на Български",      { 1, 1538, 4851, 665, 1386, 29713, 1305, }, },
@@ -40,12 +42,12 @@ static const std::map<std::string, std::vector<llama_token>> & k_tests() {
           243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598,
           313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681,
           313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
-        { "Hello",                  { 1, 15043 }, },
-        { " Hello",                 { 1, 29871, 15043 }, },
-        { "  Hello",                { 1, 259, 15043 }, },
-        { "   Hello",               { 1, 1678, 15043 }, },
-        { "    Hello",              { 1, 268, 15043 }, },
-        { "    Hello\n    Hello",   { 1, 268, 15043, 13, 1678, 15043 }, },
+        { "Hello",                  { 1, 15043, }, },
+        { " Hello",                 { 1, 29871, 15043, }, },
+        { "  Hello",                { 1, 259, 15043, }, },
+        { "   Hello",               { 1, 1678, 15043, }, },
+        { "    Hello",              { 1, 268, 15043, }, },
+        { "    Hello\n    Hello",   { 1, 268, 15043, 13, 1678, 15043, }, },
     };

     return _k_tests;
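For reference, the expected token ids in the table above are the ones the SentencePiece reference tokenizer produces, which llama.cpp's own tokenizer is expected to match; the Python script added below is a first step toward generating such checks automatically. A minimal sketch of that round trip against a subset of these entries (the model path here is hypothetical, not part of the commit):

from sentencepiece import SentencePieceProcessor

# Subset of the C++ k_tests() table: text -> expected ids (1 is the BOS token).
tests = {
    "Hello World":    [1, 15043, 2787],
    "Hello, world!":  [1, 15043, 29892, 3186, 29991],
    " Hello, world!": [1, 29871, 15043, 29892, 3186, 29991],
}

# Hypothetical path; point this at a LLaMA 'tokenizer.model'.
sp = SentencePieceProcessor("models/tokenizer.model")

for text, expected in tests.items():
    ids = sp.encode(text, add_bos=True)
    status = "OK" if ids == expected else f"FAIL, got {ids}"
    print(f"{text!r}: {status}")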

tests/test-tokenizer-0.py (new file)

@@ -0,0 +1,18 @@
+import os
+import sys
+import argparse
+
+from sentencepiece import SentencePieceProcessor
+
+parser = argparse.ArgumentParser()
+parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
+args = parser.parse_args()
+
+dir_tokenizer = args.dir_tokenizer
+
+tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
+
+text = 'Hello, world!'
+print(text)
+print(tokenizer.encode(text, add_bos=True))
+print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
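Usage of the new script, assuming the LLaMA 'tokenizer.model' sits in a local models/ directory (the path is illustrative):

python3 tests/test-tokenizer-0.py models

If the vocabulary matches, the output should look something like the following (SentencePiece drops the BOS control token when decoding, so the round trip returns the original text); note that the id list agrees with the "Hello, world!" entry added to the C++ test table above:

Hello, world!
[1, 15043, 29892, 3186, 29991]
Hello, world!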