Fix tokenizer tests

This commit is contained in:
jaime-m-p 2024-06-20 19:18:23 +02:00
parent 40a66606a8
commit 16a7503dcc
2 changed files with 5 additions and 5 deletions

View file

@@ -195,7 +195,7 @@ int main(int argc, char **argv) {
     const bool add_special = false;
     for (const auto & test_kv : k_tests) {
-        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special);
+        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, true);
         printf("\n");
         printf("src: '%s'\n", test_kv.first.c_str());
@@ -253,7 +253,7 @@ int main(int argc, char **argv) {
     {
         const auto t_start = ggml_time_us();
-        res = llama_tokenize(ctx, text, add_special);
+        res = llama_tokenize(ctx, text, add_special, true);
         const auto t_end = ggml_time_us();

View file

@@ -62,8 +62,8 @@ int main(int argc, char ** argv) {
     const int n_vocab = llama_n_vocab(model);
     for (int i = 0; i < n_vocab; ++i) {
-        std::string str = llama_detokenize(ctx, std::vector<int>(1, i));
-        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+        std::string str = llama_detokenize(ctx, std::vector<int>(1, i), true);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
         std::string check = llama_detokenize(ctx, tokens);
         if (check != str) {
             fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
@@ -86,7 +86,7 @@ int main(int argc, char ** argv) {
     }
     std::string str = unicode_cpt_to_utf8(cp);
-    std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+    std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
     std::string check = llama_detokenize(ctx, tokens);
     if (cp != 9601 && str != check) {
         fprintf(stderr, "error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",