From 40a66606a89440d90307ffe7556b57790035829d Mon Sep 17 00:00:00 2001
From: jaime-m-p <>
Date: Thu, 20 Jun 2024 19:14:02 +0200
Subject: [PATCH] Using llama_tokenize() in tests

---
 tests/test-tokenizer-0.cpp     | 8 ++++----
 tests/test-tokenizer-1-bpe.cpp | 6 +++---
 tests/test-tokenizer-1-spm.cpp | 6 +++---
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index d478f1041..0c2d7781b 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -199,7 +199,7 @@ int main(int argc, char **argv) {
 
         printf("\n");
         printf("src: '%s'\n", test_kv.first.c_str());
-        printf("res: '%s'\n", llama_detokenize_bpe(ctx, res).c_str());
+        printf("res: '%s'\n", llama_detokenize(ctx, res).c_str());
         printf("tok: ");
         for (const auto & tok : res) {
             printf("%d ", tok);
@@ -216,8 +216,8 @@ int main(int argc, char **argv) {
         if (!correct) {
             fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
             fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
-                llama_detokenize_bpe(ctx, res).c_str(),
-                llama_detokenize_bpe(ctx, test_kv.second).c_str());
+                llama_detokenize(ctx, res).c_str(),
+                llama_detokenize(ctx, test_kv.second).c_str());
             fprintf(stderr, "%s : expected tokens: ", __func__);
             for (const auto & t : test_kv.second) {
                 fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
@@ -272,7 +272,7 @@ int main(int argc, char **argv) {
         }
 
         for (const auto & tok : res) {
-            //ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector<llama_token>{tok})) << "'" << std::endl;
+            //ofs << tok << " '" << string_strip(llama_detokenize(ctx, std::vector<llama_token>{tok})) << "'" << std::endl;
             ofs << tok << "\n";
         }
     }
diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp
index 209a04ad6..e464b3358 100644
--- a/tests/test-tokenizer-1-bpe.cpp
+++ b/tests/test-tokenizer-1-bpe.cpp
@@ -74,7 +74,7 @@ int main(int argc, char **argv) {
     const int n_vocab = llama_n_vocab(model);
 
     for (int i = 0; i < n_vocab; ++i) {
-        std::string str = llama_detokenize_bpe(ctx, std::vector<llama_token>(1, i));
+        std::string str = llama_detokenize(ctx, std::vector<llama_token>(1, i));
         try {
             auto cps = unicode_cpts_from_utf8(str);
             std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
@@ -90,7 +90,7 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "]\n");
                 return 2;
             }
-            std::string check = llama_detokenize_bpe(ctx, tokens);
+            std::string check = llama_detokenize(ctx, tokens);
             if (check != str) {
                 fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                     __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
@@ -123,7 +123,7 @@ int main(int argc, char **argv) {
 
             std::string str = unicode_cpt_to_utf8(cp);
             std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-            std::string check = llama_detokenize_bpe(ctx, tokens);
+            std::string check = llama_detokenize(ctx, tokens);
             if (cp != 9601 && str != check) {
                 fprintf(stderr, "error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                     cp, check.c_str(), check.length(), str.c_str(), str.length());
diff --git a/tests/test-tokenizer-1-spm.cpp b/tests/test-tokenizer-1-spm.cpp
index ac2333dda..8c7093489 100644
--- a/tests/test-tokenizer-1-spm.cpp
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -62,9 +62,9 @@ int main(int argc, char ** argv) {
     const int n_vocab = llama_n_vocab(model);
 
     for (int i = 0; i < n_vocab; ++i) {
-        std::string str = llama_detokenize_spm(ctx, std::vector<llama_token>(1, i));
+        std::string str = llama_detokenize(ctx, std::vector<llama_token>(1, i));
         std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-        std::string check = llama_detokenize_spm(ctx, tokens);
+        std::string check = llama_detokenize(ctx, tokens);
         if (check != str) {
             fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
                 __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
@@ -87,7 +87,7 @@ int main(int argc, char ** argv) {
 
             std::string str = unicode_cpt_to_utf8(cp);
             std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-            std::string check = llama_detokenize_spm(ctx, tokens);
+            std::string check = llama_detokenize(ctx, tokens);
             if (cp != 9601 && str != check) {
                 fprintf(stderr, "error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
                     cp, check.c_str(), check.length(), str.c_str(), str.length());
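
Note: the patch above replaces the tokenizer-specific helpers llama_detokenize_bpe() and llama_detokenize_spm() with a single llama_detokenize() call in all three tests. For illustration only, below is a minimal sketch of what such a unified helper could look like; it assumes the helper simply concatenates per-token pieces via the existing llama_token_to_piece() wrapper from common.h (already used in these tests), which may not match the actual implementation in common.cpp.

    #include "common.h"

    #include <string>
    #include <vector>

    // Sketch only (assumed, not the actual common.cpp code): detokenize by
    // concatenating the piece for each token, independent of BPE vs SPM.
    static std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens) {
        std::string text;
        for (const llama_token tok : tokens) {
            text += llama_token_to_piece(ctx, tok); // same per-token helper the tests print with
        }
        return text;
    }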