diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 0c2d7781b..1f04b6f34 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -195,7 +195,7 @@ int main(int argc, char **argv) {
     const bool add_special = false;
 
     for (const auto & test_kv : k_tests) {
-        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special);
+        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, true);
 
         printf("\n");
         printf("src: '%s'\n", test_kv.first.c_str());
@@ -253,7 +253,7 @@ int main(int argc, char **argv) {
         {
             const auto t_start = ggml_time_us();
 
-            res = llama_tokenize(ctx, text, add_special);
+            res = llama_tokenize(ctx, text, add_special, true);
 
             const auto t_end = ggml_time_us();
 
diff --git a/tests/test-tokenizer-1-spm.cpp b/tests/test-tokenizer-1-spm.cpp
index 8c7093489..e82da302c 100644
--- a/tests/test-tokenizer-1-spm.cpp
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -62,8 +62,8 @@ int main(int argc, char ** argv) {
    const int n_vocab = llama_n_vocab(model);
 
    for (int i = 0; i < n_vocab; ++i) {
-        std::string str = llama_detokenize(ctx, std::vector<int>(1, i));
-        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+        std::string str = llama_detokenize(ctx, std::vector<int>(1, i), true);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
         std::string check = llama_detokenize(ctx, tokens);
         if (check != str) {
             fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
@@ -86,7 +86,7 @@ int main(int argc, char ** argv) {
         }
 
         std::string str = unicode_cpt_to_utf8(cp);
-        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
         std::string check = llama_detokenize(ctx, tokens);
         if (cp != 9601 && str != check) {
             fprintf(stderr, "error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
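
Note (not part of the patch): every edited call site exercises the same round-trip check, detokenize a token, re-tokenize the resulting text with special-token parsing enabled, detokenize again, and expect to recover the original string. The sketch below is a minimal, self-contained illustration of that check only; fake_token, fake_tokenize, and fake_detokenize are hypothetical stand-ins with assumed semantics, not the common-library helpers (llama_tokenize/llama_detokenize) the tests actually call.

// Round-trip sketch: token -> text -> tokens -> text should be stable when the
// special flag is passed consistently in both directions (assumed semantics).
#include <cstdio>
#include <string>
#include <vector>

using fake_token = int;

// Stand-in detokenizer: token 0 is a "special" token rendered as "<s>" only
// when special rendering is enabled; other tokens map to single letters.
static std::string fake_detokenize(const std::vector<fake_token> & tokens, bool special) {
    std::string out;
    for (fake_token t : tokens) {
        if (special && t == 0) {
            out += "<s>";
        } else {
            out += char('a' + t % 26);
        }
    }
    return out;
}

// Stand-in tokenizer: with parse_special enabled, "<s>" collapses back to token 0.
static std::vector<fake_token> fake_tokenize(const std::string & text, bool parse_special) {
    if (parse_special && text == "<s>") {
        return { 0 };
    }
    std::vector<fake_token> out;
    for (char c : text) {
        out.push_back(c - 'a');
    }
    return out;
}

int main() {
    // Same shape as the loop in test-tokenizer-1-spm.cpp, over a toy vocabulary.
    for (fake_token i = 0; i < 26; ++i) {
        const std::string str   = fake_detokenize({ i }, /*special       =*/ true);
        const auto        toks  = fake_tokenize(str,     /*parse_special =*/ true);
        const std::string check = fake_detokenize(toks,  /*special       =*/ true);
        if (check != str) {
            fprintf(stderr, "round-trip failed for token %d: '%s' vs '%s'\n", i, str.c_str(), check.c_str());
            return 1;
        }
    }
    return 0;
}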