llama : pre-tokenize non-special user-defined tokens first

This commit is contained in:
Francis Couture-Harpin 2024-07-07 15:32:42 -04:00
parent ac0f33c920
commit d5d30b20c3
2 changed files with 21 additions and 37 deletions

View file

@@ -195,7 +195,7 @@ int main(int argc, char **argv) {
const bool add_special = false;
for (const auto & test_kv : k_tests) {
-        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, true);
+        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special);
printf("\n");
printf("src: '%s'\n", test_kv.first.c_str());
@@ -253,7 +253,7 @@ int main(int argc, char **argv) {
{
const auto t_start = ggml_time_us();
-            res = llama_tokenize(ctx, text, add_special, true);
+            res = llama_tokenize(ctx, text, add_special);
const auto t_end = ggml_time_us();