From 64b0b7453e83f2e7561afc0aa3fc0502e0e50a01 Mon Sep 17 00:00:00 2001
From: goerch
Date: Thu, 14 Sep 2023 17:05:04 +0200
Subject: [PATCH] Fixing the last deviations from sentencepiece indicated by
 test-tokenizer-1

---
 common/common.cpp                |  4 ++--
 llama.cpp                        |  6 ++++--
 llama.h                          |  2 ++
 tests/test-tokenizer-1-llama.cpp | 10 +++++-----
 4 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 6e5d5b4d5..4a076b0af 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -791,10 +791,10 @@ std::vector<llama_token> llama_tokenize(
     // upper limit for the number of tokens
     int n_tokens = text.length() + add_bos;
     std::vector<llama_token> result(n_tokens);
-    n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
+    n_tokens = llama_tokenize(ctx, text.c_str(), text.length(), result.data(), result.size(), add_bos);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
+        int check = llama_tokenize(ctx, text.c_str(), text.length(), result.data(), result.size(), add_bos);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
diff --git a/llama.cpp b/llama.cpp
index cbaf8edac..2fea1cac1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -6202,19 +6202,21 @@ llama_token llama_token_nl(const struct llama_context * ctx) {
 int llama_tokenize(
         struct llama_context * ctx,
                   const char * text,
+                          int   text_len,
                  llama_token * tokens,
                          int   n_max_tokens,
                         bool   add_bos) {
-    return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
+    return llama_tokenize_with_model(&ctx->model, text, text_len, tokens, n_max_tokens, add_bos);
 }
 
 int llama_tokenize_with_model(
     const struct llama_model * model,
                   const char * text,
+                          int   text_len,
                  llama_token * tokens,
                          int   n_max_tokens,
                         bool   add_bos) {
-    auto res = llama_tokenize_internal(model->vocab, text, add_bos);
+    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos);
 
     if (n_max_tokens < (int) res.size()) {
         // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
diff --git a/llama.h b/llama.h
index 37975bebe..795389d8d 100644
--- a/llama.h
+++ b/llama.h
@@ -374,6 +374,7 @@ extern "C" {
     LLAMA_API int llama_tokenize(
         struct llama_context * ctx,
                   const char * text,
+                          int   text_len,
                  llama_token * tokens,
                          int   n_max_tokens,
                         bool   add_bos);
@@ -381,6 +382,7 @@ extern "C" {
     LLAMA_API int llama_tokenize_with_model(
         const struct llama_model * model,
                   const char * text,
+                          int   text_len,
                  llama_token * tokens,
                          int   n_max_tokens,
                         bool   add_bos);
diff --git a/tests/test-tokenizer-1-llama.cpp b/tests/test-tokenizer-1-llama.cpp
index ab3d822f2..8cc6f1532 100644
--- a/tests/test-tokenizer-1-llama.cpp
+++ b/tests/test-tokenizer-1-llama.cpp
@@ -89,8 +89,7 @@ int main(int argc, char **argv) {
         if (check != str) {
             fprintf(stderr, "%s : error: token %d detokenizes to >%s<(%llu) but tokenization of this detokenizes to >%s<(%llu)\n",
                 __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
-            if(i != 3)
-                return 2;
+            return 2;
         }
     }
 
@@ -100,10 +99,11 @@ int main(int argc, char **argv) {
         std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
         std::string check = llama_detokenize_spm(ctx, tokens);
         if (str != check) {
-            fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
-                __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
-            if(cp != 0 && cp != 9601)
+            if(cp != 9601) {
+                fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
+                    __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
                 return 3;
+            }
         }
     }
 }
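
Note on the API change: llama_tokenize and llama_tokenize_with_model now take the
input length explicitly (text_len) instead of assuming a NUL-terminated string;
internally the text is rebuilt as std::string(text, text_len), so input containing
embedded '\0' bytes survives tokenization, consistent with the test no longer
skipping codepoint 0. For reference, a minimal caller-side sketch of the two-pass
pattern against the new signature, mirroring the updated helper in common/common.cpp
above (the tokenize wrapper name is illustrative, not part of the API):

#include <string>
#include <vector>

#include "llama.h"

// Two-pass tokenization with the explicit-length API: the first call returns a
// negative value whose magnitude is the required token count when the buffer is
// too small; the second call then fills the correctly sized buffer.
static std::vector<llama_token> tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
    // upper limit for the number of tokens
    std::vector<llama_token> result(text.length() + add_bos);
    int n_tokens = llama_tokenize(ctx, text.c_str(), text.length(), result.data(), result.size(), add_bos);
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        n_tokens = llama_tokenize(ctx, text.c_str(), text.length(), result.data(), result.size(), add_bos);
    }
    result.resize(n_tokens);
    return result;
}

Passing the length explicitly also spares the callee a strlen() re-scan when the
caller already holds a std::string, as all call sites in this patch do.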