From b6f536dfb37051b677735303845e3eaceb354a01 Mon Sep 17 00:00:00 2001
From: digiwombat
Date: Tue, 30 May 2023 21:14:24 -0400
Subject: [PATCH] Cull to end of generated_text when encountering a stopping
 string in case it's a partial token.

Will roll this back if it proves to be a problem.
---
 examples/server/server.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index a4bb38244..3d9cd5ca1 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -280,7 +280,7 @@ struct llama_server_context
         for (const std::string& word : params.antiprompt) {
             size_t i = generated_text.find(word, generated_text.size() - (word.size() + token_text.size()));
             if (i != std::string::npos) {
-                generated_text.erase(generated_text.begin() + i, generated_text.begin() + i + word.size());
+                generated_text.erase(generated_text.begin() + i, generated_text.end());
                 stopping_word = word;
                 has_next_token = false;
                 break;
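
Note (illustration, not part of the patch): a minimal standalone sketch of why
culling to end() matters when a partially decoded token lands bytes after the
stop word before the antiprompt scan runs. The buffer contents and variable
names below are hypothetical, mirroring but not copied from server.cpp.

    #include <iostream>
    #include <string>

    int main() {
        // Suppose the model emitted the stop word followed by the first
        // byte of a multi-byte token; the buffer now holds stray bytes
        // past the end of the stop word.
        std::string generated_text = "Hello world</s>\xe6";
        const std::string word = "</s>";

        size_t i = generated_text.find(word);
        if (i != std::string::npos) {
            // Old behavior: erase only the stop word itself, which would
            // leave the stray partial-token byte "\xe6" in the output.
            // New behavior: cull from the match to the end of the buffer.
            generated_text.erase(generated_text.begin() + i, generated_text.end());
        }
        std::cout << generated_text << '\n'; // prints "Hello world"
    }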