Cull to end of generated_text when encountering a stopping string in case it's a partial token.
Will roll this back if it proves to be a problem.
This commit is contained in:
parent
9197674a6b
commit
b6f536dfb3
1 changed file with 1 addition and 1 deletion
|
@ -280,7 +280,7 @@ struct llama_server_context
|
|||
for (const std::string& word : params.antiprompt) {
|
||||
size_t i = generated_text.find(word, generated_text.size() - (word.size() + token_text.size()));
|
||||
if (i != std::string::npos) {
|
||||
generated_text.erase(generated_text.begin() + i, generated_text.begin() + i + word.size());
|
||||
generated_text.erase(generated_text.begin() + i, generated_text.end());
|
||||
stopping_word = word;
|
||||
has_next_token = false;
|
||||
break;
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue