From 3c211c64bd6520aa64a94a632cb24ee0f294552f Mon Sep 17 00:00:00 2001
From: rabidcopy
Date: Tue, 21 Mar 2023 12:53:32 -0500
Subject: [PATCH] tokenize reverse prompt when needed

makes this PR compatible with https://github.com/ggerganov/llama.cpp/pull/330
---
 main.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/main.cpp b/main.cpp
index dd2cc160e..2235f7b25 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1004,7 +1004,12 @@ int main(int argc, char ** argv) {
             // replace end of text token with newline token when in interactive mode
             if (id == EOS_TOKEN_ID && params.interactive) {
                 id = NEWLINE_TOKEN_ID;
-                if (!antipromptv_inp.empty()) {
+                if (params.antiprompt.size() != 0) {
+                    // tokenize the reverse prompt to inject
+                    std::vector<std::vector<llama_vocab::id>> antipromptv_inp;
+                    for (auto antiprompt : params.antiprompt){
+                        antipromptv_inp.push_back(::llama_tokenize(vocab, antiprompt, false));
+                    }
                     // inject the reverse prompt to return control to the user
                     auto& ap_inp = antipromptv_inp.front();
                     embd_inp.insert(embd_inp.end(), ap_inp.begin(), ap_inp.end());
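
For context, below is a minimal, self-contained sketch of the behavior this hunk adds: when the sampled token is end-of-text in interactive mode, swap it for a newline, tokenize the reverse prompts on demand (rather than relying on a pre-tokenized `antipromptv_inp`, which PR 330 removes), and inject the first one so control returns to the user. This is not the real llama.cpp API: `toy_tokenize` and the token id constants are invented stand-ins for `::llama_tokenize(vocab, text, false)` and the model's actual special tokens.

```cpp
#include <cstdio>
#include <string>
#include <vector>

using token_id = int;

const token_id EOS_TOKEN_ID     = 2;  // placeholder value, not the real vocab id
const token_id NEWLINE_TOKEN_ID = 13; // placeholder value, not the real vocab id

// Hypothetical tokenizer: one dummy id per character, standing in for
// ::llama_tokenize(vocab, text, /*add_bos=*/false).
static std::vector<token_id> toy_tokenize(const std::string & text) {
    std::vector<token_id> out;
    out.reserve(text.size());
    for (unsigned char c : text) {
        out.push_back(100 + c);
    }
    return out;
}

int main() {
    bool interactive = true;
    std::vector<std::string> antiprompt = { "User:" }; // params.antiprompt
    std::vector<token_id>    embd_inp;                 // pending input tokens

    token_id id = EOS_TOKEN_ID; // pretend the model just sampled end-of-text

    // mirror of the patched branch: replace EOS with a newline, then
    // tokenize the reverse prompts only when they are actually needed
    // and append the first one to the pending input
    if (id == EOS_TOKEN_ID && interactive) {
        id = NEWLINE_TOKEN_ID;
        if (!antiprompt.empty()) {
            std::vector<std::vector<token_id>> antipromptv_inp;
            for (const auto & ap : antiprompt) {
                antipromptv_inp.push_back(toy_tokenize(ap));
            }
            const auto & ap_inp = antipromptv_inp.front();
            embd_inp.insert(embd_inp.end(), ap_inp.begin(), ap_inp.end());
        }
    }

    std::printf("next token id: %d, injected %zu reverse-prompt tokens\n",
                id, embd_inp.size());
    return 0;
}
```

Tokenizing inside the branch trades a tiny amount of repeated work (the reverse prompt is re-tokenized each time EOS is hit) for independence from any pre-computed `antipromptv_inp`, which is what makes the patch compatible with PR 330.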