From 94edeaf7dfa1f987a5b2acaef9197b8ba6e3b57a Mon Sep 17 00:00:00 2001
From: rabidcopy
Date: Mon, 20 Mar 2023 14:35:36 -0500
Subject: [PATCH] relocate previous newline token const

---
 main.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/main.cpp b/main.cpp
index 7e8853296..097b17eef 100644
--- a/main.cpp
+++ b/main.cpp
@@ -175,9 +175,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         vocab.token_to_id[word] = i;
         vocab.id_to_token[i] = word;
         vocab.score[i] = score;
-
-        // dynamically determine the newline token
-        const auto NEWLINE_TOKEN_ID = vocab.token_to_id["\n"];
 
         //if (i < 30000) {
         //    fprintf(stderr, "%s: vocab[%d] = '%s'\n", __func__, i, word.c_str());
@@ -952,6 +949,9 @@ int main(int argc, char ** argv) {
     bool input_noecho = false;
 
     int remaining_tokens = params.n_predict;
+
+    // dynamically determine the newline token
+    const auto NEWLINE_TOKEN_ID = vocab.token_to_id["\n"];
 
     // set the color for the prompt which will be output initially
     if (params.use_color) {
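
For context, a minimal self-contained sketch of what the relocated line does: before this patch the constant was declared inside the vocabulary-loading loop of llama_model_load, so it was recomputed on every iteration and went out of scope immediately; after the patch it is computed once in main, after the model is loaded, where the generation code can use it. The gpt_vocab struct below is a simplified stand-in for the one in main.cpp, and the example id is a made-up placeholder.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Simplified stand-in for the vocabulary struct used by main.cpp.
    struct gpt_vocab {
        std::unordered_map<std::string, int32_t> token_to_id;
    };

    int main() {
        gpt_vocab vocab;
        // In the real program this map is filled while the model file is
        // loaded; the id below is a hypothetical placeholder.
        vocab.token_to_id["\n"] = 13;

        // The same lookup as the relocated line: resolve the newline token
        // id dynamically, once, after the vocabulary is fully loaded,
        // rather than hard-coding an id that depends on the model.
        const auto NEWLINE_TOKEN_ID = vocab.token_to_id["\n"];
        std::printf("newline token id: %d\n", (int) NEWLINE_TOKEN_ID);
        return 0;
    }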