diff --git a/main.cpp b/main.cpp
index 7e8853296..097b17eef 100644
--- a/main.cpp
+++ b/main.cpp
@@ -175,9 +175,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         vocab.token_to_id[word] = i;
         vocab.id_to_token[i] = word;
         vocab.score[i] = score;
-
-        // dynamically determine the newline token
-        const auto NEWLINE_TOKEN_ID = vocab.token_to_id["\n"];
 
         //if (i < 30000) {
         //    fprintf(stderr, "%s: vocab[%d] = '%s'\n", __func__, i, word.c_str());
@@ -952,6 +949,9 @@ int main(int argc, char ** argv) {
     bool input_noecho = false;
 
     int remaining_tokens = params.n_predict;
+
+    // dynamically determine the newline token
+    const auto NEWLINE_TOKEN_ID = vocab.token_to_id["\n"];
 
     // set the color for the prompt which will be output initially
     if (params.use_color) {
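
For context, a minimal standalone sketch of why this move matters, assuming vocab.token_to_id behaves like a std::map<std::string, int> (the names below are illustrative stand-ins, not taken from main.cpp): the removed declaration sat inside the per-token vocab-loading loop, so the constant was recomputed and discarded on every iteration, and a lookup of "\n" before that entry had been loaded would default-insert a bogus id of 0. Hoisting the lookup into main(), after the model (and thus the full vocab) has been loaded, performs it once and makes the id visible to the generation code that needs it.

#include <cstdio>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> token_to_id; // stand-in for vocab.token_to_id

    // vocab-loading loop: "\n" is not present until its own iteration
    for (int i = 0; i < 3; ++i) {
        const std::string word = (i == 2) ? "\n" : "tok" + std::to_string(i);
        token_to_id[word] = i;
        // looking up token_to_id["\n"] here (as the removed code did) would
        // default-insert {"\n", 0} on early iterations, and the resulting
        // local constant went out of scope at the end of each iteration anyway
    }

    // after loading: one lookup, done once, usable by the rest of main()
    const auto NEWLINE_TOKEN_ID = token_to_id["\n"];
    std::printf("newline token id: %d\n", NEWLINE_TOKEN_ID);
    return 0;
}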