From df0e0d094ca8e24bf144ceeabb3a6c4a297803dc Mon Sep 17 00:00:00 2001
From: Randall Fitzgerald
Date: Tue, 23 May 2023 06:22:30 -0700
Subject: [PATCH] Forgot to remove some testing code.

---
 examples/server/server.cpp | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index eccb2f2ad..1ee6ce1d1 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -13,8 +13,6 @@ struct llama_server_context
 {
     bool as_loop = false;
     bool has_next_token = false;
-
-    std::string generated_text = "";
 
     size_t num_tokens_predicted = 0;
 
@@ -65,21 +63,6 @@ struct llama_server_context
 
     bool loadPrompt() {
         params.prompt.insert(0, 1, ' '); // always add a first space
-
-        if(processed_tokens.size() != 0)
-        {
-            processed_tokens.erase(processed_tokens.begin() + 1, processed_tokens.end());
-        }
-
-        if(embd_inp.size() != 0)
-        {
-            embd_inp.erase(embd_inp.begin() + 1, embd_inp.end());
-        }
-
-        n_remain = 0;
-        n_past = 0;
-        n_consumed = 0;
-
         std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
         // compare the evaluated prompt with the new prompt
         for (n_past = 0; n_past < prompt_tokens.size() - 1 && n_past < processed_tokens.size(); n_past++) {
@@ -117,7 +100,6 @@ struct llama_server_context
             // Reset context
             const int n_left = n_past - params.n_keep;
             n_past = std::max(1, params.n_keep);
-            last_n_tokens.erase(last_n_tokens.begin() + n_past, last_n_tokens.end());
             processed_tokens.erase(processed_tokens.begin() + n_past, processed_tokens.end());
             embd.insert(embd.begin(), last_n_tokens.begin() + params.n_ctx - n_left / 2 - embd.size(), last_n_tokens.end() - embd.size());
         }