From 2071d730faa2e5c9e6dc21d32902cc389ab5c4e8 Mon Sep 17 00:00:00 2001
From: Randall Fitzgerald
Date: Tue, 23 May 2023 06:22:30 -0700
Subject: [PATCH] Forgot to remove some testing code.

---
 examples/server/server.cpp | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 644490f9b..15bb9b729 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -13,8 +13,6 @@ struct llama_server_context
 {
     bool as_loop = false;
     bool has_next_token = false;
-
-
     std::string generated_text = "";
 
     int32_t num_tokens_predicted = 0;
@@ -81,21 +79,6 @@ struct llama_server_context
 
     bool loadPrompt() {
         params.prompt.insert(0, 1, ' '); // always add a first space
-
-        if(processed_tokens.size() != 0)
-        {
-            processed_tokens.erase(processed_tokens.begin() + 1, processed_tokens.end());
-        }
-
-        if(embd_inp.size() != 0)
-        {
-            embd_inp.erase(embd_inp.begin() + 1, embd_inp.end());
-        }
-
-        n_remain = 0;
-        n_past = 0;
-        n_consumed = 0;
-
         std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
         // compare the evaluated prompt with the new prompt
         int new_prompt_len = 0;
@@ -150,7 +133,6 @@ struct llama_server_context
             // Reset context
            const int n_left = n_past - params.n_keep;
             n_past = std::max(1, params.n_keep);
-            last_n_tokens.erase(last_n_tokens.begin() + n_past, last_n_tokens.end());
             processed_tokens.erase(processed_tokens.begin() + n_past, processed_tokens.end());
             embd.insert(embd.begin(), last_n_tokens.begin() + params.n_ctx - n_left / 2 - embd.size(), last_n_tokens.end() - embd.size());
         }
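
Note on the last hunk: this is the context-swap step. When the evaluated context fills up, everything past the first params.n_keep tokens is dropped (n_past is reset) and the last n_left / 2 tokens of recent history are replayed from last_n_tokens so generation keeps some continuity. The removed last_n_tokens.erase(...) would have truncated exactly the history that the embd.insert(...) on the following line reads from. What follows is a minimal standalone sketch of the swap arithmetic only; the names (n_past, n_keep, n_ctx, last_n_tokens, embd) mirror the diff, but the driver values are invented for illustration and this is not the server's actual code path.

    // Minimal sketch of the context-swap arithmetic from the last hunk.
    // Assumption: driver values (n_ctx = 16, n_keep = 4) are made up.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    using llama_token = int;

    int main() {
        const int n_ctx  = 16;     // context window size
        const int n_keep = 4;      // tokens pinned at the start of the context
        int       n_past = n_ctx;  // context is full, so a swap is needed

        // last_n_tokens holds the most recent n_ctx tokens; fill with 0..15.
        std::vector<llama_token> last_n_tokens(n_ctx);
        for (int i = 0; i < n_ctx; i++) last_n_tokens[i] = i;

        std::vector<llama_token> embd;  // tokens queued for the next eval

        // Reset context: drop everything after the pinned prefix...
        const int n_left = n_past - n_keep;
        n_past = std::max(1, n_keep);

        // ...then replay the last n_left / 2 tokens of recent history so the
        // model keeps some continuity after the truncation.
        embd.insert(embd.begin(),
                    last_n_tokens.begin() + n_ctx - n_left / 2 - embd.size(),
                    last_n_tokens.end() - embd.size());

        for (llama_token t : embd) printf("%d ", t);
        printf("\n");
        return 0;
    }

Compiled and run, this prints 10 11 12 13 14 15: the six most recent tokens (n_left / 2 = 6) that get re-evaluated after the kept prefix.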