From e4429e912badc0cb6e85b874eff9ff7e1b5db1ce Mon Sep 17 00:00:00 2001
From: Evan Jones
Date: Sun, 7 May 2023 22:46:06 -0400
Subject: [PATCH] restore original implementation with new names

---
 examples/common.cpp    | 15 +++------------
 examples/common.h      | 10 +++++-----
 examples/main/main.cpp | 11 +++++------
 3 files changed, 13 insertions(+), 23 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index c932914e9..53afe3cc2 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -124,12 +124,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.path_prompt_cache = argv[i];
-        } else if (arg == "--session") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.path_session = argv[i];
+        } else if (arg == "--prompt-cache-all") {
+            params.prompt_cache_save_all = true;
         } else if (arg == "-f" || arg == "--file") {
             if (++i >= argc) {
                 invalid_param = true;
                 break;
             }
@@ -348,11 +344,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         gpt_print_usage(argc, argv, default_params);
         exit(1);
     }
-    if (!params.path_session.empty() && !params.path_prompt_cache.empty()) {
-        fprintf(stderr, "error: only one of --prompt-cache or --session may be specified\n");
-        gpt_print_usage(argc, argv, default_params);
-        exit(1);
-    }
     if (escape_prompt) {
         process_escapes(params.prompt);
     }
@@ -379,7 +370,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "                        prompt to start generation with (default: empty)\n");
     fprintf(stderr, "  -e                    process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
     fprintf(stderr, "  --prompt-cache FNAME  file to cache prompt state for faster startup (default: none)\n");
-    fprintf(stderr, "  --session FNAME       file to store prompt and generations, allowing continuation (default: none)\n");
+    fprintf(stderr, "  --prompt-cache-all    if specified, saves user input and generations to cache as well\n");
    fprintf(stderr, "  --random-prompt       start with a randomized prompt.\n");
     fprintf(stderr, "  --in-prefix STRING    string to prefix user inputs with (default: empty)\n");
     fprintf(stderr, "  --in-suffix STRING    string to suffix after user inputs with (default: empty)\n");
diff --git a/examples/common.h b/examples/common.h
index 0c721bad0..00ddd7732 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -47,7 +47,6 @@ struct gpt_params {
     std::string model  = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
     std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
-    std::string path_session      = ""; // file for saving/loading prompt and generations
     std::string input_prefix      = ""; // string to prefix user inputs with
     std::string input_suffix      = ""; // string to suffix user inputs with
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
@@ -55,10 +54,11 @@
     std::string lora_adapter = ""; // lora adapter path
     std::string lora_base    = ""; // base model path for the lora adapter
 
-    bool memory_f16        = true;  // use f16 instead of f32 for memory kv
-    bool random_prompt     = false; // do not randomize prompt if none provided
-    bool use_color         = false; // use color to distinguish generations and inputs
-    bool interactive       = false; // interactive mode
+    bool memory_f16            = true;  // use f16 instead of f32 for memory kv
+    bool random_prompt         = false; // do not randomize prompt if none provided
+    bool use_color             = false; // use color to distinguish generations and inputs
+    bool interactive           = false; // interactive mode
+    bool prompt_cache_save_all = false; // save user input and generations to prompt cache
 
     bool embedding         = false; // get only sentence embedding
     bool interactive_first = false; // wait for user input immediately
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 106110e55..bb172cce2 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -139,10 +139,9 @@ int main(int argc, char ** argv) {
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');
 
-    std::string path_session =
-        !params.path_session.empty() ? params.path_session : params.path_prompt_cache;
+    std::string path_session = params.path_prompt_cache;
+    const bool session_save_all = params.prompt_cache_save_all;
     std::vector<llama_token> session_tokens;
-    bool resume_session = !params.path_session.empty();
 
     if (!path_session.empty()) {
         fprintf(stderr, "%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
@@ -325,8 +324,8 @@ int main(int argc, char ** argv) {
             // insert n_left/2 tokens at the start of embd from last_n_tokens
             embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());
 
-            // stop saving session if we run out of context, saving whatever was evaled
-            if (!path_session.empty() && resume_session) {
+            // stop saving session if we run out of context
+            if (!path_session.empty() && session_save_all) {
                 llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
             }
 
@@ -605,7 +604,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    if (!path_session.empty() && resume_session) {
+    if (!path_session.empty() && session_save_all) {
         fprintf(stderr, "\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
         llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
     }
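
Usage sketch for the new flag (the model path and the -p/-n values are
illustrative, not part of this patch):

    # cache the prompt eval, and also save inputs/generations to the cache
    ./main -m models/7B/ggml-model.bin \
        --prompt-cache prompt.cache --prompt-cache-all \
        -p "Once upon a time" -n 64

--prompt-cache alone only caches the initial prompt evaluation for faster
startup; adding --prompt-cache-all also saves user input and generated
tokens to the same file, so a later run can continue from the full session.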