restore original implementation with new names
commit e4429e912b
parent 56758f033c
3 changed files with 13 additions and 23 deletions
examples/common.cpp

@@ -124,12 +124,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.path_prompt_cache = argv[i];
-        } else if (arg == "--session") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.path_session = argv[i];
+        } else if (arg == "--prompt-cache-all") {
+            params.prompt_cache_save_all = true;
         } else if (arg == "-f" || arg == "--file") {
            if (++i >= argc) {
                invalid_param = true;
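The change above swaps a value-taking option for a bare boolean flag: --session FNAME consumed the next argv entry, while --prompt-cache-all only flips a field and consumes nothing. A minimal standalone sketch of the two parsing patterns (the variable names here are illustrative, not the real gpt_params_parse):

#include <cstdio>
#include <string>

int main(int argc, char ** argv) {
    std::string cache_path;             // set by the value-taking option
    bool        cache_save_all = false; // set by the bare boolean flag

    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        if (arg == "--prompt-cache") {
            // value-taking option: advance i and check that a value follows
            if (++i >= argc) {
                fprintf(stderr, "error: missing argument for %s\n", arg.c_str());
                return 1;
            }
            cache_path = argv[i];
        } else if (arg == "--prompt-cache-all") {
            // boolean flag: nothing to consume, just set the field
            cache_save_all = true;
        }
    }

    printf("cache_path = '%s', cache_save_all = %d\n", cache_path.c_str(), cache_save_all);
    return 0;
}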
@@ -348,11 +344,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         gpt_print_usage(argc, argv, default_params);
         exit(1);
     }
-    if (!params.path_session.empty() && !params.path_prompt_cache.empty()) {
-        fprintf(stderr, "error: only one of --prompt-cache or --session may be specified\n");
-        gpt_print_usage(argc, argv, default_params);
-        exit(1);
-    }
     if (escape_prompt) {
         process_escapes(params.prompt);
     }
@@ -379,7 +370,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "                        prompt to start generation with (default: empty)\n");
     fprintf(stderr, "  -e                    process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
     fprintf(stderr, "  --prompt-cache FNAME  file to cache prompt state for faster startup (default: none)\n");
-    fprintf(stderr, "  --session FNAME       file to store prompt and generations, allowing continuation (default: none)\n");
+    fprintf(stderr, "  --prompt-cache-all    if specified, saves user input and generations to cache as well\n");
     fprintf(stderr, "  --random-prompt       start with a randomized prompt.\n");
     fprintf(stderr, "  --in-prefix STRING    string to prefix user inputs with (default: empty)\n");
     fprintf(stderr, "  --in-suffix STRING    string to suffix after user inputs with (default: empty)\n");
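Since --prompt-cache-all is documented as a modifier on --prompt-cache FNAME rather than as a separate --session file, the two flags are meant to be passed together. A hedged sketch of exercising the combination through gpt_params_parse (assumes compilation against examples/common.h from this tree; the argv contents are illustrative):

#include "common.h" // gpt_params, gpt_params_parse (llama.cpp examples)

int main() {
    // Simulated command line: cache prompt state in prompt.bin and also
    // persist user input and generations (--prompt-cache-all).
    const char * argv_demo[] = {
        "main",
        "--prompt-cache", "prompt.bin",
        "--prompt-cache-all",
        "-p", "Hello",
    };
    const int argc_demo = sizeof(argv_demo) / sizeof(argv_demo[0]);

    gpt_params params;
    if (!gpt_params_parse(argc_demo, const_cast<char **>(argv_demo), params)) {
        return 1;
    }

    // After parsing: params.path_prompt_cache == "prompt.bin"
    //                params.prompt_cache_save_all == true
    return 0;
}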
examples/common.h

@@ -47,7 +47,6 @@ struct gpt_params {
     std::string model  = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
     std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
-    std::string path_session      = ""; // file for saving/loading prompt and generations
     std::string input_prefix = ""; // string to prefix user inputs with
     std::string input_suffix = ""; // string to suffix user inputs with
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
@@ -55,10 +54,11 @@ struct gpt_params {
     std::string lora_adapter = ""; // lora adapter path
     std::string lora_base = "";    // base model path for the lora adapter

     bool memory_f16    = true;  // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color     = false; // use color to distinguish generations and inputs
     bool interactive   = false; // interactive mode
+    bool prompt_cache_save_all = false; // save user input and generations to prompt cache

     bool embedding         = false; // get only sentence embedding
     bool interactive_first = false; // wait for user input immediately
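With path_session gone from the struct, the caching configurations reduce to three cases: no path means no caching; a path alone means prompt eval state is cached for faster startup; a path plus prompt_cache_save_all means user input and generations are written back as well. A small hypothetical helper that spells the decision out (describe_cache_mode is illustrative, not part of the tree):

#include <cstdio>
#include <string>

// Hypothetical helper: summarize what the two gpt_params fields imply.
static const char * describe_cache_mode(const std::string & path_prompt_cache,
                                        bool prompt_cache_save_all) {
    if (path_prompt_cache.empty()) {
        return "no caching";
    }
    if (!prompt_cache_save_all) {
        return "cache prompt eval state only (faster startup)";
    }
    return "cache prompt eval state plus user input and generations";
}

int main() {
    printf("%s\n", describe_cache_mode("", false));
    printf("%s\n", describe_cache_mode("prompt.bin", false));
    printf("%s\n", describe_cache_mode("prompt.bin", true));
    return 0;
}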
examples/main/main.cpp

@@ -139,10 +139,9 @@ int main(int argc, char ** argv) {
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');

-    std::string path_session =
-        !params.path_session.empty() ? params.path_session : params.path_prompt_cache;
+    std::string path_session = params.path_prompt_cache;
+    const bool session_save_all = params.prompt_cache_save_all;
     std::vector<llama_token> session_tokens;
-    bool resume_session = !params.path_session.empty();

     if (!path_session.empty()) {
         fprintf(stderr, "%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
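The setup now reads both values straight from params instead of reconciling two possible paths. For reference, a sketch of the load half that follows the fprintf above, assuming the llama_load_session_file API from llama.h of this period (file-existence checks and error handling trimmed):

#include <cstdio>
#include <string>
#include <vector>

#include "llama.h" // llama_context, llama_token, llama_load_session_file

// Sketch: attempt to resume from a prompt cache file, as main.cpp does.
static std::vector<llama_token> try_load_session(llama_context * ctx,
                                                 const std::string & path_session,
                                                 int n_ctx) {
    std::vector<llama_token> session_tokens;
    if (path_session.empty()) {
        return session_tokens;
    }

    fprintf(stderr, "attempting to load saved session from '%s'\n", path_session.c_str());

    session_tokens.resize(n_ctx); // capacity for the stored token stream
    size_t n_token_count_out = 0;
    if (llama_load_session_file(ctx, path_session.c_str(),
                                session_tokens.data(), session_tokens.size(),
                                &n_token_count_out)) {
        session_tokens.resize(n_token_count_out);
        fprintf(stderr, "loaded %zu tokens from session file\n", n_token_count_out);
    } else {
        // A missing file is not an error here: the cache is created on save.
        session_tokens.clear();
    }
    return session_tokens;
}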
@@ -325,8 +324,8 @@ int main(int argc, char ** argv) {
            // insert n_left/2 tokens at the start of embd from last_n_tokens
            embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());

-            // stop saving session if we run out of context, saving whatever was evaled
-            if (!path_session.empty() && resume_session) {
+            // stop saving session if we run out of context
+            if (!path_session.empty() && session_save_all) {
                llama_save_session_file(ctx, path_session.c_str(),
                    session_tokens.data(), session_tokens.size());
            }
@@ -605,7 +604,7 @@ int main(int argc, char ** argv) {
        }
    }

-    if (!path_session.empty() && resume_session) {
+    if (!path_session.empty() && session_save_all) {
        fprintf(stderr, "\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }
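Both save sites now share the same guard, !path_session.empty() && session_save_all: once at context overflow and once on exit. A hypothetical consolidation of that repeated pattern into a helper (not part of this commit; llama_save_session_file is taken from the diff above):

#include <string>
#include <vector>

#include "llama.h" // llama_context, llama_token, llama_save_session_file

// Hypothetical helper mirroring the guard used at both save sites in main.cpp:
// only write the session file when a cache path is set and --prompt-cache-all
// was requested.
static void maybe_save_session(llama_context * ctx,
                               const std::string & path_session,
                               bool session_save_all,
                               const std::vector<llama_token> & session_tokens) {
    if (path_session.empty() || !session_save_all) {
        return;
    }
    llama_save_session_file(ctx, path_session.c_str(),
                            session_tokens.data(), session_tokens.size());
}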