Add "--instruct" argument for usage with Alpaca (#240)
Also start adding prompts in "./prompts"
parent 22213a17b5
commit 9e1707218a
5 changed files with 64 additions and 40 deletions
utils.cpp (18 changes shown)
@@ -38,13 +38,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         } else if (arg == "-p" || arg == "--prompt") {
             params.prompt = argv[++i];
         } else if (arg == "-f" || arg == "--file") {
-
             std::ifstream file(argv[++i]);
-
-            std::copy(std::istreambuf_iterator<char>(file),
-                      std::istreambuf_iterator<char>(),
-                      back_inserter(params.prompt));
-
+            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
         } else if (arg == "-n" || arg == "--n_predict") {
             params.n_predict = std::stoi(argv[++i]);
         } else if (arg == "--top_k") {
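The consolidated std::copy call is the standard idiom for slurping an entire stream into a string. A minimal standalone sketch of the same idiom, assuming a hypothetical prompt file path (any of the new files under ./prompts would do):

    #include <algorithm>
    #include <cstdio>
    #include <fstream>
    #include <iterator>
    #include <string>

    int main() {
        std::string prompt;
        std::ifstream file("prompts/alpaca.txt");  // path assumed for illustration
        if (!file) {
            std::fprintf(stderr, "failed to open file\n");
            return 1;
        }
        // istreambuf_iterator walks the raw stream buffer character by character;
        // back_inserter appends each one to the string, so the whole file ends up
        // in `prompt` without a manual read loop.
        std::copy(std::istreambuf_iterator<char>(file),
                  std::istreambuf_iterator<char>(),
                  std::back_inserter(prompt));
        std::fprintf(stderr, "read %zu bytes\n", prompt.size());
        return 0;
    }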
@@ -65,9 +60,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.model = argv[++i];
         } else if (arg == "-i" || arg == "--interactive") {
             params.interactive = true;
-        } else if (arg == "--interactive-start") {
-            params.interactive = true;
-            params.interactive_start = true;
+        } else if (arg == "-ins" || arg == "--instruct") {
+            params.instruct = true;
         } else if (arg == "--color") {
             params.use_color = true;
         } else if (arg == "-r" || arg == "--reverse-prompt") {
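For context, a simplified sketch of the parse-loop shape this hunk edits (a hedged stand-in, not the project's full gpt_params_parse; the struct and function names here are invented for illustration): boolean flags like the new -ins just set a field, while value-taking flags consume the next argv slot via argv[++i].

    #include <cstdio>
    #include <string>

    struct params_sketch {                      // hypothetical stand-in for gpt_params
        std::string prompt;
        bool interactive = false;
        bool instruct    = false;
    };

    static bool parse_args(int argc, char ** argv, params_sketch & params) {
        for (int i = 1; i < argc; i++) {
            const std::string arg = argv[i];
            if (arg == "-p" || arg == "--prompt") {
                if (i + 1 >= argc) return false;   // a value flag consumes argv[++i]
                params.prompt = argv[++i];
            } else if (arg == "-i" || arg == "--interactive") {
                params.interactive = true;          // boolean flags just set a field
            } else if (arg == "-ins" || arg == "--instruct") {
                params.instruct = true;             // the flag added in this commit
            } else {
                std::fprintf(stderr, "unknown argument: %s\n", arg.c_str());
                return false;
            }
        }
        return true;
    }

    int main(int argc, char ** argv) {
        params_sketch params;
        if (!parse_args(argc, argv, params)) return 1;
        std::fprintf(stderr, "instruct=%d interactive=%d\n", params.instruct, params.interactive);
        return 0;
    }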
@@ -85,13 +79,13 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
     return true;
 }
 
-void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
+void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "usage: %s [options]\n", argv[0]);
     fprintf(stderr, "\n");
     fprintf(stderr, "options:\n");
     fprintf(stderr, "  -h, --help            show this help message and exit\n");
     fprintf(stderr, "  -i, --interactive     run in interactive mode\n");
-    fprintf(stderr, "  --interactive-start   run in interactive mode and poll user input at startup\n");
+    fprintf(stderr, "  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     fprintf(stderr, "  -r PROMPT, --reverse-prompt PROMPT\n");
     fprintf(stderr, "                        in interactive mode, poll user input upon seeing PROMPT\n");
     fprintf(stderr, "  --color               colorise output to distinguish prompt and user input from generations\n");
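The signature change from int argc to int /*argc*/ is the usual way to silence an unused-parameter warning under -Wall -Wextra: only the parameter's name is commented out, so the signature, and therefore every declaration and call site, stays exactly the same. A minimal sketch:

    #include <cstdio>

    // The unnamed parameter still participates in the function type, but the
    // compiler no longer sees an unused *named* parameter to warn about.
    void print_usage(int /*argc*/, char ** argv) {
        std::fprintf(stderr, "usage: %s [options]\n", argv[0]);
    }

    int main(int argc, char ** argv) {
        print_usage(argc, argv);
        return 0;
    }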
@@ -398,7 +392,7 @@ gpt_vocab::id llama_sample_top_p_top_k(
                 logits_id.push_back(std::make_pair(logits[i]*scale*repeat_penalty, i));
             } else {
                 logits_id.push_back(std::make_pair(logits[i]*scale/repeat_penalty, i));
-           }
+            }
         } else {
             logits_id.push_back(std::make_pair(logits[i]*scale, i));
         }
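This hunk sits in the sign-dependent repetition penalty applied to recently generated tokens: a positive logit is divided by repeat_penalty and a negative one is multiplied by it, so the token's score drops in either case. A small numeric sketch of that rule (values invented):

    #include <cstdio>

    // Dividing a positive logit and multiplying a negative one both push the
    // score down, which is why the branch on sign exists in the code above.
    float penalize(float logit, float repeat_penalty) {
        return logit < 0.0f ? logit * repeat_penalty : logit / repeat_penalty;
    }

    int main() {
        const float repeat_penalty = 1.3f;
        std::printf("%+.3f -> %+.3f\n",  2.0f, penalize( 2.0f, repeat_penalty)); //  2.0 / 1.3 = +1.538
        std::printf("%+.3f -> %+.3f\n", -2.0f, penalize(-2.0f, repeat_penalty)); // -2.0 * 1.3 = -2.600
        return 0;
    }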