bring back missing --alias
This commit is contained in:
parent
fe6df473a3
commit
b1657cb934
1 changed file with 10 additions and 4 deletions
|
@ -650,7 +650,6 @@ std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example
|
||||||
sampler_type_names += llama_sampling_type_to_str(sampler_type) + ";";
|
sampler_type_names += llama_sampling_type_to_str(sampler_type) + ";";
|
||||||
}
|
}
|
||||||
sampler_type_names.pop_back();
|
sampler_type_names.pop_back();
|
||||||
const char split_delim = ',';
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -1804,6 +1803,13 @@ std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example
|
||||||
params.control_vector_layer_end = std::stoi(end);
|
params.control_vector_layer_end = std::stoi(end);
|
||||||
}
|
}
|
||||||
));
|
));
|
||||||
|
add_opt(llama_arg(
|
||||||
|
{"-a", "--alias"}, "STRING",
|
||||||
|
"set alias for model name (to be used by REST API)",
|
||||||
|
[&params](std::string value) {
|
||||||
|
params.model_alias = value;
|
||||||
|
}
|
||||||
|
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL"));
|
||||||
add_opt(llama_arg(
|
add_opt(llama_arg(
|
||||||
{"-m", "--model"}, "FNAME",
|
{"-m", "--model"}, "FNAME",
|
||||||
ex == LLAMA_EXAMPLE_EXPORT_LORA
|
ex == LLAMA_EXAMPLE_EXPORT_LORA
|
||||||
|
@ -1950,7 +1956,7 @@ std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example
|
||||||
{"-npp"}, "n0,n1,...",
|
{"-npp"}, "n0,n1,...",
|
||||||
"number of prompt tokens",
|
"number of prompt tokens",
|
||||||
[&params](std::string value) {
|
[&params](std::string value) {
|
||||||
auto p = string_split<int>(value, split_delim);
|
auto p = string_split<int>(value, ',');
|
||||||
params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
|
params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
|
||||||
}
|
}
|
||||||
).set_examples({LLAMA_EXAMPLE_BENCH}));
|
).set_examples({LLAMA_EXAMPLE_BENCH}));
|
||||||
|
@ -1958,7 +1964,7 @@ std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example
|
||||||
{"-ntg"}, "n0,n1,...",
|
{"-ntg"}, "n0,n1,...",
|
||||||
"number of text generation tokens",
|
"number of text generation tokens",
|
||||||
[&params](std::string value) {
|
[&params](std::string value) {
|
||||||
auto p = string_split<int>(value, split_delim);
|
auto p = string_split<int>(value, ',');
|
||||||
params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
|
params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
|
||||||
}
|
}
|
||||||
).set_examples({LLAMA_EXAMPLE_BENCH}));
|
).set_examples({LLAMA_EXAMPLE_BENCH}));
|
||||||
|
@ -1966,7 +1972,7 @@ std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example
|
||||||
{"-npl"}, "n0,n1,...",
|
{"-npl"}, "n0,n1,...",
|
||||||
"number of parallel prompts",
|
"number of parallel prompts",
|
||||||
[&params](std::string value) {
|
[&params](std::string value) {
|
||||||
auto p = string_split<int>(value, split_delim);
|
auto p = string_split<int>(value, ',');
|
||||||
params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
|
params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
|
||||||
}
|
}
|
||||||
).set_examples({LLAMA_EXAMPLE_BENCH}));
|
).set_examples({LLAMA_EXAMPLE_BENCH}));
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue