common : move arg parser to arg.cpp

This commit is contained in:
Xuan Son Nguyen 2024-09-09 15:17:58 +02:00
parent 5ed087573e
commit 5d399f5689
31 changed files with 2139 additions and 2089 deletions

View file

@ -925,6 +925,7 @@ OBJ_LLAMA = \
OBJ_COMMON = \ OBJ_COMMON = \
common/common.o \ common/common.o \
common/arg.o \
common/console.o \ common/console.o \
common/ngram-cache.o \ common/ngram-cache.o \
common/sampling.o \ common/sampling.o \
@ -1157,6 +1158,11 @@ common/common.o: \
include/llama.h include/llama.h
$(CXX) $(CXXFLAGS) -c $< -o $@ $(CXX) $(CXXFLAGS) -c $< -o $@
common/arg.o: \
common/arg.cpp \
common/arg.h
$(CXX) $(CXXFLAGS) -c $< -o $@
common/sampling.o: \ common/sampling.o: \
common/sampling.cpp \ common/sampling.cpp \
common/sampling.h \ common/sampling.h \

1981
common/arg.cpp Normal file

File diff suppressed because it is too large. [Load diff]

98
common/arg.h Normal file
View file

@ -0,0 +1,98 @@
#pragma once
#include "common.h"
#include <string>
#include <vector>
#include <set>
//
// CLI argument parsing
//
struct gpt_params;
// Identifies which example/tool is consuming the argument parser.
// A llama_arg is enabled for a subset of these (see llama_arg::set_examples),
// so each example only exposes the options relevant to it.
enum llama_example {
LLAMA_EXAMPLE_COMMON,
LLAMA_EXAMPLE_SPECULATIVE,
LLAMA_EXAMPLE_MAIN,
LLAMA_EXAMPLE_INFILL,
LLAMA_EXAMPLE_EMBEDDING,
LLAMA_EXAMPLE_PERPLEXITY,
LLAMA_EXAMPLE_RETRIEVAL,
LLAMA_EXAMPLE_PASSKEY,
LLAMA_EXAMPLE_IMATRIX,
LLAMA_EXAMPLE_BENCH,
LLAMA_EXAMPLE_SERVER,
LLAMA_EXAMPLE_CVECTOR_GENERATOR,
LLAMA_EXAMPLE_EXPORT_LORA,
LLAMA_EXAMPLE_LLAVA,
// not a real example: sentinel used to iterate over all entries above
// (e.g. the test-arg-parser loop `for (int ex = 0; ex < LLAMA_EXAMPLE_COUNT; ex++)`)
LLAMA_EXAMPLE_COUNT,
};
// Describes one CLI argument: its flag spellings, value placeholders for the
// help text, an optional environment-variable fallback, and exactly one
// handler whose arity matches how many values the argument takes.
// Member functions without bodies here are defined out-of-line (presumably in
// common/arg.cpp — this header was split out of common.h in this commit).
struct llama_arg {
// examples in which this argument is available (default: common to all examples)
std::set<enum llama_example> examples = {LLAMA_EXAMPLE_COMMON};
// flag spellings for this argument, e.g. {"-m", "--model"}
std::vector<const char *> args;
const char * value_hint = nullptr; // help text or example for arg value
const char * value_hint_2 = nullptr; // for second arg value
// optional environment variable read as a fallback value (see get_value_from_env)
const char * env = nullptr;
std::string help;
// exactly one of the following handlers is non-null, selected by which
// constructor overload was used to build this llama_arg
void (*handler_void) (gpt_params & params) = nullptr;
void (*handler_string) (gpt_params & params, const std::string &) = nullptr;
void (*handler_str_str)(gpt_params & params, const std::string &, const std::string &) = nullptr;
void (*handler_int) (gpt_params & params, int) = nullptr;
// argument taking one string value
llama_arg(
const std::initializer_list<const char *> & args,
const char * value_hint,
const std::string & help,
void (*handler)(gpt_params & params, const std::string &)
) : args(args), value_hint(value_hint), help(help), handler_string(handler) {}
// argument taking one integer value
llama_arg(
const std::initializer_list<const char *> & args,
const char * value_hint,
const std::string & help,
void (*handler)(gpt_params & params, int)
) : args(args), value_hint(value_hint), help(help), handler_int(handler) {}
// flag-style argument taking no value
llama_arg(
const std::initializer_list<const char *> & args,
const std::string & help,
void (*handler)(gpt_params & params)
) : args(args), help(help), handler_void(handler) {}
// support 2 values for arg
llama_arg(
const std::initializer_list<const char *> & args,
const char * value_hint,
const char * value_hint_2,
const std::string & help,
void (*handler)(gpt_params & params, const std::string &, const std::string &)
) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}
// builder-style setters: both return *this so calls can be chained
// restrict this argument to the given examples (replaces the default set)
llama_arg & set_examples(std::initializer_list<enum llama_example> examples);
// set the env-var fallback; the previous inline version also appended
// "\n(env: ...)" to the help text — confirm the moved impl still does
llama_arg & set_env(const char * env);
// true if this argument is enabled for example `ex`
bool in_example(enum llama_example ex);
// read the argument's value from its env var into `output`; true on success
// NOTE(review): the inline versions removed from common.h declared
// get_value_from_env()/has_value_from_env() as const; the const qualifier
// was dropped in this move — confirm that is intentional
bool get_value_from_env(std::string & output);
bool has_value_from_env();
// render this argument (flags, hints, help) for the usage message
std::string to_string();
};
// Bundles everything needed for one parse run: the current example, the
// gpt_params being populated, the list of available options, and an optional
// per-example usage-printing callback.
// (Replaces the previous API where params and the options vector were passed
// separately to gpt_params_parse.)
struct llama_arg_context {
enum llama_example ex = LLAMA_EXAMPLE_COMMON;
// reference to caller-owned params: the context must not outlive it
gpt_params & params;
std::vector<llama_arg> options;
// optional callback printing example-specific usage (argc, argv)
void(*print_usage)(int, char **) = nullptr;
llama_arg_context(gpt_params & params) : params(params) {}
};
// initialize list of options (arguments) that can be used by the current example
llama_arg_context gpt_params_parser_init(gpt_params & params, llama_example ex);
// optionally, we can provide "print_usage" to print example usage
llama_arg_context gpt_params_parser_init(gpt_params & params, llama_example ex, void(*print_usage)(int, char **));
// parse input arguments from CLI
// if one argument has invalid value, it will automatically display usage of the specific argument (and not the full usage message)
// returns false on failure; callers typically bail out (e.g. `return 1`) when it does
bool gpt_params_parse(int argc, char ** argv, llama_arg_context & ctx_arg);

File diff suppressed because it is too large. [Load diff]

View file

@ -5,6 +5,7 @@
#include "llama.h" #include "llama.h"
#include "sampling.h" #include "sampling.h"
#include "arg.h"
#define LOG_NO_FILE_LINE_FUNCTION #define LOG_NO_FILE_LINE_FUNCTION
#include "log.h" #include "log.h"
@ -60,28 +61,9 @@ int32_t cpu_get_num_physical_cores();
int32_t cpu_get_num_math(); int32_t cpu_get_num_math();
// //
// CLI argument parsing // Common params
// //
enum llama_example {
LLAMA_EXAMPLE_COMMON,
LLAMA_EXAMPLE_SPECULATIVE,
LLAMA_EXAMPLE_MAIN,
LLAMA_EXAMPLE_INFILL,
LLAMA_EXAMPLE_EMBEDDING,
LLAMA_EXAMPLE_PERPLEXITY,
LLAMA_EXAMPLE_RETRIEVAL,
LLAMA_EXAMPLE_PASSKEY,
LLAMA_EXAMPLE_IMATRIX,
LLAMA_EXAMPLE_BENCH,
LLAMA_EXAMPLE_SERVER,
LLAMA_EXAMPLE_CVECTOR_GENERATOR,
LLAMA_EXAMPLE_EXPORT_LORA,
LLAMA_EXAMPLE_LLAVA,
LLAMA_EXAMPLE_COUNT,
};
// dimensionality reduction methods, used by cvector-generator // dimensionality reduction methods, used by cvector-generator
enum dimre_method { enum dimre_method {
DIMRE_METHOD_PCA, DIMRE_METHOD_PCA,
@ -98,8 +80,6 @@ struct cpu_params {
}; };
struct gpt_params { struct gpt_params {
enum llama_example curr_ex = LLAMA_EXAMPLE_COMMON;
int32_t n_predict = -1; // new tokens to predict int32_t n_predict = -1; // new tokens to predict
int32_t n_ctx = 0; // context size int32_t n_ctx = 0; // context size
int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS) int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
@ -299,92 +279,6 @@ struct gpt_params {
bool batched_bench_output_jsonl = false; bool batched_bench_output_jsonl = false;
}; };
struct llama_arg {
std::set<enum llama_example> examples = {LLAMA_EXAMPLE_COMMON};
std::vector<const char *> args;
const char * value_hint = nullptr; // help text or example for arg value
const char * value_hint_2 = nullptr; // for second arg value
const char * env = nullptr;
std::string help;
void (*handler_void) (gpt_params & params) = nullptr;
void (*handler_string) (gpt_params & params, const std::string &) = nullptr;
void (*handler_str_str)(gpt_params & params, const std::string &, const std::string &) = nullptr;
void (*handler_int) (gpt_params & params, int) = nullptr;
llama_arg(
const std::initializer_list<const char *> & args,
const char * value_hint,
const std::string & help,
void (*handler)(gpt_params & params, const std::string &)
) : args(args), value_hint(value_hint), help(help), handler_string(handler) {}
llama_arg(
const std::initializer_list<const char *> & args,
const char * value_hint,
const std::string & help,
void (*handler)(gpt_params & params, int)
) : args(args), value_hint(value_hint), help(help), handler_int(handler) {}
llama_arg(
const std::initializer_list<const char *> & args,
const std::string & help,
void (*handler)(gpt_params & params)
) : args(args), help(help), handler_void(handler) {}
// support 2 values for arg
llama_arg(
const std::initializer_list<const char *> & args,
const char * value_hint,
const char * value_hint_2,
const std::string & help,
void (*handler)(gpt_params & params, const std::string &, const std::string &)
) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}
llama_arg & set_examples(std::initializer_list<enum llama_example> examples) {
this->examples = std::move(examples);
return *this;
}
llama_arg & set_env(const char * env) {
help = help + "\n(env: " + env + ")";
this->env = env;
return *this;
}
bool in_example(enum llama_example ex) {
return examples.find(ex) != examples.end();
}
bool get_value_from_env(std::string & output) const {
if (env == nullptr) return false;
char * value = std::getenv(env);
if (value) {
output = value;
return true;
}
return false;
}
bool has_value_from_env() const {
return env != nullptr && std::getenv(env);
}
std::string to_string();
};
// initialize list of options (arguments) that can be used by the current example
std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example ex);
// optionally, we can provide "print_usage" to print example usage
std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example ex, std::function<void(int, char **)> print_usage);
// parse input arguments from CLI
// if one argument has invalid value, it will automatically display usage of the specific argument (and not the full usage message)
bool gpt_params_parse (int argc, char ** argv, gpt_params & params, std::vector<llama_arg> & options);
bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params, std::vector<llama_arg> & options);
// print full usage message; it will be called internally by gpt_params_parse() if "-h" is set
void gpt_params_print_usage(gpt_params & params, std::vector<llama_arg> & options);
std::string gpt_params_get_system_info(const gpt_params & params); std::string gpt_params_get_system_info(const gpt_params & params);
bool parse_cpu_range(const std::string& range, bool(&boolmask)[GGML_MAX_N_THREADS]); bool parse_cpu_range(const std::string& range, bool(&boolmask)[GGML_MAX_N_THREADS]);

View file

@ -37,8 +37,8 @@ static void print_usage(int, char ** argv) {
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_BENCH, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_BENCH, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -18,8 +18,8 @@ int main(int argc, char ** argv) {
params.prompt = "Hello my name is"; params.prompt = "Hello my name is";
params.n_predict = 32; params.n_predict = 32;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -388,8 +388,8 @@ static int prepare_entries(gpt_params & params, train_context & ctx_train) {
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -79,8 +79,8 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_EMBEDDING); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_EMBEDDING);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -144,8 +144,8 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -401,8 +401,8 @@ static void print_usage(int, char ** argv) {
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -9,11 +9,11 @@ static void export_md(std::string fname, llama_example ex) {
std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc); std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc);
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, ex); auto ctx_arg = gpt_params_parser_init(params, ex);
file << "| Argument | Explanation |\n"; file << "| Argument | Explanation |\n";
file << "| -------- | ----------- |\n"; file << "| -------- | ----------- |\n";
for (auto & opt : options) { for (auto & opt : ctx_arg.options) {
file << "| `"; file << "| `";
// args // args
for (const auto & arg : opt.args) { for (const auto & arg : opt.args) {

View file

@ -154,8 +154,8 @@ static std::string gritlm_instruction(const std::string & instruction) {
int main(int argc, char * argv[]) { int main(int argc, char * argv[]) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -577,8 +577,8 @@ int main(int argc, char ** argv) {
params.logits_all = true; params.logits_all = true;
params.verbosity = 1; params.verbosity = 1;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_IMATRIX, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_IMATRIX, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -105,8 +105,8 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
g_params = &params; g_params = &params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_INFILL); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_INFILL);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -278,8 +278,8 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_LLAVA, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_LLAVA, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -253,8 +253,8 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, show_additional_info); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, show_additional_info);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -36,8 +36,8 @@ struct ngram_container {
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -13,8 +13,8 @@
int main(int argc, char ** argv){ int main(int argc, char ** argv){
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -15,8 +15,8 @@
int main(int argc, char ** argv){ int main(int argc, char ** argv){
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_SPECULATIVE);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -12,8 +12,8 @@
int main(int argc, char ** argv){ int main(int argc, char ** argv){
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -138,9 +138,8 @@ static std::string chat_add_and_format(struct llama_model * model, std::vector<l
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
g_params = &params; g_params = &params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_MAIN, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_MAIN, print_usage);
if (!gpt_params_parse(argc, argv, ctx_arg)) {
if (!gpt_params_parse(argc, argv, params, options)) {
return 1; return 1;
} }

View file

@ -100,8 +100,8 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -19,8 +19,8 @@ int main(int argc, char ** argv) {
params.n_keep = 32; params.n_keep = 32;
params.i_pos = -1; params.i_pos = -1;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_PASSKEY, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_PASSKEY, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -1967,8 +1967,8 @@ int main(int argc, char ** argv) {
params.n_ctx = 512; params.n_ctx = 512;
params.logits_all = true; params.logits_all = true;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_PERPLEXITY); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_PERPLEXITY);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -111,8 +111,8 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_RETRIEVAL, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_RETRIEVAL, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -10,8 +10,8 @@ int main(int argc, char ** argv) {
params.prompt = "The quick brown fox"; params.prompt = "The quick brown fox";
params.sparams.seed = 1234; params.sparams.seed = 1234;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -2423,8 +2423,8 @@ int main(int argc, char ** argv) {
// own arguments required by this example // own arguments required by this example
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_SERVER); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_SERVER);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -18,8 +18,8 @@ int main(int argc, char ** argv) {
params.prompt = "Hello my name is"; params.prompt = "Hello my name is";
params.n_predict = 32; params.n_predict = 32;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -27,8 +27,8 @@ struct seq_draft {
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_SPECULATIVE); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_SPECULATIVE);
if (!gpt_params_parse(argc, argv, params, options)) { if (!gpt_params_parse(argc, argv, ctx_arg)) {
return 1; return 1;
} }

View file

@ -14,7 +14,7 @@ int main(void) {
printf("test-arg-parser: make sure there is no duplicated arguments in any examples\n\n"); printf("test-arg-parser: make sure there is no duplicated arguments in any examples\n\n");
for (int ex = 0; ex < LLAMA_EXAMPLE_COUNT; ex++) { for (int ex = 0; ex < LLAMA_EXAMPLE_COUNT; ex++) {
try { try {
auto options = gpt_params_parser_init(params, (enum llama_example)ex); auto ctx_arg = gpt_params_parser_init(params, (enum llama_example)ex);
std::unordered_set<std::string> seen_args; std::unordered_set<std::string> seen_args;
std::unordered_set<std::string> seen_env_vars; std::unordered_set<std::string> seen_env_vars;
for (const auto & opt : options) { for (const auto & opt : options) {
@ -52,7 +52,7 @@ int main(void) {
}; };
std::vector<std::string> argv; std::vector<std::string> argv;
auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON); auto ctx_arg = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
printf("test-arg-parser: test invalid usage\n\n"); printf("test-arg-parser: test invalid usage\n\n");