diff --git a/.gitignore b/.gitignore
index 9986ac6b1..1092d097a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -61,6 +61,7 @@ llama-batched-swift
 /rpc-server
 out/
 tmp/
+autogen-*.md
 
 # Deprecated
 
diff --git a/Makefile b/Makefile
index 9c61d3ec0..ba3f11c53 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,7 @@ BUILD_TARGETS = \
 	llama-tokenize \
 	llama-vdot \
 	llama-cvector-generator \
+	llama-export-docs \
 	tests/test-c.o
 
 # Binaries only useful for tests
@@ -1449,6 +1450,12 @@ examples/server/%.hpp: examples/server/public/% Makefile
 		echo "unsigned int $${NAME}_len = $(shell cat $< | wc -c );" \
 	) > $@
 
+llama-export-docs: examples/export-docs/export-docs.cpp \
+	$(OBJ_ALL)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	./llama-export-docs
+
 libllava.a: examples/llava/llava.cpp \
 	examples/llava/llava.h \
 	examples/llava/clip.cpp \
diff --git a/common/common.cpp b/common/common.cpp
index 49db551ae..2d99bfc25 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -425,7 +425,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params, std::vecto
             throw std::invalid_argument(format(
                 "error while handling argument \"%s\": %s\n\n"
                 "usage:\n%s\n\nto show complete usage, run with -h",
-                arg.c_str(), e.what(), arg_to_options[arg]->to_string(false).c_str()));
+                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
         }
     }
 
@@ -582,14 +582,13 @@ static std::vector<std::string> break_str_into_lines(std::string input, size_t m
     return result;
 }
 
-std::string llama_arg::to_string(bool markdown) {
+std::string llama_arg::to_string() {
     // params for printing to console
     const static int n_leading_spaces = 40;
     const static int n_char_per_line_help = 70; // TODO: detect this based on current console
     std::string leading_spaces(n_leading_spaces, ' ');
 
     std::ostringstream ss;
-    if (markdown) ss << "| `";
     for (const auto & arg : args) {
         if (arg == args.front()) {
             ss << (args.size() == 1 ? arg : format("%-7s", (arg + ",").c_str()));
@@ -598,20 +597,16 @@ std::string llama_arg::to_string(bool markdown) {
         }
     }
     if (!value_hint.empty()) ss << " " << value_hint;
-    if (!markdown) {
-        if (ss.tellp() > n_leading_spaces - 3) {
-            // current line is too long, add new line
-            ss << "\n" << leading_spaces;
-        } else {
-            // padding between arg and help, same line
-            ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
-        }
-        const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
-        for (const auto & line : help_lines) {
-            ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
-        }
+    if (ss.tellp() > n_leading_spaces - 3) {
+        // current line is too long, add new line
+        ss << "\n" << leading_spaces;
     } else {
-        ss << "` | " << help << " |";
+        // padding between arg and help, same line
+        ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
+    }
+    const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
+    for (const auto & line : help_lines) {
+        ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
"" : leading_spaces) << line << "\n"; } return ss.str(); } @@ -619,7 +614,7 @@ std::string llama_arg::to_string(bool markdown) { void gpt_params_print_usage(std::vector & options) { auto print_options = [](std::vector & options) { for (llama_arg * opt : options) { - printf("%s", opt->to_string(false).c_str()); + printf("%s", opt->to_string().c_str()); } }; diff --git a/common/common.h b/common/common.h index f84948330..7536120fc 100644 --- a/common/common.h +++ b/common/common.h @@ -349,7 +349,7 @@ struct llama_arg { return std::getenv(env.c_str()); } - std::string to_string(bool markdown); + std::string to_string(); }; std::vector gpt_params_parser_init(gpt_params & params, llama_example ex); diff --git a/examples/export-docs/CMakeLists.txt b/examples/export-docs/CMakeLists.txt new file mode 100644 index 000000000..0e953167e --- /dev/null +++ b/examples/export-docs/CMakeLists.txt @@ -0,0 +1,5 @@ +set(TARGET llama-export-docs) +add_executable(${TARGET} export-docs.cpp) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/export-docs/export-docs.cpp b/examples/export-docs/export-docs.cpp new file mode 100644 index 000000000..e21c4b89d --- /dev/null +++ b/examples/export-docs/export-docs.cpp @@ -0,0 +1,47 @@ +#include "common.h" +#include "llama.h" + +#include +#include +#include +#include +#include + +// Export usage message (-h) to markdown format + +static void export_md(std::string fname, llama_example ex) { + std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc); + + gpt_params params; + auto options = gpt_params_parser_init(params, ex); + + file << "| Argument | Explanation |\n"; + file << "| -------- | ----------- |\n"; + for (auto & opt : options) { + file << "| `"; + // args + for (const auto & arg : opt.args) { + if (arg == opt.args.front()) { + file << (opt.args.size() == 1 ? arg : (arg + ", ")); + } else { + file << arg << (arg != opt.args.back() ? ", " : ""); + } + } + // value hint + std::string md_value_hint(opt.value_hint); + string_replace_all(md_value_hint, "|", "\\|"); + file << " " << md_value_hint; + // help text + std::string md_help(opt.help); + string_replace_all(md_help, "\n", "
"); + string_replace_all(md_help, "|", "\\|"); + file << "` | " << md_help << " |\n"; + } +} + +int main(int, char **) { + export_md("autogen-main.md", LLAMA_EXAMPLE_MAIN); + export_md("autogen-server.md", LLAMA_EXAMPLE_SERVER); + + return 0; +}