add export-docs example
commit 75d0869ef5 (parent 286dcc9dbe)
6 changed files with 73 additions and 18 deletions
.gitignore (vendored): 1 change

@@ -61,6 +61,7 @@ llama-batched-swift
 /rpc-server
 out/
 tmp/
+autogen-*.md
 
 # Deprecated
 
Makefile: 7 changes

@@ -39,6 +39,7 @@ BUILD_TARGETS = \
 	llama-tokenize \
 	llama-vdot \
 	llama-cvector-generator \
+	llama-export-docs \
 	tests/test-c.o
 
 # Binaries only useful for tests

@@ -1449,6 +1450,12 @@ examples/server/%.hpp: examples/server/public/% Makefile
 		echo "unsigned int $${NAME}_len = $(shell cat $< | wc -c );" \
 	) > $@
 
+llama-export-docs: examples/export-docs/export-docs.cpp \
+	$(OBJ_ALL)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	./llama-export-docs
+
 libllava.a: examples/llava/llava.cpp \
 	examples/llava/llava.h \
 	examples/llava/clip.cpp \
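Because the rule ends by running ./llama-export-docs, building this target also regenerates the autogen-*.md files in the working directory, which is why the .gitignore entry above is added in the same commit.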
(The remaining two changed files are not named in this view; their hunks follow.)

@@ -425,7 +425,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params, std::vecto
             throw std::invalid_argument(format(
                 "error while handling argument \"%s\": %s\n\n"
                 "usage:\n%s\n\nto show complete usage, run with -h",
-                arg.c_str(), e.what(), arg_to_options[arg]->to_string(false).c_str()));
+                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
         }
     }
 
@@ -582,14 +582,13 @@ static std::vector<std::string> break_str_into_lines(std::string input, size_t m
     return result;
 }
 
-std::string llama_arg::to_string(bool markdown) {
+std::string llama_arg::to_string() {
     // params for printing to console
     const static int n_leading_spaces = 40;
     const static int n_char_per_line_help = 70; // TODO: detect this based on current console
     std::string leading_spaces(n_leading_spaces, ' ');
 
     std::ostringstream ss;
-    if (markdown) ss << "| `";
     for (const auto & arg : args) {
         if (arg == args.front()) {
             ss << (args.size() == 1 ? arg : format("%-7s", (arg + ",").c_str()));
@@ -598,20 +597,16 @@ std::string llama_arg::to_string(bool markdown) {
         }
     }
     if (!value_hint.empty()) ss << " " << value_hint;
-    if (!markdown) {
-        if (ss.tellp() > n_leading_spaces - 3) {
-            // current line is too long, add new line
-            ss << "\n" << leading_spaces;
-        } else {
-            // padding between arg and help, same line
-            ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
-        }
-        const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
-        for (const auto & line : help_lines) {
-            ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
-        }
+    if (ss.tellp() > n_leading_spaces - 3) {
+        // current line is too long, add new line
+        ss << "\n" << leading_spaces;
     } else {
-        ss << "` | " << help << " |";
+        // padding between arg and help, same line
+        ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
+    }
+    const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
+    for (const auto & line : help_lines) {
+        ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
     }
     return ss.str();
 }
@@ -619,7 +614,7 @@ std::string llama_arg::to_string(bool markdown) {
 void gpt_params_print_usage(std::vector<llama_arg> & options) {
     auto print_options = [](std::vector<llama_arg *> & options) {
         for (llama_arg * opt : options) {
-            printf("%s", opt->to_string(false).c_str());
+            printf("%s", opt->to_string().c_str());
         }
     };
 

@@ -349,7 +349,7 @@ struct llama_arg {
         return std::getenv(env.c_str());
     }
 
-    std::string to_string(bool markdown);
+    std::string to_string();
 };
 
 std::vector<llama_arg> gpt_params_parser_init(gpt_params & params, llama_example ex);
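With the bool markdown parameter gone, llama_arg::to_string() only handles console formatting; the markdown rendering that used to sit behind the markdown flag now lives entirely in the new export-docs example below.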
examples/export-docs/CMakeLists.txt (new file): 5 additions

@@ -0,0 +1,5 @@
+set(TARGET llama-export-docs)
+add_executable(${TARGET} export-docs.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
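Under the CMake build this should appear as an installable llama-export-docs target (built via the usual cmake --build invocation); unlike the Makefile rule above, nothing runs the binary automatically, so the markdown files are only written when it is executed by hand.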
examples/export-docs/export-docs.cpp (new file): 47 additions

@@ -0,0 +1,47 @@
+#include "common.h"
+#include "llama.h"
+
+#include <cmath>
+#include <cstdio>
+#include <fstream>
+#include <string>
+#include <vector>
+
+// Export usage message (-h) to markdown format
+
+static void export_md(std::string fname, llama_example ex) {
+    std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc);
+
+    gpt_params params;
+    auto options = gpt_params_parser_init(params, ex);
+
+    file << "| Argument | Explanation |\n";
+    file << "| -------- | ----------- |\n";
+    for (auto & opt : options) {
+        file << "| `";
+        // args
+        for (const auto & arg : opt.args) {
+            if (arg == opt.args.front()) {
+                file << (opt.args.size() == 1 ? arg : (arg + ", "));
+            } else {
+                file << arg << (arg != opt.args.back() ? ", " : "");
+            }
+        }
+        // value hint
+        std::string md_value_hint(opt.value_hint);
+        string_replace_all(md_value_hint, "|", "\\|");
+        file << " " << md_value_hint;
+        // help text
+        std::string md_help(opt.help);
+        string_replace_all(md_help, "\n", "<br/>");
+        string_replace_all(md_help, "|", "\\|");
+        file << "` | " << md_help << " |\n";
+    }
+}
+
+int main(int, char **) {
+    export_md("autogen-main.md", LLAMA_EXAMPLE_MAIN);
+    export_md("autogen-server.md", LLAMA_EXAMPLE_SERVER);
+
+    return 0;
+}
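For reference, each table row written by export_md follows the pattern "| `args value_hint` | help |": an option with args "-t, --threads", value hint "N", and help text "number of threads" would come out as "| `-t, --threads N` | number of threads |". If pages for further examples are wanted later, main() can simply gain more export_md calls; the sketch below is a hypothetical extension, where LLAMA_EXAMPLE_EMBEDDING and the output file name are assumptions rather than part of this commit.

// Hypothetical extension of main() above. LLAMA_EXAMPLE_EMBEDDING and the
// autogen-embedding.md file name are assumptions, not introduced by this commit.
int main(int, char **) {
    export_md("autogen-main.md",      LLAMA_EXAMPLE_MAIN);
    export_md("autogen-server.md",    LLAMA_EXAMPLE_SERVER);
    export_md("autogen-embedding.md", LLAMA_EXAMPLE_EMBEDDING); // assumed enum value

    return 0;
}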