change compile target to llama-cvector-generator
commit 64cad20c2e
parent 2f055584cf
12 changed files with 19 additions and 18 deletions
.editorconfig
@@ -27,5 +27,5 @@ indent_size = 2
 [examples/llama.swiftui/llama.swiftui.xcodeproj/*]
 indent_style = tab
 
-[examples/control-vector-generator/*.txt]
+[examples/cvector-generator/*.txt]
 insert_final_newline = unset
Makefile
@@ -38,6 +38,7 @@ BUILD_TARGETS = \
 	llama-tokenize \
 	llama-train-text-from-scratch \
 	llama-vdot \
+	llama-cvector-generator \
 	tests/test-c.o
 
 # Binaries only useful for tests
@@ -922,7 +923,7 @@ llama-eval-callback: examples/eval-callback/eval-callback.cpp ggml.o llama.o $(C
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
-control-vector-generator: examples/control-vector-generator/control-vector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
common/common.cpp
@@ -1611,7 +1611,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.i_chunk = std::stoi(argv[i]);
         return true;
     }
-    // control-vector-generator params
+    // cvector params
    if (arg == "--completions-file") {
        if (++i >= argc) {
            invalid_param = true;
@@ -1981,7 +1981,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "logging", " --log-append", "Don't truncate the old log file." });
 #endif // LOG_DISABLE_LOGS
 
-    options.push_back({ "control-vector" });
+    options.push_back({ "cvector" });
     options.push_back({ "cvector", "-o, --output FNAME", "output file (default: '%s')", params.cvector_outfile.c_str() });
     options.push_back({ "cvector", " --positive-file FNAME", "positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str() });
     options.push_back({ "cvector", " --negative-file FNAME", "negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str() });
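For reference, a hedged sketch of how the flags registered above could be combined on the command line. The binary name follows the new compile target from this commit, the file paths are the defaults from `common.h` below, and the exact invocation is an assumption rather than part of this diff:

```sh
# Hypothetical invocation combining the options registered in
# gpt_params_find_arg / gpt_params_print_usage above; model and file paths
# are placeholders taken from elsewhere in this commit.
./llama-cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf \
    --positive-file examples/cvector-generator/positive.txt \
    --negative-file examples/cvector-generator/negative.txt \
    --completions-file examples/cvector-generator/completions.txt \
    -o control_vector.gguf
```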
common/common.h
@@ -233,14 +233,14 @@ struct gpt_params {
     bool process_output = false; // collect data for the output tensor
     bool compute_ppl = true; // whether to compute perplexity
 
-    // control-vector-generator params
+    // cvector-generator params
     int n_completions = 64;
     int n_pca_batch = 20;
     int n_pca_iterations = 1000;
     std::string cvector_outfile = "control_vector.gguf";
-    std::string cvector_completions_file = "examples/control-vector-generator/completions.txt";
-    std::string cvector_positive_file = "examples/control-vector-generator/positive.txt";
-    std::string cvector_negative_file = "examples/control-vector-generator/negative.txt";
+    std::string cvector_completions_file = "examples/cvector-generator/completions.txt";
+    std::string cvector_positive_file = "examples/cvector-generator/positive.txt";
+    std::string cvector_negative_file = "examples/cvector-generator/negative.txt";
 };
 
 void gpt_params_handle_model_default(gpt_params & params);
examples/CMakeLists.txt
@@ -12,7 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 
 if (EMSCRIPTEN)
 else()
-    add_subdirectory(control-vector-generator)
+    add_subdirectory(cvector-generator)
     add_subdirectory(baby-llama)
     add_subdirectory(batched-bench)
     add_subdirectory(batched)
examples/cvector-generator/CMakeLists.txt
@@ -1,5 +1,5 @@
-set(TARGET control-vector-generator)
-add_executable(${TARGET} control-vector-generator.cpp pca.hpp)
+set(TARGET llama-cvector-generator)
+add_executable(${TARGET} cvector-generator.cpp pca.hpp)
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
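With the Makefile rule and the CMake target above both renamed, the example can be built under either system. A minimal sketch, where the `build` directory name and default generator are assumptions:

```sh
# Via the Makefile rule shown above
make llama-cvector-generator

# Via CMake, using the target set in examples/cvector-generator/CMakeLists.txt;
# "build" is an assumed build-directory name
cmake -B build
cmake --build build --target llama-cvector-generator
```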
examples/cvector-generator/README.md
@@ -1,27 +1,27 @@
-# control-vector-generator
+# cvector-generator
 
 This example demonstrates how to generate a control vector using gguf models.
 
 Related PRs:
 - [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970)
 - (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880)
-- [Add control-vector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)
+- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)
 
 ## Examples
 
 ```sh
 # CPU only
-./control-vector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf
 
 # With GPU
-./control-vector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99
 
 # With advanced options
-./control-vector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --batch-pca 100
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --batch-pca 100
 
 # To see help message
-./control-vector-generator -h
-# Then, have a look at "control-vector" section
+./cvector-generator -h
+# Then, have a look at "cvector" section
 ```
 
 ## Tips and tricks
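The README hunk above shows how to generate the vector but not how to consume it. As a hedged follow-on sketch: the control-vectors PR linked in that README (#5970) adds `--control-vector` and `--control-vector-scaled` flags to the common CLI options, so the default `control_vector.gguf` output could be applied roughly as below. The binary name, prompt, and scale value are assumptions, not part of this diff:

```sh
# Apply the generated vector to a generation run (flags from PR #5970)
./llama-cli -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf \
    --control-vector control_vector.gguf \
    -p "Tell me about your day."

# Scaled variant: the trailing number is the strength multiplier
./llama-cli -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf \
    --control-vector-scaled control_vector.gguf 0.8 \
    -p "Tell me about your day."
```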