From 0a46d73056a911853764abb6a8ee6d6afa5be121 Mon Sep 17 00:00:00 2001
From: ngxson
Date: Fri, 24 May 2024 11:11:55 +0200
Subject: [PATCH] add control-vector-generator

---
 .gitignore                                       |   1 +
 Makefile                                         |   4 +
 .../control-vector-generator/CMakeLists.txt      |   5 +
 .../control-vector-generator.cpp                 | 160 ++++++++++++++++++
 4 files changed, 170 insertions(+)
 create mode 100644 examples/control-vector-generator/CMakeLists.txt
 create mode 100644 examples/control-vector-generator/control-vector-generator.cpp

diff --git a/.gitignore b/.gitignore
index 50ae0973a..79a160bb5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -86,6 +86,7 @@ models-mnt
 /train-text-from-scratch
 /tokenize
 /vdot
+/control-vector-generator
 /common/build-info.cpp
 arm_neon.h
 compile_commands.json
diff --git a/Makefile b/Makefile
index fe63cbd60..c12a3e382 100644
--- a/Makefile
+++ b/Makefile
@@ -838,6 +838,10 @@ eval-callback: examples/eval-callback/eval-callback.cpp ggml.o llama.o $(COMMON_
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
+control-vector-generator: examples/control-vector-generator/control-vector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
diff --git a/examples/control-vector-generator/CMakeLists.txt b/examples/control-vector-generator/CMakeLists.txt
new file mode 100644
index 000000000..2515d2011
--- /dev/null
+++ b/examples/control-vector-generator/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET control-vector-generator)
+add_executable(${TARGET} control-vector-generator.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/control-vector-generator/control-vector-generator.cpp b/examples/control-vector-generator/control-vector-generator.cpp
new file mode 100644
index 000000000..5c64c3b74
--- /dev/null
+++ b/examples/control-vector-generator/control-vector-generator.cpp
@@ -0,0 +1,160 @@
+#include "common.h"
+#include "llama.h"
+#include "ggml.h"
+
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <tuple>
+#include <vector>
+
+struct callback_data {
+    std::vector<uint8_t> data;
+    int n_tokens = 0;
+    int n_embd = 0;
+    bool is_eval_pos = true;
+    std::vector<float *> v_pos;
+    std::vector<float *> v_neg;
+    std::vector<float *> v_diff;
+};
+
+static std::string ggml_ne_string(const ggml_tensor * t) {
+    std::string str;
+    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
+        str += std::to_string(t->ne[i]);
+        if (i + 1 < GGML_MAX_DIMS) {
+            str += ", ";
+        }
+    }
+    return str;
+}
+
+static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
+    auto * cb_data = (callback_data *) user_data;
+
+    static const char * l_out_name = "l_out";
+    const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
+    const struct ggml_tensor * src0 = t->src[0];
+    const struct ggml_tensor * src1 = t->src[1];
+
+    if (ask) {
+        return is_l_out;
+    }
+
+    if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
+        return true;
+    }
+
+    char src1_str[128] = {0};
+    if (src1) {
+        sprintf(src1_str, "%s{%s}", src1->name, ggml_ne_string(src1).c_str());
+    }
+
+    printf("%s: %24s = (%s) %10s(%s{%s}, %s}) = {%s}\n", __func__,
+           t->name, ggml_type_name(t->type), ggml_op_desc(t),
+           src0->name, ggml_ne_string(src0).c_str(),
+           src1 ? src1_str : "",
+           ggml_ne_string(t).c_str());
+
+
+    // copy the data from the GPU memory if needed
+    const bool is_host = ggml_backend_buffer_is_host(t->buffer);
+
+    if (!is_host) {
+        auto n_bytes = ggml_nbytes(t);
+        cb_data->data.resize(n_bytes);
+        ggml_backend_tensor_get(t, cb_data->data.data(), 0, n_bytes);
+    }
+
+    if (t->type == GGML_TYPE_F32) {
+        float * data = (float *) (is_host ? t->data : cb_data->data.data());
+        float * dest = (float *) malloc(ggml_nbytes(t));
+        memcpy(dest, data, ggml_nbytes(t));
+        if (cb_data->is_eval_pos) {
+            cb_data->v_pos.push_back(dest);
+        } else {
+            cb_data->v_neg.push_back(dest);
+        }
+    }
+
+    return true;
+}
+
+static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
+    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
+        fprintf(stderr, "%s : failed to eval\n", __func__);
+        return false;
+    }
+    return true;
+}
+
+static void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
+    // TODO: customize padding token
+    std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
+    llama_token pad_tok = pad_tokens.back();
+    while (tokens.size() < len) {
+        tokens.push_back(pad_tok);
+    }
+}
+
+int main(int argc, char ** argv) {
+    callback_data cb_data;
+    std::string prompt_pos = "happy";
+    std::string prompt_neg = "sad";
+
+    gpt_params params;
+    if (!gpt_params_parse(argc, argv, params)) {
+        return 1;
+    }
+
+    print_build_info();
+    llama_backend_init();
+    llama_numa_init(params.numa);
+
+    // pass the callback to the backend scheduler
+    // it will be executed for each node during the graph computation
+    params.cb_eval = cb_eval;
+    params.cb_eval_user_data = &cb_data;
+    params.warmup = false;
+
+    // init
+    llama_model * model;
+    llama_context * ctx;
+    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    if (model == nullptr || ctx == nullptr) {
+        fprintf(stderr, "%s : failed to init\n", __func__);
+        return 1;
+    }
+
+    // print system information
+    {
+        fprintf(stderr, "\n");
+        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
+    }
+
+    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+    std::vector<llama_token> tokens_pos = ::llama_tokenize(ctx, prompt_pos, add_bos);
+    std::vector<llama_token> tokens_neg = ::llama_tokenize(ctx, prompt_neg, add_bos);
+    size_t max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
+    padding_seq(ctx, tokens_pos, max_seq_len);
+    padding_seq(ctx, tokens_neg, max_seq_len);
+    cb_data.n_tokens = max_seq_len;
+    cb_data.n_embd = llama_n_embd(model);
+
+    cb_data.is_eval_pos = true;
+    get_hidden_layers(ctx, tokens_pos);
+    cb_data.is_eval_pos = false;
+    get_hidden_layers(ctx, tokens_neg);
+
+    printf("%f %f \n", cb_data.v_pos[0][4096], cb_data.v_pos[0][4096]);
+    printf("%f %f \n", cb_data.v_neg[0][4096], cb_data.v_neg[0][4096]);
+
+    //llama_print_timings(ctx);
+
+    llama_free(ctx);
+    llama_free_model(model);
+
+    llama_backend_free();
+
+    return 0;
+}
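
Note on where this patch is headed: cb_eval only copies the raw l_out activations for the positive and negative prompts into v_pos and v_neg, while v_diff is declared but not yet filled. A minimal sketch of the kind of follow-up step this sets up, assuming the per-layer control vector is formed by averaging the element-wise difference (positive minus negative) over the token positions. The helper name, the malloc-based ownership, and the token-major [n_tokens x n_embd] layout are illustrative assumptions and not part of this commit; the sketch relies on the callback_data definition from the file above.

    // Hypothetical helper (not in this patch): collapse each layer's
    // activation difference into a single n_embd-sized vector by
    // averaging (v_pos - v_neg) over the n_tokens positions.
    static void build_diff_vectors(callback_data & cb) {
        for (size_t il = 0; il < cb.v_pos.size(); ++il) {
            float * diff = (float *) malloc(cb.n_embd * sizeof(float));
            for (int ie = 0; ie < cb.n_embd; ++ie) {
                float acc = 0.0f;
                for (int it = 0; it < cb.n_tokens; ++it) {
                    // assumed layout: token it occupies one contiguous row of n_embd floats
                    const int idx = it * cb.n_embd + ie;
                    acc += cb.v_pos[il][idx] - cb.v_neg[il][idx];
                }
                diff[ie] = acc / (float) cb.n_tokens;
            }
            cb.v_diff.push_back(diff);
        }
    }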