(wip) refactor
parent a42e783d75
commit a710df749c

4 changed files with 566 additions and 545 deletions

Makefile | 2
@@ -838,7 +838,7 @@ eval-callback: examples/eval-callback/eval-callback.cpp ggml.o llama.o $(COMMON_
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

control-vector-generator: examples/control-vector-generator/control-vector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
control-vector-generator: examples/control-vector-generator/control-vector-generator.cpp examples/control-vector-generator/pca.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
examples/control-vector-generator/CMakeLists.txt
@@ -1,5 +1,5 @@
set(TARGET control-vector-generator)
add_executable(${TARGET} control-vector-generator.cpp)
add_executable(${TARGET} control-vector-generator.cpp pca.hpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
examples/control-vector-generator/control-vector-generator.cpp
@@ -1,6 +1,7 @@
|
|||
#include "common.h"
|
||||
#include "llama.h"
|
||||
#include "ggml.h"
|
||||
#include "pca.hpp"
|
||||
|
||||
#ifdef GGML_USE_CUDA
|
||||
#include "ggml-cuda.h"
|
||||
|
@@ -18,55 +19,208 @@
|
|||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#define DEBUG_POS 2
|
||||
|
||||
// TODO read everything over and make sure it makes sense because I'm dropping logic errors left and right - Christian
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
// to reduce the amount of stuff that gets sent to cb_eval this is only what cb_eval actually needs
|
||||
|
||||
template <class Iter>
|
||||
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
|
||||
std::string ret;
|
||||
for (; begin != end; ++begin) {
|
||||
ret += llama_token_to_piece(ctx, *begin);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
|
||||
// cb_eval is reused for each pair of positive - negative prompt
|
||||
struct callback_data {
|
||||
std::vector<uint8_t> data;
|
||||
ggml_context * ctx_ggml; // holds v_pos, v_neg
|
||||
ggml_context * ctx_ggml = nullptr; // holds v_pos, v_neg, v_diff_filtered
|
||||
|
||||
int n_layers = 0;
|
||||
int n_tokens = 0;
|
||||
bool is_eval_pos = true;
|
||||
|
||||
// each element of the vector corresponds to one layer
|
||||
std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
|
||||
std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
|
||||
std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
|
||||
std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
|
||||
std::vector<struct ggml_tensor *> v_diff_filtered; // vector of matrices of size [n_embd, n_nonzero_rows]. NOTE: n_nonzero_rows maybe different for each layer
|
||||
|
||||
// TODO I free everything as soon as it's unnecessary, rather than letting this live until the end of main() - is this undesirable?
|
||||
/*
|
||||
~callback_data() {
|
||||
for (auto ptr : v_pos) free(ptr);
|
||||
for (auto ptr : v_neg) free(ptr);
|
||||
ggml_free(ctx_ggml);
|
||||
}*/
|
||||
// save a tensor into either v_pos or v_neg (decided by is_eval_pos)
|
||||
void save_tensor_for_layer(struct ggml_tensor * t) {
|
||||
GGML_ASSERT(t->type == GGML_TYPE_F32);
|
||||
|
||||
if (ctx_ggml == nullptr) {
|
||||
// alloc a new ctx_ggml if needed
|
||||
struct ggml_init_params params_ggml = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * n_layers * 3u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
ctx_ggml = ggml_init(params_ggml);
|
||||
}
|
||||
|
||||
// copy tensor data
|
||||
auto n_bytes = ggml_nbytes(t);
|
||||
struct ggml_tensor * t_layer = ggml_new_tensor_2d(ctx_ggml, t->type, t->ne[0], t->ne[1]);
|
||||
t_layer->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
|
||||
ggml_backend_tensor_get(t, t_layer->data, 0, n_bytes);
|
||||
ggml_set_name(t_layer, ggml_get_name(t));
|
||||
print_debug_tensor(t_layer);
|
||||
|
||||
if (is_eval_pos) {
|
||||
v_pos.push_back(t_layer);
|
||||
} else {
|
||||
v_neg.push_back(t_layer);
|
||||
}
|
||||
}
|
||||
|
||||
// calculate diff (v_pos - v_neg) and place the result back to v_pos
|
||||
// all zero rows in the diff tensor will also be removed
|
||||
// NOTE: final layer is ignored. we only have (n_layers - 1) to process
|
||||
std::vector<struct ggml_tensor *> calc_diff() {
|
||||
for (size_t il = 0; il < v_pos.size(); il++) {
|
||||
float * a = (float *) v_pos[il]->data;
|
||||
float * b = (float *) v_neg[il]->data;
|
||||
size_t n_elem = ggml_nelements(v_pos[il]);
|
||||
for (size_t j = 0; j < n_elem; j++) {
|
||||
a[j] -= b[j];
|
||||
}
|
||||
//print_debug_tensor(v_pos[i]);
|
||||
auto diff_filtered = filter_nonzero_rows(v_pos[il]);
|
||||
v_diff_filtered.push_back(diff_filtered);
|
||||
}
|
||||
return v_diff_filtered; // for convenience, we return the result std::vector
|
||||
}
|
||||
|
||||
// delete zero rows from a given 2D tensor
|
||||
struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
|
||||
printf("filter_nonzero_rows\n");
|
||||
auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
|
||||
// check if the given row contains only zero elements
|
||||
int n_cols = t->ne[0]; // hint: should be equal to n_embd
|
||||
for (int col = 0; col < n_cols; ++col) {
|
||||
if (std::fabs(ggml_get_f32_nd(t, col, row, 0, 0)) > eps) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
std::vector<int> rows_to_copy; // the indices of non-zero rows (to be copied into diff_filtered)
|
||||
for (int i_row = 0; i_row < a->ne[1]; i_row++) {
|
||||
if (!is_row_all_zeros(a, i_row, 1e-6)) {
|
||||
rows_to_copy.push_back(i_row);
|
||||
}
|
||||
}
|
||||
|
||||
// get "n_nonzero_rows" for the output "diff_filtered"
|
||||
int n_nonzero_rows = rows_to_copy.size();
|
||||
printf("n_nonzero_rows: %d\n", n_nonzero_rows);
|
||||
int n_embd = a->ne[0];
|
||||
GGML_ASSERT(n_nonzero_rows > 0);
|
||||
|
||||
// diff_filtered: [n_embd, n_nonzero_rows]
|
||||
struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
|
||||
ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
|
||||
ggml_set_name(diff_filtered, (std::string("diff_filtered_") + a->name).c_str());
|
||||
diff_filtered->data = malloc(ggml_nbytes(diff_filtered));
|
||||
|
||||
// copy non-zero rows
|
||||
for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
|
||||
int src_row = rows_to_copy[dest_row];
|
||||
for (int i = 0; i < n_embd; i++) {
|
||||
float src_elem = ggml_get_f32_nd(a, i, src_row, 0, 0);
|
||||
ggml_set_f32_nd(diff_filtered, i, dest_row, 0, 0, src_elem);
|
||||
}
|
||||
}
|
||||
|
||||
print_debug_tensor(diff_filtered);
|
||||
|
||||
return diff_filtered;
|
||||
}
|
||||
|
||||
// we don't implement a destructor because we want to reuse callback_data; we just free the tensor data here
|
||||
void reset() {
|
||||
for (auto ptr : v_pos) free(ptr->data);
|
||||
for (auto ptr : v_neg) free(ptr->data);
|
||||
for (auto ptr : v_diff_filtered) free(ptr->data);
|
||||
v_pos.clear();
|
||||
v_neg.clear();
|
||||
v_diff_filtered.clear();
|
||||
if (ctx_ggml) {
|
||||
ggml_free(ctx_ggml);
|
||||
}
|
||||
ctx_ggml = nullptr;
|
||||
}
|
||||
};
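To make the new callback_data logic easier to review, here is a minimal standalone sketch of the same diff-and-filter idea in plain C++ (no ggml; Matrix and diff_and_filter are illustrative names, not part of this patch): subtract the negative activations from the positive ones element-wise, then drop rows whose entries are all near zero.

#include <cmath>
#include <cstdio>
#include <vector>

// row-major matrix used only for this sketch: rows x cols, data.size() == rows * cols
struct Matrix {
    int rows;
    int cols;
    std::vector<float> data;
    float & at(int r, int c) { return data[r * cols + c]; }
};

// diff = pos - neg (element-wise), then keep only rows that are not entirely ~0
static Matrix diff_and_filter(Matrix pos, const Matrix & neg, float eps = 1e-6f) {
    for (size_t i = 0; i < pos.data.size(); i++) {
        pos.data[i] -= neg.data[i];
    }
    Matrix out{0, pos.cols, {}};
    for (int r = 0; r < pos.rows; r++) {
        bool all_zero = true;
        for (int c = 0; c < pos.cols; c++) {
            if (std::fabs(pos.at(r, c)) > eps) { all_zero = false; break; }
        }
        if (!all_zero) {
            out.data.insert(out.data.end(),
                            pos.data.begin() + r * pos.cols,
                            pos.data.begin() + (r + 1) * pos.cols);
            out.rows++;
        }
    }
    return out;
}

int main() {
    Matrix pos{3, 2, {1.0f, 2.0f, 0.5f, 0.5f, 3.0f, 4.0f}};
    Matrix neg{3, 2, {0.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f}};
    Matrix d = diff_and_filter(pos, neg);
    std::printf("kept %d of %d rows\n", d.rows, pos.rows); // the middle row becomes all-zero and is dropped
    return 0;
}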
|
||||
|
||||
// I prefer having the different contexts so we can free each immediately after we're done using it
|
||||
// e.g. we don't need the diffs_wrapped once we strip zero rows + concatenate them so we can ggml_free it, etc.
|
||||
// @ngxson let me know what you think - @christianazinn
|
||||
struct diff_ctx {
|
||||
int n_embd = 0;
|
||||
int n_threads = 8;
|
||||
|
||||
ggml_context * ctx_diffs_wrapped; // holds v_diffs_wrapped
|
||||
ggml_context * ctx_diff; // holds v_diff
|
||||
ggml_context * ctx_final; // holds v_final
|
||||
/**
|
||||
* process_ctx is used to store the ggml context for pre-post processing the diff vectors
|
||||
* in short, input => v_diff and output => v_final
|
||||
*/
|
||||
struct train_context {
|
||||
ggml_context * ctx_ggml;
|
||||
int n_embd;
|
||||
int n_layers;
|
||||
|
||||
// each element of the vector corresponds to one layer
|
||||
std::vector<struct ggml_tensor *> v_diff; // vector of matrices of size [n_embd, m] where m ~ n_tokens * n_completions
|
||||
// NOTE: the last layer is discarded; therefore, we have (n_layers - 1) elements here
|
||||
std::vector<struct ggml_tensor *> v_diff; // vector of matrices of size [n_embd, m] where m ~ n_tokens * n_completions (v_diff contains no zero-rows)
|
||||
std::vector<struct ggml_tensor *> v_final; // vector of vectors of size [n_embd] to be written to file
|
||||
|
||||
// each element of the outer vector correspond to one layer, each element of the inner vector correspond to one prompt pass
|
||||
std::vector<std::vector<struct ggml_tensor *>> v_diffs_wrapped; // vector of compiled diff matrices of size [n_embd, n_tokens] to be concatenated
|
||||
// to make it easy to re-alloc when concatenating v_diff, we temporarily store the data in a vector instead of a tensor
|
||||
// v_diff_tmp will get converted into v_diff later on
|
||||
std::vector<std::vector<uint8_t>> v_diff_tmp;
|
||||
|
||||
~diff_ctx() {
|
||||
for (auto ptr : v_diff) free(ptr);
|
||||
for (auto ptr : v_final) free(ptr);
|
||||
ggml_free(ctx_diff);
|
||||
ggml_free(ctx_final);
|
||||
// ctx_diffs_wrapped is freed in concatenate_diffs as soon as we're done with it - see above. undesirable?
|
||||
train_context(int n_embd_, int n_layers_) {
|
||||
n_embd = n_embd_;
|
||||
n_layers = n_layers_;
|
||||
struct ggml_init_params params_ggml = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * (n_layers - 1) * 2u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
ctx_ggml = ggml_init(params_ggml);
|
||||
for (int il = 0; il < n_layers - 1; il++) {
|
||||
std::vector<uint8_t> empty;
|
||||
v_diff_tmp.push_back(empty);
|
||||
v_final.push_back(ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd));
|
||||
}
|
||||
}
|
||||
|
||||
// add new rows into existing tensor in v_diff_tmp
|
||||
void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
|
||||
GGML_ASSERT(diff_filtered.size() == n_layers - 1);
|
||||
for (int il = 0; il < n_layers - 1; il++) {
|
||||
auto t = diff_filtered[il];
|
||||
auto & diff_tmp = v_diff_tmp[il];
|
||||
size_t curr_size = diff_tmp.size();
|
||||
diff_tmp.resize(curr_size + ggml_nbytes(t));
|
||||
memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
|
||||
}
|
||||
}
|
||||
|
||||
// build the v_diff tensors from v_diff_tmp
|
||||
void build_v_diff() {
|
||||
for (int il = 0; il < n_layers - 1; il++) {
|
||||
auto & diff_tmp = v_diff_tmp[il];
|
||||
int n_elem = diff_tmp.size() / sizeof(float);
|
||||
int n_rows = n_elem / n_embd;
|
||||
struct ggml_tensor * diff = ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
|
||||
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
|
||||
diff->data = diff_tmp.data();
|
||||
v_diff.push_back(diff);
|
||||
}
|
||||
}
|
||||
|
||||
~train_context() {
|
||||
for (auto ptr : v_final) free(ptr->data);
|
||||
// no need to free v_diff_tmp or v_diff, since we didn't use malloc
|
||||
ggml_free(ctx_ggml);
|
||||
}
|
||||
};
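The concat_diff_tmp / build_v_diff pair above amounts to appending raw row blocks into a per-layer byte buffer and later viewing that buffer as a single [n_embd, n_rows] matrix. A minimal standalone sketch of that pattern (plain C++, illustrative names, no ggml):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// sketch: grow a per-layer byte buffer by appending row blocks, then view it
// as one [n_embd, n_rows] float matrix once all prompt pairs are processed
int main() {
    const int n_embd = 4;
    std::vector<std::uint8_t> diff_tmp; // plays the role of v_diff_tmp[il]

    // append two "filtered diff" blocks of 2 and 3 rows respectively
    for (int rows : {2, 3}) {
        std::vector<float> block(rows * n_embd, 1.0f);
        size_t curr = diff_tmp.size();
        diff_tmp.resize(curr + block.size() * sizeof(float));
        std::memcpy(diff_tmp.data() + curr, block.data(), block.size() * sizeof(float));
    }

    // the "build_v_diff" step: reinterpret the accumulated bytes as one matrix
    int n_elem = (int) (diff_tmp.size() / sizeof(float));
    int n_rows = n_elem / n_embd;
    const float * mat = (const float *) diff_tmp.data();
    std::printf("matrix is [%d, %d], first element = %f\n", n_embd, n_rows, mat[0]);
    return 0;
}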
|
||||
|
||||
|
@@ -82,23 +236,37 @@ struct ctrl_params {
|
|||
std::string positive_prompts_file = "examples/control-vector-generator/positive.txt";
|
||||
std::string negative_prompts_file = "examples/control-vector-generator/negative.txt";
|
||||
|
||||
/* pair of prompts to be used for generating the vectors */
|
||||
std::vector<std::string> positive_prompts;
|
||||
std::vector<std::string> negative_prompts;
|
||||
|
||||
/* pair of prompts to be used for testing */
|
||||
/* pair of prompts to be used for generating final vector */
|
||||
std::vector<std::string> positive_entries;
|
||||
std::vector<std::string> negative_entries;
|
||||
};
|
||||
|
||||
struct tokenized_prompt {
|
||||
std::string positive;
|
||||
std::string negative;
|
||||
std::vector<llama_token> tokens_pos;
|
||||
std::vector<llama_token> tokens_neg;
|
||||
size_t max_seq_len;
|
||||
|
||||
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
|
||||
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
|
||||
tokens_pos = ::llama_tokenize(ctx, pos, add_bos);
|
||||
tokens_neg = ::llama_tokenize(ctx, neg, add_bos);
|
||||
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
|
||||
padding_seq(ctx, tokens_pos, max_seq_len);
|
||||
padding_seq(ctx, tokens_neg, max_seq_len);
|
||||
}
|
||||
|
||||
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
|
||||
// TODO: customize padding token
|
||||
std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
|
||||
llama_token pad_tok = pad_tokens.back();
|
||||
while (tokens.size() < len) {
|
||||
tokens.push_back(pad_tok);
|
||||
}
|
||||
}
|
||||
};
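padding_seq simply right-pads the shorter of the two token sequences until both reach max_seq_len. A standalone sketch of the same behaviour (plain C++; token and pad_pair are illustrative stand-ins, not llama.cpp API):

#include <algorithm>
#include <cstdio>
#include <vector>

using token = int; // stand-in for llama_token in this standalone sketch

// right-pad the shorter sequence so both end up with the same length,
// mirroring what tokenized_prompt::padding_seq does for a positive/negative pair
static void pad_pair(std::vector<token> & pos, std::vector<token> & neg, token pad_tok) {
    const size_t max_len = std::max(pos.size(), neg.size());
    pos.resize(max_len, pad_tok);
    neg.resize(max_len, pad_tok);
}

int main() {
    std::vector<token> pos = {5, 6, 7, 8};
    std::vector<token> neg = {5, 9};
    pad_pair(pos, neg, /*pad_tok=*/0);
    std::printf("padded lengths: %zu %zu\n", pos.size(), neg.size()); // 4 4
    return 0;
}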
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
template <typename T>
|
||||
static std::string to_string(const T & val) {
|
||||
std::stringstream ss;
|
||||
|
@@ -235,7 +403,7 @@ static int ctrlvec_params_parse(int argc, char ** argv, ctrl_params & params) {
|
|||
return skipme;
|
||||
}
|
||||
|
||||
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path) {
|
||||
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines = false) {
|
||||
std::vector<std::string> output;
|
||||
std::ifstream file(path);
|
||||
if (!file.is_open()) {
|
||||
|
@@ -243,7 +411,8 @@ static std::vector<std::string> ctrlvec_load_prompt_file(std::string path) {
|
|||
}
|
||||
std::string line;
|
||||
while (std::getline(file, line)) {
|
||||
if (!line.empty()) { // skip empty lines
|
||||
bool is_skip = skip_empty_lines && line.empty();
|
||||
if (!is_skip) {
|
||||
output.push_back(line);
|
||||
}
|
||||
}
|
||||
|
@@ -251,49 +420,23 @@ static std::vector<std::string> ctrlvec_load_prompt_file(std::string path) {
|
|||
return output;
|
||||
}
|
||||
|
||||
static std::string format_template(std::string persona, std::string suffix) {
|
||||
//const std::string user_tag = "[INST]";
|
||||
//const std::string asst_tag = "[/INST]";
|
||||
//return user_tag + " Act as if you're extremely " + persona + ". " + asst_tag + " " + suffix;
|
||||
// TODO make this dynamic - allow the user to change it somehow - and adapt based on model
|
||||
return persona + " " + suffix; // entry in positive/negative.txt must already be formatted i.e. "[INST] Act as if you're extremely happy. [/INST]"
|
||||
}
|
||||
|
||||
static void populate_entries(ctrl_params & cparams, std::string positive, std::string negative) {
|
||||
std::string line;
|
||||
std::ifstream completions_file(cparams.completions_file);
|
||||
int i = 0;
|
||||
if (completions_file.is_open()) {
|
||||
while (std::getline(completions_file, line) && i < cparams.n_completions) {
|
||||
// TODO replicate the truncations done by the python implementation
|
||||
cparams.positive_entries.push_back(format_template(positive, line));
|
||||
cparams.negative_entries.push_back(format_template(negative, line));
|
||||
i++;
|
||||
}
|
||||
completions_file.close();
|
||||
} else {
|
||||
throw std::invalid_argument("error: invalid completions file or file could not be opened");
|
||||
}
|
||||
}
|
||||
|
||||
static std::string ggml_ne_string(const ggml_tensor * t) {
|
||||
std::string str;
|
||||
for (int i = 0; i < GGML_MAX_DIMS; ++i) {
|
||||
str += std::to_string(t->ne[i]);
|
||||
if (i + 1 < GGML_MAX_DIMS) {
|
||||
str += ", ";
|
||||
}
|
||||
}
|
||||
return str;
|
||||
}
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
|
||||
auto * cb_data = (callback_data *) user_data;
|
||||
auto ggml_ne_string = [](const ggml_tensor * t) -> std::string {
|
||||
std::string str;
|
||||
for (int i = 0; i < GGML_MAX_DIMS; ++i) {
|
||||
str += std::to_string(t->ne[i]);
|
||||
if (i + 1 < GGML_MAX_DIMS) {
|
||||
str += ", ";
|
||||
}
|
||||
}
|
||||
return str;
|
||||
};
|
||||
|
||||
static const char * l_out_name = "l_out";
|
||||
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
|
||||
const struct ggml_tensor * src0 = t->src[0];
|
||||
const struct ggml_tensor * src1 = t->src[1];
|
||||
|
||||
if (ask) {
|
||||
return is_l_out;
|
||||
|
@@ -303,36 +446,8 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
|
|||
return true;
|
||||
}
|
||||
|
||||
char src1_str[128] = {0};
|
||||
if (src1) {
|
||||
sprintf(src1_str, "%s{%s}", src1->name, ggml_ne_string(src1).c_str());
|
||||
}
|
||||
|
||||
printf("%s: %24s = (%s) %10s(%s{%s}, %s}) = {%s}\n", __func__,
|
||||
t->name, ggml_type_name(t->type), ggml_op_desc(t),
|
||||
src0->name, ggml_ne_string(src0).c_str(),
|
||||
src1 ? src1_str : "",
|
||||
ggml_ne_string(t).c_str());
|
||||
|
||||
|
||||
// copy the data from the GPU memory if needed
|
||||
const bool is_host = ggml_backend_buffer_is_host(t->buffer);
|
||||
|
||||
struct ggml_tensor * t_host;
|
||||
auto n_bytes = ggml_nbytes(t);
|
||||
t_host = ggml_new_tensor_2d(cb_data->ctx_ggml, t->type, t->ne[0], t->ne[1]);
|
||||
t_host->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
|
||||
ggml_backend_tensor_get(t, t_host->data, 0, n_bytes);
|
||||
printf("t_host [0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(t_host, 0, DEBUG_POS, 0, 0));
|
||||
|
||||
if (t_host->type == GGML_TYPE_F32) {
|
||||
if (cb_data->is_eval_pos) {
|
||||
cb_data->v_pos.push_back(t_host);
|
||||
} else {
|
||||
cb_data->v_neg.push_back(t_host);
|
||||
}
|
||||
}
|
||||
|
||||
// save the tensor to current context
|
||||
cb_data->save_tensor_for_layer(t);
|
||||
return true;
|
||||
}
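cb_eval follows the two-phase ggml eval-callback protocol: it is first called with ask == true to declare interest in a tensor (here, only the l_out tensors), and only then with ask == false once the data can be read. A toy standalone sketch of that control flow (illustrative types, not the real ggml scheduler):

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// toy stand-in for the two-phase eval callback: the runner first calls the
// callback with ask == true ("do you want this tensor?"), and only if that
// returns true calls it again with ask == false once the data is available
struct toy_tensor { std::string name; };

static bool toy_cb(const toy_tensor & t, bool ask, std::vector<std::string> & collected) {
    const bool is_l_out = std::strncmp(t.name.c_str(), "l_out", 5) == 0;
    if (ask) {
        return is_l_out;         // declare interest only in layer outputs
    }
    collected.push_back(t.name); // "data phase": record (or copy out) the tensor
    return true;                 // returning false would abort the graph run
}

int main() {
    std::vector<toy_tensor> graph = {{"inp_embd"}, {"l_out-0"}, {"l_out-1"}};
    std::vector<std::string> collected;
    for (const auto & t : graph) {
        if (toy_cb(t, /*ask=*/true, collected)) {
            toy_cb(t, /*ask=*/false, collected);
        }
    }
    std::printf("collected %zu layer outputs\n", collected.size()); // 2
    return 0;
}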
|
||||
|
||||
|
@@ -345,348 +460,17 @@ static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & to
|
|||
return true;
|
||||
}
|
||||
|
||||
static void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
|
||||
// TODO: customize padding token
|
||||
std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
|
||||
llama_token pad_tok = pad_tokens.back();
|
||||
while (tokens.size() < len) {
|
||||
tokens.push_back(pad_tok);
|
||||
}
|
||||
}
|
||||
|
||||
static void calc_diff(callback_data & cb_data, diff_ctx & dctx) {
|
||||
// TODO: assert cb_data.v_pos.size() == cb_data.v_neg.size()
|
||||
dctx.v_diffs_wrapped.resize(cb_data.v_pos.size());
|
||||
for (size_t il = 0; il < cb_data.v_pos.size(); il++) {
|
||||
std::cout << "il: " << il << " of " << cb_data.v_pos.size()-1 << std::endl;
|
||||
|
||||
auto & inp_pos = cb_data.v_pos[il];
|
||||
auto & inp_neg = cb_data.v_neg[il];
|
||||
auto n_bytes = ggml_nbytes(inp_pos);
|
||||
|
||||
printf("inp_pos [0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(inp_pos, 0, DEBUG_POS, 0, 0));
|
||||
printf("inp_neg [0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(inp_neg, 0, DEBUG_POS, 0, 0));
|
||||
|
||||
// TODO assert inp_pos->ne[0] == inp_neg->ne[0] && inp_pos->ne[1] == inp_neg->ne[1]
|
||||
struct ggml_tensor * dest = ggml_new_tensor_2d(dctx.ctx_diffs_wrapped, GGML_TYPE_F32, inp_pos->ne[0], inp_pos->ne[1]);
|
||||
dest->data = malloc(n_bytes); // TODO @ngxson get rid of this malloc somehow
|
||||
|
||||
for (size_t i = 0; i < inp_pos->ne[0]; i++) {
|
||||
for (size_t j = 0; j < inp_pos->ne[1]; j++) {
|
||||
ggml_set_f32_nd(dest, i, j, 0, 0, ggml_get_f32_nd(inp_pos, i, j, 0, 0) - ggml_get_f32_nd(inp_neg, i, j, 0, 0));
|
||||
}
|
||||
}
|
||||
|
||||
printf("dest [0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(dest, 0, DEBUG_POS, 0, 0));
|
||||
|
||||
dctx.v_diffs_wrapped[il].push_back(dest);
|
||||
}
|
||||
}
|
||||
|
||||
// 50/50 chance this should be cols but it works and I don't want to touch it - @christianazinn
|
||||
static bool is_row_all_zeros(struct ggml_tensor * diff, int row, int cols, float eps = 1e-6) {
|
||||
for (int i = 0; i < cols; ++i) {
|
||||
if (ggml_get_f32_nd(diff, i, row, 0, 0) > eps) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void concatenate_diffs(diff_ctx & dctx) {
|
||||
// TODO can you do this inplace?
|
||||
// TODO assert each tensor has the same ->ne[0] and it equals dctx.n_embd
|
||||
printf("concatenate_diffs\n");
|
||||
for (size_t il = 0; il < dctx.v_diffs_wrapped.size(); ++il) {
|
||||
printf("il: %zu of %zu\n", il, dctx.v_diffs_wrapped.size()-1);
|
||||
std::vector<struct ggml_tensor *> & vec = dctx.v_diffs_wrapped[il];
|
||||
|
||||
// strip zero rows
|
||||
int n_nonzero_rows = 0;
|
||||
std::vector<std::vector<int>> nonzero_rows; // outer vector is tensor idx, inner vector is row in tensor
|
||||
nonzero_rows.resize(vec.size());
|
||||
for (int i = 0; i < vec.size(); ++i) {
|
||||
for (int j = 0; j < vec[i]->ne[1]; ++j) {
|
||||
if (!is_row_all_zeros(vec[i], j, vec[i]->ne[0])) {
|
||||
nonzero_rows[i].push_back(j);
|
||||
n_nonzero_rows++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
printf("n_nonzero_rows: %d\n", n_nonzero_rows);
|
||||
|
||||
// we transpose it here because ggml mul_mat is really weird
|
||||
struct ggml_tensor * diff = ggml_new_tensor_2d(dctx.ctx_diff, GGML_TYPE_F32, n_nonzero_rows, dctx.n_embd);
|
||||
|
||||
diff->data = malloc(dctx.n_embd * n_nonzero_rows * sizeof(float) + ggml_tensor_overhead()); // @ngxson get rid of this malloc somehow
|
||||
|
||||
for (size_t i = 0; i < nonzero_rows.size(); ++i) {
|
||||
for (size_t j : nonzero_rows[i]) {
|
||||
for (size_t k = 0; k < vec[i]->ne[0]; k++) {
|
||||
//std::cout << ggml_get_f32_nd(vec[i], k, j, 0, 0) << std::endl;
|
||||
ggml_set_f32_nd(diff, i, k, 0, 0, ggml_get_f32_nd(vec[i], k, j, 0, 0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
printf("diff[0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(diff, 0, DEBUG_POS, 0, 0));
|
||||
|
||||
// TODO assert row == n_nonzero_rows
|
||||
|
||||
dctx.v_diff.push_back(diff);
|
||||
}
|
||||
//for (auto & vec : dctx.v_diffs_wrapped) for (auto ptr : vec) free(ptr);
|
||||
ggml_free(dctx.ctx_diffs_wrapped);
|
||||
}
|
||||
|
||||
struct pca_model {
|
||||
struct ggml_tensor * v_diff_original;
|
||||
struct ggml_tensor * square;
|
||||
struct ggml_tensor * square_transpose;
|
||||
struct ggml_tensor * eigenvector;
|
||||
|
||||
ggml_backend_t backend = NULL;
|
||||
ggml_backend_buffer_t buffer;
|
||||
struct ggml_context * ctx;
|
||||
};
|
||||
|
||||
void load_pca_model(pca_model & model, struct ggml_tensor * v_diff_original) {
|
||||
#ifdef GGML_USE_CUDA
|
||||
fprintf(stderr, "%s: using CUDA backend\n", __func__);
|
||||
model.backend = ggml_backend_cuda_init(0); // init device 0
|
||||
if (!model.backend) {
|
||||
fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
fprintf(stderr, "%s: using Metal backend\n", __func__);
|
||||
ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
|
||||
model.backend = ggml_backend_metal_init();
|
||||
if (!model.backend) {
|
||||
fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
|
||||
}
|
||||
#endif
|
||||
|
||||
// if there aren't GPU Backends fallback to CPU backend
|
||||
if (!model.backend) {
|
||||
model.backend = ggml_backend_cpu_init();
|
||||
}
|
||||
|
||||
printf("v_diff_original[0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(v_diff_original, 0, DEBUG_POS, 0, 0));
|
||||
|
||||
const int num_tensors = 4;
|
||||
|
||||
struct ggml_init_params params {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * num_tensors,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
|
||||
model.ctx = ggml_init(params);
|
||||
|
||||
model.v_diff_original = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[0], v_diff_original->ne[1]);
|
||||
model.square = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[1], v_diff_original->ne[1]);
|
||||
model.square_transpose = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[1], v_diff_original->ne[1]);
|
||||
model.eigenvector = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[1]);
|
||||
|
||||
model.buffer = ggml_backend_alloc_ctx_tensors(model.ctx, model.backend);
|
||||
|
||||
ggml_backend_tensor_set(model.v_diff_original, v_diff_original->data, 0, ggml_nbytes(v_diff_original));
|
||||
|
||||
// no need to load anything into square or square_transpose yet
|
||||
|
||||
// initialize model.eigenvector to random vector
|
||||
std::vector<float> random_vec;
|
||||
std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
|
||||
std::uniform_real_distribution<float> distribution(0.0, 1.0);
|
||||
for (int i = 0; i < v_diff_original->ne[1]; ++i) {
|
||||
random_vec.push_back(distribution(generator));
|
||||
}
|
||||
|
||||
// we don't normalize it at first but that shouldn't be a problem
|
||||
ggml_backend_tensor_set(model.eigenvector, random_vec.data(), 0, ggml_nbytes(model.eigenvector));
|
||||
}
|
||||
|
||||
struct ggml_cgraph * square_diff_graph(const pca_model & model) {
|
||||
static size_t buf_size = ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
|
||||
static std::vector<uint8_t> buf(buf_size);
|
||||
|
||||
struct ggml_init_params params0 = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf.data(),
|
||||
/*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
|
||||
};
|
||||
struct ggml_context * ctx0 = ggml_init(params0);
|
||||
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||
|
||||
struct ggml_tensor * square = ggml_mul_mat(ctx0, model.v_diff_original, model.v_diff_original);
|
||||
//struct ggml_tensor * square_transpose = ggml_transpose(ctx0, square);
|
||||
|
||||
ggml_build_forward_expand(gf, square);
|
||||
|
||||
ggml_free(ctx0);
|
||||
return gf;
|
||||
}
|
||||
|
||||
struct ggml_tensor * compute_square(const pca_model & model, ggml_gallocr_t allocr, int n_threads) {
|
||||
struct ggml_cgraph * gf = square_diff_graph(model);
|
||||
|
||||
ggml_gallocr_alloc_graph(allocr, gf);
|
||||
|
||||
if (ggml_backend_is_cpu(model.backend)) {
|
||||
ggml_backend_cpu_set_n_threads(model.backend, n_threads);
|
||||
}
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
if (ggml_backend_is_metal(model.backend)) {
|
||||
ggml_backend_metal_set_n_cb(model.backend, n_threads);
|
||||
}
|
||||
#endif
|
||||
|
||||
ggml_backend_graph_compute(model.backend, gf);
|
||||
|
||||
return gf->nodes[gf->n_nodes - 1];
|
||||
}
|
||||
|
||||
struct ggml_cgraph * power_iteration_graph(const pca_model & model, float tolerance) {
|
||||
static size_t buf_size = ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
|
||||
static std::vector<uint8_t> buf(buf_size);
|
||||
|
||||
struct ggml_init_params params0 = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf.data(),
|
||||
/*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
|
||||
};
|
||||
struct ggml_context * ctx0 = ggml_init(params0);
|
||||
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||
|
||||
struct ggml_tensor * b_tensor = ggml_mul_mat(ctx0, model.square, model.eigenvector);
|
||||
// TODO difference between ggml_norm and ggml_norm_inplace?
|
||||
// also is this the right way to do multi-step graphs?
|
||||
b_tensor = ggml_norm_inplace(ctx0, b_tensor, tolerance);
|
||||
|
||||
ggml_build_forward_expand(gf, b_tensor);
|
||||
|
||||
ggml_free(ctx0);
|
||||
return gf;
|
||||
}
|
||||
|
||||
struct ggml_tensor * compute_piter(const pca_model & model, ggml_gallocr_t allocr, int n_threads, float tolerance) {
|
||||
struct ggml_cgraph * gf = power_iteration_graph(model, tolerance);
|
||||
|
||||
ggml_gallocr_alloc_graph(allocr, gf);
|
||||
|
||||
if (ggml_backend_is_cpu(model.backend)) {
|
||||
ggml_backend_cpu_set_n_threads(model.backend, n_threads);
|
||||
}
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
if (ggml_backend_is_metal(model.backend)) {
|
||||
ggml_backend_metal_set_n_cb(model.backend, n_threads);
|
||||
}
|
||||
#endif
|
||||
|
||||
ggml_backend_graph_compute(model.backend, gf);
|
||||
|
||||
return gf->nodes[gf->n_nodes - 1];
|
||||
}
|
||||
|
||||
static void power_iteration(diff_ctx & dctx, int idx, int maxIterations = 1000, float tolerance = 1e-7) {
|
||||
printf("in power iteration\n");
|
||||
|
||||
pca_model model;
|
||||
load_pca_model(model, dctx.v_diff[idx]);
|
||||
|
||||
ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
|
||||
|
||||
struct ggml_tensor * square = compute_square(model, allocr, dctx.n_threads);
|
||||
ggml_backend_tensor_set(model.square, square->data, 0, ggml_nbytes(model.square));
|
||||
|
||||
ggml_gallocr_free(allocr);
|
||||
|
||||
struct ggml_init_params host_params = {
|
||||
/*.mem_size =*/ (dctx.n_embd * sizeof(float) + ggml_tensor_overhead()) * 2u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
struct ggml_context * host_ctx = ggml_init(host_params);
|
||||
|
||||
struct ggml_tensor * host_old_eigenvector = ggml_new_tensor_1d(host_ctx, GGML_TYPE_F32, dctx.n_embd);
|
||||
struct ggml_tensor * host_new_eigenvector = ggml_new_tensor_1d(host_ctx, GGML_TYPE_F32, dctx.n_embd);
|
||||
|
||||
for (int iter = 0; iter < maxIterations; ++iter) {
|
||||
|
||||
// TODO do I need to reset it like this every time?
|
||||
allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
|
||||
|
||||
struct ggml_tensor * b_tensor = compute_piter(model, allocr, dctx.n_threads, tolerance);
|
||||
|
||||
ggml_backend_tensor_get(b_tensor, host_new_eigenvector->data, 0, ggml_nbytes(b_tensor));
|
||||
ggml_backend_tensor_get(model.eigenvector, host_old_eigenvector->data, 0, ggml_nbytes(model.eigenvector));
|
||||
|
||||
// convergence check
|
||||
float diff = 0.0;
|
||||
for (int i = 0; i < dctx.n_embd; ++i) {
|
||||
diff += std::pow((ggml_get_f32_1d(host_new_eigenvector, i) - ggml_get_f32_1d(host_old_eigenvector, i)), 2);
|
||||
}
|
||||
|
||||
// update eigenvector
|
||||
ggml_backend_tensor_set(model.eigenvector, host_new_eigenvector->data, 0, ggml_nbytes(model.eigenvector));
|
||||
|
||||
try {
|
||||
if (std::sqrt(diff) < tolerance) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
catch (std::exception & e) {
|
||||
// catch division by zero I guess
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ggml_backend_tensor_get(model.eigenvector, dctx.v_final[idx]->data, 0, ggml_nbytes(model.eigenvector));
|
||||
|
||||
ggml_gallocr_free(allocr);
|
||||
ggml_free(host_ctx);
|
||||
ggml_free(model.ctx);
|
||||
ggml_backend_buffer_free(model.buffer);
|
||||
ggml_backend_free(model.backend);
|
||||
}
|
||||
|
||||
static void pca(diff_ctx & dctx) {
|
||||
printf("Running PCA...\n");
|
||||
for (int il = 0; il < dctx.v_diff.size(); ++il) {
|
||||
dctx.v_final.push_back(ggml_new_tensor_1d(dctx.ctx_final, GGML_TYPE_F32, dctx.n_embd));
|
||||
power_iteration(dctx, il);
|
||||
printf("Done with layer %d\n", il);
|
||||
printf("il = %d | %f %f \n", il, ggml_get_f32_1d(dctx.v_final[il], 0), ggml_get_f32_1d(dctx.v_final[il], 1));
|
||||
}
|
||||
printf("Done with PCA.\n");
|
||||
}
|
||||
|
||||
static void export_gguf(diff_ctx & dctx, int n_layers, const std::string fname, const std::string model_hint) {
|
||||
static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
|
||||
struct gguf_context * ctx = gguf_init_empty();
|
||||
|
||||
size_t v_final_size_eff = n_layers - 1;
|
||||
|
||||
const std::string arch = "controlvector";
|
||||
gguf_set_val_str(ctx, "general.architecture", arch.c_str());
|
||||
gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
|
||||
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_final_size_eff);
|
||||
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
|
||||
|
||||
for (size_t i = 0; i < v_final_size_eff; ++i) {
|
||||
// TODO this number is probably not right - figure out which layer is which
|
||||
// i'm pretty sure it's right now
|
||||
const std::string name = "direction." + to_string(i+1);
|
||||
|
||||
printf("dctx.v_final[i][%d]: %f\n", DEBUG_POS, ggml_get_f32_1d(dctx.v_final[i], DEBUG_POS));
|
||||
|
||||
ggml_set_name(dctx.v_final[i], name.c_str());
|
||||
|
||||
gguf_add_tensor(ctx, dctx.v_final[i]);
|
||||
printf("Added tensor %zu\n", i);
|
||||
for (size_t i = 0; i < v_ctrl.size(); ++i) {
|
||||
gguf_add_tensor(ctx, v_ctrl[i]);
|
||||
printf("Added tensor: %s\n", v_ctrl[i]->name);
|
||||
}
|
||||
|
||||
printf("Writing file...\n");
|
||||
|
@@ -698,6 +482,42 @@ static void export_gguf(diff_ctx & dctx, int n_layers, const std::string fname,
|
|||
gguf_free(ctx);
|
||||
}
|
||||
|
||||
/**
|
||||
* Load prompt files and completion file.
|
||||
* Then format each pair of prompt + completion to make an entry.
|
||||
*/
|
||||
int prepare_entries(ctrl_params & cparams) {
|
||||
// load prompts
|
||||
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(cparams.positive_prompts_file);
|
||||
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(cparams.negative_prompts_file);
|
||||
if (positive_prompts.size() != negative_prompts.size()) {
|
||||
fprintf(stderr, "number of positive and negative prompts must be equal\n");
|
||||
return 1;
|
||||
}
|
||||
if (positive_prompts.empty()) {
|
||||
fprintf(stderr, "must provide at least one prompt pair\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
// create templated prompts
|
||||
std::vector<std::string> completions = ctrlvec_load_prompt_file(cparams.completions_file, false);
|
||||
auto format_template = [](std::string persona, std::string suffix) {
|
||||
//const std::string user_tag = "[INST]";
|
||||
//const std::string asst_tag = "[/INST]";
|
||||
//return user_tag + " Act as if you're extremely " + persona + ". " + asst_tag + " " + suffix;
|
||||
// TODO make this dynamic - allow the user to change it somehow - and adapt based on model
|
||||
return persona + " " + suffix; // entry in positive/negative.txt must already be formatted i.e. "[INST] Act as if you're extremely happy. [/INST]"
|
||||
};
|
||||
for (int i = 0; i < positive_prompts.size(); ++i) {
|
||||
for (auto & cmpl : completions) {
|
||||
// TODO replicate the truncations done by the python implementation
|
||||
cparams.positive_entries.push_back(format_template(positive_prompts[i], cmpl));
|
||||
cparams.negative_entries.push_back(format_template(negative_prompts[i], cmpl));
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
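prepare_entries expands every prompt with every completion, so the number of training entries is n_prompts * n_completions. A small standalone sketch of that expansion, with made-up example strings:

#include <cstdio>
#include <string>
#include <vector>

// sketch of the entry expansion in prepare_entries:
// every (prompt, completion) pair becomes one entry
int main() {
    std::vector<std::string> positives   = {"[INST] Act as if you're extremely happy. [/INST]"};
    std::vector<std::string> negatives   = {"[INST] Act as if you're extremely sad. [/INST]"};
    std::vector<std::string> completions = {"I feel", "Today is"};

    std::vector<std::string> positive_entries;
    std::vector<std::string> negative_entries;
    for (size_t i = 0; i < positives.size(); ++i) {
        for (const auto & cmpl : completions) {
            positive_entries.push_back(positives[i] + " " + cmpl);
            negative_entries.push_back(negatives[i] + " " + cmpl);
        }
    }
    std::printf("%zu positive / %zu negative entries\n",
                positive_entries.size(), negative_entries.size()); // 2 / 2
    return 0;
}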
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
ctrl_params cparams;
|
||||
|
||||
|
@@ -710,17 +530,8 @@ int main(int argc, char ** argv) {
|
|||
return 1;
|
||||
}
|
||||
|
||||
// load prompts
|
||||
cparams.positive_prompts = ctrlvec_load_prompt_file(cparams.positive_prompts_file);
|
||||
cparams.negative_prompts = ctrlvec_load_prompt_file(cparams.negative_prompts_file);
|
||||
if (cparams.positive_prompts.size() != cparams.negative_prompts.size()) {
|
||||
fprintf(stderr, "number of positive and negative prompts must be equal\n");
|
||||
return 1;
|
||||
}
|
||||
if (cparams.positive_prompts.empty()) {
|
||||
fprintf(stderr, "must provide at least one prompt pair\n");
|
||||
return 1;
|
||||
}
|
||||
// load and prepare entries for training
|
||||
prepare_entries(cparams);
|
||||
|
||||
callback_data cb_data;
|
||||
|
||||
|
@@ -742,72 +553,29 @@ int main(int argc, char ** argv) {
|
|||
int n_ctx = llama_n_ctx(ctx);
|
||||
int n_layers = llama_n_layer(model);
|
||||
int n_embd = llama_n_embd(model);
|
||||
int n_prompts = cparams.positive_prompts.size();
|
||||
|
||||
// init ctx_ggml
|
||||
struct ggml_init_params params_ggml = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * n_layers * 2u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
cb_data.ctx_ggml = ggml_init(params_ggml);
|
||||
|
||||
// create templated prompts
|
||||
for (int i = 0; i < n_prompts; ++i) {
|
||||
populate_entries(cparams, cparams.positive_prompts[i], cparams.negative_prompts[i]);
|
||||
}
|
||||
// get model hint param (a.k.a model arch name)
|
||||
char model_hint[128];
|
||||
llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
|
||||
|
||||
// we have to pretokenize everything because otherwise we don't know how much overhead to allocate ctx_diffs_wrapped
|
||||
std::vector<tokenized_prompt> tokenized_prompts;
|
||||
size_t n_total_tokens = 0;
|
||||
for (size_t i = 0; i < cparams.positive_entries.size(); ++i) {
|
||||
tokenized_prompt t;
|
||||
t.positive = cparams.positive_entries[i];
|
||||
t.negative = cparams.negative_entries[i];
|
||||
t.tokens_pos = ::llama_tokenize(ctx, t.positive, false);
|
||||
t.tokens_neg = ::llama_tokenize(ctx, t.negative, false);
|
||||
t.max_seq_len = std::max(t.tokens_pos.size(), t.tokens_neg.size());
|
||||
padding_seq(ctx, t.tokens_pos, t.max_seq_len);
|
||||
padding_seq(ctx, t.tokens_neg, t.max_seq_len);
|
||||
tokenized_prompt t(ctx, cparams.positive_entries[i], cparams.negative_entries[i]);
|
||||
n_total_tokens += 2 * t.max_seq_len;
|
||||
tokenized_prompts.push_back(t);
|
||||
tokenized_prompts.push_back(std::move(t));
|
||||
}
|
||||
|
||||
std::cout << "n_total_tokens: " << n_total_tokens << std::endl;
|
||||
|
||||
// init diff_ctx
|
||||
diff_ctx dctx;
|
||||
|
||||
struct ggml_init_params params_diffs_wrapped = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * n_total_tokens,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
// this we know how much overhead to allocate in advance
|
||||
struct ggml_init_params params_diff = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * n_layers,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
// and this we know exactly how much memory to allocate in advance without malloc() hacks
|
||||
struct ggml_init_params params_final = {
|
||||
/*.mem_size =*/ n_embd * sizeof(float) * n_layers
|
||||
+ ggml_tensor_overhead() * n_layers,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
dctx.n_embd = n_embd;
|
||||
dctx.n_threads = cparams.n_threads;
|
||||
dctx.ctx_diffs_wrapped = ggml_init(params_diffs_wrapped);
|
||||
dctx.ctx_diff = ggml_init(params_diff);
|
||||
dctx.ctx_final = ggml_init(params_final);
|
||||
|
||||
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
|
||||
// init train_context
|
||||
train_context ctx_train(n_embd, n_layers);
|
||||
|
||||
int token_ct = 0;
|
||||
|
||||
for(size_t i = 0; i < cparams.positive_entries.size(); ++i) {
|
||||
tokenized_prompt t = tokenized_prompts[i];
|
||||
cb_data.n_layers = n_layers;
|
||||
cb_data.n_tokens = t.max_seq_len;
|
||||
|
||||
// need to reload the model so it doesn't run out of context
|
||||
|
@@ -825,57 +593,43 @@ int main(int argc, char ** argv) {
|
|||
break;
|
||||
}
|
||||
|
||||
printf("Evaluating prompt: \"%s\" - \"%s\" (%ld tokens)\n", t.positive.c_str(), t.negative.c_str(), t.max_seq_len);
|
||||
printf("Evaluating prompt: \"%s\" - \"%s\" (%ld tokens)\n",
|
||||
tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
|
||||
tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
|
||||
t.max_seq_len);
|
||||
|
||||
cb_data.is_eval_pos = true;
|
||||
get_hidden_layers(ctx, t.tokens_pos);
|
||||
cb_data.is_eval_pos = false;
|
||||
get_hidden_layers(ctx, t.tokens_neg);
|
||||
|
||||
calc_diff(cb_data, dctx);
|
||||
// calculate diff and remove all zero rows
|
||||
auto v_diff_filtered = cb_data.calc_diff();
|
||||
|
||||
// save & concat the filtered v_diff to ctx_train
|
||||
printf("concat_diff_tmp\n");
|
||||
ctx_train.concat_diff_tmp(v_diff_filtered);
|
||||
|
||||
// reset for next iteration
|
||||
// TODO @ngxson : find a more proper way to alloc / free tensors
|
||||
ggml_free(cb_data.ctx_ggml);
|
||||
// TODO move this to the top of the loop and remove the ggml_free() outside
|
||||
cb_data.ctx_ggml = ggml_init(params_ggml);
|
||||
cb_data.v_pos.clear();
|
||||
cb_data.v_neg.clear();
|
||||
cb_data.reset();
|
||||
printf("reset\n");
|
||||
}
|
||||
|
||||
// TODO we can actually delete cb_data here but do we want to?
|
||||
|
||||
printf("dctx.v_diffs_wrapped[0][0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(dctx.v_diffs_wrapped[0][0], 0, DEBUG_POS, 0, 0));
|
||||
|
||||
printf("Done evaluate prompts\n");
|
||||
|
||||
concatenate_diffs(dctx);
|
||||
|
||||
printf("dctx.v_diff[0][0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(dctx.v_diff[0], 0, DEBUG_POS, 0, 0));
|
||||
|
||||
printf("Done concatenate diffs\n");
|
||||
|
||||
// code is known to work up to here
|
||||
|
||||
pca(dctx);
|
||||
//printf("v_final %f %f \n", cb_data.v_final[0][0], cb_data.v_final[0][1]);
|
||||
|
||||
// done with the model, we can now free it to reclaim some memory
|
||||
printf("Done evaluate prompts, unload model...\n");
|
||||
llama_free(ctx);
|
||||
llama_free_model(model);
|
||||
|
||||
// TODO figure out how to extract this from model - there's no API exposed to get model arch string
|
||||
// we need get_arch_name() from llama.cpp
|
||||
// TODO also has support been implemeneted for arches other than llama yet? see #5970
|
||||
std::string model_hint = "llama";
|
||||
export_gguf(dctx, n_layers, cparams.outfile, model_hint);
|
||||
// prepare ctx_train for PCA
|
||||
ctx_train.build_v_diff();
|
||||
|
||||
// run PCA
|
||||
pca(ctx_train.v_diff, ctx_train.v_final);
|
||||
|
||||
// write output vectors to gguf
|
||||
export_gguf(ctx_train.v_final, cparams.outfile, model_hint);
|
||||
|
||||
llama_backend_free();
|
||||
|
||||
printf("confirm we got here\n");
|
||||
|
||||
// TODO free(): invalid pointer after the entire program is done????????
|
||||
// probably because destructors free after you've already manually freed
|
||||
// TODO fix destructor/ggml_free positioning
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
examples/control-vector-generator/pca.hpp | 267 (new file)
@@ -0,0 +1,267 @@
|
|||
#include "common.h"
|
||||
#include "llama.h"
|
||||
#include "ggml.h"
|
||||
|
||||
#ifdef GGML_USE_CUDA
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
#include "ggml-metal.h"
|
||||
#endif
|
||||
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#define DEBUG_POS 5
|
||||
|
||||
static void print_debug_tensor(struct ggml_tensor * t) {
|
||||
printf("%s: %s (%s): [%ld, %ld]\n", __func__, t->name, ggml_type_name(t->type), t->ne[0], t->ne[1]);
|
||||
printf("%s: %s[0] = [", __func__, t->name);
|
||||
for (size_t i = 0; i <= DEBUG_POS; i++) {
|
||||
printf(" %f,", ggml_get_f32_nd(t, i, 0, 0, 0));
|
||||
}
|
||||
printf(" ... ]\n");
|
||||
}
|
||||
|
||||
|
||||
|
||||
struct pca_model {
|
||||
struct ggml_tensor * v_diff_original;
|
||||
struct ggml_tensor * square;
|
||||
struct ggml_tensor * square_transpose;
|
||||
struct ggml_tensor * eigenvector;
|
||||
|
||||
ggml_backend_t backend = NULL;
|
||||
ggml_backend_buffer_t buffer;
|
||||
struct ggml_context * ctx;
|
||||
};
|
||||
|
||||
void load_pca_model(pca_model & model, struct ggml_tensor * v_diff_original) {
|
||||
#ifdef GGML_USE_CUDA
|
||||
fprintf(stderr, "%s: using CUDA backend\n", __func__);
|
||||
model.backend = ggml_backend_cuda_init(0); // init device 0
|
||||
if (!model.backend) {
|
||||
fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
fprintf(stderr, "%s: using Metal backend\n", __func__);
|
||||
ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
|
||||
model.backend = ggml_backend_metal_init();
|
||||
if (!model.backend) {
|
||||
fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
|
||||
}
|
||||
#endif
|
||||
|
||||
// if there aren't GPU Backends fallback to CPU backend
|
||||
if (!model.backend) {
|
||||
model.backend = ggml_backend_cpu_init();
|
||||
}
|
||||
|
||||
//printf("v_diff_original[0][%d]: %f\n", DEBUG_POS, ggml_get_f32_nd(v_diff_original, 0, DEBUG_POS, 0, 0));
|
||||
|
||||
const int num_tensors = 4;
|
||||
|
||||
struct ggml_init_params params {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * num_tensors,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
|
||||
model.ctx = ggml_init(params);
|
||||
|
||||
model.v_diff_original = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[0], v_diff_original->ne[1]);
|
||||
model.square = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[1], v_diff_original->ne[1]);
|
||||
model.square_transpose = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[1], v_diff_original->ne[1]);
|
||||
model.eigenvector = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, v_diff_original->ne[1]);
|
||||
|
||||
model.buffer = ggml_backend_alloc_ctx_tensors(model.ctx, model.backend);
|
||||
|
||||
ggml_backend_tensor_set(model.v_diff_original, v_diff_original->data, 0, ggml_nbytes(v_diff_original));
|
||||
|
||||
// no need to load anything into square or square_transpose yet
|
||||
|
||||
// initialize model.eigenvector to random vector
|
||||
std::vector<float> random_vec;
|
||||
std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
|
||||
std::uniform_real_distribution<float> distribution(0.0, 1.0);
|
||||
for (int i = 0; i < v_diff_original->ne[1]; ++i) {
|
||||
random_vec.push_back(distribution(generator));
|
||||
}
|
||||
|
||||
// we don't normalize it at first but that shouldn't be a problem
|
||||
ggml_backend_tensor_set(model.eigenvector, random_vec.data(), 0, ggml_nbytes(model.eigenvector));
|
||||
}
|
||||
|
||||
struct ggml_cgraph * square_diff_graph(const pca_model & model) {
|
||||
static size_t buf_size = ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
|
||||
static std::vector<uint8_t> buf(buf_size);
|
||||
|
||||
struct ggml_init_params params0 = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf.data(),
|
||||
/*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
|
||||
};
|
||||
struct ggml_context * ctx0 = ggml_init(params0);
|
||||
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||
|
||||
struct ggml_tensor * square = ggml_mul_mat(ctx0, model.v_diff_original, model.v_diff_original);
|
||||
//struct ggml_tensor * square_transpose = ggml_transpose(ctx0, square);
|
||||
|
||||
ggml_build_forward_expand(gf, square);
|
||||
|
||||
ggml_free(ctx0);
|
||||
return gf;
|
||||
}
|
||||
|
||||
struct ggml_tensor * compute_square(const pca_model & model, ggml_gallocr_t allocr, int n_threads) {
|
||||
struct ggml_cgraph * gf = square_diff_graph(model);
|
||||
|
||||
ggml_gallocr_alloc_graph(allocr, gf);
|
||||
|
||||
if (ggml_backend_is_cpu(model.backend)) {
|
||||
ggml_backend_cpu_set_n_threads(model.backend, n_threads);
|
||||
}
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
if (ggml_backend_is_metal(model.backend)) {
|
||||
ggml_backend_metal_set_n_cb(model.backend, n_threads);
|
||||
}
|
||||
#endif
|
||||
|
||||
ggml_backend_graph_compute(model.backend, gf);
|
||||
|
||||
return gf->nodes[gf->n_nodes - 1];
|
||||
}
|
||||
|
||||
struct ggml_cgraph * power_iteration_graph(const pca_model & model, float tolerance) {
|
||||
static size_t buf_size = ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
|
||||
static std::vector<uint8_t> buf(buf_size);
|
||||
|
||||
struct ggml_init_params params0 = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf.data(),
|
||||
/*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
|
||||
};
|
||||
struct ggml_context * ctx0 = ggml_init(params0);
|
||||
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||
|
||||
struct ggml_tensor * b_tensor = ggml_mul_mat(ctx0, model.square, model.eigenvector);
|
||||
// TODO difference between ggml_norm and ggml_norm_inplace?
|
||||
// also is this the right way to do multi-step graphs?
|
||||
b_tensor = ggml_norm_inplace(ctx0, b_tensor, tolerance);
|
||||
|
||||
ggml_build_forward_expand(gf, b_tensor);
|
||||
|
||||
ggml_free(ctx0);
|
||||
return gf;
|
||||
}
|
||||
|
||||
struct ggml_tensor * compute_piter(const pca_model & model, ggml_gallocr_t allocr, int n_threads, float tolerance) {
|
||||
struct ggml_cgraph * gf = power_iteration_graph(model, tolerance);
|
||||
|
||||
ggml_gallocr_alloc_graph(allocr, gf);
|
||||
|
||||
if (ggml_backend_is_cpu(model.backend)) {
|
||||
ggml_backend_cpu_set_n_threads(model.backend, n_threads);
|
||||
}
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
if (ggml_backend_is_metal(model.backend)) {
|
||||
ggml_backend_metal_set_n_cb(model.backend, n_threads);
|
||||
}
|
||||
#endif
|
||||
|
||||
ggml_backend_graph_compute(model.backend, gf);
|
||||
|
||||
return gf->nodes[gf->n_nodes - 1];
|
||||
}
|
||||
|
||||
static void power_iteration(struct ggml_tensor * input, struct ggml_tensor * output, int n_threads, int maxIterations = 1000, float tolerance = 1e-7) {
|
||||
printf("in power iteration\n");
|
||||
int n_embd = input->ne[0];// shape of input: [n_embd, m]
|
||||
|
||||
pca_model model;
|
||||
load_pca_model(model, input);
|
||||
|
||||
ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
|
||||
|
||||
struct ggml_tensor * square = compute_square(model, allocr, n_threads);
|
||||
ggml_backend_tensor_set(model.square, square->data, 0, ggml_nbytes(model.square));
|
||||
|
||||
ggml_gallocr_free(allocr);
|
||||
|
||||
struct ggml_init_params host_params = {
|
||||
/*.mem_size =*/ (n_embd * sizeof(float) + ggml_tensor_overhead()) * 2u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
struct ggml_context * host_ctx = ggml_init(host_params);
|
||||
|
||||
struct ggml_tensor * host_old_eigenvector = ggml_new_tensor_1d(host_ctx, GGML_TYPE_F32, n_embd);
|
||||
struct ggml_tensor * host_new_eigenvector = ggml_new_tensor_1d(host_ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
for (int iter = 0; iter < maxIterations; ++iter) {
|
||||
|
||||
// TODO do I need to reset it like this every time?
|
||||
allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
|
||||
|
||||
struct ggml_tensor * b_tensor = compute_piter(model, allocr, n_threads, tolerance);
|
||||
|
||||
ggml_backend_tensor_get(b_tensor, host_new_eigenvector->data, 0, ggml_nbytes(b_tensor));
|
||||
ggml_backend_tensor_get(model.eigenvector, host_old_eigenvector->data, 0, ggml_nbytes(model.eigenvector));
|
||||
|
||||
// convergence check
|
||||
float diff = 0.0;
|
||||
for (int i = 0; i < n_embd; ++i) {
|
||||
diff += std::pow((ggml_get_f32_1d(host_new_eigenvector, i) - ggml_get_f32_1d(host_old_eigenvector, i)), 2);
|
||||
}
|
||||
|
||||
// update eigenvector
|
||||
ggml_backend_tensor_set(model.eigenvector, host_new_eigenvector->data, 0, ggml_nbytes(model.eigenvector));
|
||||
|
||||
try {
|
||||
if (std::sqrt(diff) < tolerance) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
catch (std::exception & e) {
|
||||
// catch division by zero I guess
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ggml_backend_tensor_get(model.eigenvector, output->data, 0, ggml_nbytes(model.eigenvector));
|
||||
|
||||
ggml_gallocr_free(allocr);
|
||||
ggml_free(host_ctx);
|
||||
ggml_free(model.ctx);
|
||||
ggml_backend_buffer_free(model.buffer);
|
||||
ggml_backend_free(model.backend);
|
||||
}
|
||||
|
||||
static void pca(
|
||||
const std::vector<struct ggml_tensor *> & v_input,
|
||||
const std::vector<struct ggml_tensor *> & v_output) {
|
||||
printf("Running PCA...\n");
|
||||
int n_embd = v_input[0]->ne[0]; // shape of v_input[0]: [n_embd, m]
|
||||
int n_threads = 8; // TODO: change me
|
||||
for (size_t il = 0; il < v_input.size(); ++il) {
|
||||
// prepare output vector
|
||||
struct ggml_tensor * ctrl_out = v_output[il];
|
||||
auto name = std::string("direction.") + std::to_string(il + 1);
|
||||
ggml_set_name(ctrl_out, name.c_str());
|
||||
// run power_iteration
|
||||
power_iteration(v_input[il], ctrl_out, n_threads);
|
||||
printf("Done with layer %d\n", il);
|
||||
print_debug_tensor(ctrl_out);
|
||||
}
|
||||
printf("Done with PCA.\n");
|
||||
}
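For readers less familiar with PCA via power iteration, here is a minimal standalone sketch of the loop implemented above (plain C++, no ggml, tiny hard-coded 2x2 matrix): repeatedly multiply the matrix by the current vector, normalize, and stop once the vector changes by less than the tolerance. The ggml version does the multiply and normalization inside a compute graph, but the convergence check is the same.

#include <cmath>
#include <cstdio>
#include <vector>

// standalone sketch of power iteration for the dominant eigenvector
int main() {
    const int n = 2;
    const float A[n][n] = {{2.0f, 1.0f}, {1.0f, 2.0f}}; // dominant eigenvector ~ (1,1)/sqrt(2)
    std::vector<float> v = {1.0f, 0.0f};                // arbitrary non-zero start

    const float tolerance = 1e-7f;
    for (int iter = 0; iter < 1000; ++iter) {
        // b = A * v, then normalize b
        std::vector<float> b(n, 0.0f);
        float norm = 0.0f;
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < n; ++j) b[i] += A[i][j] * v[j];
            norm += b[i] * b[i];
        }
        norm = std::sqrt(norm);
        for (int i = 0; i < n; ++i) b[i] /= norm;

        // convergence check: L2 distance between old and new vector
        float diff = 0.0f;
        for (int i = 0; i < n; ++i) diff += (b[i] - v[i]) * (b[i] - v[i]);
        v = b;
        if (std::sqrt(diff) < tolerance) break;
    }
    std::printf("dominant eigenvector ~ (%f, %f)\n", v[0], v[1]); // ~ (0.7071, 0.7071)
    return 0;
}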