From 73747fe8eb2ffe2a71879a76996ad040a4837e86 Mon Sep 17 00:00:00 2001
From: Christian Zhou-Zheng
Date: Thu, 30 May 2024 00:31:29 -0400
Subject: [PATCH] proof-of-concept stdlib implementation

Implements PCA and file writing using mostly standard libraries. The
output is recognized as a functional control vector, but produces
gibberish.
---
 examples/CMakeLists.txt                            |   1 +
 .../control-vector-generator.cpp                   | 162 ++++++++++++++++++
 2 files changed, 163 insertions(+)

diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index b40ee4ccb..8a5a2b9e5 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -12,6 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 if (EMSCRIPTEN)
 else()
+    add_subdirectory(control-vector-generator)
     add_subdirectory(baby-llama)
     add_subdirectory(batched)
     add_subdirectory(batched-bench)
diff --git a/examples/control-vector-generator/control-vector-generator.cpp b/examples/control-vector-generator/control-vector-generator.cpp
index e72960636..eb7f05038 100644
--- a/examples/control-vector-generator/control-vector-generator.cpp
+++ b/examples/control-vector-generator/control-vector-generator.cpp
@@ -17,6 +17,7 @@ struct callback_data {
     std::vector<float *> v_pos;  // vector of matrices of size [n_embd, n_tokens]
     std::vector<float *> v_neg;  // vector of matrices of size [n_embd, n_tokens]
     std::vector<float *> v_diff; // vector of matrices of size [n_embd, n_tokens]
+    std::vector<float *> v_final; // vector of finished vectors of size [n_embd]
 };
 
 static std::string ggml_ne_string(const ggml_tensor * t) {
@@ -112,6 +113,162 @@ static void calc_diff(callback_data & cb_data) {
     }
 }
 
+// BEGIN NON-GGML IMPLEMENTATION
+
+// TODO translate to ggml
+// this probably doesn't want to be here - put it into the compute graph as a step in processing each layer
+static float* square_diff(callback_data & cb_data, size_t idx) {
+    float* result = new float[cb_data.n_embd * cb_data.n_embd];
+    std::memset(result, 0, cb_data.n_embd * cb_data.n_embd * sizeof(float));
+    for (size_t i = 0; i < cb_data.n_embd; i++) {
+        for (size_t j = 0; j < cb_data.n_embd; j++) {
+            float sum = 0.0f;
+            for (size_t k = 0; k < cb_data.n_tokens; k++) {
+                sum += cb_data.v_diff[idx][i * cb_data.n_tokens + k] * cb_data.v_diff[idx][j * cb_data.n_tokens + k];
+            }
+            result[i * cb_data.n_embd + j] = sum;
+        }
+    }
+    return result;
+}
+
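+// square_diff() above builds v_diff * v_diff^T, an [n_embd, n_embd]
+// unnormalized covariance matrix of the positive/negative hidden-state
+// differences for one layer; power_iteration() below extracts its dominant
+// eigenvector, which is the first PCA direction for that layer.
+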
+// TODO translate to ggml
+static void normalize_inplace(std::vector<float> & vec) {
+    // inefficient(?) norm computation
+    float norm = 0.0f;
+    for (const float& val : vec) {
+        norm += val * val;
+    }
+    norm = std::sqrt(norm);
+    for (float& val : vec) {
+        val /= norm;
+    }
+}
+
+// TODO translate to ggml
+static std::vector<float> mul_mat(const float * mat, const std::vector<float> & vec, size_t dim) {
+    std::vector<float> result(dim, 0.0f);
+    for (size_t i = 0; i < dim; ++i) {
+        for (size_t j = 0; j < dim; ++j) {
+            result[i] += mat[i * dim + j] * vec[j];
+        }
+    }
+    return result;
+}
+
+// TODO translate to ggml
+static std::vector<float> power_iteration(callback_data & cb_data, const float * matrix, int maxIterations = 1000, float tolerance = 1e-8f) {
+    std::vector<float> b_tensor = std::vector<float>();
+
+    // random vector gen/norm
+    std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
+    std::uniform_real_distribution<float> distribution(0.0, 1.0);
+    for (int i = 0; i < cb_data.n_embd; ++i) {
+        b_tensor.push_back(distribution(generator));
+    }
+    normalize_inplace(b_tensor);
+
+    for (int iter = 0; iter < maxIterations; ++iter) {
+
+        // store the previous one so we can check for convergence
+        std::vector<float> b_prev_tensor = b_tensor;
+
+        // matrix multiplication and renormalize
+        b_tensor = mul_mat(matrix, b_tensor, cb_data.n_embd);
+        normalize_inplace(b_tensor);
+
+        // convergence check
+        float diff = 0.0;
+        for (int i = 0; i < cb_data.n_embd; ++i) {
+            diff += std::pow(b_tensor[i] - b_prev_tensor[i], 2);
+        }
+        if (std::sqrt(diff) < tolerance) {
+            break;
+        }
+    }
+
+    return b_tensor;
+}
+
+// TODO translate to ggml
+static void pca(callback_data & cb_data) {
+    for (size_t i = 0; i < cb_data.v_diff.size(); i++) {
+        float* matrix = square_diff(cb_data, i);
+        std::vector<float> eigenvector = power_iteration(cb_data, matrix);
+        // copy the result onto the heap: v_final holds raw pointers, and a
+        // pointer into the local vector would dangle after this iteration
+        float* direction = new float[cb_data.n_embd];
+        std::memcpy(direction, eigenvector.data(), cb_data.n_embd * sizeof(float));
+        cb_data.v_final.push_back(direction);
+        delete[] matrix;
+        // TODO make your print outputs nicer
+        std::cout << "Done with layer " << i << "\n";
+    }
+}
+
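+// NOTE: the export below is meant to mirror the control vector GGUF layout
+// that repeng (https://github.com/vgel/repeng) produces: one F32 tensor of
+// shape [n_embd] per layer, named "direction.<n>" with n starting at 1, plus
+// the general.architecture / controlvector.* metadata keys.
+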
+template <typename T>
+static std::string to_string(const T & val) {
+    std::stringstream ss;
+    ss << val;
+    return ss.str();
+}
+
+static void export_gguf(callback_data & cb_data, const std::string fname) {
+    struct gguf_context * ctx = gguf_init_empty();
+
+    gguf_set_val_str(ctx, "general.architecture", "controlvector");
+    gguf_set_val_str(ctx, "controlvector.model_hint", "mistral"); // TODO steal this from the model somehow (arch)
+    gguf_set_val_i32(ctx, "controlvector.layer_count", cb_data.v_final.size());
+
+    //size_t buf_size = 3u*cb_data.n_embd*sizeof(float); // TODO how much size do i need???
+    size_t buf_size = 128u*1024u*4096u;
+    std::vector<uint8_t> buf(buf_size);
+
+    // TODO customize mem size - I have no idea
+    struct ggml_init_params params = {
+        /*.mem_size   =*/ buf_size,
+        /*.mem_buffer =*/ buf.data(),
+        /*.no_alloc   =*/ false,
+    };
+
+    struct ggml_context * ctx_data = ggml_init(params);
+
+    // TODO direction tensor invalid??? probably because you start at 0. see below
+    for (size_t i = 0; i < cb_data.v_final.size(); i++) {
+        const std::string name = "direction." + to_string(i+1); // TODO figure out how to get the number for direction - dl repeng locally and debug
+        // clone the repo and use importlib
+        // git clone https://github.com/vgel/repeng.git
+
+        struct ggml_tensor * cur = ggml_new_tensor_1d(ctx_data, GGML_TYPE_F32, cb_data.n_embd);
+
+        std::cout << "Made it past tensor creation";
+
+        ggml_set_name(cur, name.c_str());
+        std::cout << "Made it past tensor name set";
+
+        // whining about buf != NULL
+        // TODO figure out how to set data
+        //ggml_backend_tensor_set(cur, cb_data.v_final[i], 0, cb_data.n_embd * sizeof(float)); // if this doesn't work refer to gguf.cpp example
+        {
+            float * data = (float *) cur->data;
+            for (int j = 0; j < ggml_nelements(cur); j++) {
+                data[j] = cb_data.v_final[i][j];
+            }
+        }
+        std::cout << "Made it past tensor backend set";
+
+        gguf_add_tensor(ctx, cur);
+        std::cout << "Added tensor " << i << "\n";
+    }
+
+    std::cout << "Writing file\n";
+
+    gguf_write_to_file(ctx, fname.c_str(), false);
+
+    printf("%s: wrote file '%s'\n", __func__, fname.c_str());
+
+    ggml_free(ctx_data);
+    gguf_free(ctx);
+}
+
+// END NON-GGML IMPLEMENTATION
+
 int main(int argc, char ** argv) {
     callback_data cb_data;
     std::string prompt_pos = "happy";
@@ -167,6 +324,11 @@ int main(int argc, char ** argv) {
     calc_diff(cb_data);
     printf("%f %f \n", cb_data.v_diff[0][4096], cb_data.v_diff[0][4096]);
 
+    pca(cb_data);
+    // TODO --outfile
+    std::cout << "Done with PCA" << "\n";
+    export_gguf(cb_data, "controlvector.gguf");
+
     //llama_print_timings(ctx);
     llama_free(ctx);