proof-of-concept stdlib implementation

Implements PCA and file writing using mostly standard libraries. The output file is recognized as a functional control vector, but applying it produces gibberish.
This commit is contained in:
Christian Zhou-Zheng 2024-05-30 00:31:29 -04:00
parent b30bea3257
commit 73747fe8eb
2 changed files with 163 additions and 0 deletions

@@ -12,6 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
if (EMSCRIPTEN)
else()
add_subdirectory(control-vector-generator)
add_subdirectory(baby-llama)
add_subdirectory(batched)
add_subdirectory(batched-bench)

@@ -17,6 +17,7 @@ struct callback_data {
std::vector<float *> v_pos; // vector of matrices of size [n_embd, n_tokens]
std::vector<float *> v_neg; // vector of matrices of size [n_embd, n_tokens]
std::vector<float *> v_diff; // vector of matrices of size [n_embd, n_tokens]
std::vector<float *> v_final; // vector of finished vectors of size [n_embd]
};
static std::string ggml_ne_string(const ggml_tensor * t) {
@@ -112,6 +113,162 @@ static void calc_diff(callback_data & cb_data) {
}
}
// BEGIN NON-GGML IMPLEMENTATION

// stdlib headers used by this section (no-ops if already included at the top of the file)
#include <algorithm>
#include <cmath>
#include <cstring>
#include <ctime>
#include <iostream>
#include <random>
#include <sstream>

// TODO translate to ggml
// this probably doesn't want to be here - put it into the compute graph as a step in processing each layer
static float* square_diff(callback_data & cb_data, size_t idx) {
float* result = new float[cb_data.n_embd * cb_data.n_embd];
std::memset(result, 0, cb_data.n_embd * cb_data.n_embd * sizeof(float));
for (size_t i = 0; i < cb_data.n_embd; i++) {
for (size_t j = 0; j < cb_data.n_embd; j++) {
float sum = 0.0f;
for (size_t k = 0; k < cb_data.n_tokens; k++) {
sum += cb_data.v_diff[idx][i * cb_data.n_tokens + k] * cb_data.v_diff[idx][j * cb_data.n_tokens + k];
}
result[i * cb_data.n_embd + j] = sum;
}
}
return result;
}
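// Sketch (not part of this commit) of how the square_diff product could be expressed
// with ggml ops instead of raw loops, per the TODO above. Assumes a ggml_context `ctx`
// with enough scratch memory (allocating, i.e. no_alloc == false); the function name
// square_diff_ggml_sketch and the fixed thread count are illustrative only.
static struct ggml_tensor * square_diff_ggml_sketch(struct ggml_context * ctx, const float * diff, int n_embd, int n_tokens) {
    // ne[0] is ggml's fastest-varying dimension, so {n_tokens, n_embd} matches
    // the row-major [n_embd, n_tokens] layout used by v_diff
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_tokens, n_embd);
    memcpy(a->data, diff, (size_t) n_embd * n_tokens * sizeof(float));
    // ggml_mul_mat contracts over ne[0] (the token dimension), producing the
    // [n_embd, n_embd] matrix diff * diff^T -- the same result as the loops above
    struct ggml_tensor * sq = ggml_mul_mat(ctx, a, a);
    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, sq);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 4);
    return sq; // sq->data now holds the product
}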
// TODO translate to ggml
static void normalize_inplace(std::vector<float> & vec) {
// inefficient(?) norm computation
float norm = 0.0f;
for (const float& val : vec) {
norm += val * val;
}
norm = std::sqrt(norm);
for (float& val : vec) {
val /= norm;
}
}
// TODO translate to ggml
static std::vector<float> mul_mat(const float * mat, const std::vector<float> & vec, size_t dim) {
std::vector<float> result(dim, 0.0f);
for (size_t i = 0; i < dim; ++i) {
for (size_t j = 0; j < dim; ++j) {
result[i] += mat[i * dim + j] * vec[j];
}
}
return result;
}
// TODO translate to ggml
static std::vector<float> power_iteration(callback_data & cb_data, const float * matrix, int maxIterations = 1000, float tolerance = 1e-8) {
std::vector<float> b_tensor = std::vector<float>();
// random vector gen/norm
std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
std::uniform_real_distribution<float> distribution(0.0, 1.0);
for (int i = 0; i < cb_data.n_embd; ++i) {
b_tensor.push_back(distribution(generator));
}
normalize_inplace(b_tensor);
for (int iter = 0; iter < maxIterations; ++iter) {
// store the previous one so we can check for convergence
std::vector<float> b_prev_tensor = b_tensor;
// matrix multiplication and renormalize
b_tensor = mul_mat(matrix, b_tensor, cb_data.n_embd);
normalize_inplace(b_tensor);
// convergence check
float diff = 0.0;
for (int i = 0; i < cb_data.n_embd; ++i) {
diff += std::pow(b_tensor[i] - b_prev_tensor[i], 2);
}
if (std::sqrt(diff) < tolerance) {
break;
}
}
return b_tensor;
}
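// Hypothetical usage sketch (not part of this commit): on a matrix with a known
// dominant eigenvector, e.g. diag(2, 1), power_iteration should converge to roughly
// {1, 0} up to sign; only cb_data.n_embd is read by the function, so a minimal
// callback_data is enough:
//   callback_data cb;
//   cb.n_embd = 2;
//   const float m[4] = { 2.0f, 0.0f,
//                        0.0f, 1.0f };
//   std::vector<float> v = power_iteration(cb, m); // expect v approximately {1, 0}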
// TODO translate to ggml
static void pca(callback_data & cb_data) {
for (size_t i = 0; i < cb_data.v_diff.size(); i++) {
float* matrix = square_diff(cb_data, i);
std::vector<float> eigenvector = power_iteration(cb_data, matrix);
// copy the eigenvector into storage that outlives this loop iteration; pushing
// &eigenvector[0] directly would leave v_final holding a dangling pointer once
// the local vector is destroyed
float * direction = new float[cb_data.n_embd];
std::copy(eigenvector.begin(), eigenvector.end(), direction);
cb_data.v_final.push_back(direction);
delete[] matrix;
// TODO make your print outputs nicer
std::cout << "Done with layer " << i << "\n";
}
}
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static void export_gguf(callback_data & cb_data, const std::string fname) {
struct gguf_context * ctx = gguf_init_empty();
gguf_set_val_str(ctx, "general.architecture", "controlvector");
gguf_set_val_str(ctx, "controlvector.model_hint", "mistral"); // TODO steal this from the model somehow (arch)
gguf_set_val_i32(ctx, "controlvector.layer_count", cb_data.v_final.size());
//size_t buf_size = 3u*cb_data.n_embd*sizeof(float); // TODO how much size do i need???
size_t buf_size = 128u*1024u*4096u;
std::vector<uint8_t> buf(buf_size);
// TODO customize mem size - I have no idea
struct ggml_init_params params = {
/*.mem_size =*/ buf_size,
/*.mem_buffer =*/ buf.data(),
/*.no_alloc =*/ false,
};
struct ggml_context * ctx_data = ggml_init(params);
// TODO direction tensor invalid??? probably because you start at 0. see below
for (int i = 0; i < cb_data.v_final.size(); i++) {
const std::string name = "direction." + to_string(i+1); // TODO figure out how to get the number for direction - dl repeng locally and debug
// clone the repo and use importlib
// git clone https://github.com/vgel/repeng.git
struct ggml_tensor * cur = ggml_new_tensor_1d(ctx_data, GGML_TYPE_F32, cb_data.n_embd);
std::cout << "Made it past tensor creation";
ggml_set_name(cur, name.c_str());
std::cout << "Made it past tensor name set";
// whining about buf != NULL
// TODO figure out how to set data
//ggml_backend_tensor_set(cur, cb_data.v_final[i], 0, cb_data.n_embd * sizeof(float)); // if this doesn't work refer to gguf.cpp example
{
float * data = (float *) cur->data;
for(int j = 0; j < ggml_nelements(cur); j++) {
data[j] = cb_data.v_final[i][j];
}
}
std::cout << "Made it past tensor backend set";
gguf_add_tensor(ctx, cur);
std::cout << "Added tensor " << i << "\n";
}
std::cout << "Writing file\n";
gguf_write_to_file(ctx, fname.c_str(), false);
printf("%s: wrote file '%s;\n", __func__, fname.c_str());
ggml_free(ctx_data);
gguf_free(ctx);
}
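// Sketch (not part of this commit) of how the exported file could be read back with
// the gguf API to sanity-check that the direction tensors were written; the function
// name dump_controlvector_sketch is illustrative only.
static void dump_controlvector_sketch(const std::string & fname) {
    struct ggml_context * meta = NULL;
    struct gguf_init_params gparams = {
        /*.no_alloc =*/ true,
        /*.ctx      =*/ &meta,
    };
    struct gguf_context * gctx = gguf_init_from_file(fname.c_str(), gparams);
    if (gctx == NULL) {
        printf("failed to load %s\n", fname.c_str());
        return;
    }
    for (int i = 0; i < gguf_get_n_tensors(gctx); i++) {
        printf("tensor %d: %s\n", i, gguf_get_tensor_name(gctx, i));
    }
    gguf_free(gctx);
    ggml_free(meta);
}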
// END NON-GGML IMPLEMENTATION
int main(int argc, char ** argv) {
callback_data cb_data;
std::string prompt_pos = "happy";
@@ -167,6 +324,11 @@ int main(int argc, char ** argv) {
calc_diff(cb_data);
printf("%f %f \n", cb_data.v_diff[0][4096], cb_data.v_diff[0][4096]);
pca(cb_data);
// TODO --outfile
std::cout << "Done with PCA" << "\n";
export_gguf(cb_data, "controlvector.gguf");
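// the resulting file could then be tried against a model, e.g. (assuming llama.cpp's
// existing --control-vector flag in the main example):
//   ./main -m /path/to/model.gguf --control-vector controlvector.gguf -p "..."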
//llama_print_timings(ctx);
llama_free(ctx);