From 3cd8dde0d1357b7f11bdd25c45d5bf5e97e284a0 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 24 Mar 2023 06:22:28 +0200
Subject: [PATCH 1/3] Revert "Fix memory allocation issues and seg faults"

This reverts commit 4870e455b3653f7d7769fa5772b2c90ffad088df.

Will provide the correct fix later
---
 llama.cpp | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index cf796cce3..d55219256 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -102,9 +102,6 @@ struct llama_context {
     // decode output (2-dimensional array: [n_tokens][n_vocab])
     std::vector<float> logits;
     bool logits_all = false;
-
-    // work buffer for transformer evaluation
-    std::vector<float> buf_eval;
 };
 
 struct llama_context_params llama_context_default_params() {
@@ -630,19 +627,27 @@ static bool llama_eval_internal(
     const int n_rot   = hparams.n_embd/hparams.n_head;
 
     auto & mem_per_token = lctx.mem_per_token;
-    auto & buf_eval      = lctx.buf_eval;
-
-    if (mem_per_token*(n_past + N + 16) > buf_eval.size()) {
-        const size_t buf_size_new = 1.618*buf_eval.size();
+
+    // TODO: fix this hardcoded size
+    static size_t buf_size = 512u*1024*1024;
+    static void * buf = malloc(buf_size);
 
-        //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_eval.size(), buf_size_new);
+    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
+        const size_t buf_size_new = 1.3*(mem_per_token*N); // add 30% to account for ggml object overhead
+        //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
-        buf_eval.resize(buf_size_new);
+        // reallocate
+        buf_size = buf_size_new;
+        buf = realloc(buf, buf_size);
+        if (buf == nullptr) {
+            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
+            return false;
+        }
     }
 
     struct ggml_init_params params = {
-        /*.mem_size   =*/ buf_eval.size(),
-        /*.mem_buffer =*/ buf_eval.data(),
+        /*.mem_size   =*/ buf_size,
+        /*.mem_buffer =*/ buf,
     };
 
     struct ggml_context * ctx0 = ggml_init(params);
@@ -827,11 +832,10 @@ static bool llama_eval_internal(
         memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
     }
 
-    if (N == 1) {
-        mem_per_token = ggml_used_mem(ctx0)/(n_past + N);
+    if (mem_per_token == 0) {
+        mem_per_token = ggml_used_mem(ctx0)/N;
     }
-
-    //fprintf(stderr, "\nused_mem = %zu, %zu MB\n", ggml_used_mem(ctx0), ggml_used_mem(ctx0)/1024/1024);
+    //fprintf(stderr, "used_mem = %zu\n", ggml_used_mem(ctx0));
 
     ggml_free(ctx0);
 
@@ -1412,8 +1416,6 @@ struct llama_context * llama_init_from_file(
         return nullptr;
     }
 
-    ctx->buf_eval.resize(512u*1024u*1024u);
-
     return ctx;
 }
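
For reference, the allocation strategy restored by this revert works as follows: the first batch is evaluated out of a fixed 512 MB scratch buffer, the per-token memory use reported by ggml_used_mem() is recorded, and later calls grow the buffer with roughly 30% headroom whenever a batch would not fit. The standalone sketch below is not part of the patch and the helper names are illustrative; it only summarizes the heuristic.

#include <stdio.h>
#include <stdlib.h>

static size_t buf_size      = 512u*1024*1024; // initial scratch size (hardcoded)
static void * buf           = NULL;
static size_t mem_per_token = 0;              // measured after the first evaluation

// make sure the scratch buffer can hold a batch of n_tokens; returns 0 on success
static int scratch_ensure(int n_tokens) {
    if (buf == NULL) {
        buf = malloc(buf_size);
    }
    if (mem_per_token > 0 && mem_per_token*(size_t) n_tokens > buf_size) {
        // add ~30% to account for ggml object overhead
        const size_t buf_size_new = (size_t) (1.3*mem_per_token*n_tokens);
        void * buf_new = realloc(buf, buf_size_new); // keep the old pointer if realloc fails
        if (buf_new == NULL) {
            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size_new);
            return 1;
        }
        buf      = buf_new;
        buf_size = buf_size_new;
    }
    return buf == NULL ? 1 : 0;
}

// call after an evaluation that used `used_bytes` for a batch of n_tokens
static void scratch_update(size_t used_bytes, int n_tokens) {
    if (mem_per_token == 0) {
        mem_per_token = used_bytes/n_tokens;
    }
}
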
From b6b268d4415fd3b3e53f22b6619b724d4928f713 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 24 Mar 2023 09:13:35 +0200
Subject: [PATCH 2/3] Add link to Roadmap discussion

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index ee8dc1dcb..06799b5b3 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,8 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
 
 **Hot topics:**
 
+- [Roadmap (short-term)](https://github.com/ggerganov/llama.cpp/discussions/457)
 - New C-style API is now available: https://github.com/ggerganov/llama.cpp/pull/370
-- [Added Alpaca support](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca)
 - Cache input prompts for faster initialization: https://github.com/ggerganov/llama.cpp/issues/64
 - Create a `llama.cpp` logo: https://github.com/ggerganov/llama.cpp/issues/105

From 8d4a855c241ecb0f3ddc03447fe56002ebf27a37 Mon Sep 17 00:00:00 2001
From: Luciano
Date: Fri, 24 Mar 2023 08:05:13 -0700
Subject: [PATCH 3/3] Add embedding mode with arg flag. Currently working (#282)

* working but ugly
* add arg flag, not working on embedding mode
* typo
* Working! Thanks to @nullhook
* make params argument instead of hardcoded boolean. remove useless time check
* start doing the instructions but not finished. This probably doesnt compile
* Embeddings extraction support

---------

Co-authored-by: Georgi Gerganov
---
 llama.cpp | 56 ++++++++++++++++++++++++++++++++++++++++++++----------
 llama.h   |  5 +++++
 main.cpp  | 23 +++++++++++++++++++++++
 utils.cpp |  4 ++++
 utils.h   |  4 ++++
 5 files changed, 82 insertions(+), 10 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index d55219256..d8c771529 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -102,6 +102,9 @@ struct llama_context {
     // decode output (2-dimensional array: [n_tokens][n_vocab])
     std::vector<float> logits;
     bool logits_all = false;
+
+    // input embedding (1-dimensional array: [n_embd])
+    std::vector<float> embedding;
 };
 
 struct llama_context_params llama_context_default_params() {
@@ -112,6 +115,7 @@ struct llama_context_params llama_context_default_params() {
         /*.f16_kv     =*/ false,
         /*.logits_all =*/ false,
         /*.vocab_only =*/ false,
+        /*.embedding  =*/ false,
     };
 
     return result;
@@ -592,8 +596,6 @@ static bool llama_model_load(
         fin.close();
     }
 
-    lctx.logits.reserve(lctx.model.hparams.n_ctx);
-
     lctx.t_load_us = ggml_time_us() - t_start_us;
 
     return true;
@@ -791,6 +793,9 @@ static bool llama_eval_internal(
         inpL = cur;
     }
 
+    // used at the end to optionally extract the embeddings
+    struct ggml_tensor * embeddings = NULL;
+
     // norm
     {
         inpL = ggml_rms_norm(ctx0, inpL);
@@ -799,6 +804,8 @@ static bool llama_eval_internal(
         inpL = ggml_mul(ctx0,
                     ggml_repeat(ctx0, model.norm, inpL),
                     inpL);
+
+        embeddings = inpL;
     }
 
     // lm_head
@@ -821,15 +828,26 @@ static bool llama_eval_internal(
     //embd_w.resize(n_vocab*N);
     //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
 
-    auto & logits_out = lctx.logits;
+    // extract logits
+    {
+        auto & logits_out = lctx.logits;
 
-    if (lctx.logits_all) {
-        logits_out.resize(n_vocab * N);
-        memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
-    } else {
-        // return result for just the last token
-        logits_out.resize(n_vocab);
-        memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+        if (lctx.logits_all) {
+            logits_out.resize(n_vocab * N);
+            memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+        } else {
+            // return result for just the last token
+            logits_out.resize(n_vocab);
+            memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+        }
+    }
+
+    // extract embeddings
+    if (lctx.embedding.size()) {
+        auto & embedding_out = lctx.embedding;
+
+        embedding_out.resize(n_embd);
+        memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
     }
 
     if (mem_per_token == 0) {
@@ -1416,6 +1434,20 @@ struct llama_context * llama_init_from_file(
         return nullptr;
     }
 
+    // reserve memory for context buffers
+    {
+        const auto & hparams = ctx->model.hparams;
+        if (params.logits_all) {
+            ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
+        } else {
+            ctx->logits.reserve(hparams.n_ctx);
+        }
+
+        if (params.embedding){
+            ctx->embedding.reserve(hparams.n_embd);
+        }
+    }
+
     return ctx;
 }
 
@@ -1484,6 +1516,10 @@ float * llama_get_logits(struct llama_context * ctx) {
     return ctx->logits.data();
 }
 
+float * llama_get_embeddings(struct llama_context * ctx) {
+    return ctx->embedding.data();
+}
+
 const char * llama_token_to_str(struct llama_context * ctx, llama_token token) {
     if (token >= llama_n_vocab(ctx)) {
         return nullptr;
diff --git a/llama.h b/llama.h
index 3df9ed1fd..209b4dbe8 100644
--- a/llama.h
+++ b/llama.h
@@ -53,6 +53,7 @@ extern "C" {
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
+        bool embedding;  // embedding mode only
     };
 
     LLAMA_API struct llama_context_params llama_context_default_params();
@@ -108,6 +109,10 @@ extern "C" {
     // Cols: n_vocab
     LLAMA_API float * llama_get_logits(struct llama_context * ctx);
 
+    // Get the embeddings for the input
+    // shape: [n_embd] (1-dimensional)
+    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
+
     // Token Id -> String. Uses the vocabulary in the provided context
     LLAMA_API const char * llama_token_to_str(struct llama_context * ctx, llama_token token);
 
diff --git a/main.cpp b/main.cpp
index 5ba6d5a75..46a80ff87 100644
--- a/main.cpp
+++ b/main.cpp
@@ -199,6 +199,7 @@ int main(int argc, char ** argv) {
         lparams.seed       = params.seed;
         lparams.f16_kv     = params.memory_f16;
         lparams.logits_all = params.perplexity;
+        lparams.embedding  = params.embedding;
 
         ctx = llama_init_from_file(params.model.c_str(), lparams);
 
@@ -292,6 +293,7 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd;
 
+
     int last_n_size = params.repeat_last_n;
     std::vector<llama_token> last_n_tokens(last_n_size);
     std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
@@ -324,6 +326,27 @@ int main(int argc, char ** argv) {
     // the first thing we will do is to output the prompt, so set color accordingly
     set_console_state(CONSOLE_STATE_PROMPT);
 
+    if (params.embedding){
+        embd = embd_inp;
+
+        if (embd.size() > 0) {
+            if (llama_eval(ctx, embd.data(), embd.size(), n_past, params.n_threads)) {
+                fprintf(stderr, "%s : failed to eval\n", __func__);
+                return 1;
+            }
+        }
+
+        const auto embeddings = llama_get_embeddings(ctx);
+
+        // TODO: print / use the embeddings
+
+        if (params.use_color) {
+            printf(ANSI_COLOR_RESET);
+        }
+
+        return 0;
+    }
+
     while (remaining_tokens > 0 || params.interactive) {
         // predict
         if (embd.size() > 0) {
diff --git a/utils.cpp b/utils.cpp
index 45c9cabb1..0df89af0b 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -117,6 +117,10 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.model = argv[i];
         } else if (arg == "-i" || arg == "--interactive") {
             params.interactive = true;
+        } else if (arg == "--embedding") {
+            params.embedding = true;
+        } else if (arg == "--interactive-start") {
+            params.interactive = true;
         } else if (arg == "--interactive-first") {
             params.interactive_start = true;
         } else if (arg == "-ins" || arg == "--instruct") {
diff --git a/utils.h b/utils.h
index b0de556c9..8120c123b 100644
--- a/utils.h
+++ b/utils.h
@@ -32,13 +32,17 @@ struct gpt_params {
     std::string model = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
+    std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
 
     bool memory_f16 = false; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
     bool interactive = false; // interactive mode
+
+    bool embedding = false; // get only sentence embedding
     bool interactive_start = false; // wait for user input immediately
+
     bool instruct = false; // instruction mode (used for Alpaca models)
     bool ignore_eos = false; // do not stop generating after eos
     bool perplexity = false; // compute perplexity over the prompt
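
The embedding path in main.cpp above stops at a TODO instead of printing the result, so here is a minimal standalone sketch of how a caller might drive the new API. It is not part of the patch: the model path, thread count, prompt, and the hardcoded hidden size are placeholders, and error handling is kept to a minimum.

#include <cstdio>
#include <string>
#include <vector>

#include "llama.h"

int main() {
    llama_context_params lparams = llama_context_default_params();
    lparams.embedding = true; // enable the new embedding mode

    llama_context * ctx = llama_init_from_file("models/7B/ggml-model.bin", lparams);
    if (ctx == NULL) {
        return 1;
    }

    // tokenize the prompt (add_bos = true, as main.cpp does for the initial prompt)
    const std::string prompt = " Hello world";
    std::vector<llama_token> tokens(prompt.size() + 1);
    const int n_tokens = llama_tokenize(ctx, prompt.c_str(), tokens.data(), (int) tokens.size(), true);
    if (n_tokens < 0) {
        return 1;
    }
    tokens.resize(n_tokens);

    // a single eval over the whole prompt fills the context's embedding buffer
    // with the last token's activations after the final norm
    if (llama_eval(ctx, tokens.data(), (int) tokens.size(), 0, 4)) {
        fprintf(stderr, "failed to eval\n");
        return 1;
    }

    const float * emb = llama_get_embeddings(ctx);
    const int n_embd  = 4096; // hidden size of the 7B model; adjust for other model sizes
    for (int i = 0; i < n_embd; i++) {
        printf("%f ", emb[i]);
    }
    printf("\n");

    llama_free(ctx);
    return 0;
}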