From b839231337e72c0154413b49baaeb2c62669709d Mon Sep 17 00:00:00 2001
From: anzz1
Date: Tue, 21 Mar 2023 10:25:46 +0200
Subject: [PATCH] cmdline option for custom amount of model parts (--n_parts N)

---
 main.cpp  | 9 +++++----
 utils.cpp | 3 +++
 utils.h   | 1 +
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/main.cpp b/main.cpp
index 3321818d3..4b0229a64 100644
--- a/main.cpp
+++ b/main.cpp
@@ -90,7 +90,7 @@ struct llama_model {
 };
 
 // load the model's weights from a file
-bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx, ggml_type memory_type = GGML_TYPE_F32) {
+bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx, int n_parts, ggml_type memory_type = GGML_TYPE_F32) {
     fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
 
     std::vector<char> f_buf(1024*1024);
@@ -127,7 +127,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
     }
 
     int n_ff = 0;
-    int n_parts = 0;
 
     // load hparams
     {
@@ -145,7 +144,9 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         hparams.n_ctx = n_ctx;
 
         n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
-        n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
+
+        if (n_parts < 1)
+            n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
 
         fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab);
         fprintf(stderr, "%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
@@ -839,7 +840,7 @@ int main(int argc, char ** argv) {
     {
         const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
         const int64_t t_start_us = ggml_time_us();
-        if (!llama_model_load(params.model, model, vocab, params.n_ctx, memory_type)) {
+        if (!llama_model_load(params.model, model, vocab, params.n_ctx, params.n_parts, memory_type)) {
             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
             return 1;
         }
diff --git a/utils.cpp b/utils.cpp
index 188f114e9..50699c7ee 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -74,6 +74,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.antiprompt.push_back(argv[++i]);
         } else if (arg == "--ignore-eos") {
             params.ignore_eos = true;
+        } else if (arg == "--n_parts") {
+            params.n_parts = std::stoi(argv[++i]);
         } else if (arg == "-h" || arg == "--help") {
             gpt_print_usage(argc, argv, params);
             exit(0);
@@ -116,6 +118,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --ignore-eos          ignore end of stream token and continue generating\n");
     fprintf(stderr, "  --memory_f16          use f16 instead of f32 for memory key+value\n");
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
+    fprintf(stderr, "  --n_parts N           number of model parts (default: -1 = determine from dimensions)\n");
     fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
     fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
diff --git a/utils.h b/utils.h
index 65fe02ba1..24e9b1b7e 100644
--- a/utils.h
+++ b/utils.h
@@ -19,6 +19,7 @@ struct gpt_params {
     int32_t repeat_last_n = 64;   // last n tokens to penalize
     int32_t n_ctx = 512; //context size
     bool memory_f16 = false; // use f16 instead of f32 for memory kv
+    int32_t n_parts = -1; // amount of model parts (-1 = determine from model dimensions)
 
     // sampling parameters
     int32_t top_k = 40;