From 2d5d82e915c65ff22143fe0eb661102985855d3b Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Thu, 28 Dec 2023 11:48:21 +0800
Subject: [PATCH] allocate gpt_params on heap instead to avoid rare segfault

---
 gpttype_adapter.cpp | 223 ++++++++++++++++++++++----------------------
 klite.embd          |  20 ++++
 2 files changed, 132 insertions(+), 111 deletions(-)

diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp index 705f7958d..e69260f9c 100644 --- a/gpttype_adapter.cpp +++ b/gpttype_adapter.cpp @@ -73,7 +73,7 @@ static llama_v2_context * llama_ctx_v2; static llama_v3_context * llama_ctx_v3; static llama_context * llama_ctx_v4; -static gpt_params params; +static gpt_params * kcpp_params; static int max_context_limit_at_load = 0; static int n_past = 0; static int n_threads = 4; @@ -677,14 +677,15 @@ void PurgeMissingTokens(llama_context * ctx, std::vector &current_context_t ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format, FileFormatExtraMeta file_format_meta) { ggml_time_init(); + kcpp_params = new gpt_params(); //allocate on heap to avoid linux segfault. yes this leaks memory. file_format = in_file_format; - n_threads = params.n_threads = inputs.threads; - n_blasthreads = params.n_threads_batch = inputs.blasthreads; + n_threads = kcpp_params->n_threads = inputs.threads; + n_blasthreads = kcpp_params->n_threads_batch = inputs.blasthreads; bool isGguf = (file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON); - n_batch = params.n_batch = (isGguf?normalbatchsize:smallbatchsize); - modelname = params.model = inputs.model_filename; + n_batch = kcpp_params->n_batch = (isGguf?normalbatchsize:smallbatchsize); + modelname = kcpp_params->model = inputs.model_filename; useSmartContext = inputs.use_smartcontext; useContextShift = inputs.use_contextshift; debugmode = inputs.debugmode; @@ -703,13 +704,13 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in clamped_max_context_length = 16384; } - params.n_ctx = clamped_max_context_length; + kcpp_params->n_ctx = clamped_max_context_length; max_context_limit_at_load = clamped_max_context_length; neox_ctx_v2.hparams.n_ctx = neox_ctx_v3.hparams.n_ctx = gptj_ctx_v1.hparams.n_ctx = gptj_ctx_v2.hparams.n_ctx = gptj_ctx_v3.hparams.n_ctx = gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx - = mpt_ctx_v3.hparams.n_ctx = params.n_ctx; + = mpt_ctx_v3.hparams.n_ctx = kcpp_params->n_ctx; //determine rope scaling params float rope_freq_scale = 1.0f; @@ -725,14 +726,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in else { rope_freq_scale = 1.0f; - if (params.n_ctx <= 2048) //normie mode + if (kcpp_params->n_ctx <= 2048) //normie mode { rope_freq_base = 10000.0f; } else { //approximate NTK aware ctx - auto effectivenctx = params.n_ctx; + auto effectivenctx = kcpp_params->n_ctx; if((file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON) && file_format_meta.n_ctx_train > 2048) { float factor = file_format_meta.n_ctx_train/2048; @@ -822,7 +823,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in //determine mem per token const std::vector tmp = {1, 2, 3, 4}; - llama_v2_eval(llama_ctx_v2, tmp.data(), tmp.size(), 0, params.n_threads); + llama_v2_eval(llama_ctx_v2, tmp.data(), tmp.size(), 0, kcpp_params->n_threads); return ModelLoadResult::SUCCESS; } else if(file_format == FileFormat::GGJT_3) @@ -889,7 +890,7 @@
ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in //determine mem per token const std::vector tmp = {1, 2, 3, 4}; - auto er = llama_v3_eval(llama_ctx_v3, tmp.data(), tmp.size(), 0, params.n_threads); + auto er = llama_v3_eval(llama_ctx_v3, tmp.data(), tmp.size(), 0, kcpp_params->n_threads); if(er!=0) { printf("\nLLAMA EVAL returned nonzero!\n"); @@ -1119,7 +1120,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in rwkv_ctx_v3->logits_out = (float *)malloc(logitbufsiz); rwkv_ctx_v3->state_in = nullptr; - bool testeval = rwkv_eval(rwkv_ctx_v3, params.n_threads, 0, rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out); + bool testeval = rwkv_eval(rwkv_ctx_v3, kcpp_params->n_threads, 0, rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out); if (!testeval) { printf("\nError: RWKV Init Eval Failed!\n"); @@ -1136,10 +1137,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in } else if (file_format == FileFormat::GPT2_1) { - ModelLoadResult res = legacy_gpt2_model_load(params.model, gpt2_ctx_v1, vocab, file_format); + ModelLoadResult res = legacy_gpt2_model_load(kcpp_params->model, gpt2_ctx_v1, vocab, file_format); if(res==ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) @@ -1151,17 +1152,17 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = gpt2_ctx_v1.hparams.n_vocab; // determine the required inference memory per token: - legacy_gpt2_eval(gpt2_ctx_v1, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); + legacy_gpt2_eval(gpt2_ctx_v1, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); return ModelLoadResult::SUCCESS; } else if (file_format == FileFormat::GPT2_2 || file_format==FileFormat::GPT2_3 || file_format==FileFormat::GPT2_4) { if(file_format==FileFormat::GPT2_4) { - ModelLoadResult res = gpt2_model_load(params.model, gpt2_ctx_v3, vocab, file_format, inputs.gpulayers); + ModelLoadResult res = gpt2_model_load(kcpp_params->model, gpt2_ctx_v3, vocab, file_format, inputs.gpulayers); if(res==ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) @@ -1173,7 +1174,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = gpt2_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: - gpt2_eval(gpt2_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch); + gpt2_eval(gpt2_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch); return ModelLoadResult::SUCCESS; } else @@ -1181,10 +1182,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in //newer format has bit unshuffling SetQuantsUnshuffled(file_format == FileFormat::GPT2_3); - ModelLoadResult res = gpt2_v2_model_load(params.model, gpt2_ctx_v2, vocab, file_format, inputs.gpulayers); + ModelLoadResult res = gpt2_v2_model_load(kcpp_params->model, gpt2_ctx_v2, vocab, file_format, inputs.gpulayers); if(res==ModelLoadResult::FAIL) { - fprintf(stderr, "%s: 
failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) @@ -1196,16 +1197,16 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = gpt2_ctx_v2.hparams.n_vocab; // determine the required inference memory per token: - gpt2_v2_eval(gpt2_ctx_v2, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); + gpt2_v2_eval(gpt2_ctx_v2, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); return ModelLoadResult::SUCCESS; } } else if (file_format == FileFormat::GPTJ_1 || file_format == FileFormat::GPTJ_2) { - ModelLoadResult res = legacy_gptj_model_load(params.model, gptj_ctx_v1, vocab, file_format); + ModelLoadResult res = legacy_gptj_model_load(kcpp_params->model, gptj_ctx_v1, vocab, file_format); if(res==ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) @@ -1217,7 +1218,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = gptj_ctx_v1.hparams.n_vocab; // determine the required inference memory per token: - legacy_gptj_eval(gptj_ctx_v1, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); + legacy_gptj_eval(gptj_ctx_v1, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); //if the logits are NAN or duplicated, it means the model is incompatible if(logits.size()>0 && IsNanCheck(logits[0])) @@ -1233,10 +1234,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in { if(file_format == FileFormat::GPTJ_5) { - ModelLoadResult loadresult = gptj_model_load(params.model, gptj_ctx_v3, vocab, inputs.gpulayers); + ModelLoadResult loadresult = gptj_model_load(kcpp_params->model, gptj_ctx_v3, vocab, inputs.gpulayers); if (loadresult == ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return loadresult; } else if (loadresult == ModelLoadResult::RETRY_LOAD) @@ -1248,14 +1249,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = gptj_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: - gptj_eval(gptj_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch); + gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch); //if the logits are NAN or duplicated, it means the model is incompatible std::vector oldlogits(logits); //this is another hack because they change the library - we run the eval through the model //twice and compare logits. 
if they give the same logits for different inputs, model is broken - gptj_eval(gptj_ctx_v3, params.n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, use_scratch); + gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, use_scratch); if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits))) { @@ -1271,10 +1272,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in //newer format has bit unshuffling SetQuantsUnshuffled(file_format == FileFormat::GPTJ_4); - ModelLoadResult loadresult = gptj_v2_model_load(params.model, gptj_ctx_v2, vocab, inputs.gpulayers); + ModelLoadResult loadresult = gptj_v2_model_load(kcpp_params->model, gptj_ctx_v2, vocab, inputs.gpulayers); if (loadresult == ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return loadresult; } else if (loadresult == ModelLoadResult::RETRY_LOAD) @@ -1286,14 +1287,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = gptj_ctx_v2.hparams.n_vocab; // determine the required inference memory per token: - gptj_v2_eval(gptj_ctx_v2, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); + gptj_v2_eval(gptj_ctx_v2, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); //if the logits are NAN or duplicated, it means the model is incompatible std::vector oldlogits(logits); //this is another hack because they change the library - we run the eval through the model //twice and compare logits. if they give the same logits for different inputs, model is broken - gptj_v2_eval(gptj_ctx_v2, params.n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token); + gptj_v2_eval(gptj_ctx_v2, kcpp_params->n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token); if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits))) { @@ -1309,10 +1310,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in { if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7) { - ModelLoadResult res = gpt_neox_model_load(params.model, neox_ctx_v3, vocab, file_format, inputs.gpulayers); + ModelLoadResult res = gpt_neox_model_load(kcpp_params->model, neox_ctx_v3, vocab, file_format, inputs.gpulayers); if(res==ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) @@ -1324,7 +1325,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = neox_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: - gpt_neox_eval(neox_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch); + gpt_neox_eval(neox_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch); return ModelLoadResult::SUCCESS; } @@ -1333,10 +1334,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in //newer format has bit unshuffling SetQuantsUnshuffled(file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5); - ModelLoadResult res = gpt_neox_v2_model_load(params.model, neox_ctx_v2, vocab, file_format); + ModelLoadResult res = gpt_neox_v2_model_load(kcpp_params->model, neox_ctx_v2, vocab, 
file_format); if(res==ModelLoadResult::FAIL) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) @@ -1348,7 +1349,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in n_vocab = neox_ctx_v2.hparams.n_vocab; // determine the required inference memory per token: - gpt_neox_v2_eval(neox_ctx_v2, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); + gpt_neox_v2_eval(neox_ctx_v2, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); if(logits.size()>0 && file_format==FileFormat::NEOX_2 && !IsNanCheck(logits[0])) { @@ -1356,7 +1357,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in std::vector test_embd = ::gpt_tokenize(vocab, "1 2 3 4 5 6 7"); auto orig_par_res = neox_ctx_v2.hparams.par_res; neox_ctx_v2.hparams.par_res = 0; //test with residual false - gpt_neox_v2_eval(neox_ctx_v2, params.n_threads, 0, test_embd, logits, mem_per_token); + gpt_neox_v2_eval(neox_ctx_v2, kcpp_params->n_threads, 0, test_embd, logits, mem_per_token); neox_ctx_v2.hparams.par_res = orig_par_res; int topid = std::max_element(logits.begin(),logits.end())-logits.begin(); std::string predicted = vocab.id_to_token[topid].c_str(); @@ -1375,17 +1376,17 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in } else if(file_format==FileFormat::MPT_1) { - bool res = mpt_model_load(params.model, mpt_ctx_v3, vocab, inputs.gpulayers); + bool res = mpt_model_load(kcpp_params->model, mpt_ctx_v3, vocab, inputs.gpulayers); if(res==false) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); + fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_params->model.c_str()); return ModelLoadResult::FAIL; } n_vocab = mpt_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: - mpt_eval(mpt_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, use_scratch); + mpt_eval(mpt_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, use_scratch); return ModelLoadResult::SUCCESS; } else @@ -1456,25 +1457,25 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o } std::string addedmemory = inputs.memory; - params.prompt = inputs.prompt; - params.seed = inputs.seed; - params.n_predict = inputs.max_length; - params.top_k = inputs.top_k; - params.top_p = inputs.top_p; - params.min_p = inputs.min_p; - params.typical_p = inputs.typical_p; - params.tfs_z = inputs.tfs; - params.temp = inputs.temperature; - params.repeat_last_n = inputs.rep_pen_range; - params.repeat_penalty = inputs.rep_pen; - params.presence_penalty = inputs.presence_penalty; - params.mirostat = inputs.mirostat; - params.mirostat_eta = inputs.mirostat_eta; - params.mirostat_tau = inputs.mirostat_tau; - params.n_ctx = inputs.max_context_length; - params.n_batch = n_batch; - params.n_threads = n_threads; - params.n_threads_batch = n_blasthreads; + kcpp_params->prompt = inputs.prompt; + kcpp_params->seed = inputs.seed; + kcpp_params->n_predict = inputs.max_length; + kcpp_params->top_k = inputs.top_k; + kcpp_params->top_p = inputs.top_p; + kcpp_params->min_p = inputs.min_p; + kcpp_params->typical_p = inputs.typical_p; + kcpp_params->tfs_z = inputs.tfs; + kcpp_params->temp = inputs.temperature; + kcpp_params->repeat_last_n = 
inputs.rep_pen_range; + kcpp_params->repeat_penalty = inputs.rep_pen; + kcpp_params->presence_penalty = inputs.presence_penalty; + kcpp_params->mirostat = inputs.mirostat; + kcpp_params->mirostat_eta = inputs.mirostat_eta; + kcpp_params->mirostat_tau = inputs.mirostat_tau; + kcpp_params->n_ctx = inputs.max_context_length; + kcpp_params->n_batch = n_batch; + kcpp_params->n_threads = n_threads; + kcpp_params->n_threads_batch = n_blasthreads; bool stream_sse = inputs.stream_sse; bool allow_regular_prints = (debugmode!=-1 && !inputs.quiet) || debugmode >= 1; @@ -1498,37 +1499,37 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o current_grammar = grammarstr; - if (params.repeat_last_n < 1) + if (kcpp_params->repeat_last_n < 1) { - params.repeat_last_n = 1; + kcpp_params->repeat_last_n = 1; } - if (params.top_k < 1) + if (kcpp_params->top_k < 1) { - params.top_k = n_vocab; // all tokens in the vocabulary should be considered if top k is disabled + kcpp_params->top_k = n_vocab; // all tokens in the vocabulary should be considered if top k is disabled } - if (params.seed <= 0 || params.seed==0xFFFFFFFF) + if (kcpp_params->seed <= 0 || kcpp_params->seed==0xFFFFFFFF) { - params.seed = time(NULL); + kcpp_params->seed = time(NULL); } // tokenize the prompt std::vector embd_inp; std::vector embd_inp_mem; //for storing added memory - TokenizeString(params.prompt, embd_inp, file_format); + TokenizeString(kcpp_params->prompt, embd_inp, file_format); if(addedmemory!="") { TokenizeString(addedmemory, embd_inp_mem, file_format); } //truncate to front of the prompt if its too long - int32_t nctx = params.n_ctx; + int32_t nctx = kcpp_params->n_ctx; - if (embd_inp.size() + params.n_predict > nctx) + if (embd_inp.size() + kcpp_params->n_predict > nctx) { //get bos token std::vector bos; TokenizeString("", bos, file_format); - int offset = embd_inp.size() - nctx + params.n_predict; + int offset = embd_inp.size() - nctx + kcpp_params->n_predict; embd_inp = std::vector(embd_inp.begin() + offset, embd_inp.end()); //replace bos into front if exists if(bos.size()>0 && embd_inp.size()>0) @@ -1548,9 +1549,9 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o } //shorten memory if needed - if (embd_inp_mem.size() + params.n_predict + 4 > nctx) + if (embd_inp_mem.size() + kcpp_params->n_predict + 4 > nctx) { - int offset = embd_inp_mem.size() - nctx + params.n_predict + 4; + int offset = embd_inp_mem.size() - nctx + kcpp_params->n_predict + 4; embd_inp_mem = std::vector(embd_inp_mem.begin() + offset, embd_inp_mem.end()); //replace bos into front if exists if(bos.size()>0 && embd_inp_mem.size()>0) @@ -1561,7 +1562,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o //shorten main prompt by trimming the front if needed int addmemtokens = embd_inp_mem.size(); - int totalsize = (addmemtokens + embd_inp.size() + params.n_predict); + int totalsize = (addmemtokens + embd_inp.size() + kcpp_params->n_predict); if(totalsize > nctx) { int excess = totalsize - nctx; @@ -1580,7 +1581,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o //determine how much npast we have to rewind from the current state std::vector embd; - int last_n_size = params.repeat_last_n; + int last_n_size = kcpp_params->repeat_last_n; last_n_tokens.resize(last_n_size); std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0); @@ -1614,8 +1615,8 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o 
file_format==FileFormat::RWKV_2); bool blasmode = (approved_format && embd_inp.size() >= 32 && ggml_cpu_has_blas() && blasbatchsize!=-1); // bool blasmode = false; - int original_batch = params.n_batch; - int original_threads = params.n_threads; + int original_batch = kcpp_params->n_batch; + int original_threads = kcpp_params->n_threads; if (blasmode) { //for non llama, limit to 256 @@ -1625,27 +1626,27 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o bbs = (blasbatchsize > 256 ? 256 : blasbatchsize); } - params.n_batch = bbs; //received reports of 1024 and above crashing on some models + kcpp_params->n_batch = bbs; //received reports of 1024 and above crashing on some models if(!ggml_cpu_has_gpublas()) { //does not limit here for gguf anymore. this is kept for older models. //new models will override threads inside decode fn. - params.n_threads = 1; - params.n_threads_batch = 1; + kcpp_params->n_threads = 1; + kcpp_params->n_threads_batch = 1; } else { - params.n_threads = n_blasthreads; - params.n_threads_batch = n_blasthreads; + kcpp_params->n_threads = n_blasthreads; + kcpp_params->n_threads_batch = n_blasthreads; } } current_context_tokens.resize(n_past); - remaining_tokens = params.n_predict; + remaining_tokens = kcpp_params->n_predict; stopper_unused_tokens = 0; int input_consumed = 0; - std::mt19937 rng(params.seed); + std::mt19937 rng(kcpp_params->seed); //prepare sampler order std::vector sampler_order; @@ -1766,11 +1767,11 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2) { - evalres = (llama_v2_eval(llama_ctx_v2, embd.data(), embdsize, n_past, params.n_threads)==0); + evalres = (llama_v2_eval(llama_ctx_v2, embd.data(), embdsize, n_past, kcpp_params->n_threads)==0); } else if(file_format == FileFormat::GGJT_3) { - evalres = (llama_v3_eval(llama_ctx_v3, embd.data(), embdsize, n_past, params.n_threads)==0); + evalres = (llama_v3_eval(llama_ctx_v3, embd.data(), embdsize, n_past, kcpp_params->n_threads)==0); } else if(file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON) { @@ -1788,12 +1789,12 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o { if(embd.size()>1) { - evalres = rwkv_eval_sequence(rwkv_ctx_v3, params.n_threads, (uint32_t*)embd.data(), embd.size(), rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out); + evalres = rwkv_eval_sequence(rwkv_ctx_v3, kcpp_params->n_threads, (uint32_t*)embd.data(), embd.size(), rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out); } else { bool ignoreLogits = (!startedsampling && ((int)embd_inp.size() > input_consumed + 2)); - evalres = rwkv_eval(rwkv_ctx_v3, params.n_threads, embd[0], rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, ignoreLogits?nullptr:rwkv_ctx_v3->logits_out); + evalres = rwkv_eval(rwkv_ctx_v3, kcpp_params->n_threads, embd[0], rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, ignoreLogits?nullptr:rwkv_ctx_v3->logits_out); } memcpy(logits.data(), rwkv_ctx_v3->logits_out, sizeof(float) * rwkv_vocab.size()); @@ -1802,39 +1803,39 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o } else if(file_format==FileFormat::GPT2_1) { - evalres = legacy_gpt2_eval(gpt2_ctx_v1, params.n_threads, n_past, embd, logits, mem_per_token, file_format); + evalres = legacy_gpt2_eval(gpt2_ctx_v1, 
kcpp_params->n_threads, n_past, embd, logits, mem_per_token, file_format); } else if(file_format==FileFormat::GPT2_2 || file_format==FileFormat::GPT2_3) { - evalres = gpt2_v2_eval(gpt2_ctx_v2, params.n_threads, n_past, embd, logits, mem_per_token, file_format); + evalres = gpt2_v2_eval(gpt2_ctx_v2, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, file_format); } else if(file_format==FileFormat::GPT2_4) { - evalres = gpt2_eval(gpt2_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch); + evalres = gpt2_eval(gpt2_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, use_scratch); } else if(file_format==FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5) { - evalres = gpt_neox_v2_eval(neox_ctx_v2, params.n_threads, n_past, embd, logits, mem_per_token); + evalres = gpt_neox_v2_eval(neox_ctx_v2, kcpp_params->n_threads, n_past, embd, logits, mem_per_token); } else if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7) { - evalres = gpt_neox_eval(neox_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch); + evalres = gpt_neox_eval(neox_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, use_scratch); } else if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2) { - evalres = legacy_gptj_eval(gptj_ctx_v1, params.n_threads, n_past, embd, logits, mem_per_token, file_format); + evalres = legacy_gptj_eval(gptj_ctx_v1, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, file_format); } else if(file_format==FileFormat::GPTJ_3 || file_format==FileFormat::GPTJ_4) { - evalres = gptj_v2_eval(gptj_ctx_v2, params.n_threads, n_past, embd, logits, mem_per_token); + evalres = gptj_v2_eval(gptj_ctx_v2, kcpp_params->n_threads, n_past, embd, logits, mem_per_token); } else if(file_format==FileFormat::GPTJ_5) { - evalres = gptj_eval(gptj_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch); + evalres = gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, use_scratch); } else if(file_format==FileFormat::MPT_1) { - evalres = mpt_eval(mpt_ctx_v3, params.n_threads, n_past, embd, logits, false, mem_per_token, use_scratch); + evalres = mpt_eval(mpt_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, false, mem_per_token, use_scratch); } else { @@ -1856,21 +1857,21 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o if ((int)embd_inp.size() <= input_consumed) { // out of user input, sample next token - const float top_k = params.top_k; - const float top_p = params.top_p; - const float min_p = params.min_p; - const float temp = params.temp; + const float top_k = kcpp_params->top_k; + const float top_p = kcpp_params->top_p; + const float min_p = kcpp_params->min_p; + const float temp = kcpp_params->temp; const float top_a = inputs.top_a; - const float repeat_penalty = params.repeat_penalty; - const float presence_penalty = params.presence_penalty; - const float typical_p = params.typical_p; - const float tfs_z = params.tfs_z; + const float repeat_penalty = kcpp_params->repeat_penalty; + const float presence_penalty = kcpp_params->presence_penalty; + const float typical_p = kcpp_params->typical_p; + const float tfs_z = kcpp_params->tfs_z; if (!startedsampling) { startedsampling = true; - params.n_batch = original_batch; - params.n_threads = original_threads; + kcpp_params->n_batch = original_batch; + 
kcpp_params->n_threads = original_threads; time1 = timer_check(); timer_start(); if(allow_regular_prints) @@ -1920,7 +1921,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o id = SampleLogits(logitsPtr, nctx, n_vocab, last_n_size, repeat_penalty, presence_penalty, top_k, top_a, top_p, min_p, typical_p, tfs_z, temp, rng, - params.mirostat, params.mirostat_tau, params.mirostat_eta, sampler_order, grammar); + kcpp_params->mirostat, kcpp_params->mirostat_tau, kcpp_params->mirostat_eta, sampler_order, grammar); if (grammar != nullptr) { grammar_accept_token(file_format, n_vocab, grammar, id); @@ -1950,7 +1951,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o if (startedsampling && allow_regular_prints) { - printf("\rGenerating (%d / %d tokens)", (params.n_predict - remaining_tokens), params.n_predict); + printf("\rGenerating (%d / %d tokens)", (kcpp_params->n_predict - remaining_tokens), kcpp_params->n_predict); } if(debugmode==1 && top_picks.size()>0) { @@ -2009,7 +2010,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o last_n_tokens.push_back(embd_inp[input_consumed]); current_context_tokens.push_back(embd_inp[input_consumed]); ++input_consumed; - if ((int)embd.size() >= params.n_batch) + if ((int)embd.size() >= kcpp_params->n_batch) { break; } @@ -2018,7 +2019,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o } time2 = timer_check(); float pt1 = (time1*1000.0/(embd_inp.size()==0?1:embd_inp.size())); - int realnpredict = params.n_predict-stopper_unused_tokens; + int realnpredict = kcpp_params->n_predict-stopper_unused_tokens; float pt2 = (time2*1000.0/(realnpredict==0?1:realnpredict)); float tokens_per_second = (realnpredict == 0 ? 0 : realnpredict / (time1 + time2)); printf("\nContextLimit: %d/%d, Processing:%.2fs (%.1fms/T), Generation:%.2fs (%.1fms/T), Total:%.2fs (%.2fT/s)",current_context_tokens.size(),nctx, time1, pt1, time2, pt2, (time1 + time2), tokens_per_second); diff --git a/klite.embd b/klite.embd index 9ddde3975..faf83130f 100644 --- a/klite.embd +++ b/klite.embd @@ -3349,6 +3349,7 @@ Current version: 102 gui_type_chat: 1, //0=standard, 1=messenger, 2=aesthetic gui_type_instruct: 0, //0=standard, 1=messenger, 2=aesthetic multiline_replies: true, + multiline_replies_adventure: true, allow_continue_chat: false, idle_responses: 0, idle_duration: 60, @@ -7209,6 +7210,7 @@ Current version: 102 toggle_generate_images_mode(true); } document.getElementById("multiline_replies").checked = localsettings.multiline_replies; + document.getElementById("multiline_replies_adventure").checked = localsettings.multiline_replies_adventure; document.getElementById("allow_continue_chat").checked = localsettings.allow_continue_chat; document.getElementById("idle_responses").value = localsettings.idle_responses; document.getElementById("idle_duration").value = localsettings.idle_duration; @@ -7391,6 +7393,7 @@ Current version: 102 localsettings.gui_type_instruct = document.getElementById("gui_type").value; } localsettings.multiline_replies = (document.getElementById("multiline_replies").checked ? true : false); + localsettings.multiline_replies_adventure = (document.getElementById("multiline_replies_adventure").checked ? true : false); localsettings.allow_continue_chat = (document.getElementById("allow_continue_chat").checked ? 
true : false); localsettings.idle_responses = document.getElementById("idle_responses").value; localsettings.idle_duration = document.getElementById("idle_duration").value; @@ -8639,6 +8642,10 @@ Current version: 102 if (localsettings.opmode == 2) //stop on new action found { seqs = ["\n\> "]; + if(!localsettings.multiline_replies_adventure) + { + seqs.push("\n"); + } } if (localsettings.opmode == 3) //stop on selfname found { @@ -9436,6 +9443,15 @@ Current version: 102 splitresponse = gentxt.split("\n\> "); gentxt = splitresponse[0]; } + if(!localsettings.multiline_replies_adventure) + { + let foundnl = gentxt.indexOf("\n"); + if (foundnl != -1) //if found, truncate to it + { + splitresponse = gentxt.split("\n"); + gentxt = splitresponse[0]; + } + } } //if we are in chatmode, truncate to my first response @@ -12285,6 +12301,10 @@ Current version: 102