From f39a7460890de883b0d68d45d75d1780984ca76e Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Fri, 23 Jun 2023 22:45:22 +0800
Subject: [PATCH] bug fixes for openblas

---
 gpttype_adapter.cpp   | 21 +++++++++++----------
 otherarch/gpt2_v3.cpp | 22 +++++++---------------
 otherarch/gptj_v3.cpp | 19 +++++++------------
 otherarch/mpt_v3.cpp  | 27 ++++++++-------------------
 otherarch/neox_v3.cpp | 21 +++++++--------------
 5 files changed, 40 insertions(+), 70 deletions(-)

diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp
index 20093f205..8c716c84a 100644
--- a/gpttype_adapter.cpp
+++ b/gpttype_adapter.cpp
@@ -313,7 +313,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     = gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx
     = mpt_ctx_v3.hparams.n_ctx = params.n_ctx;
 
-    bool calc_mem_with_scratch = ggml_cpu_has_gpublas();
+    bool use_scratch = ggml_cpu_has_gpublas();
 
     printf("System Info: %s\n", llama_print_system_info());
     SetQuantsUnshuffled(false);
@@ -549,7 +549,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
                 return res;
             }
             // determine the required inference memory per token:
-            gpt2_eval(gpt2_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, calc_mem_with_scratch);
+            gpt2_eval(gpt2_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
             return ModelLoadResult::SUCCESS;
         }
         else
@@ -616,14 +616,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
             }
 
             // determine the required inference memory per token:
-            gptj_eval(gptj_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, calc_mem_with_scratch);
+            gptj_eval(gptj_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
 
             //if the logits are NAN or duplicated, it means the model is incompatible
             std::vector<float> oldlogits(logits);
 
             //this is another hack because they change the library - we run the eval through the model
             //twice and compare logits. if they give the same logits for different inputs, model is broken
-            gptj_eval(gptj_ctx_v3, params.n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, calc_mem_with_scratch);
+            gptj_eval(gptj_ctx_v3, params.n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, use_scratch);
 
             if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits)))
             {
@@ -688,7 +688,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
             }
 
             // determine the required inference memory per token:
-            gpt_neox_eval(neox_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, calc_mem_with_scratch);
+            gpt_neox_eval(neox_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
 
             return ModelLoadResult::SUCCESS;
         }
@@ -745,7 +745,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
         }
 
         // determine the required inference memory per token:
-        mpt_eval(mpt_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, calc_mem_with_scratch);
+        mpt_eval(mpt_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, use_scratch);
         return ModelLoadResult::SUCCESS;
     }
     else
@@ -904,6 +904,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     concat_output = "";
 
     bool startedsampling = false;
+    bool use_scratch = true;
 
     timer_start();
     double time1 = 0, time2 = 0;
@@ -1078,7 +1079,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
         }
         else if(file_format==FileFormat::GPT2_4)
         {
-            evalres = gpt2_eval(gpt2_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
+            evalres = gpt2_eval(gpt2_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch);
         }
         else if(file_format==FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5)
         {
@@ -1086,7 +1087,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
         }
         else if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7)
         {
-            evalres = gpt_neox_eval(neox_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
+            evalres = gpt_neox_eval(neox_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch);
         }
         else if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2)
         {
@@ -1098,11 +1099,11 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
         }
         else if(file_format==FileFormat::GPTJ_5)
         {
-            evalres = gptj_eval(gptj_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
+            evalres = gptj_eval(gptj_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch);
         }
         else if(file_format==FileFormat::MPT_1)
         {
-            evalres = mpt_eval(mpt_ctx_v3, params.n_threads, n_past, embd, logits, false, mem_per_token);
+            evalres = mpt_eval(mpt_ctx_v3, params.n_threads, n_past, embd, logits, false, mem_per_token, use_scratch);
         }
         else
         {
diff --git a/otherarch/gpt2_v3.cpp b/otherarch/gpt2_v3.cpp
index f8b82fdd4..b716fe212 100644
--- a/otherarch/gpt2_v3.cpp
+++ b/otherarch/gpt2_v3.cpp
@@ -389,7 +389,7 @@ bool gpt2_eval(
         const std::vector<gpt_vocab::id> & embd_inp,
         std::vector<float> & embd_w,
         size_t & mem_per_token,
-        bool use_scratch=true) {
+        bool use_scratch) {
     const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
@@ -405,22 +405,14 @@ bool gpt2_eval(
 
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
+    static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024;
+    static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024;
 
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
 
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
-
-    size_t scratch_needed_mem = mem_per_token*N;
-
-    if (mem_per_token > 0 && scratch_needed_mem*1.1 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(scratch_needed_mem); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
diff --git a/otherarch/gptj_v3.cpp b/otherarch/gptj_v3.cpp
index 8df2025f0..031a2c051 100644
--- a/otherarch/gptj_v3.cpp
+++ b/otherarch/gptj_v3.cpp
@@ -383,7 +383,7 @@ bool gptj_eval(
         const std::vector<gpt_vocab::id> & embd_inp,
         std::vector<float> & embd_w,
         size_t & mem_per_token,
-        bool use_scratch=true) {
+        bool use_scratch) {
    const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
@@ -400,19 +400,14 @@ bool gptj_eval(
 
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
+    static size_t scr0_size = 512u*1024*1024;
+    static size_t scr1_size = 512u*1024*1024;
 
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
 
-    if (mem_per_token > 0 && 32u*1024*1024 + mem_per_token*N*1.2 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
diff --git a/otherarch/mpt_v3.cpp b/otherarch/mpt_v3.cpp
index ac4f321a2..5d66f91f5 100644
--- a/otherarch/mpt_v3.cpp
+++ b/otherarch/mpt_v3.cpp
@@ -317,7 +317,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
 //
 bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
               const std::vector<gpt_vocab::id> & embd_inp, std::vector<float> & embd_w,
-              bool logits_all, size_t & mem_per_token, bool use_scratch=true) {
+              bool logits_all, size_t & mem_per_token, bool use_scratch) {
     const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
@@ -333,26 +333,15 @@ bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
 
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
+    //MPT 30B needs more scratch memory
+    static size_t scr0_size = (n_embd>=7168?2048u:1024u)*1024*1024;
+    static size_t scr1_size = (n_embd>=7168?2048u:1024u)*1024*1024;
 
-    static size_t scr0_size = (n_ctx>2048?1024u:512u)*1024*1024;
-    static size_t scr1_size = (n_ctx>2048?1024u:512u)*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
 
-    if(n_embd>=7168) //MPT 30B needs more scratch memory
-    {
-        scr0_size *= 2;
-        scr1_size *= 2;
-    }
-
-    static void * scr0;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
-
-    if (mem_per_token > 0 && mem_per_token * N *1.1 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2 * (mem_per_token * N); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         // printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__,
         // buf_size, buf_size_new);
         // reallocate
diff --git a/otherarch/neox_v3.cpp b/otherarch/neox_v3.cpp
index 40e1d1e18..37f5ad9ae 100644
--- a/otherarch/neox_v3.cpp
+++ b/otherarch/neox_v3.cpp
@@ -401,7 +401,7 @@ bool gpt_neox_eval(
         const std::vector<gpt_vocab::id> & embd_inp,
         std::vector<float> & embd_w,
         size_t & mem_per_token,
-        bool use_scratch=true) {
+        bool use_scratch) {
     const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
@@ -418,21 +418,14 @@ bool gpt_neox_eval(
 
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
+    static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024;
+    static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024;
 
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
 
-    size_t scratch_needed_mem = mem_per_token*N;
-
-    if (mem_per_token > 0 && scratch_needed_mem*1.1 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(scratch_needed_mem); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 360u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
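
Note on the core fix: in the old code the scratch pointers were declared `static void * scr0;` while the matching `malloc` sat inside `if(use_scratch)` in the body of each eval function, so every evaluation with the flag set re-allocated the buffers without freeing the previous ones, and calls with the flag clear left the pointers null even though the v3 eval functions defaulted the parameter to true at generation time. The patch drops the default, threads an explicit `use_scratch` through every call site, and hoists the `malloc` into the static initializer, which runs exactly once. A minimal standalone sketch of the before/after behaviour, using made-up sizes rather than the repo's code:

    // sketch only: contrasts the leaky pattern with the one-time static init
    #include <cstdlib>
    #include <cstdio>

    void eval_old(bool use_scratch) {
        static size_t scr0_size = 256u*1024*1024;
        static void * scr0;               // zero-initialized, possibly never set
        if (use_scratch) {
            scr0 = malloc(scr0_size);     // BUG: re-mallocs on every call, never freed
        }
        (void)scr0;
    }

    void eval_new(bool use_scratch) {
        static size_t scr0_size = 256u*1024*1024;
        static void * scr0 = malloc(scr0_size);  // initializer runs once, first call only
        if (use_scratch) {
            // hand scr0 to the compute context here
        }
        (void)scr0;
    }

    int main() {
        for (int i = 0; i < 3; ++i) eval_new(true);  // still exactly one allocation
        printf("ok\n");
    }

The same reasoning applies to scr1 and to all four model files; note that a static initializer latches its size from the first call, which is also why the patch sizes the buffers from `n_embd` (a fixed model property) rather than `n_ctx`.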
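
The patch also replaces the per-file ad-hoc growth checks (`scratch_needed_mem*1.1`, `32u*1024*1024 + mem_per_token*N*1.2`, `mem_per_token * N *1.1`) with one shared heuristic: grow when twice the projected need plus a 64 MiB floor exceeds the current buffer, then resize to a base of 320 MiB (360 MiB in neox_v3.cpp) plus 1.2x the projected need. A worked example with hypothetical inputs (the buffer, per-token cost, and batch size below are illustrative, not values from the repo):

    // worked example of the unified reallocation heuristic
    #include <cstdio>
    #include <cstddef>

    int main() {
        size_t buf_size      = 256u*1024*1024; // hypothetical current buffer
        size_t mem_per_token = 120000;         // hypothetical measured cost per token
        size_t N             = 1024;           // tokens in this eval batch

        // grow when 2x the projected need plus a 64 MiB floor exceeds the buffer
        if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
            // 320 MiB base plus 1.2x headroom for ggml object overhead
            const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N);
            printf("reallocate %zu -> %zu bytes\n", buf_size, buf_size_new);
            buf_size = buf_size_new;
        }
        return 0;
    }

With these numbers the check sees 2*120000*1024 + 64 MiB ≈ 313 MB against a 268 MB buffer, so it grows the buffer to roughly 483 MB; the larger fixed base makes reallocation far less frequent than the old 64 MiB-based formulas.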