bug fixes for openblas

Concedo 2023-06-23 22:45:22 +08:00
parent 43c2891afa
commit f39a746089
5 changed files with 40 additions and 70 deletions

View file

@@ -313,7 +313,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     = gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx
     = mpt_ctx_v3.hparams.n_ctx = params.n_ctx;
-    bool calc_mem_with_scratch = ggml_cpu_has_gpublas();
+    bool use_scratch = ggml_cpu_has_gpublas();
     printf("System Info: %s\n", llama_print_system_info());
     SetQuantsUnshuffled(false);
@@ -549,7 +549,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
         return res;
     }
     // determine the required inference memory per token:
-    gpt2_eval(gpt2_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, calc_mem_with_scratch);
+    gpt2_eval(gpt2_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
     return ModelLoadResult::SUCCESS;
 }
 else
@@ -616,14 +616,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     }
     // determine the required inference memory per token:
-    gptj_eval(gptj_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, calc_mem_with_scratch);
+    gptj_eval(gptj_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
     //if the logits are NAN or duplicated, it means the model is incompatible
     std::vector<float> oldlogits(logits);
     //this is another hack because they change the library - we run the eval through the model
     //twice and compare logits. if they give the same logits for different inputs, model is broken
-    gptj_eval(gptj_ctx_v3, params.n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, calc_mem_with_scratch);
+    gptj_eval(gptj_ctx_v3, params.n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, use_scratch);
     if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits)))
     {
@@ -688,7 +688,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     }
     // determine the required inference memory per token:
-    gpt_neox_eval(neox_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, calc_mem_with_scratch);
+    gpt_neox_eval(neox_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
     return ModelLoadResult::SUCCESS;
 }
@@ -745,7 +745,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     }
     // determine the required inference memory per token:
-    mpt_eval(mpt_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, calc_mem_with_scratch);
+    mpt_eval(mpt_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, use_scratch);
     return ModelLoadResult::SUCCESS;
 }
 else
@@ -904,6 +904,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     concat_output = "";
     bool startedsampling = false;
+    bool use_scratch = true;
     timer_start();
     double time1 = 0, time2 = 0;
@@ -1078,7 +1079,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     }
     else if(file_format==FileFormat::GPT2_4)
     {
-        evalres = gpt2_eval(gpt2_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
+        evalres = gpt2_eval(gpt2_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch);
     }
     else if(file_format==FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5)
     {
@@ -1086,7 +1087,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     }
     else if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7)
     {
-        evalres = gpt_neox_eval(neox_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
+        evalres = gpt_neox_eval(neox_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch);
     }
     else if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2)
     {
@@ -1098,11 +1099,11 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     }
     else if(file_format==FileFormat::GPTJ_5)
     {
-        evalres = gptj_eval(gptj_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
+        evalres = gptj_eval(gptj_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token, use_scratch);
     }
     else if(file_format==FileFormat::MPT_1)
     {
-        evalres = mpt_eval(mpt_ctx_v3, params.n_threads, n_past, embd, logits, false, mem_per_token);
+        evalres = mpt_eval(mpt_ctx_v3, params.n_threads, n_past, embd, logits, false, mem_per_token, use_scratch);
     }
     else
     {
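
For orientation, here is a minimal sketch of the plumbing pattern this file now follows (my own illustration, not repository code; demo_eval is a hypothetical stand-in for gpt2_eval/gptj_eval/gpt_neox_eval/mpt_eval): one use_scratch flag is decided up front and then forwarded to every backend eval call.

    #include <cstdio>
    #include <cstddef>
    #include <vector>

    // demo_eval stands in for the real backend eval functions.
    static bool demo_eval(const std::vector<int> & embd, size_t & mem_per_token, bool use_scratch)
    {
        // A real eval would route intermediate tensors through scratch buffers
        // when use_scratch is set; here we only report the decision.
        std::printf("eval on %zu tokens, scratch %s\n", embd.size(), use_scratch ? "on" : "off");
        mem_per_token = 1024; // placeholder estimate
        return true;
    }

    int main()
    {
        // Decided once up front (the commit uses ggml_cpu_has_gpublas() at load time
        // and a hard-coded true in the generate path), then threaded through every call.
        bool use_scratch = true;
        size_t mem_per_token = 0;
        demo_eval({0, 1, 2, 3}, mem_per_token, use_scratch);
        demo_eval({4, 5, 6, 7}, mem_per_token, use_scratch);
        return 0;
    }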

View file

@@ -389,7 +389,7 @@ bool gpt2_eval(
     const std::vector<gpt_vocab::id> & embd_inp,
     std::vector<float> & embd_w,
     size_t & mem_per_token,
-    bool use_scratch=true) {
+    bool use_scratch) {
     const int N = embd_inp.size();
     const auto & hparams = model.hparams;
@@ -405,22 +405,14 @@ bool gpt2_eval(
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
-    size_t scratch_needed_mem = mem_per_token*N;
-    if (mem_per_token > 0 && scratch_needed_mem*1.1 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(scratch_needed_mem); // add 10% to account for ggml object overhead
+    static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024;
+    static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
         // reallocate
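
A minimal sketch of the sizing rule above (my own illustration; the embedding widths are made-up examples on either side of the 2400 cutoff): the scratch buffers are now keyed on the model width n_embd rather than the context length, so wider models get 512 MiB per buffer and narrower ones 256 MiB.

    #include <cstdio>
    #include <cstddef>
    #include <initializer_list>

    int main()
    {
        // 2048 and 2560 are illustrative widths below and above the 2400 threshold.
        for (int n_embd : {2048, 2560}) {
            size_t scr0_size = (n_embd > 2400 ? 512u : 256u) * 1024 * 1024;
            size_t scr1_size = scr0_size; // both buffers are sized identically
            std::printf("n_embd=%d -> scr0=%zu MiB, scr1=%zu MiB\n",
                        n_embd, scr0_size / (1024 * 1024), scr1_size / (1024 * 1024));
        }
        return 0;
    }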

View file

@@ -383,7 +383,7 @@ bool gptj_eval(
     const std::vector<gpt_vocab::id> & embd_inp,
     std::vector<float> & embd_w,
     size_t & mem_per_token,
-    bool use_scratch=true) {
+    bool use_scratch) {
     const int N = embd_inp.size();
     const auto & hparams = model.hparams;
@@ -400,19 +400,14 @@ bool gptj_eval(
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
-    if (mem_per_token > 0 && 32u*1024*1024 + mem_per_token*N*1.2 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
+    static size_t scr0_size = 512u*1024*1024;
+    static size_t scr1_size = 512u*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
         // reallocate
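
A small standalone sketch (illustrative values, not from the commit) of the reallocation rule these eval functions now share: grow the compute buffer when twice the projected graph memory plus a 64 MiB floor no longer fits, and size the replacement at 320 MiB plus 1.2x the projection.

    #include <cstdio>
    #include <cstddef>

    int main()
    {
        // Illustrative numbers only: 2 MiB per token, a 64-token batch, 256 MiB current buffer.
        size_t mem_per_token = 2u * 1024 * 1024;
        size_t N             = 64;
        size_t buf_size      = 256u * 1024 * 1024;

        if (mem_per_token > 0 && (mem_per_token * N * 2 + 64u * 1024 * 1024) > buf_size) {
            const size_t buf_size_new = 320u * 1024 * 1024 + 1.2 * (mem_per_token * N);
            std::printf("would reallocate: %zu -> %zu bytes\n", buf_size, buf_size_new);
        } else {
            std::printf("current %zu-byte buffer is large enough\n", buf_size);
        }
        return 0;
    }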

View file

@@ -317,7 +317,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
 //
 bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
     const std::vector<gpt_vocab::id> & embd_inp, std::vector<float> & embd_w,
-    bool logits_all, size_t & mem_per_token, bool use_scratch=true) {
+    bool logits_all, size_t & mem_per_token, bool use_scratch) {
     const int N = embd_inp.size();
     const auto & hparams = model.hparams;
@@ -333,26 +333,15 @@ bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>2048?1024u:512u)*1024*1024;
-    static size_t scr1_size = (n_ctx>2048?1024u:512u)*1024*1024;
-    if(n_embd>=7168) //MPT 30B needs more scratch memory
-    {
-        scr0_size *= 2;
-        scr1_size *= 2;
-    }
-    static void * scr0;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
-    if (mem_per_token > 0 && mem_per_token * N *1.1 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2 * (mem_per_token * N); // add 10% to account for ggml object overhead
+    //MPT 30B needs more scratch memory
+    static size_t scr0_size = (n_embd>=7168?2048u:1024u)*1024*1024;
+    static size_t scr1_size = (n_embd>=7168?2048u:1024u)*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         // printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__,
         // buf_size, buf_size_new);
         // reallocate
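
The MPT change folds the old "double it for 30B" special case directly into the initializers. A tiny sketch of what that ternary yields (the example widths are mine; 4096 roughly corresponds to MPT-7B and 7168 to MPT-30B, the case the comment singles out):

    #include <cstdio>
    #include <cstddef>
    #include <initializer_list>

    int main()
    {
        for (int n_embd : {4096, 7168}) {
            size_t scr_size = (n_embd >= 7168 ? 2048u : 1024u) * 1024 * 1024;
            std::printf("n_embd=%d -> %zu MiB per scratch buffer\n",
                        n_embd, scr_size / (1024 * 1024));
        }
        return 0;
    }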

View file

@@ -401,7 +401,7 @@ bool gpt_neox_eval(
     const std::vector<gpt_vocab::id> & embd_inp,
     std::vector<float> & embd_w,
     size_t & mem_per_token,
-    bool use_scratch=true) {
+    bool use_scratch) {
     const int N = embd_inp.size();
     const auto & hparams = model.hparams;
@@ -418,21 +418,14 @@ bool gpt_neox_eval(
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr0;
-    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
-    static void * scr1;
-    if(use_scratch)
-    {
-        scr0 = malloc(scr0_size);
-        scr1 = malloc(scr1_size);
-    }
-    size_t scratch_needed_mem = mem_per_token*N;
-    if (mem_per_token > 0 && scratch_needed_mem*1.1 > buf_size) {
-        const size_t buf_size_new = 64u*1024*1024 + 1.2*(scratch_needed_mem); // add 10% to account for ggml object overhead
+    static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024;
+    static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+    static void * scr1 = malloc(scr1_size);
+    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
+        const size_t buf_size_new = 360u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
         // reallocate
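
One detail shared by all five rewritten files: the scratch buffers are now allocated through static local initializers, so the malloc runs exactly once on the first call to the eval function, regardless of use_scratch. A minimal sketch of that C++ behaviour (demo_eval is a made-up name, not repository code):

    #include <cstdio>
    #include <cstdlib>
    #include <cstddef>

    static void demo_eval()
    {
        // Initialized on the first call only; later calls reuse the same pointer.
        static size_t scr0_size = 256u * 1024 * 1024;
        static void * scr0 = std::malloc(scr0_size);
        std::printf("scratch buffer at %p (%zu bytes)\n", scr0, scr0_size);
    }

    int main()
    {
        demo_eval();
        demo_eval(); // prints the same address; no second allocation happens
        return 0;
    }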