hopefully fixed the ooms for good

Concedo 2023-04-23 13:49:50 +08:00
parent 3f21bd81f3
commit 4e1ea2ac61
4 changed files with 8 additions and 7 deletions

@@ -32,6 +32,7 @@ What does it mean? You get llama.cpp with a fancy UI, persistent stories, editin
 - CLBlast - tested with https://github.com/CNugteren/CLBlast . If you wish to compile it you will need to reference the OpenCL files. It will only generate the ".lib" file if you compile using MSVC.
 - OpenBLAS - tested with https://github.com/xianyi/OpenBLAS .
 - Move the respectives .lib files to the /lib folder of your project, overwriting the older files.
+- Also, replace the existing versions of the corresponding .dll files located in the project directory root (e.g. libopenblas.dll).
 - Make the KoboldCPP project using the instructions above.
 
 ## OSX and Linux

@@ -371,7 +371,7 @@ bool gpt2_eval(
     const int n_vocab = hparams.n_vocab;
 
     //todo: there is a bug that causes the buffer to oom and I cannot figure it out, hack to increase size for now
-    static size_t buf_size = 512u*1024*1024;
+    static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N*1.6 > buf_size) {
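
For context, these buf_size lines sit inside the per-eval scratch-buffer logic that the gpt2/gptj/stablelm adapters inherit from the upstream ggml examples. A minimal sketch of that pattern with the current constants is below; the function name and pointer-based signature are illustrative, not the actual koboldcpp code.

```cpp
#include <cstdio>
#include <cstdlib>

// Grow the eval scratch buffer when the per-token estimate says the next
// graph will not fit. Sketch of the upstream ggml example pattern that the
// hunks here tune; not the literal koboldcpp function.
static bool ensure_eval_buffer(void ** buf, size_t * buf_size,
                               size_t mem_per_token, int N) {
    // On the very first eval mem_per_token is still 0, so this branch cannot
    // fire and the static initial size (256 MB after this commit) has to
    // cover that call on its own.
    if (mem_per_token > 0 && mem_per_token * N * 1.6 > *buf_size) {
        // Grow to 1.8x of the estimate to leave slack for ggml object
        // overhead that mem_per_token does not capture.
        const size_t buf_size_new = (size_t)(1.8 * (mem_per_token * N));
        void * buf_new = realloc(*buf, buf_size_new);
        if (buf_new == nullptr) {
            fprintf(stderr, "failed to allocate %zu bytes\n", buf_size_new);
            return false;
        }
        *buf      = buf_new;
        *buf_size = buf_size_new;
    }
    return true;
}
```

In the real files buf and buf_size are function-local statics, as the hunks show, so a grown buffer is reused by every later eval call.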

@@ -382,11 +382,11 @@ bool gptj_eval(
     const int d_key = n_embd/n_head;
 
     //todo: there is a bug that causes the buffer to oom and I cannot figure it out, hack to increase size for now
-    static size_t buf_size = 512u*1024*1024;
+    static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
-    if (mem_per_token > 0 && mem_per_token*N*1.4 > buf_size) {
-        const size_t buf_size_new = 1.6*(mem_per_token*N); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && mem_per_token*N*1.6 > buf_size) {
+        const size_t buf_size_new = 1.8*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
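
To see what the bumped constants buy, here is a small standalone comparison using made-up numbers (the mem_per_token estimate and the 1.7x "actual usage" overhead factor are assumptions for illustration only; real values are measured at runtime and vary by model and context length).

```cpp
#include <cstdio>

// Made-up numbers purely to illustrate the extra headroom; real values are
// measured at runtime and differ per model.
int main() {
    const double MiB      = 1024.0 * 1024.0;
    const double estimate = 300 * MiB;        // assumed mem_per_token * N for one eval
    const double actual   = 1.7 * estimate;   // assumed real usage incl. graph overhead

    // Smallest buffer each scheme guarantees once its check triggers a realloc:
    const double old_buf = 1.6 * estimate;    // old: trigger at 1.4x, grow to 1.6x
    const double new_buf = 1.8 * estimate;    // new: trigger at 1.6x, grow to 1.8x

    printf("estimate %.0f MiB, actual %.0f MiB\n", estimate / MiB, actual / MiB);
    printf("old scheme buffer %.0f MiB -> %s\n", old_buf / MiB,
           actual > old_buf ? "still OOMs inside ggml" : "fits");
    printf("new scheme buffer %.0f MiB -> %s\n", new_buf / MiB,
           actual > new_buf ? "still OOMs inside ggml" : "fits");
    return 0;
}
```

With these invented numbers the old scheme stops at 480 MiB and still falls short of the 510 MiB actually needed, while the new scheme allocates 540 MiB and fits; that extra margin over the mem_per_token estimate is what the constant bump is buying.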

@@ -364,11 +364,11 @@ bool stablelm_eval(
     const int n_vocab = hparams.n_vocab;
     const int n_rot = hparams.n_rot;
 
-    static size_t buf_size = 512u*1024*1024;
+    static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
-    if (mem_per_token > 0 && mem_per_token*N*1.4 > buf_size) {
-        const size_t buf_size_new = 1.6*(mem_per_token*N); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && mem_per_token*N*1.6 > buf_size) {
+        const size_t buf_size_new = 1.8*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
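
One more note on the mem_per_token > 0 guard visible in all three hunks: the buffer can only grow after some earlier eval has measured mem_per_token, which is why the upstream ggml examples these adapters are based on run a tiny warm-up eval before the real prompt. The sketch below shows that idea with an assumed eval signature and stubbed-out internals; it is not koboldcpp's actual API.

```cpp
#include <cstddef>
#include <vector>

// Stub standing in for a real gpt2/gptj/stablelm eval: the real function
// builds and runs the ggml graph; this one only models the mem_per_token
// bookkeeping. Signature and numbers are illustrative assumptions.
static bool model_eval(int /*n_threads*/, int /*n_past*/,
                       const std::vector<int> & tokens,
                       std::vector<float> & logits,
                       size_t & mem_per_token) {
    logits.assign(50400, 0.0f);                                // pretend vocab-sized output
    if (mem_per_token == 0) mem_per_token = 8u * 1024 * 1024;  // made-up measurement
    return !tokens.empty();
}

static bool generate(const std::vector<int> & prompt, int n_threads) {
    std::vector<float> logits;
    size_t mem_per_token = 0;

    // Warm-up with a few dummy tokens so mem_per_token is known up front;
    // after this, the reallocation check shown in the hunks can fire instead
    // of the eval leaning entirely on the static initial buffer.
    if (!model_eval(n_threads, 0, {0, 1, 2, 3}, logits, mem_per_token)) {
        return false;
    }

    return model_eval(n_threads, 0, prompt, logits, mem_per_token);
}

int main() {
    return generate({1, 2, 3, 4, 5}, 4) ? 0 : 1;
}
```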