Scale buf_size linearly with n_ctx
This appears to resolve https://github.com/ggerganov/llama.cpp/issues/153, where interactive mode fails with the error "ggml_new_tensor_impl: not enough space in the context's memory pool". At minimum, the out-of-memory error comes from the `ctx0` used here, although I am not familiar enough with the code base to say for certain that this is the root cause.
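(Editorial illustration, not part of the commit: a standalone C++ sketch of the sizing rule the patch adopts. The 1 MiB-per-context-position factor and the `hparams.n_ctx` name come from the diff below; the `scratch_buf_size` helper and the sample values are hypothetical.)

#include <cstddef>
#include <cstdio>
#include <initializer_list>

// Sketch: size the eval scratch buffer linearly with the context length,
// at 1 MiB per context position, instead of a fixed 512u*1024*1024.
static size_t scratch_buf_size(int n_ctx) {
    return (size_t)n_ctx*1024*1024;
}

int main() {
    // An n_ctx of 512 reproduces the old 512 MiB constant; larger contexts
    // get proportionally more room in ggml's memory pool.
    for (int n_ctx : {512, 1024, 2048}) {
        printf("n_ctx = %4d -> buf_size = %zu MiB\n",
               n_ctx, scratch_buf_size(n_ctx)/(1024*1024));
    }
    return 0;
}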
This commit is contained in:
parent 721311070e
commit 7b8858415e
1 changed file with 1 addition and 3 deletions
main.cpp

@@ -549,9 +549,7 @@ bool llama_eval(
     const int d_key = n_embd/n_head;

-    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
-    // static size_t buf_size = hparams.n_ctx*1024*1024;
-    static size_t buf_size = 512u*1024*1024;
+    static size_t buf_size = (size_t)hparams.n_ctx*1024*1024;
     static void * buf = malloc(buf_size);

     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
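One detail worth keeping in the added line is that the `(size_t)` cast happens before the multiplication. A minimal standalone illustration (not from the repository) of why that ordering matters:

#include <cstddef>

int main() {
    int n_ctx = 2048;
    // In plain int arithmetic, 2048*1024*1024 == 2^31, which overflows a
    // 32-bit signed int (undefined behavior) before any widening happens:
    //     size_t bad = n_ctx*1024*1024;
    // Casting first widens n_ctx to size_t, so the multiplication is carried
    // out in size_t and yields 2 GiB as intended where size_t is 64-bit:
    size_t good = (size_t)n_ctx*1024*1024;
    (void)good;
    return 0;
}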