GPT-J: use scratch buffers

This commit is contained in:
Concedo 2023-06-21 16:10:31 +08:00
parent 266d47a4b9
commit dfdd20240c

View file

@ -367,8 +367,16 @@ bool gptj_eval(
static size_t buf_size = 256u*1024*1024;
static void * buf = malloc(buf_size);
if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
const size_t buf_size_new = 320u*1024*1024 + 1.6*(mem_per_token*N); // add 10% to account for ggml object overhead
// use 2 scratch buffers
// TODO: very hacky solution - reimplement in a more elegant way
static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
static void * scr0 = malloc(scr0_size);
static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
static void * scr1 = malloc(scr1_size);
if (mem_per_token > 0 && mem_per_token*N*1.05 > buf_size) {
const size_t buf_size_new = 64u*1024*1024 + 1.15*(mem_per_token*N); // add 15% to account for ggml object overhead
//printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
// reallocate
@ -403,6 +411,8 @@ bool gptj_eval(
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * cur;
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
// norm
{
cur = ggml_norm(ctx0, inpL);
@ -490,6 +500,8 @@ bool gptj_eval(
cur);
}
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
struct ggml_tensor * inpFF = cur;
// feed-forward network
@ -525,6 +537,8 @@ bool gptj_eval(
inpL = ggml_add(ctx0, cur, inpL);
}
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
// norm
{
inpL = ggml_norm(ctx0, inpL);
@ -537,6 +551,8 @@ bool gptj_eval(
ggml_repeat(ctx0, model.ln_f_b, inpL));
}
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
// lm_head
{
inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);