diff --git a/otherarch/gpt2_v3.cpp b/otherarch/gpt2_v3.cpp index ba2222f99..4be0a08b0 100644 --- a/otherarch/gpt2_v3.cpp +++ b/otherarch/gpt2_v3.cpp @@ -12,6 +12,7 @@ #include #include #include +#include <algorithm> #include "model_adapter.h" @@ -39,6 +40,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g } } + int32_t origmaxctx = model.hparams.n_ctx; + // load hparams { auto & hparams = model.hparams; @@ -53,7 +56,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); @@ -154,8 +157,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v + ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k + ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v ctx_size += (6 + 12*n_layer)*1024; // object overhead @@ -256,7 +259,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; - const int n_mem = n_layer*n_ctx; + const int n_mem = n_layer*std::max(origmaxctx,n_ctx); const int n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements); diff --git 
a/otherarch/gptj_v3.cpp b/otherarch/gptj_v3.cpp index 0f0f82105..2931ece5f 100644 --- a/otherarch/gptj_v3.cpp +++ b/otherarch/gptj_v3.cpp @@ -12,6 +12,7 @@ #include #include #include +#include <algorithm> #include "model_adapter.h" @@ -39,6 +40,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g } } + int32_t origmaxctx = model.hparams.n_ctx; + // load hparams { auto & hparams = model.hparams; @@ -54,7 +57,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); @@ -138,8 +141,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v + ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k + ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v ctx_size += (5 + 10*n_layer)*512; // object overhead @@ -232,7 +235,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; - const int n_mem = n_layer*n_ctx; + const int n_mem = n_layer*std::max(origmaxctx,n_ctx); const int n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements); diff --git a/otherarch/mpt_v3.cpp 
b/otherarch/mpt_v3.cpp index f7ab03ec0..46ac0bd8b 100644 --- a/otherarch/mpt_v3.cpp +++ b/otherarch/mpt_v3.cpp @@ -12,6 +12,7 @@ #include #include #include +#include <algorithm> #include "model_adapter.h" diff --git a/otherarch/neox_v3.cpp b/otherarch/neox_v3.cpp index 3084bbda7..4f79171bd 100644 --- a/otherarch/neox_v3.cpp +++ b/otherarch/neox_v3.cpp @@ -12,6 +12,7 @@ #include #include #include +#include <algorithm> #if defined(GGML_USE_CLBLAST) #include "ggml-opencl.h" @@ -37,6 +38,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model & } } + int32_t origmaxctx = model.hparams.n_ctx; + // load hparams { auto & hparams = model.hparams; @@ -53,7 +56,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model & const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); @@ -133,8 +136,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model & ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v + ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k + ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v ctx_size += (6 + 16*n_layer)*1024; // object overhead @@ -232,7 +235,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model & const int n_layer = hparams.n_layer; const int n_ctx = 
hparams.n_ctx; - const int64_t n_mem = n_layer*n_ctx; + const int64_t n_mem = n_layer*std::max(origmaxctx,n_ctx); const int64_t n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);