From 1490cdd71dce04c097b81a73b601fd7a740cdae5 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Wed, 5 Apr 2023 15:53:07 +0800
Subject: [PATCH] change GPT-J and GPT2 KVs to use fp16 instead

---
 koboldcpp.dll         | Bin 3292581 -> 3292581 bytes
 koboldcpp_blas.dll    | Bin 3296095 -> 3296095 bytes
 otherarch/gpt2_v1.cpp |  10 ++++++----
 otherarch/gptj_v1.cpp |  10 ++++++----
 otherarch/gptj_v2.cpp |  10 ++++++----
 5 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/koboldcpp.dll b/koboldcpp.dll
index e8ca6e513ddcbad8426fd14cd2bae3659a03fb7b..b682d241c8672d91d86db750a19c7532651950da 100644
GIT binary patch

diff --git a/otherarch/gpt2_v1.cpp b/otherarch/gpt2_v1.cpp
index fd11d17a2..f40e1feae 100644
--- a/otherarch/gpt2_v1.cpp
+++ b/otherarch/gpt2_v1.cpp
@@ -85,6 +85,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -119,8 +121,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*(       n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(memory_type); // memory_v
 
         ctx_size += (6 + 12*n_layer)*256; // object overhead
 
@@ -218,8 +220,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         const int n_mem      = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);
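For context on the effect of this change: the memory_k and memory_v buffers sized in the hunks above hold n_ctx*n_layer*n_embd elements each, so moving their element type from fp32 (4 bytes) to fp16 (2 bytes) halves that part of the load-time footprint. Below is a minimal standalone sketch of the arithmetic, using assumed GPT-J-like dimensions rather than values taken from this patch.

// Illustrative only: the dimensions are assumptions, not values from the patch.
// The formula mirrors the accounting lines above:
//   per-tensor bytes = n_ctx * n_layer * n_embd * element_size
// and there are two such tensors (memory_k and memory_v).
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_ctx   = 2048;  // assumed context length
    const int64_t n_layer = 28;    // assumed layer count (GPT-J-like)
    const int64_t n_embd  = 4096;  // assumed embedding width (GPT-J-like)

    const int64_t elements_per_tensor = n_ctx * n_layer * n_embd;

    const double f32_bytes = 2.0 * elements_per_tensor * 4;  // memory_k + memory_v at fp32
    const double f16_bytes = 2.0 * elements_per_tensor * 2;  // memory_k + memory_v at fp16

    printf("KV cache at fp32: %.1f MiB\n", f32_bytes / (1024.0 * 1024.0));
    printf("KV cache at fp16: %.1f MiB\n", f16_bytes / (1024.0 * 1024.0));
    return 0;
}

With these assumed dimensions the two caches come to about 1792 MiB at fp32 versus 896 MiB at fp16; the exact savings depend on the model's real hyperparameters.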
diff --git a/otherarch/gptj_v1.cpp b/otherarch/gptj_v1.cpp
index a51ee21ab..e88f5917a 100644
--- a/otherarch/gptj_v1.cpp
+++ b/otherarch/gptj_v1.cpp
@@ -103,6 +103,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -136,8 +138,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_mlp_proj_w_trans
         ctx_size += n_layer*(       n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -240,8 +242,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         const int n_mem      = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);
diff --git a/otherarch/gptj_v2.cpp b/otherarch/gptj_v2.cpp
index cdb1ae4b7..511824c16 100644
--- a/otherarch/gptj_v2.cpp
+++ b/otherarch/gptj_v2.cpp
@@ -103,6 +103,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -136,8 +138,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*(       n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -234,8 +236,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         const int n_mem      = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
-        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
+        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
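The same two-part edit (size accounting, then tensor allocation) is repeated in each of the three loaders, with the element type now read from a single memory_type variable so the two places cannot drift apart. Below is a minimal standalone sketch of that pattern; the enum, type_size() helper and kv_cache struct are illustrative stand-ins, not the actual ggml API used in the patch.

// Sketch only: shows one memory_type variable driving both the ctx_size
// accounting and the K/V buffer allocation, mirroring the hunks above.
#include <cstddef>
#include <cstdio>
#include <vector>

enum class kv_type { f32, f16 };

static size_t type_size(kv_type t) {
    return t == kv_type::f32 ? 4 : 2;  // bytes per element
}

struct kv_cache {
    std::vector<unsigned char> memory_k;
    std::vector<unsigned char> memory_v;
};

int main() {
    const size_t n_ctx = 1024, n_layer = 12, n_embd = 768;  // assumed toy dimensions

    const kv_type memory_type = kv_type::f16;  // the patch's change, made in one place

    // Size accounting and allocation both derive from the same memory_type.
    const size_t n_elements = n_ctx * n_layer * n_embd;
    size_t ctx_size = 0;
    ctx_size += n_elements * type_size(memory_type);  // memory_k
    ctx_size += n_elements * type_size(memory_type);  // memory_v

    kv_cache cache;
    cache.memory_k.resize(n_elements * type_size(memory_type));
    cache.memory_v.resize(n_elements * type_size(memory_type));

    printf("accounted: %zu bytes, allocated: %zu bytes\n",
           ctx_size, cache.memory_k.size() + cache.memory_v.size());
    return 0;
}

Changing the cache precision then comes down to the single memory_type assignment, which is exactly what each hunk in this patch does.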