diff --git a/koboldcpp.dll b/koboldcpp.dll
index e8ca6e513..b682d241c 100644
Binary files a/koboldcpp.dll and b/koboldcpp.dll differ
diff --git a/koboldcpp_blas.dll b/koboldcpp_blas.dll
index 15c20c12d..0c1cfa481 100644
Binary files a/koboldcpp_blas.dll and b/koboldcpp_blas.dll differ
diff --git a/otherarch/gpt2_v1.cpp b/otherarch/gpt2_v1.cpp
index fd11d17a2..f40e1feae 100644
--- a/otherarch/gpt2_v1.cpp
+++ b/otherarch/gpt2_v1.cpp
@@ -85,6 +85,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -119,8 +121,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(memory_type); // memory_v
 
         ctx_size += (6 + 12*n_layer)*256; // object overhead
 
@@ -218,8 +220,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);
 
diff --git a/otherarch/gptj_v1.cpp b/otherarch/gptj_v1.cpp
index a51ee21ab..e88f5917a 100644
--- a/otherarch/gptj_v1.cpp
+++ b/otherarch/gptj_v1.cpp
@@ -103,6 +103,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_V1_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -136,8 +138,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_mlp_proj_w_trans
         ctx_size += n_layer*( n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -240,8 +242,8 @@ ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
-        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);
 
diff --git a/otherarch/gptj_v2.cpp b/otherarch/gptj_v2.cpp
index cdb1ae4b7..511824c16 100644
--- a/otherarch/gptj_v2.cpp
+++ b/otherarch/gptj_v2.cpp
@@ -103,6 +103,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
 
     auto & ctx = model.ctx;
 
+    auto memory_type = GGML_TYPE_F16;
+
     size_t ctx_size = 0;
 
     {
@@ -136,8 +138,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -234,8 +236,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
-        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
+        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
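Note (reviewer sketch, not part of the patch): every hunk above makes the same change, sizing and allocating the KV cache tensors memory_k and memory_v with a shared memory_type set to F16 instead of hard-coded F32, which halves the bytes reserved per cached element. The standalone C++ sketch below illustrates the resulting size difference; the model dimensions in it are hypothetical example values, not figures taken from koboldcpp or any particular model.

// Standalone sketch with hypothetical dimensions: estimates the KV cache
// footprint when elements are stored as F32 (4 bytes) vs F16 (2 bytes).
#include <cstdio>
#include <cstddef>
#include <cstdint>

int main() {
    // Hypothetical model dimensions, for illustration only.
    const size_t n_layer = 28;
    const size_t n_ctx   = 2048;
    const size_t n_embd  = 4096;

    // memory_k and memory_v each hold n_ctx*n_layer*n_embd elements.
    const size_t n_elements = 2 * n_ctx * n_layer * n_embd;

    const double f32_mib = n_elements * sizeof(float)    / (1024.0 * 1024.0); // 4 bytes per element
    const double f16_mib = n_elements * sizeof(uint16_t) / (1024.0 * 1024.0); // 2 bytes per element

    std::printf("KV cache at F32: %.1f MiB\n", f32_mib);
    std::printf("KV cache at F16: %.1f MiB\n", f16_mib);
    return 0;
}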