From f162b2ef3fd267dfe4076e204e551de8d2d8c07e Mon Sep 17 00:00:00 2001
From: Nexesenex <124105151+Nexesenex@users.noreply.github.com>
Date: Tue, 26 Mar 2024 02:22:04 +0100
Subject: [PATCH] Update llama.cpp - correct embd.weight for GQA-4 &
 qkv.weight to K-Quants

Use a Q2_K embedding for GQA-4 models because it helps Mistral 7B.
I didn't test a model with an attn_qkv.weight tensor, so it's better to
be conservative there with a K-Quant.
---
 llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 3910d50f7..f38d159f1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -12467,7 +12467,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             new_type = GGML_TYPE_Q2_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_XS) {
-            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q2_K;
+            if (qs.model.hparams.n_gqa() == 4 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q2_K;
             else new_type = GGML_TYPE_IQ2_S;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
@@ -12506,7 +12506,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             else new_type = GGML_TYPE_IQ2_XXS;
         }
         else if (name.find("attn_qkv.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ4_XS;
+            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q4_K;
             else new_type = GGML_TYPE_Q2_K;
         }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
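
For reviewers, a minimal standalone sketch of the first hunk's decision (a paraphrase with made-up helper names, not the actual llama_tensor_get_type code). In llama.cpp, hparams.n_gqa() is the ratio n_head / n_head_kv, which is 4 for Mistral 7B (32 query heads, 8 KV heads), so such models now get the Q2_K embedding table instead of IQ2_S under LLAMA_FTYPE_MOSTLY_IQ1_XS:

    #include <cstdint>
    #include <cstdio>

    // Standalone paraphrase of the first hunk above. The enum lists only
    // the types the sketch needs; in llama.cpp they come from ggml.h.
    enum ggml_type { GGML_TYPE_Q2_K, GGML_TYPE_IQ2_S };

    // Mirrors llama_hparams::n_gqa(): the grouped-query-attention factor
    // is the ratio of query heads to key/value heads.
    static uint32_t n_gqa(uint32_t n_head, uint32_t n_head_kv) {
        return n_head / n_head_kv;
    }

    // Embedding type under LLAMA_FTYPE_MOSTLY_IQ1_XS after this patch:
    // GQA-4 models are bumped to Q2_K alongside MoE models (>= 2 experts).
    static ggml_type embd_type_iq1_xs(uint32_t gqa, uint32_t n_expert) {
        if (gqa == 4 || n_expert >= 2) return GGML_TYPE_Q2_K;
        return GGML_TYPE_IQ2_S;
    }

    int main() {
        // Mistral 7B: 32 query heads, 8 KV heads -> GQA factor 4, 1 expert.
        const uint32_t gqa = n_gqa(32, 8);
        std::printf("GQA factor: %u -> %s embeddings\n", gqa,
                    embd_type_iq1_xs(gqa, 1) == GGML_TYPE_Q2_K ? "Q2_K"
                                                               : "IQ2_S");
        return 0;
    }

Keeping the existing MoE condition (n_expert >= 2) OR'd with the new GQA check leaves MoE behavior unchanged; the patch only widens the set of models that receive the larger embedding type.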