Update llama.cpp - correct embd.weight for GQA-4 & move qkv.weight to K-Quants

Q2_K embed for GQA-4, because it helps Mistral 7B.
I didn't test a model that uses an attn_qkv.weight tensor, so it's safer to stay conservative with a K-quant.
Nexesenex 2024-03-26 02:22:04 +01:00 committed by GitHub
parent 9c27b0e6ea
commit f162b2ef3f


--- a/llama.cpp
+++ b/llama.cpp
@@ -12467,7 +12467,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             new_type = GGML_TYPE_Q2_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_XS) {
-            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q2_K;
+            if (qs.model.hparams.n_gqa() == 4 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q2_K;
             else new_type = GGML_TYPE_IQ2_S;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
@@ -12506,7 +12506,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             else new_type = GGML_TYPE_IQ2_XXS;
         }
         else if (name.find("attn_qkv.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ4_XS;
+            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q4_K;
             else new_type = GGML_TYPE_Q2_K;
         }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
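For context, here is a minimal, self-contained sketch of the grouped-query-attention factor that the new condition checks. The struct below is a simplified stand-in, not the real llama_hparams from llama.cpp; the field names are assumptions that mirror that struct. Mistral 7B ships 32 query heads and 8 key/value heads, so its GQA factor is 4 and the IQ1_XS branch now quantizes its embeddings as Q2_K:

// Sketch only: simplified stand-in for llama.cpp's hparams struct.
#include <cstdint>
#include <cstdio>

struct hparams_sketch {
    uint32_t n_head;    // number of query heads
    uint32_t n_head_kv; // number of key/value heads (fewer under GQA)

    // GQA factor: how many query heads share each KV head.
    uint32_t n_gqa() const { return n_head / n_head_kv; }
};

int main() {
    hparams_sketch mistral_7b = {32, 8}; // Mistral 7B's published head counts
    // Prints "n_gqa = 4", matching the new n_gqa() == 4 check in the diff.
    printf("n_gqa = %u\n", mistral_7b.n_gqa());
    return 0;
}

Note that a GQA factor of 8 (e.g. Llama 2 70B, with 64 query heads and 8 KV heads) does not match this check, so the change is deliberately scoped to GQA-4 models like Mistral 7B.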