Update llama.cpp - adjustments

attn_v.weight in Q4_K for all MoEs and for models with GQA 4, notably Mistral (PPL at 4096 ctx benefits quite a lot), and incidentally CodeLlama 34b (which is meant for coding anyway and isn't exploitable in IQ1 quants).
Yi 34b (GQA 7) gets IQ3_S for now; more tests are needed, due to huge perplexity increases with IQ4_XS and Q4_K on attn_v.weight for my test model (Kyllene 1.1).
Nexesenex 2024-04-01 14:11:20 +02:00 committed by GitHub
parent ed4be6bb0d
commit dce3e27ba2

@@ -12819,9 +12819,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     }
     else if (name.find("attn_v.weight") != std::string::npos) {
         if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_Q5_K;
-        else if (qs.model.hparams.n_gqa() >= 8 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
-        else if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_S;
-        else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ3_XXS;
+        else if (qs.model.hparams.n_gqa() >= 8 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q4_K;
+        else if (qs.model.hparams.n_gqa() == 7) new_type = GGML_TYPE_IQ3_S;
+        else if (qs.model.hparams.n_gqa() >= 4) new_type = GGML_TYPE_Q4_K;
+        else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ3_S;
         else new_type = GGML_TYPE_Q2_K;
         ++qs.i_attention_wv;
     }
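
For reference, a minimal standalone sketch of the new attn_v.weight ladder above. This is not the llama.cpp API: HParams, attn_v_type and qtype_name are illustrative names, and n_gqa() is assumed to be n_head / n_head_kv as in llama.cpp. With the published head counts, Mistral 7b (32 heads / 8 KV heads, GQA 4) now lands on Q4_K, Yi 34b (56 / 8, GQA 7) on IQ3_S, and an 8-expert MoE such as Mixtral on Q5_K:

    #include <cstdint>
    #include <cstdio>

    enum class QType { Q5_K, Q4_K, IQ3_S, Q2_K };

    static const char * qtype_name(QType t) {
        switch (t) {
            case QType::Q5_K:  return "Q5_K";
            case QType::Q4_K:  return "Q4_K";
            case QType::IQ3_S: return "IQ3_S";
            case QType::Q2_K:  return "Q2_K";
        }
        return "?";
    }

    struct HParams {
        uint32_t n_head;
        uint32_t n_head_kv;
        uint32_t n_expert;
        // GQA factor: query heads per KV head (assumed, as in llama.cpp's n_gqa()).
        uint32_t n_gqa() const { return n_head_kv ? n_head / n_head_kv : 0; }
    };

    // The new selection ladder for attn_v.weight, mirroring the diff above.
    static QType attn_v_type(const HParams & hp) {
        if (hp.n_expert >= 8)                    return QType::Q5_K;  // large MoE
        if (hp.n_gqa() >= 8 || hp.n_expert >= 2) return QType::Q4_K;  // all MoEs now get Q4_K
        if (hp.n_gqa() == 7)                     return QType::IQ3_S; // Yi 34b special case
        if (hp.n_gqa() >= 4)                     return QType::Q4_K;  // Mistral-class GQA 4
        if (hp.n_gqa() >= 2)                     return QType::IQ3_S;
        return QType::Q2_K;
    }

    int main() {
        const HParams mistral_7b = {32, 8, 0}; // GQA 4
        const HParams yi_34b     = {56, 8, 0}; // GQA 7
        const HParams mixtral    = {32, 8, 8}; // 8 experts
        std::printf("Mistral 7b -> %s\n", qtype_name(attn_v_type(mistral_7b)));
        std::printf("Yi 34b     -> %s\n", qtype_name(attn_v_type(yi_34b)));
        std::printf("Mixtral    -> %s\n", qtype_name(attn_v_type(mixtral)));
        return 0;
    }

Note that branch order matters: the GQA 8 check runs before the == 7 special case, so only models with exactly 7 query heads per KV head (such as Yi 34b) are held back at IQ3_S.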