Update llama.cpp - adjustments to non-FFN layer tensors
This commit is contained in:
parent b3553335a3
commit 066efbb18f

1 changed file with 5 additions and 2 deletions
```diff
@@ -12479,17 +12479,20 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_XS) {
         if (name.find("attn_q.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ2_S;
+            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_IQ2_S;
+            else if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ2_XS;
+            else if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ2_XXS;
         }
         if (name.find("attn_k.weight") != std::string::npos) {
             if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_IQ4_XS;
             else if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ3_S;
             else if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_XXS;
+            else if (qs.model.hparams.n_gqa() >= 8) new_type = GGML_TYPE_IQ2_S;
             else if (qs.model.hparams.n_gqa() >= 4) new_type = GGML_TYPE_IQ2_XS;
             else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ2_XXS;
         }
         else if (name.find("attn_v.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_IQ4_XS;
+            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_Q5_K;
             else if (qs.model.hparams.n_gqa() >= 8 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ4_XS;
             else if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_S;
             else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ3_XXS;
```
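As a reading aid, here is a minimal standalone C++ sketch of the selection tiers above: given a model's expert count and GQA ratio, it reports which quantization type each attention tensor would receive under the IQ1_XS rules. The `QType` enum, `QNAMES` table, and `pick_*` helpers are stand-ins invented for this sketch, not real ggml/llama.cpp APIs; only the threshold logic mirrors the diff, with the incoming `new_type` modeled as the `def` fallback parameter.

```cpp
#include <cstdio>

// Stand-ins for the GGML_TYPE_* constants referenced in the diff; this
// sketch intentionally does not pull in the real ggml/llama.cpp headers.
enum QType { IQ2_XXS, IQ2_XS, IQ2_S, IQ3_XXS, IQ3_S, IQ4_XS, Q5_K, QTYPE_COUNT };
static const char * QNAMES[QTYPE_COUNT] = {
    "IQ2_XXS", "IQ2_XS", "IQ2_S", "IQ3_XXS", "IQ3_S", "IQ4_XS", "Q5_K",
};

// attn_q.weight: graduated IQ2 ladder keyed on expert count (MoE width).
static QType pick_attn_q(int n_expert, QType def) {
    if (n_expert >= 8) return IQ2_S;
    if (n_expert >= 4) return IQ2_XS;
    if (n_expert >= 2) return IQ2_XXS;
    return def; // non-MoE models keep the incoming type
}

// attn_k.weight: expert count takes priority, then the GQA ratio.
static QType pick_attn_k(int n_expert, int n_gqa, QType def) {
    if (n_expert >= 8) return IQ4_XS;
    if (n_expert >= 4) return IQ3_S;
    if (n_expert >= 2) return IQ3_XXS;
    if (n_gqa    >= 8) return IQ2_S;
    if (n_gqa    >= 4) return IQ2_XS;
    if (n_gqa    >= 2) return IQ2_XXS;
    return def;
}

// attn_v.weight: the most error-sensitive tensor, so it gets the
// largest types, up to Q5_K for models with 8 or more experts.
static QType pick_attn_v(int n_expert, int n_gqa, QType def) {
    if (n_expert >= 8)               return Q5_K;
    if (n_gqa >= 8 || n_expert >= 4) return IQ4_XS;
    if (n_gqa >= 4 || n_expert >= 2) return IQ3_S;
    if (n_gqa >= 2)                  return IQ3_XXS;
    return def;
}

int main() {
    // Hypothetical model shapes, for illustration only.
    struct Shape { const char * desc; int n_expert; int n_gqa; };
    const Shape shapes[] = {
        { "8-expert MoE (Mixtral-like)", 8, 4 },
        { "dense model with GQA = 8",    0, 8 },
    };
    for (const Shape & s : shapes) {
        printf("%-30s q=%-8s k=%-8s v=%s\n", s.desc,
               QNAMES[pick_attn_q(s.n_expert, IQ2_XXS)],
               QNAMES[pick_attn_k(s.n_expert, s.n_gqa, IQ2_XXS)],
               QNAMES[pick_attn_v(s.n_expert, s.n_gqa, IQ2_XXS)]);
    }
    return 0;
}
```

With these inputs, the MoE shape resolves to q=IQ2_S, k=IQ4_XS, v=Q5_K, while the dense GQA-8 shape keeps q at the incoming default and gets k=IQ2_S, v=IQ4_XS, matching the tiers in the hunk above.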