Update llama.cpp - Non-FFN layer-tensors strategy
parent 1c4da5ddac
commit ddc7701588

1 changed file with 30 additions and 1 deletion

llama.cpp | 31 ++++++++++++++++++++++++++++++-
@@ -12468,7 +12468,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_XS) {
             if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q2_K;
-            new_type = GGML_TYPE_IQ2_S;
+            else new_type = GGML_TYPE_IQ2_S;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
             new_type = GGML_TYPE_IQ3_S;
@@ -12477,6 +12477,35 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             new_type = GGML_TYPE_IQ3_S;
         }
     }
+    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_XS) {
+        if (name.find("attn_q.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ2_S;
+        }
+        if (name.find("attn_k.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_IQ4_XS;
+            else if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ3_S;
+            else if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_XXS;
+            else if (qs.model.hparams.n_gqa() >= 4) new_type = GGML_TYPE_IQ2_XS;
+            else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ2_XXS;
+        }
+        else if (name.find("attn_v.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_IQ4_XS;
+            else if (qs.model.hparams.n_gqa() >= 8 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ4_XS;
+            else if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_S;
+            else if (qs.model.hparams.n_gqa() >= 2) new_type = GGML_TYPE_IQ3_XXS;
+            else new_type = GGML_TYPE_Q2_K;
+            ++qs.i_attention_wv;
+        }
+        else if (name.find("attn_output.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_IQ4_XS;
+            else if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ3_S;
+            else if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_XXS;
+            else new_type = GGML_TYPE_IQ2_XXS;
+        }
+        else if (name.find("attn_qkv.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ4_XS;
+            else new_type = GGML_TYPE_Q2_K;
+        }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
                ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
         if (name.find("attn_v.weight") != std::string::npos) {
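For readability, here is a minimal standalone sketch of the tensor-type strategy this commit adds for LLAMA_FTYPE_MOSTLY_IQ1_XS. The names qtype, attn_hparams and pick_iq1_xs_attn_type are illustrative only and not part of llama.cpp; in the actual code these branches live inside llama_tensor_get_type() and assign to new_type in place, and bookkeeping such as ++qs.i_attention_wv is omitted here.

// Illustrative sketch only -- not llama.cpp code. It mirrors the thresholds of
// the added IQ1_XS block as a pure function for readability.
#include <cstdint>
#include <string>

enum class qtype { keep, iq2_xxs, iq2_xs, iq2_s, iq3_xxs, iq3_s, iq4_xs, q2_k };

struct attn_hparams {          // assumed stand-in for qs.model.hparams
    uint32_t n_expert;         // number of MoE experts (0 or 1 for dense models)
    uint32_t n_gqa;            // grouped-query-attention ratio, n_head / n_head_kv
};

// Returns the override for one attention tensor under IQ1_XS, or qtype::keep
// where the commit leaves new_type untouched (e.g. attn_q on dense models).
static qtype pick_iq1_xs_attn_type(const std::string & name, const attn_hparams & h) {
    if (name.find("attn_q.weight") != std::string::npos) {
        return h.n_expert >= 2 ? qtype::iq2_s : qtype::keep;
    }
    if (name.find("attn_k.weight") != std::string::npos) {
        if (h.n_expert >= 8) return qtype::iq4_xs;
        if (h.n_expert >= 4) return qtype::iq3_s;
        if (h.n_expert >= 2) return qtype::iq3_xxs;
        if (h.n_gqa    >= 4) return qtype::iq2_xs;
        if (h.n_gqa    >= 2) return qtype::iq2_xxs;
        return qtype::keep;
    }
    if (name.find("attn_v.weight") != std::string::npos) {
        if (h.n_expert >= 8)                 return qtype::iq4_xs;
        if (h.n_gqa >= 8 || h.n_expert >= 4) return qtype::iq4_xs;
        if (h.n_gqa >= 4 || h.n_expert >= 2) return qtype::iq3_s;
        if (h.n_gqa >= 2)                    return qtype::iq3_xxs;
        return qtype::q2_k;
    }
    if (name.find("attn_output.weight") != std::string::npos) {
        if (h.n_expert >= 8) return qtype::iq4_xs;
        if (h.n_expert >= 4) return qtype::iq3_s;
        if (h.n_expert >= 2) return qtype::iq3_xxs;
        return qtype::iq2_xxs;
    }
    if (name.find("attn_qkv.weight") != std::string::npos) {
        return h.n_expert >= 2 ? qtype::iq4_xs : qtype::q2_k;
    }
    return qtype::keep;
}

For example, with n_expert = 8 every listed attention tensor is promoted to IQ4_XS, while a dense model with a GQA ratio of 8 gets IQ2_XS for attn_k, IQ4_XS for attn_v and IQ2_XXS for attn_output.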