Simplify IQ4_XSR
But leave in place, as a "demo", the more complex template set by Ikawrakow for customizing the per-layer quants, with the added attn_q, attn_k, and attn_output tensors.
This commit is contained in:
parent
8c10533409
commit
8c9017bfbe
1 changed file with 6 additions and 8 deletions
@@ -15508,7 +15508,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) {
         new_type = GGML_TYPE_Q4_0;
     }
-    else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) new_type = GGML_TYPE_Q8_0;
+    else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) new_type = GGML_TYPE_IQ4_XS;
     }
 } else if (name.find("attn_v.weight") != std::string::npos) {
     if (qs.model.hparams.n_expert >= 4) {
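The first hunk carries the headline simplification: when the target ftype is IQ4_XSR, this branch now rewrites the type to IQ4_XS itself instead of bumping it all the way to Q8_0. Read in isolation, the branch is just a chain of type rewrites; a minimal sketch of its post-commit shape, abbreviated from the diff context (requires ggml.h for GGML_TYPE_* and llama.h for LLAMA_FTYPE_*; not the full function):

    // Sketch only: abbreviated from the diff context above.
    static ggml_type rewrite_type_sketch(llama_ftype ftype, ggml_type new_type) {
        // repacked ARM Q4_0 variants collapse to plain Q4_0 in this branch
        if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) {
            new_type = GGML_TYPE_Q4_0;
        } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) {
            new_type = GGML_TYPE_IQ4_XS; // was GGML_TYPE_Q8_0 before this commit
        }
        return new_type;
    }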
@@ -15568,8 +15568,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) {
         if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) {
-            new_type = qs.i_attention_wv < qs.n_attention_wv/8 ? GGML_TYPE_Q6_K :
-                use_more_bits(qs.i_attention_wv, qs.n_attention_wv) ? GGML_TYPE_Q5_K : GGML_TYPE_Q5_K;
+            new_type = qs.i_attention_wv < qs.n_attention_wv/8 ? GGML_TYPE_Q5_K :
+                use_more_bits(qs.i_attention_wv, qs.n_attention_wv) ? GGML_TYPE_Q6_K : GGML_TYPE_Q5_K;
         }
     }
     ++qs.i_attention_wv;
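For attn_v.weight the Q6_K budget moves rather than disappears: before, the first eighth of the attention layers got Q6_K and everything else Q5_K (both arms of the old use_more_bits ternary were Q5_K); now the first eighth gets Q5_K and the use_more_bits layers get Q6_K. For reference, the helper as it appears upstream in llama.cpp around this time (worth re-checking against the exact tree this commit applies to):

    // Upstream helper (llama.cpp): true for the first eighth of layers,
    // the last eighth, and every third layer in between.
    static bool use_more_bits(int i_layer, int n_layers) {
        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
    }

Note that use_more_bits() is already true for i_layer < n_layers/8, so the explicit first-eighth check in the new ternary only serves to pin those layers to Q5_K instead of Q6_K; the Q6_K bump effectively shifts to the last eighth and every third middle layer.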
@@ -15622,7 +15622,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) {
         if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) {
             new_type = qs.i_attention_wk < qs.n_attention_wk/8 ? GGML_TYPE_Q5_K :
-                use_more_bits(qs.i_attention_wk, qs.n_attention_wk) ? GGML_TYPE_IQ4_XS : GGML_TYPE_IQ4_XS;
+                use_more_bits(qs.i_attention_wk, qs.n_attention_wk) ? GGML_TYPE_Q5_K : GGML_TYPE_Q5_K;
         }
     }
     ++qs.i_attention_wk;
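After this hunk, every arm of the attn_k ternary is Q5_K, matching the unchanged first-eighth arm, so attn_k now gets Q5_K across the board (previously only the first eighth did; the rest sat at IQ4_XS). The now-constant ternary is presumably part of the "demo" template the commit message says it leaves in place. Functionally it reduces to:

    // Equivalent collapsed form of the new attn_k branch (illustration only).
    if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) {
        new_type = GGML_TYPE_Q5_K;
    }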
@@ -15704,7 +15704,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     }
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) {
         if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) {
-            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q5_K :
+            new_type = i_layer < n_layer/8 ? GGML_TYPE_IQ4_XS :
                 use_more_bits(i_layer, n_layer) ? GGML_TYPE_IQ4_XS : GGML_TYPE_IQ4_XS;
         }
     }
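This hunk (judging by the bare i_layer/n_layer counters, most likely the ffn_down section) goes the other way: the first-eighth Q5_K bump is dropped, all three arms are now IQ4_XS, and the block collapses to:

    // Equivalent collapsed form (illustration only; section name inferred).
    if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) {
        new_type = GGML_TYPE_IQ4_XS;
    }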
@@ -15737,7 +15737,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XL) new_type = GGML_TYPE_IQ4_XS;
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR) {
         if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) {
-            new_type = qs.i_attention_wo < qs.n_attention_wo/8 ? GGML_TYPE_Q5_K :
+            new_type = qs.i_attention_wo < qs.n_attention_wo/8 ? GGML_TYPE_IQ4_XS :
                 use_more_bits(qs.i_attention_wo, qs.n_attention_wo) ? GGML_TYPE_IQ4_XS : GGML_TYPE_IQ4_XS;
         }
     }
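attn_output follows the same pattern: the first-eighth Q5_K bump goes away and the tensor lands on IQ4_XS everywhere. Putting the hunks so far together, the effective IQ4_XSR policy for GQA/MoE models (n_gqa() >= 2 or n_expert >= 2) condenses to something like the following hypothetical summary, written for illustration and not taken from the commit (needs <string>, ggml.h, and the use_more_bits helper shown earlier):

    // Hypothetical condensation of the post-commit IQ4_XSR per-tensor policy.
    // 'il'/'nl' are the per-tensor layer counter and layer count.
    static ggml_type iq4_xsr_policy(const std::string & name, int il, int nl) {
        if (name.find("attn_v.weight") != std::string::npos) {
            return il < nl/8             ? GGML_TYPE_Q5_K
                 : use_more_bits(il, nl) ? GGML_TYPE_Q6_K
                 :                         GGML_TYPE_Q5_K;
        }
        if (name.find("attn_k.weight") != std::string::npos) return GGML_TYPE_Q5_K;
        return GGML_TYPE_IQ4_XS; // ffn_down (inferred) and attn_output
    }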
@@ -15783,7 +15783,6 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XL && (use_more_bits(i_layer, n_layer))) new_type = GGML_TYPE_IQ3_XXS;
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (use_more_bits(i_layer, n_layer))) new_type = GGML_TYPE_IQ3_S;
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XL && (use_more_bits(i_layer, n_layer))) new_type = GGML_TYPE_IQ4_XS;
-    else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR && (i_layer < n_layer/8)) new_type = GGML_TYPE_Q5_K;
     ++qs.i_ffn_gate;
 }
 else if (name.find("ffn_up") != std::string::npos) {
@@ -15799,7 +15798,6 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XL && (use_more_bits(i_layer, n_layer))) new_type = GGML_TYPE_IQ3_XXS;
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (use_more_bits(i_layer, n_layer))) new_type = GGML_TYPE_IQ3_S;
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XL && (use_more_bits(i_layer, n_layer))) new_type = GGML_TYPE_IQ4_XS;
-    else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR && (i_layer < n_layer/8)) new_type = GGML_TYPE_Q5_K;
     ++qs.i_ffn_up;
 }
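The last two hunks delete the IQ4_XSR special case for ffn_gate and ffn_up outright: the first n_layer/8 of those tensors used to be bumped to Q5_K, and now keep whatever the base IQ4_XSR assignment is. A quick standalone check of which tensors the deleted branch used to touch, for a hypothetical 32-layer model (the layer count is made up for the example):

    // Standalone illustration of the deleted 'i_layer < n_layer/8' bump.
    #include <cstdio>

    int main() {
        const int n_layer = 32; // hypothetical layer count
        for (int i_layer = 0; i_layer < n_layer; ++i_layer) {
            const bool was_bumped = i_layer < n_layer/8; // layers 0..3 here
            std::printf("layer %2d: %s\n", i_layer,
                        was_bumped ? "was Q5_K, now base type" : "base type (unchanged)");
        }
        return 0;
    }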