gemma : use Q8_0 for the token_embd.weight tensor
This commit is contained in:
parent
89febfed93
commit
f181e601a1
1 changed file with 3 additions and 0 deletions
```diff
@@ -10499,6 +10499,9 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
             new_type = GGML_TYPE_Q4_K;
         }
+        else if (arch == LLM_ARCH_GEMMA) {
+            new_type = GGML_TYPE_Q8_0;
+        }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
         if (name.find("attn_v.weight") != std::string::npos) {
             if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
```
Loading…
Add table
Add a link
Reference in a new issue