From 4be44b7c33c3e6f18fd58316e0304cace4bfd460 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Mon, 12 Feb 2024 18:55:37 +0200
Subject: [PATCH] iq1_s: use IQ2_XXS for attn_output

At a cost of 0.04 extra bpw this gives a big improvement in PPL.
---
 llama.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index e66d42b71..2e91b5a87 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -10127,6 +10127,9 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
             if (qs.i_ffn_down < qs.n_ffn_down/8) new_type = GGML_TYPE_Q2_K;
             ++qs.i_ffn_down;
         }
+        else if (name.find("attn_output.weight") != std::string::npos) {
+            if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) new_type = GGML_TYPE_IQ2_XXS;
+        }
     } else if (name.find("attn_v.weight") != std::string::npos) {
         if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
             new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;