From d7546fda641494b3c43afd21a0176378e7515525 Mon Sep 17 00:00:00 2001
From: Pierrick HYMBERT
Date: Sun, 7 Apr 2024 15:59:07 +0200
Subject: [PATCH] llama: quantize: remove wrong look for tensor qkv name as it
 was badly missing the .weight suffix

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 5922db5a5..2f61faa9b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13717,7 +13717,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         const std::string name = ggml_get_name(meta);

         // TODO: avoid hardcoded tensor names - use the TN_* constants
-        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos || name == LLM_TN(model.arch)(LLM_TENSOR_ATTN_QKV, "weight")) {
+        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
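
Note (illustration only, not part of the patch): a minimal standalone sketch, assuming GGUF-style tensor names of the form "blk.<N>.attn_qkv.weight", showing that the substring check kept by the patch already matches the fused-QKV attention tensors on its own, which is why the extra exact-match clause is dropped.

    // sketch.cpp - hedged illustration, not llama.cpp code
    #include <cassert>
    #include <string>

    int main() {
        // hypothetical tensor name as it would appear in a model file (assumption)
        const std::string name = "blk.0.attn_qkv.weight";

        // the remaining substring check already matches this name, so the
        // removed exact-match clause on the qkv tensor name added nothing
        assert(name.find("attn_qkv.weight") != std::string::npos);
        return 0;
    }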