llama: dbrx: quantize fix n_attention_wv tensor name

This commit is contained in:
Pierrick HYMBERT 2024-04-07 05:09:17 +02:00
parent b6522a9f5b
commit dccb012637

View file

@@ -13717,7 +13717,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
const std::string name = ggml_get_name(meta);
// TODO: avoid hardcoded tensor names - use the TN_* constants
-        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos || name.find("attn.Wqkv.weight") != std::string::npos) {
+        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos || name == LLM_TN(model.arch)(LLM_TENSOR_ATTN_QKV, "weight")) {
++qs.n_attention_wv;
} else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
qs.has_output = true;