From 7bab4c055c1446a6fd3cf8aadae22cffab335a90 Mon Sep 17 00:00:00 2001
From: Francis Couture-Harpin
Date: Mon, 8 Apr 2024 14:41:39 -0400
Subject: [PATCH] llama : fix parentheses in attention layer count sanity check

There was otherwise a warning when compiling.
---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 2bcca46a8..b16ddc64c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13483,7 +13483,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     // - qs.n_attention_wv == 0 for Mamba models
     // - qs.n_attention_wv == model.hparams.n_layer for Transformer models
     //
-    GGML_ASSERT(qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer && "n_attention_wv is unexpected");
+    GGML_ASSERT((qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer) && "n_attention_wv is unexpected");

     size_t total_size_org = 0;
     size_t total_size_new = 0;
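
For reference (not part of the patch itself), a minimal standalone sketch of the precedence issue being fixed: && binds more tightly than ||, so without the added parentheses the string literal only guards the second comparison and compilers such as GCC and Clang warn about the mixed operators. The sketch uses plain assert and made-up values in place of GGML_ASSERT and the real qs/model fields.

// precedence_sketch.cpp -- illustrative only, values are hypothetical
#include <cassert>

int main() {
    int n_attention_wv = 0;  // stand-in for qs.n_attention_wv
    int n_layer        = 32; // stand-in for model.hparams.n_layer

    // Original form: parsed as  a || (b && "msg"), so the message only
    // applies to the second comparison; with -Wall this draws a
    // "suggest parentheses around '&&' within '||'" style warning.
    assert(n_attention_wv == 0 || n_attention_wv == n_layer && "n_attention_wv is unexpected");

    // Patched form: the whole condition is grouped first, then &&-ed with
    // the always-true string literal that serves as the assertion message.
    assert((n_attention_wv == 0 || n_attention_wv == n_layer) && "n_attention_wv is unexpected");

    return 0;
}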