From 7fae1c4ee2d20748450e29fbe98f556645b50a73 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Thu, 20 Apr 2023 18:40:33 +0200
Subject: [PATCH] Avoiding compiler confusion

After changing chunk_size to const int as suggested by @ggerganov, clang and
GCC started warning me that I don't need to capture it in the lambda. So, I
removed it from the capture list. But that makes the MSVC build fail. So,
making it a constexpr to make every compiler happy.
---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index f48149209..85c0bd9f0 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1649,7 +1649,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_data = work.addr;
             std::vector<int64_t> hist_cur(1 << 4, 0);

-            const int chunk_size = 32 * 512;
+            constexpr int chunk_size = 32 * 512;
             const int nchunk = (nelements + chunk_size - 1)/chunk_size;
             const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
             if (nthread_use < 2) {
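
For context on the capture rules at play, here is a minimal standalone sketch
(an assumed illustration, not the llama.cpp code itself) of the behavior the
commit message describes: a constant-initialized const int can be read inside
a lambda without being captured, which is why clang and GCC flag the capture
as redundant, while MSVC rejected the uncaptured use; a constexpr constant
avoids the disagreement because no capture is needed on any of the compilers.

    #include <cstdio>

    int main() {
        // With `const int chunk_size = 32 * 512;` clang/GCC warn that capturing
        // chunk_size in the lambda below is unnecessary (reading its value does
        // not odr-use it), yet dropping the capture broke the MSVC build.
        // `constexpr` makes the constant usable without capture on every compiler.
        constexpr int chunk_size = 32 * 512;   // same value as in the patch

        auto worker = [/* no capture of chunk_size needed */]() {
            std::printf("chunk_size = %d\n", chunk_size);
        };
        worker();
        return 0;
    }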