From 7ed5aca9ca6145a431282ba948160aa1b62e7aad Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Mon, 5 Jun 2023 13:36:35 -0600
Subject: [PATCH] Clean up thread blocks with spares calculation a bit

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 7a0f32a3a..20a41f3f6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2160,7 +2160,7 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
     std::vector<std::thread> workers;
     for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
-        auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 && spare_blocks ? spare_blocks : 0); // num blocks for this thread
+        auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
         auto thr_elems = thr_blocks * block_size; // number of elements for this thread
         auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
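
For context, the one-line change drops a redundant "&& spare_blocks" test: when spare_blocks is 0, adding it to the last thread's block count is already a no-op, so the plain "last thread takes the spares" ternary is enough. Below is a minimal, standalone sketch of that partitioning logic, not part of the patch itself; the constants (nelements, block_size, nthread) are made up for illustration, and only the variable names mirror those in llama_convert_tensor_internal.

#include <cstdio>

int main() {
    // Hypothetical example values; the real code derives these from the tensor.
    const int nelements  = 45120; // assumed to be a multiple of block_size
    const int block_size = 32;
    const int nthread    = 4;

    const int nblocks           = nelements / block_size;
    const int blocks_per_thread = nblocks / nthread;
    const int spare_blocks      = nblocks - blocks_per_thread * nthread; // leftover blocks

    // Mirrors the patched loop: every thread gets blocks_per_thread blocks,
    // and the last thread additionally takes all spare blocks. Adding 0 spare
    // blocks is harmless, which is why the extra "&& spare_blocks" check was removed.
    for (int tnum = 0; tnum < nthread; tnum++) {
        const int thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0);
        const int thr_elems  = thr_blocks * block_size;
        printf("thread %d: %d blocks (%d elements)\n", tnum, thr_blocks, thr_elems);
    }
    return 0;
}

With these example numbers (1410 blocks over 4 threads), threads 0-2 each get 352 blocks and thread 3 gets 354, the same distribution the patched expression produces.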