From 33a004e9cc04288b0e798dcc0b0d373a7a0fc758 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 8 Apr 2024 12:49:04 +0300
Subject: [PATCH] llama : more metal-friendly KV cache PAD

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index e91ad7285..cb1015e8e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11508,7 +11508,7 @@ static int llama_decode_internal(
                 // a heuristic, to avoid attending the full cache if it is not yet utilized
                 // after enough generations, the benefit from this heuristic disappears
                 // if we start defragmenting the cache, the benefit from this will be more important
-                kv_self.n = std::min(kv_self.size, std::max(256u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 256)));
+                kv_self.n = std::min(kv_self.size, std::max(128u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 128)));
                 //kv_self.n = llama_kv_cache_cell_max(kv_self);
             }
         }
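
Note: GGML_PAD(x, n) rounds x up to the next multiple of n (n a power of two), so this
change shrinks the padding granularity of the attended KV range from 256 cells to 128.
Below is a minimal standalone sketch of the arithmetic, not part of the patch; kv_size
and cell_max are hypothetical stand-ins for kv_self.size and
llama_kv_cache_cell_max(kv_self):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Round x up to the next multiple of n (n must be a power of two),
    // mirroring the GGML_PAD macro from ggml.h.
    #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

    int main() {
        // Hypothetical example: a 4096-cell cache whose highest occupied
        // cell index is 300.
        const uint32_t kv_size  = 4096;
        const uint32_t cell_max = 300;

        // Old padding (256) vs new padding (128), as in the patched line.
        const uint32_t n_old = std::min(kv_size, std::max(256u, (uint32_t) GGML_PAD(cell_max, 256)));
        const uint32_t n_new = std::min(kv_size, std::max(128u, (uint32_t) GGML_PAD(cell_max, 128)));

        printf("pad 256: n = %u\n", n_old); // 512
        printf("pad 128: n = %u\n", n_new); // 384
        return 0;
    }

With 300 occupied cells, padding to 256 attends 512 cells while padding to 128 attends
only 384, so the finer granularity wastes at most 127 unused cells per decode instead of
up to 255, while kv_self.n stays aligned to a block size the Metal kernels can work with.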