From 32b8ce5b96e015b99e47e488d74142c5b500be29 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 6 Feb 2025 13:07:10 +0200
Subject: [PATCH] cont : better logic

ggml-ci
---
 src/llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 7db6728e0..ea9e0773a 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8803,8 +8803,8 @@ static int llama_decode_impl(
     // decide if we need to defrag the kv cache
     if (cparams.causal_attn && cparams.defrag_thold >= 0.0f) {
         // - do not defrag small contexts (i.e. < 2048 tokens)
-        // - do not defrag if the padding is bigger than the defrag threshold
-        const float fragmentation = (kv_self.n >= 2048 && kv_self.n*cparams.defrag_thold >= llama_kv_cache_get_padding(cparams)) ? 1.0f - float(kv_self.used)/float(kv_self.n) : 0.0f;
+        // - count the padding towards the number of used tokens
+        const float fragmentation = kv_self.n >= 2048 ? 1.0f - float(kv_self.used + llama_kv_cache_get_padding(cparams))/float(kv_self.n) : 0.0f;

         // queue defragmentation for next llama_kv_cache_update
         if (fragmentation > cparams.defrag_thold) {
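
Note (not part of the patch): a minimal standalone sketch comparing the two
formulas, using hypothetical values for the cache state. Here `n`, `used`,
and `padding` stand in for kv_self.n, kv_self.used, and the value returned
by llama_kv_cache_get_padding(cparams); the numbers are illustrative only.

    #include <cstdio>

    int main() {
        // hypothetical KV-cache state for this example
        const int   n            = 4096;  // current size of the KV cache (kv_self.n)
        const int   used         = 3600;  // occupied cells (kv_self.used)
        const int   padding      = 256;   // stand-in for llama_kv_cache_get_padding(cparams)
        const float defrag_thold = 0.10f; // cparams.defrag_thold

        // old logic: skip defrag when the padding exceeds the threshold's
        // share of the cache; otherwise ignore the padding entirely
        const float frag_old = (n >= 2048 && n*defrag_thold >= padding)
            ? 1.0f - float(used)/float(n) : 0.0f;

        // new logic: always count the padding towards the used cells, so it
        // lowers the measured fragmentation instead of gating the check
        const float frag_new = n >= 2048
            ? 1.0f - float(used + padding)/float(n) : 0.0f;

        printf("old: %.4f -> defrag %s\n", frag_old, frag_old > defrag_thold ? "yes" : "no");
        printf("new: %.4f -> defrag %s\n", frag_new, frag_new > defrag_thold ? "yes" : "no");
        return 0;
    }

With these sample values the old formula reports 0.1211 and queues a defrag,
while the new one reports 0.0586 and does not: treating the padded cells as
used makes the check less eager near the padding boundary instead of
switching it off entirely.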