From 491f211b4caf36f90eb350ecf53d570029ce91ad Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 11 Oct 2024 21:14:47 +0300
Subject: [PATCH] llama : improve infill sampler

ggml-ci
---
 src/llama-sampling.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 96a979018..d71516153 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1791,7 +1791,6 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     for (size_t i = 0; i < cur_p->size; ++i) {
         LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
     }
-#endif
 
     float p_txt_sum = 0.0f;
     float p_eog_sum = 0.0f;
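
For context on the fields touched by this hunk, below is a minimal standalone C++ sketch (not part of the patch) of how a candidate list's probability mass can be split into "text" and "end-of-generation" sums, mirroring the p_txt_sum / p_eog_sum accumulators and the id/p/logit fields that LOG_DBG_CUR prints. The candidate struct and the is_eog() predicate are illustrative stand-ins under assumed values, not the llama.cpp API.

    // Standalone illustration: accumulate probability mass of regular text
    // tokens vs. end-of-generation (EOG) tokens over a small candidate list.
    #include <cstdio>
    #include <vector>

    struct candidate {
        int   id;     // token id
        float p;      // normalized probability
        float logit;  // raw logit
    };

    // Hypothetical predicate: pretend token id 2 is an end-of-generation token.
    static bool is_eog(int id) {
        return id == 2;
    }

    int main() {
        std::vector<candidate> cur_p = {
            { 10, 0.50f, 3.1f },
            {  2, 0.30f, 2.6f }, // EOG candidate
            { 27, 0.20f, 2.2f },
        };

        float p_txt_sum = 0.0f; // probability mass of regular text tokens
        float p_eog_sum = 0.0f; // probability mass of EOG tokens

        for (size_t i = 0; i < cur_p.size(); ++i) {
            if (is_eog(cur_p[i].id)) {
                p_eog_sum += cur_p[i].p;
            } else {
                p_txt_sum += cur_p[i].p;
            }
            // same format as the LOG_DBG_CUR line in the hunk above
            printf("cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n",
                   i, cur_p[i].id, cur_p[i].p, cur_p[i].logit);
        }

        printf("p_txt_sum = %.6f, p_eog_sum = %.6f\n", p_txt_sum, p_eog_sum);
        return 0;
    }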