From 73fbbd15269fdbfa4f2987750aa51d216d3a7eca Mon Sep 17 00:00:00 2001
From: John <78893154+cmp-nct@users.noreply.github.com>
Date: Mon, 22 Jan 2024 21:57:25 +0100
Subject: [PATCH] Update llama.cpp

---
 llama.cpp | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 02028b900..2c43d72fb 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -8004,8 +8004,13 @@ void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * can
     if (k == (int) candidates->size) {
         std::sort(candidates->data, candidates->data + candidates->size, comp);
     } else {
-        std::nth_element(candidates->data, candidates->data + k, candidates->data + candidates->size, comp); // separate stack to top-k
-        std::sort(candidates->data, candidates->data + k, comp); // Sort the top-k stack
+        if (k > 3000) {
+            // TODO: this threshold needs a closer look and tests on multiple platforms. On an Intel i7 13th gen with VC compilers, performance is equal at ~2500 top-k; below that, partial_sort is faster.
+            std::nth_element(candidates->data, candidates->data + k, candidates->data + candidates->size, comp); // separate stack to top-k
+            std::sort(candidates->data, candidates->data + k, comp); // Sort the top-k stack
+        } else {
+            std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
+        }
     }
     candidates->sorted = true;
 }