From a9fdcfd138c088975d66a97b2946e92a9c323a3d Mon Sep 17 00:00:00 2001
From: Daniel Bevenius <daniel.bevenius@gmail.com>
Date: Tue, 20 Aug 2024 09:55:31 +0100
Subject: [PATCH] squash! llama : std::move llm_bigram_bpe from work_queue

Rename lama_priority_queue -> llama_priority_queue.
---
 src/llama-vocab.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 781ee05c6..323660ef5 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -322,7 +322,7 @@ private:
 // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
 
 template<typename T, typename Container = std::vector<T>, typename Compare = std::less<typename Container::value_type>>
-class lama_priority_queue : public std::priority_queue<T, Container, Compare> {
+class llama_priority_queue : public std::priority_queue<T, Container, Compare> {
 public:
     using std::priority_queue<T, Container, Compare>::priority_queue;
 
@@ -344,7 +344,7 @@ struct llm_bigram_bpe {
     };
 
     using queue_storage = std::vector<llm_bigram_bpe>;
-    using queue         = lama_priority_queue<llm_bigram_bpe, queue_storage, comparator>;
+    using queue         = llama_priority_queue<llm_bigram_bpe, queue_storage, comparator>;
     llm_symbol::index left;
     llm_symbol::index right;
     std::string text;