From 768c43f852e0d0493787cf93b7ef3cea9966d5f0 Mon Sep 17 00:00:00 2001
From: zhenweijin
Date: Mon, 23 Sep 2024 10:24:34 +0800
Subject: [PATCH] remove unused fields to avoid unused-field build error

---
 src/llama-vocab.cpp | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 80ca7335d..1790c6aeb 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -201,8 +201,7 @@ struct llm_tokenizer_spm : llm_tokenizer {
 };
 
 struct llm_tokenizer_spm_session {
-    llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab),
-        spm_tokenizer(static_cast<const llm_tokenizer_spm *>(vocab.tokenizer)) {}
+    llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab) {}
 
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         // split string into utf8 chars
@@ -320,7 +319,8 @@ private:
     }
 
     const llama_vocab & vocab;
-    const llm_tokenizer_spm * spm_tokenizer; // currently unused
+    // currently unused
+    // const llm_tokenizer_spm * spm_tokenizer;
 
     std::vector<llm_symbol> symbols;
     llm_bigram_spm::queue work_queue;
@@ -668,8 +668,7 @@ struct llm_tokenizer_wpm : llm_tokenizer {
 };
 
 struct llm_tokenizer_wpm_session {
-    llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab),
-        wpm_tokenizer(static_cast<const llm_tokenizer_wpm *>(vocab.tokenizer)) {}
+    llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab) {}
 
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         const auto & token_map = vocab.token_to_id;
@@ -773,7 +772,8 @@ struct llm_tokenizer_wpm_session {
 
 private:
     const llama_vocab & vocab;
-    const llm_tokenizer_wpm * wpm_tokenizer;
+    // currently unused
+    // const llm_tokenizer_wpm * wpm_tokenizer;
 };
 
 //
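
The "unused-field build error" in the subject is most likely Clang's -Wunused-private-field diagnostic promoted to an error by -Werror: Clang does not count a constructor mem-initializer as a use of a private field, so initializing spm_tokenizer/wpm_tokenizer without ever reading them still trips the warning. Below is a minimal sketch of that failure mode, with hypothetical stand-in names for the session types (not code from this patch):

    // repro.cpp -- build with: clang++ -std=c++17 -Wunused-private-field -Werror -c repro.cpp
    struct tokenizer {};

    struct tokenizer_session {
        // 'helper' is initialized in the mem-initializer list but never read afterwards
        tokenizer_session(const tokenizer & tok) : tok(tok), helper(&tok) {}

        // 'tok' is read here, so it counts as used
        const tokenizer & get() const { return tok; }

      private:
        const tokenizer & tok;
        const tokenizer * helper; // error: private field 'helper' is not used [-Wunused-private-field]
    };

Commenting the members out, as this patch does, keeps the declarations visible for when the tokenizer pointers are wired back in; annotating them with [[maybe_unused]] (C++17) would be another way to keep them compiling under -Werror without deleting them.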