From e18bc6aaf3b547890609ed254ee5248e720e5840 Mon Sep 17 00:00:00 2001
From: amd-lalithnc
Date: Fri, 17 May 2024 12:31:58 +0530
Subject: [PATCH] convert : fix Qwen/Qwen-7b conversion (#7308)

---
 convert-hf-to-gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index cd875fa4a..2810e1e41 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -526,7 +526,7 @@ class Model:
         # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
         added_vocab = tokenizer.special_tokens
 
-        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in (vocab | added_vocab).items()}
+        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
 
         for i in range(vocab_size):
             if i not in reverse_vocab:
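
The one-line change above swaps the PEP 584 dict-union operator `vocab | added_vocab` for dict unpacking `{**vocab, **added_vocab}`. The patch itself does not state the failure mode, but a plausible reading is that one of the two objects in the Qwen conversion path is a mapping type rather than a plain dict, in which case `|` raises TypeError while unpacking still works. The sketch below illustrates that difference under that assumption; the TiktokenVocab class is a hypothetical stand-in, not code from llama.cpp.

# Minimal sketch, assuming one operand is a non-dict mapping.
from collections.abc import Mapping


class TiktokenVocab(Mapping):
    """Hypothetical read-only mapping of token -> id."""

    def __init__(self, data: dict[str, int]):
        self._data = data

    def __getitem__(self, key: str) -> int:
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self) -> int:
        return len(self._data)


vocab = {"hello": 0, "world": 1}
added_vocab = TiktokenVocab({"<|endoftext|>": 2})

# dict.__or__ only accepts other dicts, so the union operator fails here.
try:
    merged = vocab | added_vocab  # type: ignore[operator]
except TypeError as exc:
    print("union operator failed:", exc)

# Dict unpacking only requires keys()/__getitem__, so any mapping works; on
# duplicate keys the right-hand operand (added_vocab) wins, same as `|` would.
merged = {**vocab, **added_vocab}
reverse_vocab = {id_: tok for tok, id_ in merged.items()}
print(reverse_vocab)  # {0: 'hello', 1: 'world', 2: '<|endoftext|>'}

Either merge form gives added_vocab precedence on duplicate keys, so apart from the broader operand support the behaviour of the conversion script is unchanged.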