From f2588b0b700fdacec45f4e6e1154d1d6ba5dc1ba Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 24 Apr 2024 10:19:38 +0300
Subject: [PATCH] convert : fix set_vocab_sentencepiece

---
 convert-hf-to-gguf.py | 34 +++++++++++++++-------------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 5763b6664..202a46f3c 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -332,7 +332,12 @@ class Model(ABC):
         tokenizer = SentencePieceProcessor(str(tokenizer_path))
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
+        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
+        scores: list[float] = [-10000.0] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+
         for token_id in range(tokenizer.vocab_size()):
+            piece = tokenizer.id_to_piece(token_id)
             text = piece.encode("utf-8")
             score = tokenizer.get_score(token_id)
 
@@ -347,9 +352,9 @@ class Model(ABC):
             elif tokenizer.is_byte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
-            tokens.append(text)
-            scores.append(score)
-            toktypes.append(toktype)
+            tokens[token_id] = text
+            scores[token_id] = score
+            toktypes[token_id] = toktype
 
         added_tokens_file = self.dir_model / 'added_tokens.json'
         if added_tokens_file.is_file():
@@ -357,23 +362,14 @@ class Model(ABC):
                 added_tokens_json = json.load(f)
 
             for key in added_tokens_json:
-                key = key.encode("utf-8")
-                if key not in tokens:
-                    tokens.append(key)
-                    scores.append(-1000.0)
-                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                token_id = added_tokens_json[key]
+                if (token_id >= vocab_size):
+                    print(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+                    continue
 
-        if vocab_size > len(tokens):
-            pad_count = vocab_size - len(tokens)
-            print(
-                f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]"
-            )
-            for i in range(1, pad_count + 1):
-                tokens.append(f"[PAD{i}]")
-                scores.append(-1000.0)
-                toktypes.append(SentencePieceTokenTypes.UNUSED)
-
-        assert len(tokens) == vocab_size
+                tokens[token_id] = key.encode("utf-8")
+                scores[token_id] = -1000.0
+                toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_token_list(tokens)