From 08f183c22971f4d51572a803407a51b8b2209d8e Mon Sep 17 00:00:00 2001
From: cebtenzzre
Date: Mon, 30 Oct 2023 23:05:05 -0400
Subject: [PATCH] convert : restore Falcon vocab padding

---
 convert-bloom-hf-to-gguf.py     |  1 -
 convert-falcon-hf-to-gguf.py    | 18 +++++++++++++-----
 convert-gptneox-hf-to-gguf.py   |  1 -
 convert-mpt-hf-to-gguf.py       |  1 -
 convert-refact-hf-to-gguf.py    |  1 -
 convert-starcoder-hf-to-gguf.py |  1 -
 6 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/convert-bloom-hf-to-gguf.py b/convert-bloom-hf-to-gguf.py
index 6e866d943..367ab3390 100755
--- a/convert-bloom-hf-to-gguf.py
+++ b/convert-bloom-hf-to-gguf.py
@@ -102,7 +102,6 @@ gguf_writer.add_file_type(ftype)
 print("gguf: get tokenizer metadata")
 
 tokens: list[bytearray] = []
-scores: list[float] = []
 toktypes: list[int] = []
 
 # gpt2 tokenizer
diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
index 8e8f3c3f8..d20cf6893 100755
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -125,7 +125,6 @@ gguf_writer.add_file_type(ftype)
 print("gguf: get tokenizer metadata")
 
 tokens: list[bytearray] = []
-scores: list[float] = []
 toktypes: list[int] = []
 
 # gpt2 tokenizer
@@ -141,15 +140,24 @@ tokenizer = AutoTokenizer.from_pretrained(dir_model)
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
 
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
 
 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i])
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)
 
 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 
 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index 02d1fdf16..d13331881 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -107,7 +107,6 @@ gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"])
 print("gguf: get tokenizer metadata")
 
 tokens: list[bytearray] = []
-scores: list[float] = []
 toktypes: list[int] = []
 
 # gpt2 tokenizer
diff --git a/convert-mpt-hf-to-gguf.py b/convert-mpt-hf-to-gguf.py
index 70d154b3f..4f401e082 100755
--- a/convert-mpt-hf-to-gguf.py
+++ b/convert-mpt-hf-to-gguf.py
@@ -110,7 +110,6 @@ gguf_writer.add_max_alibi_bias(hparams["attn_config"]["alibi_bias_max"])
 print("gguf: get tokenizer metadata")
 
 tokens: list[bytearray] = []
-scores: list[float] = []
 toktypes: list[int] = []
 
 # gpt2 tokenizer
diff --git a/convert-refact-hf-to-gguf.py b/convert-refact-hf-to-gguf.py
index f0cfe84d8..3f68dd26b 100755
--- a/convert-refact-hf-to-gguf.py
+++ b/convert-refact-hf-to-gguf.py
@@ -123,7 +123,6 @@ gguf_writer.add_file_type(ftype)
 print("gguf: get tokenizer metadata")
 
 tokens: list[bytearray] = []
-scores: list[float] = []
 toktypes: list[int] = []
 
 # gpt2 tokenizer
diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py
index a9bfed85e..56255852f 100755
--- a/convert-starcoder-hf-to-gguf.py
+++ b/convert-starcoder-hf-to-gguf.py
@@ -95,7 +95,6 @@ gguf_writer.add_file_type(ftype)
 print("gguf: get tokenizer metadata")
 
 tokens: list[bytearray] = []
-scores: list[float] = []
 toktypes: list[int] = []
 
 # gpt2 tokenizer
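
Note (not part of the patch): below is the restored Falcon padding logic as a standalone sketch, for reference when reviewing the falcon hunk above. The function name classify_tokens and its dir_model/vocab_size parameters are illustrative, not from the converter scripts; vocab_size stands in for hparams["vocab_size"], which for Falcon can exceed len(tokenizer.vocab), leaving ids with no token that must be filled with placeholders so the GGUF token list matches the model's embedding size.

# Standalone sketch of the padding logic; assumes a HF fast tokenizer
# and the gguf Python package, as the converter scripts do.
from transformers import AutoTokenizer
import gguf

def classify_tokens(dir_model: str, vocab_size: int) -> tuple[list[str], list[int]]:
    tokenizer = AutoTokenizer.from_pretrained(dir_model)
    assert max(tokenizer.vocab.values()) < vocab_size

    added_vocab = tokenizer.get_added_vocab()
    reverse_vocab = {id: tok for tok, id in tokenizer.vocab.items()}

    tokens: list[str] = []
    toktypes: list[int] = []
    for i in range(vocab_size):
        if i not in reverse_vocab:
            # id has no token: pad with a placeholder so ids stay contiguous
            tokens.append(f"[PAD{i}]")
            toktypes.append(gguf.TokenType.USER_DEFINED)
        elif reverse_vocab[i] in added_vocab:
            # added token: CONTROL if HF marks it special, else USER_DEFINED
            tokens.append(reverse_vocab[i])
            special = tokenizer.added_tokens_decoder[i].special
            toktypes.append(gguf.TokenType.CONTROL if special else gguf.TokenType.USER_DEFINED)
        else:
            # regular vocab entry
            tokens.append(reverse_vocab[i])
            toktypes.append(gguf.TokenType.NORMAL)
    return tokens, toktypes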