Update special token handling in conversion scripts for gpt2 derived tokenizers (#3746)
We still have the heads-up in `README.md` regarding `bpe` tokenizers, and this patch is needed for:
- a couple of tokenizer tests
- some more `special` and `non-special` added-token handling (as far as I understand it)

* Update special token handling
* Add mpt
parent 5be6c803fa
commit 69a6735087

5 changed files with 56 additions and 19 deletions
```diff
@@ -136,9 +136,11 @@ for i in range(vocab_size):
         tokens.append(f"[PAD{i}]")
         toktypes.append(gguf.TokenType.USER_DEFINED)
     elif reverse_vocab[i] in added_vocab:
-        # NOTE: wouldn't we like to distinguish CONTROL tokens here?
         tokens.append(reverse_vocab[i])
-        toktypes.append(gguf.TokenType.USER_DEFINED)
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
     else:
         tokens.append(reverse_vocab[i])
         toktypes.append(gguf.TokenType.NORMAL)
```
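For context, the new check relies on `transformers`' `added_tokens_decoder` mapping (token id → `AddedToken`), whose `.special` flag distinguishes control tokens from plain user-added strings. Below is a minimal, self-contained sketch of the patched classification loop; the model name `gpt2` and the surrounding setup are illustrative assumptions, not taken from the commit (the real conversion scripts take `vocab_size` from the model hparams, for example):

```python
# Minimal sketch of the patched token-type classification, not the full
# conversion script. Assumes transformers >= 4.34 (added_tokens_decoder)
# and the gguf Python package.
from transformers import AutoTokenizer

import gguf

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative model choice

reverse_vocab = {id_: tok for tok, id_ in tokenizer.get_vocab().items()}
added_vocab = tokenizer.get_added_vocab()
vocab_size = len(reverse_vocab)  # real scripts read this from hparams, which may exceed len(vocab)

tokens: list[str] = []
toktypes: list[gguf.TokenType] = []

for i in range(vocab_size):
    if i not in reverse_vocab:
        # Hole in the id space: emit a padding placeholder.
        tokens.append(f"[PAD{i}]")
        toktypes.append(gguf.TokenType.USER_DEFINED)
    elif reverse_vocab[i] in added_vocab:
        tokens.append(reverse_vocab[i])
        # The patched behavior: added tokens flagged as `special` become
        # CONTROL tokens instead of being lumped in as USER_DEFINED.
        if tokenizer.added_tokens_decoder[i].special:
            toktypes.append(gguf.TokenType.CONTROL)
        else:
            toktypes.append(gguf.TokenType.USER_DEFINED)
    else:
        tokens.append(reverse_vocab[i])
        toktypes.append(gguf.TokenType.NORMAL)
```

Recording special added tokens as `CONTROL` lets downstream consumers of the GGUF file treat them like built-in control tokens rather than ordinary user-defined text.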