convert : fix set_vocab_sentencepiece
parent 3fec68be4e
commit f2588b0b70
1 changed file with 15 additions and 19 deletions
@@ -332,7 +332,12 @@ class Model(ABC):
         tokenizer = SentencePieceProcessor(str(tokenizer_path))
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
+        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
+        scores: list[float] = [-10000.0] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+
         for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.id_to_piece(token_id)
             text = piece.encode("utf-8")
             score = tokenizer.get_score(token_id)
@@ -347,9 +352,9 @@ class Model(ABC):
             elif tokenizer.is_byte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
-            tokens.append(text)
-            scores.append(score)
-            toktypes.append(toktype)
+            tokens[token_id] = text
+            scores[token_id] = score
+            toktypes[token_id] = toktype
 
         added_tokens_file = self.dir_model / 'added_tokens.json'
         if added_tokens_file.is_file():
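Taken together, the two hunks above replace append-based construction with preallocate-and-assign. As a minimal standalone sketch (the piece list and vocab_size below are toy values, not taken from the diff), the pattern looks like this:

    # Hypothetical stand-ins for a real SentencePiece model: 6 pieces, while the
    # model's hparams declare vocab_size = 8.
    pieces = ["<unk>", "<s>", "</s>", "a", "b", "c"]
    vocab_size = 8

    # Preallocate every slot, then fill by token id; unfilled ids keep their
    # [PADi] placeholder instead of relying on a padding pass at the end.
    tokens = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
    scores = [-10000.0] * vocab_size

    for token_id, piece in enumerate(pieces):
        tokens[token_id] = piece.encode("utf-8")
        scores[token_id] = 0.0  # toy score

    print(tokens[5], tokens[6])  # b'c' b'[PAD6]'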
@@ -357,23 +362,14 @@ class Model(ABC):
                 added_tokens_json = json.load(f)
 
                 for key in added_tokens_json:
-                    key = key.encode("utf-8")
-                    if key not in tokens:
-                        tokens.append(key)
-                        scores.append(-1000.0)
-                        toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                    token_id = added_tokens_json[key]
+                    if (token_id >= vocab_size):
+                        print(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+                        continue
 
-        if vocab_size > len(tokens):
-            pad_count = vocab_size - len(tokens)
-            print(
-                f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]"
-            )
-            for i in range(1, pad_count + 1):
-                tokens.append(f"[PAD{i}]")
-                scores.append(-1000.0)
-                toktypes.append(SentencePieceTokenTypes.UNUSED)
-
-        assert len(tokens) == vocab_size
+                    tokens[token_id] = key.encode("utf-8")
+                    scores[token_id] = -1000.0
+                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_token_list(tokens)
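The third hunk keys added tokens by their declared ids instead of appending and de-duplicating by text. A minimal standalone sketch with a hypothetical added_tokens.json mapping (token names and ids invented for illustration, not from the diff):

    import json

    vocab_size = 8
    tokens = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
    scores = [-10000.0] * vocab_size

    # Hypothetical added_tokens.json contents; id 42 is deliberately out of range.
    added_tokens_json = json.loads('{"<|im_start|>": 6, "<|im_end|>": 7, "<bogus>": 42}')

    for key in added_tokens_json:
        token_id = added_tokens_json[key]
        if token_id >= vocab_size:
            print(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
            continue

        tokens[token_id] = key.encode("utf-8")
        scores[token_id] = -1000.0

    print(tokens[6], tokens[7])  # b'<|im_start|>' b'<|im_end|>'

With the old append path, these tokens would have landed at whatever ids came next in the list; assigning by index keeps each added token at the id its tokenizer config declares and simply reports ids that do not fit in vocab_size.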