Initial implementation of handling merges and special tokens
commit ea43267819 (parent 795c0c6e9d)
1 changed file with 94 additions and 20 deletions
convert.py | 114
@@ -353,7 +353,7 @@ class BpeVocab:
         yield from self.added_tokens()

     def __repr__(self) -> str:
-        return f"BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+        return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


 class SentencePieceVocab:
@@ -416,6 +416,72 @@ class SentencePieceVocab:

 Vocab = Union[BpeVocab, SentencePieceVocab]

+
+class SpecialVocab:
+    merges: List[str] = []
+    special_token_types: Tuple[str, ...] = tuple(('bos', 'eos', 'unk', 'sep', 'pad'))
+    special_token_ids: Dict[str, int] = {}
+
+    def __init__(self, path: Path, special_token_types: Optional[Tuple[str, ...]] = None):
+        self.special_token_ids = {}
+        if special_token_types is not None:
+            self.special_token_types = special_token_types
+        self.load(path)
+
+    def load(self, path: Path):
+        if not self.try_load_from_tokenizer_json(path):
+            self.try_load_from_config_json(path)
+
+    def try_load_from_tokenizer_json(self, path: Path) -> bool:
+        tokenizer_file = path / 'tokenizer.json'
+        if not tokenizer_file.is_file():
+            return False
+        with open(tokenizer_file, 'r', encoding = 'utf-8') as f:
+            tokenizer = json.load(f)
+        merges = tokenizer.get('model', {}).get('merges')
+        if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str):
+            self.merges = merges
+        tokenizer_config_file = path / 'tokenizer_config.json'
+        added_tokens = tokenizer.get('added_tokens')
+        if added_tokens is None or not tokenizer_config_file.is_file():
+            return True
+        with open(tokenizer_config_file, 'r', encoding = 'utf-8') as f:
+            tokenizer_config = json.load(f)
+        for typ in self.special_token_types:
+            tc_content = (tokenizer_config.get(f'{typ}_token') or {}).get('content')
+            if not isinstance(tc_content, str):
+                continue
+            for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content):
+                if isinstance(maybe_token_id, int):
+                    self.special_token_ids[typ] = maybe_token_id
+                break
+        return True
+
+    def try_load_from_config_json(self, path: Path) -> bool:
+        config_file = path / 'config.json'
+        if not config_file.is_file():
+            return False
+        with open(config_file, 'r', encoding = 'utf-8') as f:
+            config = json.load(f)
+        for typ in self.special_token_types:
+            maybe_token_id = config.get(f'{typ}_token_id')
+            if isinstance(maybe_token_id, int):
+                self.special_token_ids[typ] = maybe_token_id
+        return True
+
+    def add_to_gguf(self, gw: gguf.GGUFWriter):
+        if len(self.merges) > 0:
+            print(f'SpecialVocab: Adding {len(self.merges)} merge(s).')
+            gw.add_token_merges(self.merges)
+        for typ, tokid in self.special_token_ids.items():
+            handler: Optional[Callable[[int], None]] = getattr(gw, f'add_{typ}_token_id', None)
+            if handler is None:
+                print(f'SpecialVocab: WARNING: No handler for special token type {typ} with id {tokid} - skipping')
+                continue
+            print(f'SpecialVocab: Setting special token type {typ} to {tokid}')
+            handler(tokid)
+
+    def __repr__(self):
+        return f'<SpecialVocab with {len(self.merges)} merges and special tokens {self.special_token_ids if self.special_token_ids else "unset"}>'
+

 #
 # data loading
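For reference, a minimal usage sketch for the new class, assuming a Hugging Face style model directory (the ./model path is hypothetical). All loading happens in the constructor: tokenizer.json is tried first, and the *_token_id fields of config.json are the fallback:

    from pathlib import Path

    # Hypothetical checkout with tokenizer.json / tokenizer_config.json
    # (or at least config.json) next to the weights.
    special_vocab = SpecialVocab(Path('./model'))
    print(special_vocab)
    # e.g. <SpecialVocab with 0 merges and special tokens {'bos': 1, 'eos': 2}>

    # The probed token types can be narrowed via the optional second argument:
    special_vocab = SpecialVocab(Path('./model'), special_token_types = ('bos', 'eos'))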
@@ -843,6 +909,9 @@ class OutputFile:
         self.gguf.add_token_scores(scores)
         self.gguf.add_token_types(toktypes)

+    def add_meta_special_vocab(self, svocab: SpecialVocab) -> None:
+        svocab.add_to_gguf(self.gguf)
+
     def add_tensor_info(self, name: str, tensor: LazyTensor) -> None:
         n_elements = int(np.prod(tensor.shape))
         raw_dtype = getattr(tensor.data_type, 'ggml_type', None)
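The delegation above relies on duck typing: add_to_gguf() probes the writer for add_<type>_token_id methods via getattr(), so any object exposing that surface works. A sketch within convert.py's namespace (StubWriter is illustrative only, not the real gguf.GGUFWriter):

    class StubWriter:
        # Only the surface SpecialVocab probes for; 'unk', 'sep' and 'pad'
        # handlers are deliberately missing to exercise the warning-and-skip path.
        def add_token_merges(self, merges):
            print(f'merges: {len(merges)}')
        def add_bos_token_id(self, tokid):
            print(f'bos -> {tokid}')
        def add_eos_token_id(self, tokid):
            print(f'eos -> {tokid}')

    sv = SpecialVocab(Path('./model'))  # hypothetical model directory
    sv.add_to_gguf(StubWriter())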
@@ -887,7 +956,7 @@ class OutputFile:
         return dt.quantize(arr)

     @staticmethod
-    def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, concurrency: int = DEFAULT_CONCURRENCY) -> None:
+    def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY) -> None:
         check_vocab_size(params, vocab)

         of = OutputFile(fname_out)
@@ -895,6 +964,7 @@ class OutputFile:
         # meta data
         of.add_meta_arch(params)
         of.add_meta_vocab(vocab)
+        of.add_meta_special_vocab(svocab)

         # tensor info
         for name, lazy_tensor in model.items():
@@ -1120,6 +1190,10 @@ def main(args_in: Optional[List[str]] = None) -> None:

     model_plus = load_some_model(args.model)

+    if args.dump:
+        do_dump_model(model_plus)
+        return
+
     params = Params.load(model_plus)
     if params.n_ctx == -1:
         if args.ctx is None:
@@ -1140,33 +1214,33 @@ def main(args_in: Optional[List[str]] = None) -> None:

     vocab: Vocab
     if args.vocab_only:
+        # FIXME: Handle special vocab here also.
         vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
         assert args.outfile, "need --outfile if using --vocab-only"
         outfile = args.outfile
         OutputFile.write_vocab_only(outfile, params, vocab)
         print(f"Wrote {outfile}")
+        return
+
+    if model_plus.vocab is not None and args.vocab_dir is None:
+        vocab = model_plus.vocab
     else:
-        if args.dump:
-            do_dump_model(model_plus)
-            return
-
-        if model_plus.vocab is not None and args.vocab_dir is None:
-            vocab = model_plus.vocab
-        else:
-            vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
-            vocab = load_vocab(vocab_dir, args.vocabtype)
-
-        model = model_plus.model
-        model = convert_model_names(model, params)
-        ftype = pick_output_type(model, args.outtype)
-        model = convert_to_output_type(model, ftype)
-        outfile = args.outfile or default_outfile(model_plus.paths, ftype)
-
-        params.ftype = ftype
-        print(f"Writing {outfile}, format {ftype}")
-
-        OutputFile.write_all(outfile, ftype, params, model, vocab, concurrency = args.concurrency)
-        print(f"Wrote {outfile}")
+        vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
+        vocab = load_vocab(vocab_dir, args.vocabtype)
+    # FIXME: Try to respect vocab_dir somehow?
+    special_vocab = SpecialVocab(model_plus.paths[0].parent)
+
+    model = model_plus.model
+    model = convert_model_names(model, params)
+    ftype = pick_output_type(model, args.outtype)
+    model = convert_to_output_type(model, ftype)
+    outfile = args.outfile or default_outfile(model_plus.paths, ftype)
+
+    params.ftype = ftype
+    print(f"Writing {outfile}, format {ftype}")
+
+    OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, concurrency = args.concurrency)
+    print(f"Wrote {outfile}")


 if __name__ == '__main__':
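For reference, the tokenizer.json path can be exercised end to end with a throwaway fixture whose field shapes match what try_load_from_tokenizer_json() reads: model.merges as a list of strings, added_tokens as a list of {id, content} records, and *_token entries in tokenizer_config.json. A sketch, assuming SpecialVocab is importable from convert.py:

    import json
    import tempfile
    from pathlib import Path

    from convert import SpecialVocab  # assumes convert.py is on the import path

    with tempfile.TemporaryDirectory() as tmp:
        path = Path(tmp)
        (path / 'tokenizer.json').write_text(json.dumps({
            'model': {'merges': ['t h', 'th e']},
            'added_tokens': [{'id': 1, 'content': '<s>'}, {'id': 2, 'content': '</s>'}],
        }), encoding = 'utf-8')
        (path / 'tokenizer_config.json').write_text(json.dumps({
            'bos_token': {'content': '<s>'},
            'eos_token': {'content': '</s>'},
        }), encoding = 'utf-8')
        sv = SpecialVocab(path)
        assert sv.merges == ['t h', 'th e']
        assert sv.special_token_ids == {'bos': 1, 'eos': 2}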