From ea43267819e2dae786dae1a96a159e9fd0bb7e13 Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Sun, 27 Aug 2023 12:31:24 -0600
Subject: [PATCH] Initial implementation of handling merges and special tokens

---
 convert.py | 114 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 94 insertions(+), 20 deletions(-)

diff --git a/convert.py b/convert.py
index 49cc4b3dd..2ca8a018d 100755
--- a/convert.py
+++ b/convert.py
@@ -353,7 +353,7 @@ class BpeVocab:
         yield from self.added_tokens()
 
     def __repr__(self) -> str:
-        return f"BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+        return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
 
 
 class SentencePieceVocab:
@@ -416,6 +416,72 @@ class SentencePieceVocab:
 
 Vocab = Union[BpeVocab, SentencePieceVocab]
 
+class SpecialVocab:
+    merges: List[str] = []
+    special_token_types: Tuple[str, ...] = tuple(('bos', 'eos', 'unk', 'sep', 'pad'))
+    special_token_ids: Dict[str, int] = {}
+
+    def __init__(self, path: Path, special_token_types: Optional[Tuple[str, ...]] = None):
+        self.special_token_ids = {}
+        if special_token_types is not None:
+            self.special_token_types = special_token_types
+        self.load(path)
+
+    def load(self, path: Path):
+        if not self.try_load_from_tokenizer_json(path):
+            self.try_load_from_config_json(path)
+
+    def try_load_from_tokenizer_json(self, path: Path) -> bool:
+        tokenizer_file = path / 'tokenizer.json'
+        if not tokenizer_file.is_file():
+            return False
+        with open(tokenizer_file, 'r', encoding = 'utf-8') as f:
+            tokenizer = json.load(f)
+        merges = tokenizer.get('model', {}).get('merges')
+        if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str):
+            self.merges = merges
+        tokenizer_config_file = path / 'tokenizer_config.json'
+        added_tokens = tokenizer.get('added_tokens')
+        if added_tokens is None or not tokenizer_config_file.is_file():
+            return True
+        with open(tokenizer_config_file, 'r', encoding = 'utf-8') as f:
+            tokenizer_config = json.load(f)
+        for typ in self.special_token_types:
+            tc_content = (tokenizer_config.get(f'{typ}_token') or {}).get('content')
+            if not isinstance(tc_content, str):
+                continue
+            for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content):
+                if isinstance(maybe_token_id, int):
+                    self.special_token_ids[typ] = maybe_token_id
+                break
+        return True
+
+    def try_load_from_config_json(self, path: Path) -> bool:
+        config_file = path / 'config.json'
+        if not config_file.is_file():
+            return False
+        with open(config_file, 'r', encoding = 'utf-8') as f:
+            config = json.load(f)
+        for typ in self.special_token_types:
+            maybe_token_id = config.get(f'{typ}_token_id')
+            if isinstance(maybe_token_id, int):
+                self.special_token_ids[typ] = maybe_token_id
+        return True
+
+    def add_to_gguf(self, gw: gguf.GGUFWriter):
+        if len(self.merges) > 0:
+            print(f'SpecialVocab: Adding {len(self.merges)} merge(s).')
+            gw.add_token_merges(self.merges)
+        for typ, tokid in self.special_token_ids.items():
+            handler: Optional[Callable[[int], None]] = getattr(gw, f'add_{typ}_token_id', None)
+            if handler is None:
+                print(f'SpecialVocab: WARNING: No handler for special token type {typ} with id {tokid} - skipping')
+                continue
+            print(f'SpecialVocab: Setting special token type {typ} to {tokid}')
+            handler(tokid)
+
+    def __repr__(self):
+        return f'<SpecialVocab with {len(self.merges)} merges and special tokens {self.special_token_ids if self.special_token_ids else "unset"}>'
 
 #
 # data loading
 #
@@ -843,6 +909,9 @@ class OutputFile:
         self.gguf.add_token_scores(scores)
         self.gguf.add_token_types(toktypes)
 
+    def add_meta_special_vocab(self, svocab: SpecialVocab) -> None:
+        svocab.add_to_gguf(self.gguf)
+
     def add_tensor_info(self, name: str, tensor: LazyTensor) -> None:
         n_elements = int(np.prod(tensor.shape))
         raw_dtype = getattr(tensor.data_type, 'ggml_type', None)
@@ -887,7 +956,7 @@ class OutputFile:
         return dt.quantize(arr)
 
     @staticmethod
-    def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, concurrency: int = DEFAULT_CONCURRENCY) -> None:
+    def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY) -> None:
         check_vocab_size(params, vocab)
 
         of = OutputFile(fname_out)
@@ -895,6 +964,7 @@ class OutputFile:
         # meta data
         of.add_meta_arch(params)
         of.add_meta_vocab(vocab)
+        of.add_meta_special_vocab(svocab)
 
         # tensor info
         for name, lazy_tensor in model.items():
@@ -1120,6 +1190,10 @@ def main(args_in: Optional[List[str]] = None) -> None:
 
     model_plus = load_some_model(args.model)
 
+    if args.dump:
+        do_dump_model(model_plus)
+        return
+
     params = Params.load(model_plus)
     if params.n_ctx == -1:
         if args.ctx is None:
@@ -1140,33 +1214,33 @@ def main(args_in: Optional[List[str]] = None) -> None:
 
     vocab: Vocab
     if args.vocab_only:
+        # FIXME: Handle special vocab here also.
         vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
         assert args.outfile, "need --outfile if using --vocab-only"
         outfile = args.outfile
         OutputFile.write_vocab_only(outfile, params, vocab)
         print(f"Wrote {outfile}")
+        return
+
+    if model_plus.vocab is not None and args.vocab_dir is None:
+        vocab = model_plus.vocab
     else:
-        if args.dump:
-            do_dump_model(model_plus)
-            return
+        vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
+        vocab = load_vocab(vocab_dir, args.vocabtype)
+    # FIXME: Try to respect vocab_dir somehow?
+    special_vocab = SpecialVocab(model_plus.paths[0].parent)
 
-        if model_plus.vocab is not None and args.vocab_dir is None:
-            vocab = model_plus.vocab
-        else:
-            vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
-            vocab = load_vocab(vocab_dir, args.vocabtype)
+    model = model_plus.model
+    model = convert_model_names(model, params)
+    ftype = pick_output_type(model, args.outtype)
+    model = convert_to_output_type(model, ftype)
+    outfile = args.outfile or default_outfile(model_plus.paths, ftype)
 
-        model = model_plus.model
-        model = convert_model_names(model, params)
-        ftype = pick_output_type(model, args.outtype)
-        model = convert_to_output_type(model, ftype)
-        outfile = args.outfile or default_outfile(model_plus.paths, ftype)
+    params.ftype = ftype
+    print(f"Writing {outfile}, format {ftype}")
 
-        params.ftype = ftype
-        print(f"Writing {outfile}, format {ftype}")
-
-        OutputFile.write_all(outfile, ftype, params, model, vocab, concurrency = args.concurrency)
-        print(f"Wrote {outfile}")
+    OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, concurrency = args.concurrency)
+    print(f"Wrote {outfile}")
 
 
 if __name__ == '__main__':
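
Usage note (not part of the patch): the sketch below shows how the new SpecialVocab class is meant to be driven, assuming the patched convert.py is importable. The temporary model directory, its config.json contents, and the StubWriter class are all hypothetical; StubWriter only mimics the few gguf.GGUFWriter methods that add_to_gguf resolves via getattr, so the real writer is not required to try it.

    import json
    import tempfile
    from pathlib import Path

    from convert import SpecialVocab  # assumes this patch is applied

    with tempfile.TemporaryDirectory() as tmp:
        model_dir = Path(tmp)
        # No tokenizer.json present, so loading falls through to config.json.
        (model_dir / 'config.json').write_text(
            json.dumps({'bos_token_id': 1, 'eos_token_id': 2}), encoding='utf-8')

        sv = SpecialVocab(model_dir)
        print(sv)  # <SpecialVocab with 0 merges and special tokens {'bos': 1, 'eos': 2}>

        class StubWriter:
            # Hypothetical stand-in for gguf.GGUFWriter: just the methods that
            # add_to_gguf looks up (add_token_merges plus add_{typ}_token_id).
            def add_token_merges(self, merges): print('merges:', len(merges))
            def add_bos_token_id(self, token_id): print('bos token id:', token_id)
            def add_eos_token_id(self, token_id): print('eos token id:', token_id)

        # Merges are empty here, so only the two special token IDs get set:
        #   SpecialVocab: Setting special token type bos to 1
        #   bos token id: 1
        #   SpecialVocab: Setting special token type eos to 2
        #   eos token id: 2
        sv.add_to_gguf(StubWriter())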