* Tekken pre-tokenizer no longer uses clean_up_tokenization_spaces

* Updated chkhsh for Tekken tokenizer
This commit is contained in:
Michael Coppola 2024-07-19 19:09:08 -04:00
parent 8506e13940
commit dd5a0bfffc
2 changed files with 2 additions and 1 deletion

View file

@@ -593,7 +593,7 @@ class Model:
if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
# ref: https://huggingface.co/core42/jais-13b
res = "jais"
-        if chkhsh == "aa78fe8b04bc622b077520b1fb3d3a5c6f7a53dd375e2361e62599be3cf58de1":
+        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
# ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
res = "tekken"

View file

@@ -5523,6 +5523,7 @@ static void llm_load_vocab(
} else if (
tokenizer_pre == "tekken") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
+                vocab.tokenizer_clean_spaces = false;
vocab.tokenizer_ignore_merges = true;
vocab.tokenizer_add_bos = true;
} else {