From dd5a0bfffc9983258cc8827da4a476a907b853fe Mon Sep 17 00:00:00 2001
From: Michael Coppola
Date: Fri, 19 Jul 2024 19:09:08 -0400
Subject: [PATCH] * Tekken pre-tokenizer no longer uses
 clean_up_tokenization_spaces
 * Updated chkhsh for Tekken tokenizer

---
 convert_hf_to_gguf.py | 2 +-
 src/llama.cpp         | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index af225fb23..5ed9e9a0c 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -593,7 +593,7 @@ class Model:
         if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
             # ref: https://huggingface.co/core42/jais-13b
             res = "jais"
-        if chkhsh == "aa78fe8b04bc622b077520b1fb3d3a5c6f7a53dd375e2361e62599be3cf58de1":
+        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
             # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
             res = "tekken"
 
diff --git a/src/llama.cpp b/src/llama.cpp
index 64cc49149..dac28365b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -5523,6 +5523,7 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "tekken") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
+                vocab.tokenizer_clean_spaces = false;
                 vocab.tokenizer_ignore_merges = true;
                 vocab.tokenizer_add_bos = true;
             } else {