From 2c8f62fd408bb6118561ff1d24423e8151925cc5 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Fri, 17 May 2024 14:29:22 +0300
Subject: [PATCH] Add Viking-7B tokenizer support

---
 convert-hf-to-gguf-update.py | 1 +
 convert-hf-to-gguf.py        | 3 +++
 llama.cpp                    | 9 +++++++++
 llama.h                      | 1 +
 4 files changed, 14 insertions(+)

diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index 84b72348d..4c4658c85 100755
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -82,6 +82,7 @@ models = [
     {"name": "jina-v2-es",   "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
     {"name": "jina-v2-de",   "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
     {"name": "smaug-bpe",    "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
+    {"name": "viking",       "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", }, # Same for 13B and 33B
 ]
 
 
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index a342f6b1c..6ae494bfd 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -476,6 +476,9 @@ class Model:
         if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
             # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
             res = "smaug-bpe"
+        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
+            # ref: https://huggingface.co/LumiOpen/Viking-7B
+            res = "viking"
 
         if res is None:
             logger.warning("\n")
diff --git a/llama.cpp b/llama.cpp
index f67cb7e23..2e0fdba22 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4549,9 +4549,12 @@ static void llm_load_vocab(
                 tokenizer_pre == "default") {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
         } else if (
                 tokenizer_pre == "llama3"   ||
                 tokenizer_pre == "llama-v3" ||
                 tokenizer_pre == "llama-bpe") {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
+        } else if (
+                tokenizer_pre == "viking") {
+            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
         } else if (
                 tokenizer_pre == "deepseek-llm") {
@@ -12580,6 +12583,12 @@ struct llm_tokenizer_bpe {
                     "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
                 });
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_VIKING:
+                word_collection = unicode_regex_split(text, {
+                    // optional leading space, then a run of characters that are not whitespace or punctuation
+                    " ?[^(\\s|.,!?…。,、।۔،)]+",
+                });
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 word_collection = unicode_regex_split(text, {
diff --git a/llama.h b/llama.h
index 3e4474bb9..1e6319522 100644
--- a/llama.h
+++ b/llama.h
@@ -86,6 +86,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_OLMO        = 12,
         LLAMA_VOCAB_PRE_TYPE_DBRX        = 13,
         LLAMA_VOCAB_PRE_TYPE_SMAUG       = 14,
+        LLAMA_VOCAB_PRE_TYPE_VIKING      = 15,
     };
 
     // note: these values should be synchronized with ggml_rope