From 25be8f5cd5c9ea500d10588ae90c1a51816ad066 Mon Sep 17 00:00:00 2001 From: nopperl <54780682+nopperl@users.noreply.github.com> Date: Tue, 7 May 2024 20:51:54 +0200 Subject: [PATCH] implement olmo pre-tokenizer type in llama.cpp --- llama.cpp | 4 ++++ llama.h | 1 + 2 files changed, 5 insertions(+) diff --git a/llama.cpp b/llama.cpp index aeb5c08df..07fde3619 100644 --- a/llama.cpp +++ b/llama.cpp @@ -4389,6 +4389,9 @@ static void llm_load_vocab( } else if ( tokenizer_pre == "command-r") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R; + } else if ( + tokenizer_pre == "olmo") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO; } else { throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); } @@ -12248,6 +12251,7 @@ struct llm_tokenizer_bpe { }); break; case LLAMA_VOCAB_PRE_TYPE_GPT2: + case LLAMA_VOCAB_PRE_TYPE_OLMO: word_collection = unicode_regex_split(text, { "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", }); diff --git a/llama.h b/llama.h index e2fd53ab7..d7f4bf8ea 100644 --- a/llama.h +++ b/llama.h @@ -81,6 +81,7 @@ extern "C" { LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, LLAMA_VOCAB_PRE_TYPE_REFACT = 8, LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, + LLAMA_VOCAB_PRE_TYPE_OLMO = 10, }; // note: these values should be synchronized with ggml_rope