Add BPE pre-tokenization for Command-R/R+.
This commit is contained in:
parent 03fb8a002d
commit d5d67316e6
5 changed files with 22 additions and 0 deletions
@@ -64,6 +64,7 @@ models = [
     {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
     {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
     {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
+    {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
 ]

 # make directory "models/tokenizers" if it doesn't exist
@@ -104,6 +105,14 @@ for model in models:
     save_path = f"models/tokenizers/{name}/tokenizer.json"
     download_file_with_auth(url, token, save_path)

+    # if downloaded file is less than 1KB, we likely need to download an LFS instead
+    if os.path.getsize(save_path) < 1024:
+        # remove the file
+        os.remove(save_path)
+        url = f"{repo}/resolve/main/tokenizer.json"
+        save_path = f"models/tokenizers/{name}/tokenizer.json"
+        download_file_with_auth(url, token, save_path)
+
     if tokt == TOKENIZER_TYPE.SPM:
         url = f"{repo}/resolve/main/tokenizer.model"
         save_path = f"models/tokenizers/{name}/tokenizer.model"
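Note (illustrative, not part of the diff): the 1 KB threshold above works because a Git LFS pointer is a tiny text stub, while the real tokenizer.json for these models is several megabytes, so a tiny file means the first download returned the pointer rather than the LFS-tracked content. A minimal Python sketch of that idea; looks_like_lfs_pointer is a hypothetical helper, not something in the script:

import os

def looks_like_lfs_pointer(path: str) -> bool:
    # LFS pointer files begin with the spec line and are well under 1 KB.
    if os.path.getsize(path) >= 1024:
        return False
    with open(path, "rb") as f:
        return f.read(64).startswith(b"version https://git-lfs.github.com/spec/")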
@@ -311,6 +311,9 @@ class Model(ABC):
         if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
             # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
             res = "refact"
+        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
+            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
+            res = "command-r"

         if res is None:
             logger.warning("\n")
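Note (illustrative, not part of the diff): the chkhsh values compared above are derived in convert-hf-to-gguf-update.py as a SHA-256 over the stringified token IDs that the Hugging Face tokenizer produces for a fixed probe string. A rough sketch under those assumptions; the chktxt probe string lives in that script and is elided here:

from hashlib import sha256
from transformers import AutoTokenizer

chktxt = "..."  # shared probe string from convert-hf-to-gguf-update.py (elided)

tokenizer = AutoTokenizer.from_pretrained("models/tokenizers/command-r")
chktok = tokenizer.encode(chktxt)
chkhsh = sha256(str(chktok).encode()).hexdigest()
print(chkhsh)  # if the assumptions hold, matches the 9c2227e4dd92... value checked above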
@@ -4386,6 +4386,9 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "refact") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
+            } else if (
+                    tokenizer_pre == "command-r") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
@@ -12248,6 +12251,11 @@ struct llm_tokenizer_bpe {
                    "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                });
                break;
+            case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
+                word_collection = unicode_regex_split(text, {
+                    "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
+                });
+                break;
            default:
                // default regex for BPE tokenization pre-processing
                word_collection = unicode_regex_split(text, {
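Note (illustrative, not part of the diff): the new COMMAND_R case uses the same GPT-2-style split regex as the case directly above it. To see what that split does, here is a small Python snippet using the third-party regex module, which, unlike the stdlib re module, supports \p{L} and \p{N} classes:

import regex  # pip install regex

# Same pattern as above, written as a raw Python string instead of a C++ literal.
pattern = r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)"

print(regex.findall(pattern, "Hello world, it's 2024!"))
# ['Hello', ' world', ',', ' it', "'s", ' 2024', '!']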
llama.h
@ -80,6 +80,7 @@ extern "C" {
|
||||||
LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
|
LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
|
||||||
LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
|
LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
|
||||||
LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
|
LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
|
||||||
|
LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
|
||||||
};
|
};
|
||||||
|
|
||||||
// note: these values should be synchronized with ggml_rope
|
// note: these values should be synchronized with ggml_rope
|
||||||
|
|
|
@@ -83,6 +83,7 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE
 llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
 llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
 llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)

 # build test-tokenizer-1-bpe target once and add many tests
 add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)