Do not use special tokens when matching in RWKV tokenizer

Layl Bongers 2024-04-12 16:28:54 +02:00 committed by Molly Sophia
parent 865167d01a
commit 7cac72a80b
2 changed files with 11 additions and 2 deletions

@@ -1170,6 +1170,11 @@ struct llm_tokenizer_rwkv {
         while (position < text.size()) {
             // Iterate through possible tokens backwards, starting with the largest
             for (int32_t i = (int32_t)tokens.size() - 1; i >= 0; i--) {
+                // Skip tokens that aren't normal type, we can't match on those
+                if (vocab.id_to_token[i].attr != LLAMA_TOKEN_TYPE_NORMAL) {
+                    continue;
+                }
+
                 uint32_t token_size = tokens[i].size();
 
                 // If there's not enough left for this token
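
For context, the hunk above sits inside the RWKV greedy longest-match tokenizer: at each input position the vocab is scanned from the largest entry down, and with this change any entry whose attribute is not LLAMA_TOKEN_TYPE_NORMAL (special/control tokens) is skipped so raw text can never match them. Below is a minimal standalone sketch of that loop under those assumptions; the names and types (token_entry, tokenize_rwkv, TOKEN_ATTR_NORMAL) are simplified stand-ins for illustration rather than llama.cpp's actual vocab structures, and the vocab is assumed to be ordered so that longer entries have larger indices, as the diff's own comment suggests.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Simplified stand-ins for the vocab data; not llama.cpp's real structures.
    enum token_attr { TOKEN_ATTR_NORMAL, TOKEN_ATTR_CONTROL };

    struct token_entry {
        std::string text; // raw bytes of the vocab entry
        token_attr  attr; // only NORMAL entries may be matched against input text
    };

    // Greedy longest-match tokenization: at each position, take the largest
    // matching vocab entry, skipping special (non-NORMAL) tokens entirely.
    static std::vector<int32_t> tokenize_rwkv(const std::string & text,
                                              const std::vector<token_entry> & tokens) {
        std::vector<int32_t> output;
        size_t position = 0;

        while (position < text.size()) {
            int32_t match = -1;

            // Iterate through possible tokens backwards, starting with the largest
            for (int32_t i = (int32_t)tokens.size() - 1; i >= 0; i--) {
                // Skip tokens that aren't normal type, we can't match on those
                if (tokens[i].attr != TOKEN_ATTR_NORMAL) {
                    continue;
                }

                const std::string & tok = tokens[i].text;
                // If there's not enough input left for this token, or it doesn't
                // match at the current position, keep looking
                if (tok.empty() || tok.size() > text.size() - position) {
                    continue;
                }
                if (text.compare(position, tok.size(), tok) != 0) {
                    continue;
                }

                match = i;
                break;
            }

            if (match < 0) {
                // No vocab entry matches; this sketch simply skips one byte
                // (llama.cpp's handling of unmatched bytes may differ)
                position += 1;
                continue;
            }

            output.push_back(match);
            position += tokens[match].text.size();
        }

        return output;
    }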


@@ -5931,8 +5931,8 @@ static void llm_load_vocab(
             vocab.type = LLAMA_VOCAB_TYPE_RWKV;
 
             // default special tokens
-            vocab.special_bos_id = 0;
-            vocab.special_eos_id = 0;
+            vocab.special_bos_id = -1;
+            vocab.special_eos_id = -1;
             vocab.special_unk_id = -1;
             vocab.special_sep_id = -1;
             vocab.special_pad_id = -1;
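
The vocab hunk above changes the default BOS and EOS ids from token 0 to -1, the same value already used for the other unset special tokens; the RWKV world vocabulary defines no BOS/EOS tokens, so nothing should be prepended or appended by default. A minimal sketch of how such a -1 sentinel is typically consumed follows; wrap_with_specials is a hypothetical helper for illustration only, not llama.cpp code.

    #include <cstdint>
    #include <vector>

    // Hypothetical helper (not llama.cpp code): -1 marks a special token as
    // unset, so BOS/EOS are only attached when the model actually defines them.
    struct special_ids {
        int32_t bos = -1; // unset for RWKV
        int32_t eos = -1; // unset for RWKV
    };

    static std::vector<int32_t> wrap_with_specials(const std::vector<int32_t> & ids,
                                                   const special_ids & sp) {
        std::vector<int32_t> out;
        if (sp.bos != -1) out.push_back(sp.bos); // skipped when bos is unset
        out.insert(out.end(), ids.begin(), ids.end());
        if (sp.eos != -1) out.push_back(sp.eos); // skipped when eos is unset
        return out;
    }
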
@@ -8223,6 +8223,10 @@ static bool llm_load_tensors(
                         layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                     }
                 } break;
+            case LLM_ARCH_RWKV:
+                {
+                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                }
             default:
                 throw std::runtime_error("unknown architecture");
         }