diff --git a/llama.cpp b/llama.cpp
index daaa138b9..c5a1fa0f6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -12818,6 +12818,13 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                     }
                 }
 
+                if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) {
+                    LLAMA_LOG_WARN(
+                        "%s: Added a BOS token to the prompt as specified by the model but the prompt "
+                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
+                        "Are you sure this is what you want?\n", __FUNCTION__);
+                }
+
                 if (add_special && vocab.special_add_eos == 1) {
                     GGML_ASSERT(vocab.special_eos_id != -1);
                     output.push_back(vocab.special_eos_id);
@@ -12844,6 +12851,13 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                     }
                 }
 
+                if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) {
+                    LLAMA_LOG_WARN(
+                        "%s: Added a BOS token to the prompt as specified by the model but the prompt "
+                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
+                        "Are you sure this is what you want?\n", __FUNCTION__);
+                }
+
                 if (add_special && vocab.special_add_eos == 1) {
                     GGML_ASSERT(vocab.special_add_eos != -1);
                     output.push_back(vocab.special_eos_id);
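
For context, here is a minimal sketch (not part of the patch) of how the double-BOS situation this warning detects can arise through the public llama.h API. The model path and prompt are hypothetical, and the `llama_tokenize` signature is assumed to match this era of the codebase:

```cpp
// Sketch: the prompt text spells out the BOS token while add_special also
// prepends one, which is exactly what the new LLAMA_LOG_WARN call reports.
#include "llama.h"

#include <cstdio>
#include <cstring>
#include <vector>

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams); // hypothetical path
    if (model == nullptr) {
        return 1;
    }

    // The prompt text itself starts with the special BOS token...
    const char * prompt = "<s> Once upon a time";

    std::vector<llama_token> tokens(64);
    // ...while add_special=true asks the tokenizer to prepend BOS as well and
    // parse_special=true turns the literal "<s>" into a second BOS token, so
    // the tokenized prompt begins with two BOS tokens.
    const int32_t n = llama_tokenize(
        model, prompt, (int32_t) strlen(prompt),
        tokens.data(), (int32_t) tokens.size(),
        /*add_special*/ true, /*parse_special*/ true);

    if (n >= 2 && tokens[0] == tokens[1]) {
        printf("prompt starts with a duplicated BOS token (id %d)\n", tokens[0]);
    }

    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```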