From 14be9d91412477d468f163d5ce096640c40e5059 Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Sun, 15 Oct 2023 08:00:53 -0600
Subject: [PATCH] Fix BPE newline check, only I could break something so simple

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 03c73ee7b..f393a52f1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2236,7 +2236,7 @@ static void llm_load_vocab(
         vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
     } else {
         const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
-        GGML_ASSERT(ids.empty() && "model vocab missing newline token");
+        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
         vocab.linefeed_id = ids[0];
     }
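
Note: a minimal standalone sketch of why the negation matters, for anyone
reviewing this fix. fake_tokenize below is a hypothetical stand-in for
llama_tokenize_internal, and the id 198 ("\u010A" == 'Ċ', the GPT-2 BPE
byte-level symbol for '\n') is only illustrative; the point is the assert
logic, not the API.

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for llama_tokenize_internal: maps the BPE
    // newline symbol "\u010A" to a single token id, anything else to none.
    static std::vector<int> fake_tokenize(const std::string & text) {
        if (text == "\u010A") {
            return {198};
        }
        return {};
    }

    int main() {
        const std::vector<int> ids = fake_tokenize("\u010A");
        // The broken form, GGML_ASSERT(ids.empty() && ...), fires exactly
        // when the lookup succeeds; and in the one case where it passed
        // (no newline token found), ids[0] would be an out-of-bounds read.
        // Negating it restores the intended "missing newline token" guard:
        assert(!ids.empty() && "model vocab missing newline token");
        const int linefeed_id = ids[0]; // safe: guarded by the assert above
        return linefeed_id == 198 ? 0 : 1;
    }

With the corrected assert, the sketch exits 0; flip the check back to
ids.empty() and it aborts on any vocab that does contain a newline token,
which is exactly the breakage this patch undoes.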