diff --git a/examples/low_level_api_chat_cpp.py b/examples/low_level_api_chat_cpp.py
index 7a932a36f..2e24e8683 100644
--- a/examples/low_level_api_chat_cpp.py
+++ b/examples/low_level_api_chat_cpp.py
@@ -201,7 +201,7 @@ n_keep = {self.params.n_keep}
     # tokenize a prompt
     def _tokenize(self, prompt, bos=True):
         _arr = (llama_cpp.llama_token * (len(prompt) + 1))()
-        _n = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), _arr, len(_arr), bos)
+        _n = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8", errors="ignore"), _arr, len(_arr), bos)
         return _arr[:_n]
 
     def set_color(self, c):
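
Note on the change: a strict prompt.encode("utf8") raises UnicodeEncodeError if the prompt contains characters that cannot be encoded as UTF-8 (for example, lone surrogates that can slip in from interactive terminal input), while errors="ignore" drops the offending characters and lets tokenization proceed. A minimal sketch of the difference, using a hypothetical prompt value (not from the patched file):

    # Hypothetical prompt containing a lone surrogate, which is not valid UTF-8.
    prompt = "hello \ud800 world"

    try:
        prompt.encode("utf8")  # strict mode: raises UnicodeEncodeError
    except UnicodeEncodeError as e:
        print("strict encode failed:", e.reason)

    data = prompt.encode("utf8", errors="ignore")  # offending character is dropped
    print(data)  # b'hello  world'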