From 6012ad651f85621b264c974de67af47de24e758d Mon Sep 17 00:00:00 2001
From: ngxson
Date: Sat, 17 Feb 2024 16:45:31 +0100
Subject: [PATCH] add clarification for llama_chat_apply_template

---
 llama.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llama.h b/llama.h
index ff2c47019..e0fe2e0bb 100644
--- a/llama.h
+++ b/llama.h
@@ -706,6 +706,7 @@ extern "C" {
     /// Apply chat template and maybe tokenize it. Inspired by hf apply_chat_template() on python.
     /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
+    /// NOTE: This function only supports some known Jinja templates. It is not a Jinja parser.
     /// @param custom_template A Jinja template to use for this conversion. If this is nullptr, the model's default chat template will be used instead.
     /// @param msg Pointer to a list of multiple llama_chat_message
     /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
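
For context, a minimal usage sketch of the API this comment documents, assuming the llama_chat_apply_template() signature and the llama_chat_message struct declared elsewhere in llama.h (a model pointer, a template string, a message array and count, the add_ass flag, and a snprintf-style output buffer). The template string below is plain ChatML, which the function's built-in detection is expected to recognize; everything else is illustrative, not part of this patch:

    #include <stdio.h>
    #include "llama.h"

    int main(void) {
        // A ChatML-style Jinja template. Per the NOTE added by this patch, the
        // function matches it against a known list rather than parsing the Jinja.
        const char * custom_template =
            "{% for message in messages %}"
            "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
            "{% endfor %}";

        llama_chat_message msg[] = {
            { /*role=*/"system", /*content=*/"You are a helpful assistant." },
            { /*role=*/"user",   /*content=*/"Hello!" },
        };

        char buf[1024];
        // model == NULL is acceptable here because custom_template is provided and
        // takes precedence; add_ass == true appends the assistant-start token(s).
        int32_t n = llama_chat_apply_template(
            NULL, custom_template, msg, sizeof(msg)/sizeof(msg[0]),
            /*add_ass=*/true, buf, sizeof(buf));
        if (n < 0) {
            fprintf(stderr, "template not in the known list\n");
            return 1;
        }
        if (n > (int32_t) sizeof(buf)) {
            // snprintf-style contract: n is the required size, so re-alloc and retry.
            return 1;
        }
        printf("%.*s\n", n, buf);
        return 0;
    }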