minja: minimalist Jinja templating engine for LLM chat templates

ochafik 2024-09-25 16:01:18 +01:00
parent 5b6d5040d5
commit eaca756ecc
60 changed files with 4959 additions and 0 deletions
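
minja (common/minja.hpp) is a single-header reimplementation of the Jinja subset needed to render the chat templates that ship with LLM tokenizer configs; the new tests/test-minja binary exercises it against the context and golden fixtures added below. The following is a minimal usage sketch only: the minja::Parser::parse, minja::Context::make, minja::Value and render entry points are assumed from this commit's header, and exact signatures may differ.

#include <iostream>
#include <string>

#include "minja.hpp"   // common/minja.hpp, added by this commit
#include "json.hpp"    // nlohmann json, already vendored under common/

using json = nlohmann::ordered_json;

int main() {
    // A ChatML-style template, similar to the ones checked in under tests/chat/templates/.
    std::string tmpl =
        "{{ bos_token }}{% for message in messages %}"
        "{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}"
        "{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}";

    // Bindings mirror the JSON contexts below (messages, add_generation_prompt, special tokens).
    json bindings = {
        {"messages", json::array({
            {{"role", "user"},      {"content", "What's your favourite LLM framework?"}},
            {{"role", "assistant"}, {"content", "llama.cpp!"}},
        })},
        {"add_generation_prompt", true},
        {"bos_token", "<|startoftext|>"},
        {"eos_token", "<|endoftext|>"},
    };

    // Parse once, then render against the JSON context (assumed API; see common/minja.hpp).
    auto root    = minja::Parser::parse(tmpl, /* options = */ {});
    auto context = minja::Context::make(minja::Value(bindings));
    std::cout << root->render(context) << std::endl;   // should match the ChatML-style goldens below
    return 0;
}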

@@ -54,6 +54,7 @@ TEST_TARGETS = \
tests/test-grammar-integration \
tests/test-grammar-parser \
tests/test-json-schema-to-grammar \
tests/test-minja \
tests/test-llama-grammar \
tests/test-log \
tests/test-model-load-cancel \
@@ -1573,6 +1574,11 @@ tests/test-antiprompts: tests/test-antiprompts.cpp \
$(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
tests/test-minja: tests/test-minja.cpp \
$(OBJ_ALL)
$(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
tests/test-grad0: tests/test-grad0.cpp \
$(OBJ_GGML)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)

@@ -62,6 +62,7 @@ add_library(${TARGET} STATIC
json.hpp
log.cpp
log.h
minja.hpp
ngram-cache.cpp
ngram-cache.h
sampling.cpp

common/minja.hpp (new file, 2497 additions)
File diff suppressed because it is too large

@@ -123,6 +123,7 @@ llama_target_and_test(test-barrier.cpp)
# llama_target_and_test(test-opt.cpp) # SLOW
llama_target_and_test(test-backend-ops.cpp)
llama_target_and_test(test-antiprompts.cpp)
llama_target_and_test(test-minja.cpp)
llama_target_and_test(test-rope.cpp)

@@ -0,0 +1,15 @@
{
"messages": [
{
"role": "user",
"content": "What's your favourite LLM framework?"
},
{
"role": "assistant",
"content": "llama.cpp!"
}
],
"add_generation_prompt": true,
"bos_token": "<|startoftext|>",
"eos_token": "<|endoftext|>"
}

@@ -0,0 +1,19 @@
{
"messages": [
{
"role": "system",
"content": "You only tell the truth."
},
{
"role": "user",
"content": "What's your favourite LLM framework?"
},
{
"role": "assistant",
"content": "llama.cpp!"
}
],
"add_generation_prompt": true,
"bos_token": "<|startoftext|>",
"eos_token": "<|endoftext|>"
}

@@ -0,0 +1,164 @@
{
"messages": [
{
"role": "user",
"content": "Print a hello world message with python."
},
{
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_1",
"type": "function",
"function": {
"arguments": {"code": "print('Hello, World!')"},
"name": "ipython"
}
}
]
},
{
"role": "tool",
"name": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}"
},
{
"role": "assistant",
"content": "Anything else?"
},
{
"role": "user",
"content": "Test a tautology."
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_2",
"type": "function",
"function": {
"arguments": {"condition":true},
"name": "test"
}
}
]
},
{
"role": "tool",
"name": "test",
"content": "true"
},
{
"role": "assistant",
"content": "Truth is definitely true."
},
{
"role": "user",
"content": "Check it on the web."
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_3",
"type": "function",
"function": {
"arguments": {"query": "what is truth anyway am I right?"},
"name": "brave_search"
}
}
]
},
{
"role": "tool",
"name": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}"
},
{
"role": "assistant",
"content": "I don't need the web to answer you but I did check, as you asked. What now?"
}
],
"add_generation_prompt": true,
"bos_token": "<|startoftext|>",
"eos_token": "<|endoftext|>",
"builtin_tools": [
"wolfram_alpha",
"brave_search"
],
"cutting_knowledge_date": "2023-04-01",
"todays_date": "2024-09-03",
"tools": [
{
"type": "function",
"function": {
"name": "ipython",
"description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The code to run in the ipython interpreter."
}
},
"required": ["code"]
}
}
},
{
"type": "function",
"function": {
"name": "brave_search",
"description": "Executes a web search with Brave.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for."
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "wolfram_alpha",
"description": "Executes a query with Wolfram Alpha.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to execute."
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "test",
"description": "Runs a test.",
"parameters": {
"type": "object",
"properties": {
"condition": {
"type": "boolean",
"description": "The condition to test."
}
},
"required": ["condition"]
}
}
}
]
}
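
Each of these contexts is paired, per chat template, with a rendered golden file like the ones that follow. Below is a hedged sketch of how a test could tie a template, a context and a golden output together; the check_golden helper is hypothetical and not necessarily how tests/test-minja.cpp is structured, and the minja calls are assumed from this commit's header.

#include <cassert>
#include <fstream>
#include <sstream>
#include <string>

#include "minja.hpp"   // common/minja.hpp, added by this commit
#include "json.hpp"

// Read a whole file into a string.
static std::string slurp(const std::string & path) {
    std::ifstream file(path);
    std::ostringstream ss;
    ss << file.rdbuf();
    return ss.str();
}

// Hypothetical check: render the template at tmpl_path against the JSON context
// in ctx_path (a file shaped like the contexts above) and compare the result
// with the checked-in golden output.
static void check_golden(const std::string & tmpl_path,
                         const std::string & ctx_path,
                         const std::string & golden_path) {
    auto bindings = nlohmann::ordered_json::parse(slurp(ctx_path));
    auto root     = minja::Parser::parse(slurp(tmpl_path), /* options = */ {});
    auto context  = minja::Context::make(minja::Value(bindings));   // assumed API
    assert(root->render(context) == slurp(golden_path));
}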

@@ -0,0 +1,5 @@
<|startoftext|><|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|startoftext|><|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,11 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,13 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,58 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "ipython", "description": "ipython(code: str) - Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code(str): The code to run in the ipython interpreter.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
{"type": "function", "function": {"name": "brave_search", "description": "brave_search(query: str) - Executes a web search with Brave.
Args:
query(str): The query to search for.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "wolfram_alpha(query: str) - Executes a query with Wolfram Alpha.
Args:
query(str): The query to execute.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "test", "description": "test(condition: bool) - Runs a test.
Args:
condition(bool): The condition to test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}} </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"stdout": "Hello, World!"}
</tool_response>
<|im_end|><|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
true
</tool_response>
<|im_end|><|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response>
<|im_end|><|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,5 @@
<|startoftext|><|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|startoftext|><|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,11 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,13 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,58 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "ipython", "description": "ipython(code: str) - Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code(str): The code to run in the ipython interpreter.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
{"type": "function", "function": {"name": "brave_search", "description": "brave_search(query: str) - Executes a web search with Brave.
Args:
query(str): The query to search for.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "wolfram_alpha(query: str) - Executes a query with Wolfram Alpha.
Args:
query(str): The query to execute.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "test", "description": "test(condition: bool) - Runs a test.
Args:
condition(bool): The condition to test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}} </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"stdout": "Hello, World!"}
</tool_response>
<|im_end|><|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
true
</tool_response>
<|im_end|><|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response>
<|im_end|><|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|startoftext|><|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|startoftext|><|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,11 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,13 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,58 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "ipython", "description": "ipython(code: str) - Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code(str): The code to run in the ipython interpreter.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
{"type": "function", "function": {"name": "brave_search", "description": "brave_search(query: str) - Executes a web search with Brave.
Args:
query(str): The query to search for.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "wolfram_alpha(query: str) - Executes a query with Wolfram Alpha.
Args:
query(str): The query to execute.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "test", "description": "test(condition: bool) - Runs a test.
Args:
condition(bool): The condition to test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}} </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"stdout": "Hello, World!"}
</tool_response>
<|im_end|><|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
true
</tool_response>
<|im_end|><|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response>
<|im_end|><|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,56 @@
<|im_start|>system
You are Qwen, created by Alibaba Cloud. You are a helpful assistant.
# Tools
You may call one or more functions to assist with the user query.
You are provided with function signatures within <tools></tools> XML tags:
<tools>
{"type": "function", "function": {"name": "ipython", "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}}
{"type": "function", "function": {"name": "brave_search", "description": "Executes a web search with Brave.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "Executes a query with Wolfram Alpha.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "test", "description": "Runs a test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}}}
</tools>
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{"name": <function-name>, "arguments": <args-json-object>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"stdout": "Hello, World!"}
</tool_response><|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
true
</tool_response><|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response><|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
Please reason step by step, and put your final answer within \boxed{}.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,7 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,56 @@
<|im_start|>system
Please reason step by step, and put your final answer within \boxed{}.
# Tools
You may call one or more functions to assist with the user query.
You are provided with function signatures within <tools></tools> XML tags:
<tools>
{"type": "function", "function": {"name": "ipython", "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}}
{"type": "function", "function": {"name": "brave_search", "description": "Executes a web search with Brave.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "Executes a query with Wolfram Alpha.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "test", "description": "Runs a test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}}}
</tools>
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{"name": <function-name>, "arguments": <args-json-object>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"stdout": "Hello, World!"}
</tool_response><|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
true
</tool_response><|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response><|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -0,0 +1,5 @@
<|startoftext|><start_of_turn>user
What's your favourite LLM framework?<end_of_turn>
<start_of_turn>model
llama.cpp!<end_of_turn>
<start_of_turn>model

@@ -0,0 +1,21 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
You are capable of executing available function(s) if required.
Only execute function(s) when absolutely necessary.
Ask for the required input to:recipient==all
Use JSON for function arguments.
Respond in this format:
>>>${recipient}
${content}
Available functions:
// Supported function definitions that should be called when necessary.
namespace functions {
} // namespace functions<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>

@@ -0,0 +1,23 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
You are capable of executing available function(s) if required.
Only execute function(s) when absolutely necessary.
Ask for the required input to:recipient==all
Use JSON for function arguments.
Respond in this format:
>>>${recipient}
${content}
Available functions:
// Supported function definitions that should be called when necessary.
namespace functions {
} // namespace functions<|eot_id|><|start_header_id|>system<|end_header_id|>
You only tell the truth.<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>

@@ -0,0 +1 @@
ERROR: can only concatenate str (not "dict") to str

@@ -0,0 +1,11 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

@@ -0,0 +1,11 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
You only tell the truth.<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

@@ -0,0 +1,118 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Environment: ipython
Tools: wolfram_alpha, brave_search
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
<|eot_id|><|start_header_id|>user<|end_header_id|>
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.Do not use variables.
{
"type": "function",
"function": {
"name": "ipython",
"description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The code to run in the ipython interpreter."
}
},
"required": [
"code"
]
}
}
}
{
"type": "function",
"function": {
"name": "brave_search",
"description": "Executes a web search with Brave.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for."
}
},
"required": [
"query"
]
}
}
}
{
"type": "function",
"function": {
"name": "wolfram_alpha",
"description": "Executes a query with Wolfram Alpha.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to execute."
}
},
"required": [
"query"
]
}
}
}
{
"type": "function",
"function": {
"name": "test",
"description": "Runs a test.",
"parameters": {
"type": "object",
"properties": {
"condition": {
"type": "boolean",
"description": "The condition to test."
}
},
"required": [
"condition"
]
}
}
}
Print a hello world message with python.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "ipython", "parameters": {"code": "print('Hello, World!')"}}<|eom_id|><|start_header_id|>ipython<|end_header_id|>
"{\"stdout\": \"Hello, World!\"}"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Anything else?<|eot_id|><|start_header_id|>user<|end_header_id|>
Test a tautology.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "test", "parameters": {"condition": true}}<|eom_id|><|start_header_id|>ipython<|end_header_id|>
"true"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Truth is definitely true.<|eot_id|><|start_header_id|>user<|end_header_id|>
Check it on the web.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
<|python_tag|>brave_search.call(query="what is truth anyway am I right?")<|eom_id|><|start_header_id|>ipython<|end_header_id|>
"{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
I don't need the web to answer you but I did check, as you asked. What now?<|eot_id|><|start_header_id|>assistant<|end_header_id|>

@@ -0,0 +1,5 @@
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

@@ -0,0 +1,7 @@
<|system|>
You only tell the truth.<|end|>
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

@@ -0,0 +1 @@
<|startoftext|> [INST] What's your favourite LLM framework? [/INST] llama.cpp!<|endoftext|>

@@ -0,0 +1,3 @@
<|startoftext|> [INST] You only tell the truth.
What's your favourite LLM framework? [/INST] llama.cpp!<|endoftext|>

@@ -0,0 +1,4 @@
{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %}

@@ -0,0 +1,152 @@
{%- macro json_to_python_type(json_spec) %}
{%- set basic_type_map = {
"string": "str",
"number": "float",
"integer": "int",
"boolean": "bool"
} %}
{%- if basic_type_map[json_spec.type] is defined %}
{{- basic_type_map[json_spec.type] }}
{%- elif json_spec.type == "array" %}
{{- "list[" + json_to_python_type(json_spec|items) + "]"}}
{%- elif json_spec.type == "object" %}
{%- if json_spec.additionalProperties is defined %}
{{- "dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']'}}
{%- else %}
{{- "dict" }}
{%- endif %}
{%- elif json_spec.type is iterable %}
{{- "Union[" }}
{%- for t in json_spec.type %}
{{- json_to_python_type({"type": t}) }}
{%- if not loop.last %}
{{- "," }}
{%- endif %}
{%- endfor %}
{{- "]" }}
{%- else %}
{{- "Any" }}
{%- endif %}
{%- endmacro %}
{{- bos_token }}
{{- '<|im_start|>system
' }}
{{- "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> " }}
{%- for tool in tools %}
{%- if tool.function is defined %}
{%- set tool = tool.function %}
{%- endif %}
{{- '{"type": "function", "function": ' }}
{{- '{"name": "' + tool.name + '", ' }}
{{- '"description": "' + tool.name + '(' }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{{- param_name + ": " + json_to_python_type(param_fields) }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- if tool.return is defined %}
{{- " -> " + json_to_python_type(tool.return) }}
{%- endif %}
{{- " - " + tool.description + "
" }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{%- if loop.first %}
{{- " Args:
" }}
{%- endif %}
{{- " " + param_name + "(" + json_to_python_type(param_fields) + "): " + param_fields.description|trim }}
{%- endfor %}
{%- if tool.return is defined and tool.return.description is defined %}
{{- "
Returns:
" + tool.return.description }}
{%- endif %}
{{- '"' }}
{{- ', "parameters": ' }}
{%- if tool.parameters.properties | length == 0 %}
{{- "{}" }}
{%- else %}
{{- tool.parameters|tojson }}
{%- endif %}
{{- "}" }}
{%- if not loop.last %}
{{- "
" }}
{%- endif %}
{%- endfor %}
{{- " </tools>" }}
{{- 'Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
' }}
{{- "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
" }}
{{- "<tool_call>
" }}
{{- '{"name": <function-name>, "arguments": <args-dict>}
' }}
{{- '</tool_call><|im_end|>
' }}
{%- for message in messages %}
{%- if message.role == "user" or message.role == "system" or (message.role == "assistant" and message.tool_calls is not defined) %}
{{- '<|im_start|>' + message.role + '
' + message.content + '<|im_end|>' + '
' }}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- for tool_call in message.tool_calls %}
{{- '
<tool_call>
' }} {%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '{' }}
{{- '"name": "' }}
{{- tool_call.name }}
{{- '"' }}
{{- ', '}}
{%- if tool_call.arguments is defined %}
{{- '"arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments|tojson }}
{%- endif %}
{%- endif %}
{{- '}' }}
{{- '
</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>
' }}
{%- elif message.role == "tool" %}
{%- if loop.previtem and loop.previtem.role != "tool" %}
{{- '<|im_start|>tool
' }}
{%- endif %}
{{- '<tool_response>
' }}
{{- message.content }}
{%- if not loop.last %}
{{- '
</tool_response>
' }}
{%- else %}
{{- '
</tool_response>' }}
{%- endif %}
{%- if not loop.last and loop.nextitem.role != "tool" %}
{{- '<|im_end|>' }}
{%- elif loop.last %}
{{- '<|im_end|>' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant
' }}
{%- endif %}

@@ -0,0 +1,4 @@
{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %}

@@ -0,0 +1,152 @@
{%- macro json_to_python_type(json_spec) %}
{%- set basic_type_map = {
"string": "str",
"number": "float",
"integer": "int",
"boolean": "bool"
} %}
{%- if basic_type_map[json_spec.type] is defined %}
{{- basic_type_map[json_spec.type] }}
{%- elif json_spec.type == "array" %}
{{- "list[" + json_to_python_type(json_spec|items) + "]"}}
{%- elif json_spec.type == "object" %}
{%- if json_spec.additionalProperties is defined %}
{{- "dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']'}}
{%- else %}
{{- "dict" }}
{%- endif %}
{%- elif json_spec.type is iterable %}
{{- "Union[" }}
{%- for t in json_spec.type %}
{{- json_to_python_type({"type": t}) }}
{%- if not loop.last %}
{{- "," }}
{%- endif %}
{%- endfor %}
{{- "]" }}
{%- else %}
{{- "Any" }}
{%- endif %}
{%- endmacro %}
{{- bos_token }}
{{- '<|im_start|>system
' }}
{{- "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> " }}
{%- for tool in tools %}
{%- if tool.function is defined %}
{%- set tool = tool.function %}
{%- endif %}
{{- '{"type": "function", "function": ' }}
{{- '{"name": "' + tool.name + '", ' }}
{{- '"description": "' + tool.name + '(' }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{{- param_name + ": " + json_to_python_type(param_fields) }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- if tool.return is defined %}
{{- " -> " + json_to_python_type(tool.return) }}
{%- endif %}
{{- " - " + tool.description + "
" }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{%- if loop.first %}
{{- " Args:
" }}
{%- endif %}
{{- " " + param_name + "(" + json_to_python_type(param_fields) + "): " + param_fields.description|trim }}
{%- endfor %}
{%- if tool.return is defined and tool.return.description is defined %}
{{- "
Returns:
" + tool.return.description }}
{%- endif %}
{{- '"' }}
{{- ', "parameters": ' }}
{%- if tool.parameters.properties | length == 0 %}
{{- "{}" }}
{%- else %}
{{- tool.parameters|tojson }}
{%- endif %}
{{- "}" }}
{%- if not loop.last %}
{{- "
" }}
{%- endif %}
{%- endfor %}
{{- " </tools>" }}
{{- 'Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
' }}
{{- "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
" }}
{{- "<tool_call>
" }}
{{- '{"name": <function-name>, "arguments": <args-dict>}
' }}
{{- '</tool_call><|im_end|>
' }}
{%- for message in messages %}
{%- if message.role == "user" or message.role == "system" or (message.role == "assistant" and message.tool_calls is not defined) %}
{{- '<|im_start|>' + message.role + '
' + message.content + '<|im_end|>' + '
' }}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- for tool_call in message.tool_calls %}
{{- '
<tool_call>
' }} {%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '{' }}
{{- '"name": "' }}
{{- tool_call.name }}
{{- '"' }}
{{- ', '}}
{%- if tool_call.arguments is defined %}
{{- '"arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments|tojson }}
{%- endif %}
{%- endif %}
{{- '}' }}
{{- '
</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>
' }}
{%- elif message.role == "tool" %}
{%- if loop.previtem and loop.previtem.role != "tool" %}
{{- '<|im_start|>tool
' }}
{%- endif %}
{{- '<tool_response>
' }}
{{- message.content }}
{%- if not loop.last %}
{{- '
</tool_response>
' }}
{%- else %}
{{- '
</tool_response>' }}
{%- endif %}
{%- if not loop.last and loop.nextitem.role != "tool" %}
{{- '<|im_end|>' }}
{%- elif loop.last %}
{{- '<|im_end|>' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant
' }}
{%- endif %}

@@ -0,0 +1,6 @@
{{bos_token}}{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
You are a helpful assistant.<|im_end|>
' }}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %}

@@ -0,0 +1,152 @@
{%- macro json_to_python_type(json_spec) %}
{%- set basic_type_map = {
"string": "str",
"number": "float",
"integer": "int",
"boolean": "bool"
} %}
{%- if basic_type_map[json_spec.type] is defined %}
{{- basic_type_map[json_spec.type] }}
{%- elif json_spec.type == "array" %}
{{- "list[" + json_to_python_type(json_spec|items) + "]"}}
{%- elif json_spec.type == "object" %}
{%- if json_spec.additionalProperties is defined %}
{{- "dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']'}}
{%- else %}
{{- "dict" }}
{%- endif %}
{%- elif json_spec.type is iterable %}
{{- "Union[" }}
{%- for t in json_spec.type %}
{{- json_to_python_type({"type": t}) }}
{%- if not loop.last %}
{{- "," }}
{%- endif %}
{%- endfor %}
{{- "]" }}
{%- else %}
{{- "Any" }}
{%- endif %}
{%- endmacro %}
{{- bos_token }}
{{- '<|im_start|>system
' }}
{{- "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> " }}
{%- for tool in tools %}
{%- if tool.function is defined %}
{%- set tool = tool.function %}
{%- endif %}
{{- '{"type": "function", "function": ' }}
{{- '{"name": "' + tool.name + '", ' }}
{{- '"description": "' + tool.name + '(' }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{{- param_name + ": " + json_to_python_type(param_fields) }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- if tool.return is defined %}
{{- " -> " + json_to_python_type(tool.return) }}
{%- endif %}
{{- " - " + tool.description + "
" }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{%- if loop.first %}
{{- " Args:
" }}
{%- endif %}
{{- " " + param_name + "(" + json_to_python_type(param_fields) + "): " + param_fields.description|trim }}
{%- endfor %}
{%- if tool.return is defined and tool.return.description is defined %}
{{- "
Returns:
" + tool.return.description }}
{%- endif %}
{{- '"' }}
{{- ', "parameters": ' }}
{%- if tool.parameters.properties | length == 0 %}
{{- "{}" }}
{%- else %}
{{- tool.parameters|tojson }}
{%- endif %}
{{- "}" }}
{%- if not loop.last %}
{{- "
" }}
{%- endif %}
{%- endfor %}
{{- " </tools>" }}
{{- 'Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
' }}
{{- "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
" }}
{{- "<tool_call>
" }}
{{- '{"name": <function-name>, "arguments": <args-dict>}
' }}
{{- '</tool_call><|im_end|>
' }}
{%- for message in messages %}
{%- if message.role == "user" or message.role == "system" or (message.role == "assistant" and message.tool_calls is not defined) %}
{{- '<|im_start|>' + message.role + '
' + message.content + '<|im_end|>' + '
' }}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- for tool_call in message.tool_calls %}
{{- '
<tool_call>
' }} {%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '{' }}
{{- '"name": "' }}
{{- tool_call.name }}
{{- '"' }}
{{- ', '}}
{%- if tool_call.arguments is defined %}
{{- '"arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments|tojson }}
{%- endif %}
{%- endif %}
{{- '}' }}
{{- '
</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>
' }}
{%- elif message.role == "tool" %}
{%- if loop.previtem and loop.previtem.role != "tool" %}
{{- '<|im_start|>tool
' }}
{%- endif %}
{{- '<tool_response>
' }}
{{- message.content }}
{%- if not loop.last %}
{{- '
</tool_response>
' }}
{%- else %}
{{- '
</tool_response>' }}
{%- endif %}
{%- if not loop.last and loop.nextitem.role != "tool" %}
{{- '<|im_end|>' }}
{%- elif loop.last %}
{{- '<|im_end|>' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant
' }}
{%- endif %}

@@ -0,0 +1,6 @@
{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
You are a helpful assistant.<|im_end|>
' }}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %}

@@ -0,0 +1,7 @@
{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
You are a helpful assistant.<|im_end|>
{% endif %}<|im_start|>{{ message['role'] }}
{% if message['content'] is string %}{{ message['content'] }}<|im_end|>
{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
{% endif %}

@@ -0,0 +1,54 @@
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0]['role'] == 'system' %}
{{- messages[0]['content'] }}
{%- else %}
{{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
{%- endif %}
{{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0]['role'] == 'system' %}
{{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
{%- else %}
{{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments | tojson }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}

@@ -0,0 +1,54 @@
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0]['role'] == 'system' %}
{{- messages[0]['content'] }}
{%- else %}
{{- 'Please reason step by step, and put your final answer within \\boxed{}.' }}
{%- endif %}
{{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0]['role'] == 'system' %}
{{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
{%- else %}
{{- '<|im_start|>system\nPlease reason step by step, and put your final answer within \\boxed{}.<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments | tojson }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}

@@ -0,0 +1,4 @@
{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '
' + message['content'] | trim + '<end_of_turn>
' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model
'}}{% endif %}

@@ -0,0 +1,287 @@
{# version=v3.llama3 #}{%- macro append_new_param_info(param_declaration, comment_info, examples_info, depth) -%}
{%- set offset = "" -%}
{%- if depth >= 1 -%}
{%- set offset = " " * depth -%}
{%- endif -%}
{%- if comment_info != "<|NONE|>" -%}
{{ "\n" + offset + comment_info }}
{%- if examples_info | length > 0 -%}
{# Append each example info #}
{%- for example in examples_info -%}
{{ "\n" + offset + "// " + example|string|replace("'", '"') }}
{%- endfor -%}
{%- endif -%}
{%- endif -%}
{{ "\n" + offset + param_declaration }}
{%- endmacro -%}
{%- macro convert_data_type(param_type) -%}
{%- if param_type == "integer" or param_type == "float" -%}
{{ "number" }}
{%- else -%}
{{ param_type }}
{%- endif -%}
{%- endmacro -%}
{%- macro get_param_type(param) -%}
{%- set param_type = "any" -%}
{%- if "type" in param -%}
{%- set raw_param_type = param["type"] -%}
{%- if raw_param_type is iterable and raw_param_type is not string -%}
{%- set param_type = raw_param_type | join(" | ") -%}
{%- else -%}
{%- set param_type = raw_param_type -%}
{%- endif -%}
{{ convert_data_type(param_type) }}
{%- elif "oneOf" in param -%}
{%- set one_of_types = param["oneOf"]|selectattr("type", "defined")|list -%}
{%- set one_of_types = one_of_types|map(attribute="type")|unique|list -%}
{{ convert_data_type(one_of_types | join(" | ")) }}
{%- endif -%}
{%- endmacro -%}
{%- macro get_format_param(param) -%}
{%- if "format" in param -%}
{{ param["format"] }}
{%- elif "oneOf" in param -%}
{%- set formats = [] -%}
{%- for item in param["oneOf"] -%}
{%- if "format" in item -%}
{%- if item["format"] == param["oneOf"][-1]["format"] -%}
{{ item["format"] }}
{%- else -%}
{{ item["format"] + " or "}}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ "<|NONE|>" }}
{%- endif -%}
{%- endmacro -%}
{%- macro get_param_info(param) -%}
{%- set param_type = param.get("type", "any") -%}
{%- set format_param = get_format_param(param) -%}
{%- if "description" in param or "default" in param or format_param != "<|NONE|>" or param["maximum"] or param["minimum"] or param["maxLength"] or param["minLength"] -%}
{{ "//" }}
{%- if "description" in param -%}
{%- set desc = param["description"] -%}
{%- if not desc.endswith(".") -%}
{%- set desc = desc + "." -%}
{%- endif -%}
{{ " " + desc }}
{%- endif -%}
{%- if "default" in param -%}
{%- set default_value = param["default"] -%}
{%- if param_type == "string" -%}
{%- set default_value = '"' ~ default_value ~ '"' -%}
{%- endif -%}
{{ " Default=" ~ default_value ~ "." }}
{%- endif -%}
{%- set format_param = get_format_param(param) -%}
{%- if format_param != "<|NONE|>" -%}
{{ " Format=" ~ format_param }}
{%- endif -%}
{%- for field, field_name in [("maximum", "Maximum"), ("minimum", "Minimum"), ("maxLength", "Maximum length"), ("minLength", "Minimum length")] -%}
{%- if field in param -%}
{{ " " + field_name ~ "=" ~ param[field] }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ "<|NONE|>"}}
{%- endif -%}
{%- endmacro -%}
{%- macro get_enum_option_str(enum_options) -%}
{%- for v in enum_options -%}
{%- if v is string -%}
{{ '"' + v + '"' }}
{%- else -%}
{{ v }}
{%- endif -%}
{%- if enum_options|length > 0 and v != enum_options[-1] -%}
{{ " | " }}
{%- endif -%}
{%- endfor -%}
{%- endmacro -%}
{%- macro get_array_typescript(param_name, param_dic, depth) -%}
{%- set offset = '' -%}
{%- if depth >= 1 -%}
{%- set offset = " " * depth -%}
{%- endif -%}
{%- set items_info = param_dic.get('items', {}) -%}
{%- if items_info|length == 0 -%}
{%- if param_name -%}
{{ "\n" + offset + param_name + ": []" }}
{%- else -%}
{{ "\n" + offset + "[]" }}
{%- endif -%}
{%- else -%}
{%- set array_type = get_param_type(items_info) -%}
{%- if array_type == 'object' -%}
{%- if param_name -%}
{{ "\n" + offset + param_name + ": {" }}
{%- else -%}
{{ "\n" + offset + "{" }}
{%- endif -%}
{{ get_parameter_typescript(items_info.get('properties', {}), items_info.get('required', []), depth + 1) -}}
{{- "\n" + offset + "}[]" }}
{%- elif array_type == 'array' -%}
{%- set item_info = get_array_typescript(None, items_info, depth + 1) -%}
{%- if not param_name -%}
{{ "\n" + item_info + "[]" }}
{%- else -%}
{{ "\n" + offset + param_name + ": " + item_info|trim + "[]" }}
{%- endif -%}
{%- else -%}
{%- if 'enum' in items_info -%}
{%- set item_type = get_enum_option_str(items_info['enum']) -%}
{%- if param_name is none -%}
{{ "(" + item_type + ")[]"}}
{%- else -%}
{{ "\n" + offset + param_name + ": (" + item_type + ")[]" }}
{%- endif -%}
{%- else -%}
{%- if param_name is none -%}
{{ "\n" + array_type + "[]" }}
{%- else -%}
{{ "\n" + offset + param_name + ": " + array_type + "[]," }}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{%- endmacro -%}
{%- macro get_parameter_typescript(properties, required_params, depth=0) -%}
{%- set res = "" -%}
{%- for param_name, param in properties.items() -%}
{%- if param is mapping -%}
{%- set comment_info = get_param_info(param) -%}
{# Param Examples #}
{%- set examples_info = [] -%}
{%- if "examples" in param -%}
{%- set examples_info = ["Example " + param_name + ":"] -%}
{%- set examples_info = examples_info + param["examples"] -%}
{%- endif -%}
{# Param Name declaration #}
{%- set param_declaration = param_name -%}
{%- if required_params is iterable and param_name not in required_params -%}
{%- set param_declaration = param_declaration + "?" -%}
{%- endif -%}
{%- set param_type = get_param_type(param) -%}
{# Handle indentation based on depth #}
{%- set offset = "" -%}
{%- if depth >= 1 -%}
{%- set offset = " " * depth -%}
{%- endif -%}
{%- if param_type == "object" -%}
{%- if comment_info != "<|NONE|>" -%}
{{ "\n" + offset + comment_info }}
{%- endif -%}
{%- if examples_info|length > 0 -%}
{%- for example in examples_info -%}
{{ "\n" + offset + "// " + example|string|replace("'", '"') }}
{%- endfor -%}
{%- endif -%}
{%- set param_declaration = param_declaration + ": {" -%}
{{ "\n" + offset + param_declaration -}}
{{- get_parameter_typescript(param.get("properties", {}), param.get("required", []), depth + 1) -}}
{{- "\n" + offset + "}," }}
{%- elif param_type == "array" -%}
{%- set item_info = param.get("items", {}) -%}
{%- if "type" not in item_info -%}
{%- set param_declaration = param_declaration + ": []," -%}
{{ append_new_param_info(param_declaration, comment_info, examples_info, depth) }}
{%- else -%}
{%- if comment_info != "<|NONE|>" -%}
{{ "\n" + offset + comment_info }}
{%- endif -%}
{%- if examples_info|length > 0 -%}
{%- for example in examples_info -%}
{{ "\n" + offset + "// " + example|string|replace("'", '"') }}
{%- endfor -%}
{%- endif -%}
{%- set array_declaration = get_array_typescript(param_declaration, param, depth) -%}
{%- if not array_declaration.endswith(",") -%}
{%- set array_declaration = array_declaration + "," -%}
{%- endif -%}
{{ array_declaration}}
{%- endif -%}
{%- else -%}
{%- if "enum" in param -%}
{%- set param_type = get_enum_option_str(param["enum"]) -%}
{%- endif -%}
{%- if "nullable" in param and param["nullable"] -%}
{%- set param_type = param_type + " | null" -%}
{%- endif -%}
{%- set param_declaration = param_declaration + ": " + param_type + "," -%}
{{ append_new_param_info(param_declaration, comment_info, examples_info, depth) }}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- endmacro -%}
{%- macro generate_schema_from_functions(functions, namespace='functions') -%}
{{ "// Supported function definitions that should be called when necessary.\n" -}}
{{- "namespace " + namespace + " {\n\n" -}}
{%- for function in functions -%}
{%- if function.get("function") -%}
{%- set function = function.get("function") -%}
{%- endif -%}
{%- set function_name = function.get("name") -%}
{%- if function_name -%}
{%- set description = function.get('description', '') -%}
{%- set parameters = function.get('parameters', {}) -%}
{{- "// " + description + "\n" -}}
{{- "type " + function_name -}}
{%- if parameters and parameters.get("properties") -%}
{{- " = (_: {" -}}
{%- set required_params = parameters.get("required", []) -%}
{{ get_parameter_typescript(parameters.get("properties"), required_params, 0) -}}
{{- "\n}) => any;\n\n" }}
{%- else -%}
{{ " = () => any;\n\n" }}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{{ "} // namespace " + namespace }}
{%- endmacro -%}
{%- if not tools -%}
{%- set tools = [] -%}
{%- endif -%}
{{ bos_token + '<|start_header_id|>system<|end_header_id|>\n\nYou are capable of executing available function(s) if required.\nOnly execute function(s) when absolutely necessary.\nAsk for the required input to:recipient==all\nUse JSON for function arguments.\nRespond in this format:\n>>>${recipient}\n${content}\nAvailable functions:\n' + generate_schema_from_functions(tools) + '<|eot_id|>' -}}
{%- if tools|length > 0 and tools|selectattr("type", "equalto", "code_interpreter")|list|length > 0 -%}
{{ '<|start_header_id|>system<|end_header_id|>\n\nWhen you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 60.0 seconds. The drive at \'/mnt/data\' can be used to save and persist user files.<|eot_id|>' }}
{%- endif -%}
{%- for message in messages -%}
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
{%- elif message['role'] == 'tool' -%}
{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
{%- else -%}
{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'}}
{%- if message['content'] -%}
{{ '>>>all\n' + message['content'] }}
{%- endif -%}
{%- if 'tool_calls' in message and message['tool_calls'] -%}
{%- for tool_call in message['tool_calls'] -%}
{{ '>>>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments'] }}
{%- endfor -%}
{%- endif -%}
{{ '<|eot_id|>' }}
{%- endif -%}
{%- endfor -%}
{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n>>>' }}{% endif %}

View file

@ -0,0 +1,109 @@
{{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}
{#- System message + builtin tools #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if builtin_tools is defined or tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{%- if builtin_tools is defined %}
{{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}
{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}
{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{%- if builtin_tools is defined and tool_call.name in builtin_tools %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- "<|python_tag|>" + tool_call.name + ".call(" }}
{%- for arg_name, arg_val in tool_call.arguments | items %}
{{- arg_name + '="' + arg_val + '"' }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- else %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{%- endif %}
{%- if builtin_tools is defined %}
{#- This means we're in ipython mode #}
{{- "<|eom_id|>" }}
{%- else %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}

View file

@ -0,0 +1,8 @@
{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'user' %}{{'<|user|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
' + message['content'] + '<|end|>
'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %}

View file

@ -0,0 +1,24 @@
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set loop_messages = messages %}
{%- endif %}
{{- bos_token }}
{%- for message in loop_messages %}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
{{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}
{%- endif %}
{%- if message['role'] == 'user' %}
{%- if loop.first and system_message is defined %}
{{- ' [INST] ' + system_message + '\n\n' + message['content'] + ' [/INST]' }}
{%- else %}
{{- ' [INST] ' + message['content'] + ' [/INST]' }}
{%- endif %}
{%- elif message['role'] == 'assistant' %}
{{- ' ' + message['content'] + eos_token}}
{%- else %}
{{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}
{%- endif %}
{%- endfor %}

434
tests/test-minja.cpp Normal file
View file

@ -0,0 +1,434 @@
/*
Minimalistic Jinja templating engine for llama.cpp. C++11, no deps (single-header), decent language support but very few functions (easy to extend), just what's needed for actual prompt templates.
Models have increasingly complex templates (e.g. Llama 3.1, Hermes 2 Pro w/ tool_use), so we need a proper template engine to get the best out of them.
Supports:
- Full expression syntax
- Statements `{% %}`, variable sections `{{ }}`, and comments `{# #}` with pre/post space elision `{%- -%}` / `{{- -}}` / `{#- -#}`
- `if` / `elif` / `else` / `endif`
- `for` (`recursive`) (`if`) / `else` / `endfor` w/ `loop.*` (including `loop.cycle`) and destructuring
- `set` w/ namespaces & destructuring
- `macro` / `endmacro`
- Extensible filters collection: `count`, `dictsort`, `equalto`, `e` / `escape`, `items`, `join`, `joiner`, `namespace`, `raise_exception`, `range`, `reject`, `tojson`, `trim`
Limitations:
- Not supporting most filters & pipes. Only the ones actually used in the templates are implemented.
https://jinja.palletsprojects.com/en/3.0.x/templates/#builtin-filters
- No difference between none and undefined
- Single namespace with all filters / tests / functions / macros / variables
- No tuples (templates seem to rely on lists only)
- No `if` expressions w/o `else` (but `if` statements are fine)
- No `{% raw %}`, `{% block %}`, `{% include %}`, `{% extends %}`
Model templates verified to work:
- Meta-Llama-3.1-8B-Instruct
- Phi-3.5-mini-instruct
- Hermes-2-Pro-Llama-3-8B (default & tool_use variants)
- Qwen2-VL-7B-Instruct, Qwen2-7B-Instruct
- Mixtral-8x7B-Instruct-v0.1
TODO:
- Simplify two-pass parsing
- Pass tokens to IfNode and such
- Macro nested set scope = global?
{%- macro get_param_type(param) -%}
{%- set param_type = "any" -%}
- Advertise in / link to https://jbmoelker.github.io/jinja-compat-tests/
*/
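// A minimal usage sketch (illustrative only, not part of the test suite below; it
// only relies on the API already exercised in this file: Parser::parse,
// Context::make, render):
//
//   auto tmpl = minja::Parser::parse("Hello, {{ name }}!", /* options */ {});
//   auto ctx  = minja::Context::make(json{{"name", "world"}});
//   std::cout << tmpl->render(ctx) << std::endl;  // prints: Hello, world!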
#include "minja.hpp"
#include <fstream>
#include <iostream>
#include <string>
#include <json.hpp>
static std::string read_file(const std::string &path) {
std::ifstream fs(path, std::ios_base::binary);
if (!fs.is_open()) {
throw std::runtime_error("Failed to open file: " + path);
}
fs.seekg(0, std::ios_base::end);
auto size = fs.tellg();
fs.seekg(0);
std::string out;
out.resize(static_cast<size_t>(size));
fs.read(&out[0], static_cast<std::streamsize>(size));
return out;
}
static std::vector<std::string> find_files(const std::string & folder, const std::string & ext) {
std::vector<std::string> files;
for (const auto & entry : std::filesystem::directory_iterator(folder)) {
if (entry.path().extension() == ext)
files.push_back(entry.path().string());
}
return files;
}
static std::string filename_without_extension(const std::string & path) {
auto res = path;
auto pos = res.find_last_of('/');
if (pos != std::string::npos)
res = res.substr(pos + 1);
pos = res.find_last_of('.');
if (pos != std::string::npos)
res = res.substr(0, pos);
return res;
}
static void assert_equals(const std::string & expected, const std::string & actual) {
if (expected != actual) {
std::cerr << "Expected: " << expected << std::endl;
std::cerr << "Actual: " << actual << std::endl;
std::cerr << std::flush;
throw std::runtime_error("Test failed");
}
}
static void announce_test(const std::string & name, const minja::Options & options) {
auto len = name.size();
auto extract = minja::strip(name);
extract = json(name.substr(0, std::min<size_t>(len, 50)) + (len > 50 ? " [...]" : "")).dump();
extract = extract.substr(1, extract.size() - 2);
std::cout << "Testing: " << extract;
static const minja::Options default_options {};
if (options.lstrip_blocks != default_options.lstrip_blocks)
std::cout << " lstrip_blocks=" << options.lstrip_blocks;
if (options.trim_blocks != default_options.trim_blocks)
std::cout << " trim_blocks=" << options.trim_blocks;
std::cout << std::endl << std::flush;
}
static void test_render(const std::string & template_str, const json & bindings, const minja::Options & options, const std::string & expected, const json & expected_context = {}) {
announce_test(template_str, options);
auto root = minja::Parser::parse(template_str, options);
auto context = minja::Context::make(bindings);
std::string actual;
try {
actual = root->render(context);
} catch (const std::runtime_error & e) {
actual = "ERROR: " + std::string(e.what());
}
assert_equals(expected, actual);
if (!expected_context.is_null()) {
// auto dump = context->dump();
for (const auto & kv : expected_context.items()) {
auto value = context->get(kv.key());
if (value != kv.value()) {
std::cerr << "Expected context value for " << kv.key() << ": " << kv.value() << std::endl;
std::cerr << "Actual value: " << value.dump() << std::endl;
std::cerr << std::flush;
throw std::runtime_error("Test failed");
}
}
}
std::cout << "Test passed!" << std::endl << std::flush;
}
static void test_error_contains(const std::string & template_str, const json & bindings, const minja::Options & options, const std::string & expected) {
announce_test(template_str, options);
try {
auto root = minja::Parser::parse(template_str, options);
auto context = minja::Context::make(bindings);
// auto copy = context.is_null() ? Value::object() : std::make_shared<Value>(context);
auto actual = root->render(context);
throw std::runtime_error("Expected error: " + expected + ", but got successful result instead: " + actual);
} catch (const std::runtime_error & e) {
std::string actual(e.what());
if (actual.find(expected) == std::string::npos) {
std::cerr << "Expected: " << expected << std::endl;
std::cerr << "Actual: " << actual << std::endl;
std::cerr << std::flush;
throw std::runtime_error("Test failed");
}
}
std::cout << " passed!" << std::endl << std::flush;
}
static void test_template_features() {
test_render(R"({{ 'a' in {"a": 1} }},{{ 'a' in {} }})", {}, {}, "True,False");
test_render(R"({{ 'a' in ["a"] }},{{ 'a' in [] }})", {}, {}, "True,False");
test_render(R"({{ [{"a": 1}, {"a": 2}, {}] | selectattr("a", "equalto", 1) }})", {}, {}, R"([{'a': 1}])");
test_render(R"({{ [{"a": 1}, {"a": 2}] | map(attribute="a") | list }})", {}, {}, "[1, 2]");
test_render(R"({{ ["", "a"] | map("length") | list }})", {}, {}, "[0, 1]");
test_render(R"({{ range(3) | last }})", {}, {}, "2");
test_render(R"({% set foo = true %}{{ foo is defined }})", {}, {}, "True");
test_render(R"({% set foo = true %}{{ not foo is defined }})", {}, {}, "False");
test_render(R"({{ {"a": "b"} | tojson }})", {}, {}, R"({"a": "b"})");
test_render(R"({{ {"a": "b"} }})", {}, {}, R"({'a': 'b'})");
std::string trim_tmpl =
"\n"
" {% if true %}Hello{% endif %} \n"
"...\n"
"\n";
test_render(
trim_tmpl,
{}, { .trim_blocks = true }, "\n Hello...\n");
test_render(
trim_tmpl,
{}, {}, "\n Hello \n...\n");
test_render(
trim_tmpl,
{}, { .lstrip_blocks = true }, "\nHello \n...\n");
test_render(
trim_tmpl,
{}, { .trim_blocks = true, .lstrip_blocks = true }, "\nHello...\n");
test_render(
R"({%- set separator = joiner(' | ') -%}
{%- for item in ["a", "b", "c"] %}{{ separator() }}{{ item }}{% endfor -%})",
{}, {}, "a | b | c");
test_render("a\nb\n", {}, {}, "a\nb");
test_render(" {{- ' a\n'}}", {}, {.trim_blocks = true}, " a\n");
test_render(
R"(
{%- for x in range(3) -%}
{%- if loop.first -%}
but first, mojitos!
{%- endif -%}
{{ loop.index }}{{ "," if not loop.last -}}
{%- endfor -%}
)", {}, {}, "but first, mojitos!1,2,3");
test_render("{{ 'a' + [] | length + 'b' }}", {}, {}, "a0b");
test_render("{{ [1, 2, 3] | join(', ') + '...' }}", {}, {}, "1, 2, 3...");
test_render("{{ 'Tools: ' + [1, 2, 3] | reject('equalto', 2) | join(', ') + '...' }}", {}, {}, "Tools: 1, 3...");
test_render("{{ [1, 2, 3] | join(', ') }}", {}, {}, "1, 2, 3");
test_render("{% for i in range(3) %}{{i}},{% endfor %}", {}, {}, "0,1,2,");
test_render("{% set foo %}Hello {{ 'there' }}{% endset %}{{ 1 ~ foo ~ 2 }}", {}, {}, "1Hello there2");
test_render("{{ [1, False, null, True, 2, '3', 1, '3', False, null, True] | unique }}", {}, {},
"[1, False, null, True, 2, '3']");
test_render("{{ range(5) | length % 2 }}", {}, {}, "1");
test_render("{{ range(5) | length % 2 == 1 }},{{ [] | length > 0 }}", {}, {}, "True,False");
test_render(
"{{ messages[0]['role'] != 'system' }}",
{{"messages", json::array({json({{"role", "system"}})})}},
{},
"False");
test_render(
R"(
{%- for x, y in [("a", "b"), ("c", "d")] -%}
{{- x }},{{ y -}};
{%- endfor -%}
)", {}, {}, "a,b;c,d;");
test_render("{{ 1 is not string }}", {}, {}, "True");
test_render("{{ 'ab' * 3 }}", {}, {}, "ababab");
test_render("{{ [1, 2, 3][-1] }}", {}, {}, "3");
test_render(
"{%- for i in range(0) -%}NAH{% else %}OK{% endfor %}",
{}, {},
"OK");
test_render(
R"(
{%- for i in range(5) -%}
({{ i }}, {{ loop.cycle('odd', 'even') }}),
{%- endfor -%}
)", {}, {}, "(0, odd),(1, even),(2, odd),(3, even),(4, odd),");
test_render(
"{%- for i in range(5) if i % 2 == 0 -%}\n"
"{{ i }}, first={{ loop.first }}, last={{ loop.last }}, index={{ loop.index }}, index0={{ loop.index0 }}, revindex={{ loop.revindex }}, revindex0={{ loop.revindex0 }}, prev={{ loop.previtem }}, next={{ loop.nextitem }},\n"
"{% endfor -%}",
{}, {},
"0, first=True, last=False, index=1, index0=0, revindex=3, revindex0=2, prev=, next=2,\n"
"2, first=False, last=False, index=2, index0=1, revindex=2, revindex0=1, prev=0, next=4,\n"
"4, first=False, last=True, index=3, index0=2, revindex=1, revindex0=0, prev=2, next=,\n");
test_render(
R"(
{%- set res = [] -%}
{%- for c in ["<", ">", "&", '"'] -%}
{%- set _ = res.append(c | e) -%}
{%- endfor -%}
{{- res | join(", ") -}}
)", {}, {},
R"(&lt;, &gt;, &amp;, &quot;)");
test_render(
R"(
{%- set x = 1 -%}
{%- set y = 2 -%}
{%- macro foo(x, z, w=10) -%}
x={{ x }}, y={{ y }}, z={{ z }}, w={{ w -}}
{%- endmacro -%}
{{- foo(100, 3) -}}
)", {}, {},
R"(x=100, y=2, z=3, w=10)");
test_render(
R"(
{% macro input(name, value='', type='text', size=20) -%}
<input type="{{ type }}" name="{{ name }}" value="{{ value|e }}" size="{{ size }}">
{%- endmacro -%}
<p>{{ input('username') }}</p>
<p>{{ input('password', type='password') }}</p>)",
{}, {}, R"(
<p><input type="text" name="username" value="" size="20"></p>
<p><input type="password" name="password" value="" size="20"></p>)");
test_render(
R"(
{#- The values' default array should be created afresh at each call, unlike the equivalent Python function -#}
{%- macro foo(values=[]) -%}
{%- set _ = values.append(1) -%}
{{- values -}}
{%- endmacro -%}
{{- foo() }} {{ foo() -}})",
{}, {}, R"([1] [1])");
test_render(R"({{ None | items | tojson }}; {{ {1: 2} | items | tojson }})", {}, {}, "[]; [[1, 2]]");
test_render(R"({{ {1: 2, 3: 4, 5: 7} | dictsort | tojson }})", {}, {}, "[[1, 2], [3, 4], [5, 7]]");
test_render(R"({{ {1: 2}.items() }})", {}, {}, "[[1, 2]]");
test_render(R"({{ {1: 2}.get(1) }}; {{ {}.get(1) }}; {{ {}.get(1, 10) }})", {}, {}, "2; ; 10");
test_render(
R"(
{%- for x in [1, 1.2, "a", true, True, false, False, None, [], [1], [1, 2], {}, {"a": 1}, {1: "b"}] -%}
{{- x | tojson -}},
{%- endfor -%}
)", {}, {},
R"(1,1.2,"a",True,True,False,False,null,[],[1],[1, 2],{},{"a": 1},{"1": "b"},)");
test_render(
R"(
{%- set n = namespace(value=1, title='') -%}
{{- n.value }} "{{ n.title }}",
{%- set n.value = 2 -%}
{%- set n.title = 'Hello' -%}
{{- n.value }} "{{ n.title }}")", {}, {}, R"(1 "",2 "Hello")");
test_error_contains(
"{{ (a.b.c) }}",
{{"a", json({{"b", {{"c", 3}}}})}},
{},
"'a' is not defined");
test_render(
"{% set _ = a.b.append(c.d.e) %}{{ a.b }}",
json::parse(R"({
"a": {"b": [1, 2]},
"c": {"d": {"e": 3}}
})"),
{},
"[1, 2, 3]");
test_render(R"(
{%- for x, y in z -%}
{{- x }},{{ y -}};
{%- endfor -%}
)", {{"z", json({json({1, 10}), json({2, 20})})}}, {}, "1,10;2,20;");
test_render(" a {{ 'b' -}} c ", {}, {}, " a bc ");
test_render(" a {{- 'b' }} c ", {}, {}, " ab c ");
test_render("a\n{{- 'b' }}\nc", {}, {}, "ab\nc");
test_render("a\n{{ 'b' -}}\nc", {}, {}, "a\nbc");
test_error_contains("{{ raise_exception('hey') }}", {}, {}, "hey");
test_render("{{ [] is iterable }}", {}, {}, "True");
test_render("{{ [] is not number }}", {}, {}, "True");
test_render("{% set x = [0, 1, 2, 3] %}{{ x[1:] }}{{ x[:2] }}{{ x[1:3] }}", {}, {}, "[1, 2, 3][0, 1][1, 2]");
test_render("{{ ' a ' | trim }}", {}, {}, "a");
test_render("{{ range(3) }}{{ range(4, 7) }}{{ range(0, 10, step=2) }}", {}, {}, "[0, 1, 2][4, 5, 6][0, 2, 4, 6, 8]");
test_render(
R"( {{ "a" -}} b {{- "c" }} )", {}, {},
" abc ");
test_error_contains("{% else %}", {}, {}, "Unexpected else");
test_error_contains("{% endif %}", {}, {}, "Unexpected endif");
test_error_contains("{% elif 1 %}", {}, {}, "Unexpected elif");
test_error_contains("{% endfor %}", {}, {}, "Unexpected endfor");
test_error_contains("{% if 1 %}", {}, {}, "Unterminated if");
test_error_contains("{% for x in 1 %}", {}, {}, "Unterminated for");
test_error_contains("{% if 1 %}{% else %}", {}, {}, "Unterminated if");
test_error_contains("{% if 1 %}{% else %}{% elif 1 %}{% endif %}", {}, {}, "Unterminated if");
test_render("{% if 1 %}{% elif 1 %}{% else %}{% endif %}", {}, {}, "");
test_render(
"{% set x = [] %}{% set _ = x.append(1) %}{{ x | tojson(indent=2) }}", {}, {},
"[\n 1\n]");
test_render(
"{{ not [] }}", {}, {},
"True");
test_render("{{ tool.function.name == 'ipython' }}",
json({{"tool", json({
{"function", {{"name", "ipython"}}}
})}}),
{},
"True");
test_render(R"(
{%- set user = "Olivier" -%}
{%- set greeting = "Hello " ~ user -%}
{{- greeting -}}
)", {}, {}, "Hello Olivier");
}
static void test_chat_templates_with_common_contexts_against_goldens() {
auto jinja_template_files = find_files("tests/chat/templates", ".jinja");
auto context_files = find_files("tests/chat/contexts", ".json");
auto get_golden_file = [&](const std::string & tmpl_file, const std::string & ctx_file) {
auto tmpl_name = filename_without_extension(tmpl_file);
auto ctx_name = filename_without_extension(ctx_file);
auto golden_name = tmpl_name + "-" + ctx_name;
return "tests/chat/goldens/" + golden_name + ".txt";
};
auto fail_with_golden_instructions = [&]() {
throw std::runtime_error("To fetch templates and generate golden files, run `python tests/update_jinja_goldens.py`");
};
if (jinja_template_files.empty()) {
std::cerr << "No Jinja templates found in tests/chat/templates" << std::endl;
fail_with_golden_instructions();
}
const auto options = minja::Options {.trim_blocks = true, .lstrip_blocks = true};
for (const auto & tmpl_file : jinja_template_files) {
std::cout << "# Testing template: " << tmpl_file << std::endl << std::flush;
auto tmpl_str = read_file(tmpl_file);
auto tmpl = minja::Parser::parse(tmpl_str, options);
auto found_goldens = false;
for (const auto & ctx_file : context_files) {
auto ctx = json::parse(read_file(ctx_file));
auto golden_file = get_golden_file(tmpl_file, ctx_file);
if (!std::ifstream(golden_file).is_open()) {
continue;
}
found_goldens = true;
std::cout << " - " << golden_file << std::endl << std::flush;
std::string actual;
try {
actual = tmpl->render(minja::Context::make(ctx));
} catch (const std::runtime_error & e) {
actual = "ERROR: " + std::string(e.what());
}
auto expected = read_file(golden_file);
assert_equals(expected, actual);
}
if (!found_goldens) {
std::cerr << "No golden files found for " << tmpl_file << std::endl;
fail_with_golden_instructions();
}
}
}
/*
cmake -B build -DCMAKE_BUILD_TYPE=Release && cmake --build build -t test-minja -j && ./build/bin/test-minja
*/
int main() {
test_template_features();
if (getenv("LLAMA_SKIP_TESTS_SLOW_ON_EMULATOR")) {
fprintf(stderr, "\033[33mWARNING: Skipping slow tests on emulator.\n\033[0m");
} else {
test_chat_templates_with_common_contexts_against_goldens();
}
return 0;
}

View file

@ -0,0 +1,141 @@
#!/usr/bin/env uv run
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "jinja2",
# "huggingface_hub",
# ]
# ///
'''
Fetches the Jinja2 templates of a few known models and uses them to generate prompt goldens for a few predefined chat contexts.
Examples:
    python ./tests/update_jinja_goldens.py
See also: https://github.com/huggingface/transformers/blob/main/src/transformers/utils/chat_template_utils.py
'''
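# Output layout (illustrative; exact names depend on the models and contexts present):
# for model_id "Qwen/Qwen2-7B-Instruct" and a context file tests/chat/contexts/<ctx>.json,
# the script writes:
#   tests/chat/templates/Qwen-Qwen2-7B-Instruct.jinja
#   tests/chat/goldens/Qwen-Qwen2-7B-Instruct-<ctx>.txt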
from datetime import datetime
import glob
import os
from huggingface_hub import hf_hub_download
import json
import jinja2
import jinja2.ext
import re
# import requests
model_ids = [
"NousResearch/Hermes-3-Llama-3.1-70B",
"NousResearch/Hermes-2-Pro-Llama-3-8B",
"NousResearch/Hermes-2-Pro-Mistral-7B",
"meetkai/functionary-medium-v3.2",
"Qwen/Qwen2-7B-Instruct",
"Qwen/Qwen2-VL-7B-Instruct",
"Qwen/Qwen2.5-7B-Instruct", # "Qwen/Qwen2.5-72B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct",
"Qwen/Qwen2.5-Math-7B-Instruct", # "Qwen/Qwen2.5-Math-72B-Instruct",
"microsoft/Phi-3.5-mini-instruct",
# Gated models:
"meta-llama/Meta-Llama-3.1-8B-Instruct",
"google/gemma-2-2b-it",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
]
def raise_exception(message: str):
raise ValueError(message)
def tojson(x, ensure_ascii=False, indent=None, separators=None, sort_keys=False):
return json.dumps(x, ensure_ascii=ensure_ascii, indent=indent, separators=separators, sort_keys=sort_keys)
def strftime_now(format):
return datetime.now().strftime(format)
def handle_chat_template(model_id, variant, template_src):
print(f"# {model_id} @ {variant}")
model_name = model_id.replace("/", "-")
base_name = f'{model_name}-{variant}' if variant else model_name
template_file = f'tests/chat/templates/{base_name}.jinja'
print(f'template_file: {template_file}')
with open(template_file, 'w') as f:
f.write(template_src)
print(f"- {template_file}")
env = jinja2.Environment(
trim_blocks=True,
lstrip_blocks=True,
# keep_trailing_newline=False,
extensions=[
jinja2.ext.loopcontrols
])
env.filters['tojson'] = tojson
env.globals['raise_exception'] = raise_exception
env.globals['strftime_now'] = strftime_now
template_handles_tools = 'tools' in template_src
template_hates_the_system = 'System role not supported' in template_src
template = env.from_string(template_src)
context_files = glob.glob('tests/chat/contexts/*.json')
for context_file in context_files:
context_name = context_file.split("/")[-1].replace(".json", "")
with open(context_file, 'r') as f:
context = json.load(f)
if not template_handles_tools and 'tools' in context:
continue
if template_hates_the_system and any(m['role'] == 'system' for m in context['messages']):
continue
output_file = f'tests/chat/goldens/{base_name}-{context_name}.txt'
print(f"- {output_file}")
try:
output = template.render(**context)
except Exception:
# Some templates (e.g. Phi-3-medium-128k's) expect a non-null "content" key in each message.
for message in context["messages"]:
if message.get("content") is None:
message["content"] = ""
try:
output = template.render(**context)
except Exception as e:
print(f" ERROR: {e}")
output = f"ERROR: {e}"
with open(output_file, 'w') as f:
f.write(output)
print()
def main():
for dir in ['tests/chat/templates', 'tests/chat/goldens']:
if not os.path.isdir(dir):
os.mkdir(dir)
for model_id in model_ids:
# response = requests.get(f"https://huggingface.co/{model_id}/resolve/main/tokenizer_config.json")
# response.raise_for_status()
# config_str = response.text
with open(hf_hub_download(repo_id=model_id, filename="tokenizer_config.json")) as f:
config_str = f.read()
try:
config = json.loads(config_str)
except json.JSONDecodeError as e:
# Fix https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json
# (Remove extra '}' near the end of the file)
config = json.loads(re.sub(r'\}([\n\s]*\}[\n\s]*\],[\n\s]*"clean_up_tokenization_spaces")', r'\1', config_str))
chat_template = config['chat_template']
if isinstance(chat_template, str):
handle_chat_template(model_id, None, chat_template)
else:
for ct in chat_template:
handle_chat_template(model_id, ct['name'], ct['template'])
if __name__ == '__main__':
main()