ochafik 2024-12-06 02:12:21 +00:00
parent a469f536c0
commit cbe395d87f
146 changed files with 1 addition and 4049 deletions

@@ -119,7 +119,6 @@ llama_target_and_test(test-llama-grammar.cpp)
# llama_target_and_test(test-opt.cpp) # SLOW
llama_target_and_test(test-backend-ops.cpp)
llama_target_and_test(test-antiprompts.cpp)
llama_target_and_test(test-minja.cpp)
llama_target_and_test(test-tool-call.cpp)
llama_target_and_test(test-model-load-cancel.cpp LABEL "model")

@@ -1,15 +0,0 @@
{
"messages": [
{
"role": "user",
"content": "What's your favourite LLM framework?"
},
{
"role": "assistant",
"content": "llama.cpp!"
}
],
"add_generation_prompt": true,
"bos_token": "<|startoftext|>",
"eos_token": "<|endoftext|>"
}

@@ -1,19 +0,0 @@
{
"messages": [
{
"role": "system",
"content": "You only tell the truth."
},
{
"role": "user",
"content": "What's your favourite LLM framework?"
},
{
"role": "assistant",
"content": "llama.cpp!"
}
],
"add_generation_prompt": true,
"bos_token": "<|startoftext|>",
"eos_token": "<|endoftext|>"
}

@@ -1,167 +0,0 @@
{
"messages": [
{
"role": "user",
"content": "Print a hello world message with python."
},
{
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_1___",
"type": "function",
"function": {
"arguments": "{\"code\": \"print('Hello, World!')\"}",
"name": "ipython"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_1___",
"name": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}"
},
{
"role": "assistant",
"content": "Anything else?"
},
{
"role": "user",
"content": "Test a tautology."
},
{
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_2___",
"type": "function",
"function": {
"arguments": "{\"condition\":true}",
"name": "test"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_2___",
"name": "test",
"content": "true"
},
{
"role": "assistant",
"content": "Truth is definitely true."
},
{
"role": "user",
"content": "Check it on the web."
},
{
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_3___",
"type": "function",
"function": {
"arguments": "{\"query\": \"what is truth anyway am I right?\"}",
"name": "brave_search"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_3___",
"name": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}"
},
{
"role": "assistant",
"content": "I don't need the web to answer you but I did check, as you asked. What now?"
}
],
"add_generation_prompt": true,
"bos_token": "<|startoftext|>",
"eos_token": "<|endoftext|>",
"builtin_tools": [
"wolfram_alpha",
"brave_search"
],
"cutting_knowledge_date": "2023-04-01",
"todays_date": "2024-09-03",
"tools": [
{
"type": "function",
"function": {
"name": "ipython",
"description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The code to run in the ipython interpreter."
}
},
"required": ["code"]
}
}
},
{
"type": "function",
"function": {
"name": "brave_search",
"description": "Executes a web search with Brave.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for."
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "wolfram_alpha",
"description": "Executes a query with Wolfram Alpha.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to execute."
}
},
"required": ["query"]
}
}
},
{
"type": "function",
"function": {
"name": "test",
"description": "Runs a test.",
"parameters": {
"type": "object",
"properties": {
"condition": {
"type": "boolean",
"description": "The condition to test."
}
},
"required": ["condition"]
}
}
}
]
}

@@ -1 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>What's your favourite LLM framework?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>llama.cpp!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>You only tell the truth.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>What's your favourite LLM framework?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>llama.cpp!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,49 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Print a hello world message with python.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>Anything else?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Test a tautology.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>Truth is definitely true.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Check it on the web.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>I don't need the web to answer you but I did check, as you asked. What now?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,16 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>What's your favourite LLM framework?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>llama.cpp!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results></results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line.
Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'.
Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'.
Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,12 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
You only tell the truth.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>What's your favourite LLM framework?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>llama.cpp!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results></results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line.
Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'.
Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'.
Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,16 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Print a hello world message with python.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>Anything else?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Test a tautology.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>Truth is definitely true.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Check it on the web.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>I don't need the web to answer you but I did check, as you asked. What now?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results></results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line.
Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'.
Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'.
Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,25 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.
## Available Tools
Here is a list of tools that you have available to you:
<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>What's your favourite LLM framework?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>llama.cpp!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:
```json
[
{
"tool_name": title of the tool in the specification,
"parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters
}
]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,21 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
You only tell the truth.
## Available Tools
Here is a list of tools that you have available to you:
<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>What's your favourite LLM framework?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>llama.cpp!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:
```json
[
{
"tool_name": title of the tool in the specification,
"parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters
}
]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,93 +0,0 @@
<|startoftext|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.
## Available Tools
Here is a list of tools that you have available to you:
```python
def ipython(code: str) -> List[Dict]:
"""Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code (str): The code to run in the ipython interpreter.
"""
pass
```
```python
def brave_search(query: str) -> List[Dict]:
"""Executes a web search with Brave.
Args:
query (str): The query to search for.
"""
pass
```
```python
def wolfram_alpha(query: str) -> List[Dict]:
"""Executes a query with Wolfram Alpha.
Args:
query (str): The query to execute.
"""
pass
```
```python
def test(condition: bool) -> List[Dict]:
"""Runs a test.
Args:
condition (bool): The condition to test.
"""
pass
```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Print a hello world message with python.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
Action:
```json
[
{
"tool_name": "ipython",
"parameters": "{\"code\": \"print('Hello, World!')\"}"
}
]```
<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>
{"stdout": "Hello, World!"}</results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>Anything else?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Test a tautology.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
Action:
```json
[
{
"tool_name": "test",
"parameters": "{\"condition\":true}"
}
]```
<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>
true</results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>Truth is definitely true.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Check it on the web.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
Action:
```json
[
{
"tool_name": "brave_search",
"parameters": "{\"query\": \"what is truth anyway am I right?\"}"
}
]```
<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}</results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>I don't need the web to answer you but I did check, as you asked. What now?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:
```json
[
{
"tool_name": title of the tool in the specification,
"parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters
}
]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

@@ -1,5 +0,0 @@
<|startoftext|><|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|startoftext|><|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,73 +0,0 @@
<|startoftext|><|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,11 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,13 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,58 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "ipython", "description": "ipython(code: str) - Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code(str): The code to run in the ipython interpreter.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
{"type": "function", "function": {"name": "brave_search", "description": "brave_search(query: str) - Executes a web search with Brave.
Args:
query(str): The query to search for.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "wolfram_alpha(query: str) - Executes a query with Wolfram Alpha.
Args:
query(str): The query to execute.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "test", "description": "test(condition: bool) - Runs a test.
Args:
condition(bool): The condition to test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}} </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"stdout": "Hello, World!"}
</tool_response>
<|im_end|><|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition":true}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
true
</tool_response>
<|im_end|><|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response>
<|im_end|><|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,5 +0,0 @@
<|startoftext|><|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|startoftext|><|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,73 +0,0 @@
<|startoftext|><|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,11 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,13 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,58 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "ipython", "description": "ipython(code: str) - Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code(str): The code to run in the ipython interpreter.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
{"type": "function", "function": {"name": "brave_search", "description": "brave_search(query: str) - Executes a web search with Brave.
Args:
query(str): The query to search for.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "wolfram_alpha(query: str) - Executes a query with Wolfram Alpha.
Args:
query(str): The query to execute.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "test", "description": "test(condition: bool) - Runs a test.
Args:
condition(bool): The condition to test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}} </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"stdout": "Hello, World!"}
</tool_response>
<|im_end|><|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition":true}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
true
</tool_response>
<|im_end|><|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response>
<|im_end|><|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|startoftext|><|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|startoftext|><|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,75 +0,0 @@
<|startoftext|><|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,11 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,13 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,58 +0,0 @@
<|startoftext|><|im_start|>system
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "ipython", "description": "ipython(code: str) - Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
Args:
code(str): The code to run in the ipython interpreter.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
{"type": "function", "function": {"name": "brave_search", "description": "brave_search(query: str) - Executes a web search with Brave.
Args:
query(str): The query to search for.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "wolfram_alpha(query: str) - Executes a query with Wolfram Alpha.
Args:
query(str): The query to execute.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
{"type": "function", "function": {"name": "test", "description": "test(condition: bool) - Runs a test.
Args:
condition(bool): The condition to test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}} </tools>Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{"name": <function-name>, "arguments": <args-dict>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"stdout": "Hello, World!"}
</tool_response>
<|im_end|><|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition":true}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
true
</tool_response>
<|im_end|><|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>tool
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response>
<|im_end|><|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,3 +0,0 @@
<|startoftext|>Human: What's your favourite LLM framework?
Assistant: <|endoftext|>llama.cpp!<|endoftext|>

@@ -1,4 +0,0 @@
<|startoftext|>Human: You only tell the truth.
What's your favourite LLM framework?
Assistant: <|endoftext|>llama.cpp!<|endoftext|>

@@ -1,61 +0,0 @@
<|startoftext|>Human: Print a hello world message with python.
Assistant: <|endoftext|>{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|endoftext|>Human: {
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}
Assistant: <|endoftext|>Anything else?<|endoftext|>Human: Test a tautology.
Assistant: <|endoftext|>{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|endoftext|>Human: {
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}
Assistant: <|endoftext|>Truth is definitely true.<|endoftext|>Human: Check it on the web.
Assistant: <|endoftext|>{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|endoftext|>Human: {
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}
Assistant: <|endoftext|>I don't need the web to answer you but I did check, as you asked. What now?<|endoftext|>

@@ -1,7 +0,0 @@
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,75 +0,0 @@
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,75 +0,0 @@
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|im_end|>
<|im_start|>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,56 +0,0 @@
<|im_start|>system
You are Qwen, created by Alibaba Cloud. You are a helpful assistant.
# Tools
You may call one or more functions to assist with the user query.
You are provided with function signatures within <tools></tools> XML tags:
<tools>
{"type": "function", "function": {"name": "ipython", "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}}
{"type": "function", "function": {"name": "brave_search", "description": "Executes a web search with Brave.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "Executes a query with Wolfram Alpha.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "test", "description": "Runs a test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}}}
</tools>
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{"name": <function-name>, "arguments": <args-json-object>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"stdout": "Hello, World!"}
</tool_response><|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
true
</tool_response><|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response><|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
Please reason step by step, and put your final answer within \boxed{}.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,7 +0,0 @@
<|im_start|>system
You only tell the truth.<|im_end|>
<|im_start|>user
What's your favourite LLM framework?<|im_end|>
<|im_start|>assistant
llama.cpp!<|im_end|>
<|im_start|>assistant

@@ -1,56 +0,0 @@
<|im_start|>system
Please reason step by step, and put your final answer within \boxed{}.
# Tools
You may call one or more functions to assist with the user query.
You are provided with function signatures within <tools></tools> XML tags:
<tools>
{"type": "function", "function": {"name": "ipython", "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}}
{"type": "function", "function": {"name": "brave_search", "description": "Executes a web search with Brave.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "wolfram_alpha", "description": "Executes a query with Wolfram Alpha.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}}
{"type": "function", "function": {"name": "test", "description": "Runs a test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}}}
</tools>
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{"name": <function-name>, "arguments": <args-json-object>}
</tool_call><|im_end|>
<|im_start|>user
Print a hello world message with python.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"stdout": "Hello, World!"}
</tool_response><|im_end|>
<|im_start|>assistant
Anything else?<|im_end|>
<|im_start|>user
Test a tautology.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "test", "arguments": {"condition": true}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
true
</tool_response><|im_end|>
<|im_start|>assistant
Truth is definitely true.<|im_end|>
<|im_start|>user
Check it on the web.<|im_end|>
<|im_start|>assistant
<tool_call>
{"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}
</tool_call><|im_end|>
<|im_start|>user
<tool_response>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}
</tool_response><|im_end|>
<|im_start|>assistant
I don't need the web to answer you but I did check, as you asked. What now?<|im_end|>
<|im_start|>assistant

@@ -1 +0,0 @@
What's your favourite LLM framework? [/INST] llama.cpp! <|endoftext|>

@@ -1,5 +0,0 @@
[INST] <<SYS>>
You only tell the truth.
<</SYS>>
What's your favourite LLM framework? [/INST] llama.cpp! <|endoftext|>

@@ -1,49 +0,0 @@
Print a hello world message with python. [/INST] {
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
} <|endoftext|><|startoftext|>[INST] {
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
} [/INST] Anything else? <|endoftext|><|startoftext|>[INST] Test a tautology. [/INST] {
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
} <|endoftext|><|startoftext|>[INST] {
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
} [/INST] Truth is definitely true. <|endoftext|><|startoftext|>[INST] Check it on the web. [/INST] {
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
} <|endoftext|><|startoftext|>[INST] {
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
} [/INST] I don't need the web to answer you but I did check, as you asked. What now? <|endoftext|>

@@ -1 +0,0 @@
<|startoftext|> Question: What's your favourite LLM framework? Answer: llama.cpp!<|endoftext|> Answer:

@@ -1 +0,0 @@
<|startoftext|>You only tell the truth. Question: What's your favourite LLM framework? Answer: llama.cpp!<|endoftext|> Answer:

@@ -1,49 +0,0 @@
<|startoftext|> Question: Print a hello world message with python. Answer: {
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|endoftext|> Question: {
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
} Answer: Anything else?<|endoftext|> Question: Test a tautology. Answer: {
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|endoftext|> Question: {
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
} Answer: Truth is definitely true.<|endoftext|> Question: Check it on the web. Answer: {
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|endoftext|> Question: {
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
} Answer: I don't need the web to answer you but I did check, as you asked. What now?<|endoftext|> Answer:

View file

@ -1,5 +0,0 @@
<|startoftext|>[INST] <<SYS>>
Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.
<</SYS>>
What's your favourite LLM framework? [/INST] llama.cpp! <|endoftext|>

View file

@ -1,5 +0,0 @@
<|startoftext|>[INST] <<SYS>>
You only tell the truth.
<</SYS>>
What's your favourite LLM framework? [/INST] llama.cpp! <|endoftext|>

View file

@ -1,53 +0,0 @@
<|startoftext|>[INST] <<SYS>>
Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.
<</SYS>>
Print a hello world message with python. [/INST] {
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
} <|endoftext|>[INST] {
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
} [/INST] Anything else? <|endoftext|>[INST] Test a tautology. [/INST] {
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
} <|endoftext|>[INST] {
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
} [/INST] Truth is definitely true. <|endoftext|>[INST] Check it on the web. [/INST] {
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
} <|endoftext|>[INST] {
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
} [/INST] I don't need the web to answer you but I did check, as you asked. What now? <|endoftext|>

View file

@ -1,3 +0,0 @@
<|startoftext|>User: What's your favourite LLM framework?
Assistant: llama.cpp!<|endoftext|>Assistant:

View file

@ -1,5 +0,0 @@
<|startoftext|>You only tell the truth.
User: What's your favourite LLM framework?
Assistant: llama.cpp!<|endoftext|>Assistant:

View file

@ -1,61 +0,0 @@
<|startoftext|>User: Print a hello world message with python.
Assistant: {
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|endoftext|>User: {
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}
Assistant: Anything else?<|endoftext|>User: Test a tautology.
Assistant: {
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|endoftext|>User: {
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}
Assistant: Truth is definitely true.<|endoftext|>User: Check it on the web.
Assistant: {
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|endoftext|>User: {
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}
Assistant: I don't need the web to answer you but I did check, as you asked. What now?<|endoftext|>Assistant:

View file

@ -1 +0,0 @@
<|startoftext|><User>What's your favourite LLM framework?<Assistant>llama.cpp!<end▁of▁sentence><Assistant>

View file

@ -1 +0,0 @@
<|startoftext|>You only tell the truth.<User>What's your favourite LLM framework?<Assistant>llama.cpp!<end▁of▁sentence><Assistant>

View file

@ -1,49 +0,0 @@
<|startoftext|><User>Print a hello world message with python.<Assistant>{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<end▁of▁sentence><User>{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<Assistant>Anything else?<end▁of▁sentence><User>Test a tautology.<Assistant>{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<end▁of▁sentence><User>{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<Assistant>Truth is definitely true.<end▁of▁sentence><User>Check it on the web.<Assistant>{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<end▁of▁sentence><User>{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<Assistant>I don't need the web to answer you but I did check, as you asked. What now?<end▁of▁sentence><Assistant>

View file

@ -1,7 +0,0 @@
<|startoftext|>You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer
### Instruction:
What's your favourite LLM framework?
### Response:
llama.cpp!
<|EOT|>
### Response:

View file

@ -1,6 +0,0 @@
<|startoftext|>You only tell the truth.### Instruction:
What's your favourite LLM framework?
### Response:
llama.cpp!
<|EOT|>
### Response:

View file

@ -1,80 +0,0 @@
<|startoftext|>You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer
### Instruction:
Print a hello world message with python.
### Response:
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}
<|EOT|>
### Instruction:
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}
### Response:
Anything else?
<|EOT|>
### Instruction:
Test a tautology.
### Response:
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}
<|EOT|>
### Instruction:
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}
### Response:
Truth is definitely true.
<|EOT|>
### Instruction:
Check it on the web.
### Response:
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}
<|EOT|>
### Instruction:
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}
### Response:
I don't need the web to answer you but I did check, as you asked. What now?
<|EOT|>
### Response:

View file

@ -1,5 +0,0 @@
<|startoftext|><start_of_turn>user
What's your favourite LLM framework?<end_of_turn>
<start_of_turn>model
llama.cpp!<end_of_turn>
<start_of_turn>model

View file

@ -1,6 +0,0 @@
<|startoftext|><start_of_turn>user
You only tell the truth.
What's your favourite LLM framework?<end_of_turn>
<start_of_turn>model
llama.cpp!<end_of_turn>
<start_of_turn>model

View file

@ -1,73 +0,0 @@
<|startoftext|><start_of_turn>user
Print a hello world message with python.<end_of_turn>
<start_of_turn>model
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<end_of_turn>
<start_of_turn>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<end_of_turn>
<start_of_turn>model
Anything else?<end_of_turn>
<start_of_turn>user
Test a tautology.<end_of_turn>
<start_of_turn>model
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<end_of_turn>
<start_of_turn>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<end_of_turn>
<start_of_turn>model
Truth is definitely true.<end_of_turn>
<start_of_turn>user
Check it on the web.<end_of_turn>
<start_of_turn>model
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<end_of_turn>
<start_of_turn>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<end_of_turn>
<start_of_turn>model
I don't need the web to answer you but I did check, as you asked. What now?<end_of_turn>
<start_of_turn>model

View file

@ -1,5 +0,0 @@
<|startoftext|><start_of_turn>user
What's your favourite LLM framework?<end_of_turn>
<start_of_turn>model
llama.cpp!<end_of_turn>
<start_of_turn>model

View file

@ -1,6 +0,0 @@
<|startoftext|><start_of_turn>user
You only tell the truth.
What's your favourite LLM framework?<end_of_turn>
<start_of_turn>model
llama.cpp!<end_of_turn>
<start_of_turn>model

View file

@ -1,73 +0,0 @@
<|startoftext|><start_of_turn>user
Print a hello world message with python.<end_of_turn>
<start_of_turn>model
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<end_of_turn>
<start_of_turn>user
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<end_of_turn>
<start_of_turn>model
Anything else?<end_of_turn>
<start_of_turn>user
Test a tautology.<end_of_turn>
<start_of_turn>model
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<end_of_turn>
<start_of_turn>user
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<end_of_turn>
<start_of_turn>model
Truth is definitely true.<end_of_turn>
<start_of_turn>user
Check it on the web.<end_of_turn>
<start_of_turn>model
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<end_of_turn>
<start_of_turn>user
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<end_of_turn>
<start_of_turn>model
I don't need the web to answer you but I did check, as you asked. What now?<end_of_turn>
<start_of_turn>model

View file

@ -1 +0,0 @@
<用户>What's your favourite LLM framework?<AI>llama.cpp!

View file

@ -1 +0,0 @@
You only tell the truth.<用户>What's your favourite LLM framework?<AI>llama.cpp!

View file

@ -1,49 +0,0 @@
<用户>Print a hello world message with python.<AI>{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<用户>{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<AI>Anything else?<用户>Test a tautology.<AI>{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<用户>{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<AI>Truth is definitely true.<用户>Check it on the web.<AI>{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<用户>{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<AI>I don't need the web to answer you but I did check, as you asked. What now?

View file

@ -1,11 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

View file

@ -1,13 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
<|eot_id|><|start_header_id|>system<|end_header_id|>
You only tell the truth.<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

View file

@ -1,66 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
You have access to the following functions:
Use the function 'ipython' to 'Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.'
{"name": "ipython", "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to run in the ipython interpreter."}}, "required": ["code"]}}
Use the function 'brave_search' to 'Executes a web search with Brave.'
{"name": "brave_search", "description": "Executes a web search with Brave.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to search for."}}, "required": ["query"]}}
Use the function 'wolfram_alpha' to 'Executes a query with Wolfram Alpha.'
{"name": "wolfram_alpha", "description": "Executes a query with Wolfram Alpha.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The query to execute."}}, "required": ["query"]}}
Use the function 'test' to 'Runs a test.'
{"name": "test", "description": "Runs a test.", "parameters": {"type": "object", "properties": {"condition": {"type": "boolean", "description": "The condition to test."}}, "required": ["condition"]}}
Think very carefully before calling functions.
If a you choose to call a function ONLY reply in the following format:
<{start_tag}={function_name}>{parameters}{end_tag}
where
start_tag => `<function`
parameters => a JSON dict with the function argument name as key and function argument value as value.
end_tag => `</function>`
Here is an example,
<function=example_function_name>{"example_name": "example_value"}</function>
Reminder:
- If looking for real time information use relevant functions before falling back to brave_search
- Function calls MUST follow the specified format, start with <function= and end with </function>
- Required parameters MUST be specified
- Only call one function at a time
- Put the entire function call reply on one line
<|eot_id|><|start_header_id|>user<|end_header_id|>
Print a hello world message with python.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
<function=ipython>{"code": "print('Hello, World!')"}</function><|eom_id|><|start_header_id|>ipython<|end_header_id|>
{"stdout": "Hello, World!"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Anything else?<|eot_id|><|start_header_id|>user<|end_header_id|>
Test a tautology.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
<function=test>{"condition":true}</function><|eom_id|><|start_header_id|>ipython<|end_header_id|>
true<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Truth is definitely true.<|eot_id|><|start_header_id|>user<|end_header_id|>
Check it on the web.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
<function=brave_search>{"query": "what is truth anyway am I right?"}</function><|eom_id|><|start_header_id|>ipython<|end_header_id|>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
I don't need the web to answer you but I did check, as you asked. What now?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
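
The system prompt above defines a strict single-line call format: <function=name>{json-arguments}</function>. A sketch of a parser for it (the regex and names are assumptions, not part of the deleted tests):

import json
import re

FUNCTION_CALL_RE = re.compile(r"<function=(\w+)>(.*?)</function>", re.DOTALL)

def parse_function_call(completion: str):
    """Parse the <function=name>{...}</function> format requested above."""
    m = FUNCTION_CALL_RE.search(completion)
    if m is None:
        return None  # plain text reply, no call
    return {"name": m.group(1), "arguments": json.loads(m.group(2))}

call = parse_function_call('<function=ipython>{"code": "print(\'Hello, World!\')"}</function>')
assert call == {"name": "ipython", "arguments": {"code": "print('Hello, World!')"}}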

View file

@ -1,21 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
You are capable of executing available function(s) if required.
Only execute function(s) when absolutely necessary.
Ask for the required input to:recipient==all
Use JSON for function arguments.
Respond in this format:
>>>${recipient}
${content}
Available functions:
// Supported function definitions that should be called when necessary.
namespace functions {
} // namespace functions<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>

View file

@ -1,23 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
You are capable of executing available function(s) if required.
Only execute function(s) when absolutely necessary.
Ask for the required input to:recipient==all
Use JSON for function arguments.
Respond in this format:
>>>${recipient}
${content}
Available functions:
// Supported function definitions that should be called when necessary.
namespace functions {
} // namespace functions<|eot_id|><|start_header_id|>system<|end_header_id|>
You only tell the truth.<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>

View file

@ -1,70 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
You are capable of executing available function(s) if required.
Only execute function(s) when absolutely necessary.
Ask for the required input to:recipient==all
Use JSON for function arguments.
Respond in this format:
>>>${recipient}
${content}
Available functions:
// Supported function definitions that should be called when necessary.
namespace functions {
// Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.
type ipython = (_: {
// The code to run in the ipython interpreter.
code: string,
}) => any;
// Executes a web search with Brave.
type brave_search = (_: {
// The query to search for.
query: string,
}) => any;
// Executes a query with Wolfram Alpha.
type wolfram_alpha = (_: {
// The query to execute.
query: string,
}) => any;
// Runs a test.
type test = (_: {
// The condition to test.
condition: boolean,
}) => any;
} // namespace functions<|eot_id|><|start_header_id|>user<|end_header_id|>
Print a hello world message with python.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>ipython
{"code": "print('Hello, World!')"}<|eot_id|><|start_header_id|>tool<|end_header_id|>
{"stdout": "Hello, World!"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
Anything else?<|eot_id|><|start_header_id|>user<|end_header_id|>
Test a tautology.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>test
{"condition":true}<|eot_id|><|start_header_id|>tool<|end_header_id|>
true<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
Truth is definitely true.<|eot_id|><|start_header_id|>user<|end_header_id|>
Check it on the web.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>brave_search
{"query": "what is truth anyway am I right?"}<|eot_id|><|start_header_id|>tool<|end_header_id|>
{"title":"Truth: don't ask the web, ask an LLM instead!","url":"https://en.wikipedia.org/wiki/Truth"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>all
I don't need the web to answer you but I did check, as you asked. What now?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
>>>
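
In the >>>${recipient} format above, ">>>all" addresses the user and any other recipient names a function, with its JSON arguments on the following line(s). A minimal splitter, assuming a single well-formed completion (names illustrative):

def split_recipients(completion: str):
    """Split a functionary-style completion into (recipient, content) pairs."""
    parts = []
    for chunk in completion.split(">>>")[1:]:
        recipient, _, content = chunk.partition("\n")
        parts.append((recipient.strip(), content.strip()))
    return parts

assert split_recipients('>>>ipython\n{"code": "print(1)"}') == [("ipython", '{"code": "print(1)"}')]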

View file

@ -1,11 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

View file

@ -1,11 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
You only tell the truth.<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

View file

@ -1,116 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Environment: ipython
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
<|eot_id|><|start_header_id|>user<|end_header_id|>
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.Do not use variables.
{
"type": "function",
"function": {
"name": "ipython",
"description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The code to run in the ipython interpreter."
}
},
"required": [
"code"
]
}
}
}
{
"type": "function",
"function": {
"name": "brave_search",
"description": "Executes a web search with Brave.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for."
}
},
"required": [
"query"
]
}
}
}
{
"type": "function",
"function": {
"name": "wolfram_alpha",
"description": "Executes a query with Wolfram Alpha.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to execute."
}
},
"required": [
"query"
]
}
}
}
{
"type": "function",
"function": {
"name": "test",
"description": "Runs a test.",
"parameters": {
"type": "object",
"properties": {
"condition": {
"type": "boolean",
"description": "The condition to test."
}
},
"required": [
"condition"
]
}
}
}
Print a hello world message with python.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "ipython", "parameters": {"code": "print('Hello, World!')"}}<|eot_id|><|start_header_id|>ipython<|end_header_id|>
"{\"stdout\": \"Hello, World!\"}"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Anything else?<|eot_id|><|start_header_id|>user<|end_header_id|>
Test a tautology.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "test", "parameters": {"condition": true}}<|eot_id|><|start_header_id|>ipython<|end_header_id|>
"true"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Truth is definitely true.<|eot_id|><|start_header_id|>user<|end_header_id|>
Check it on the web.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "brave_search", "parameters": {"query": "what is truth anyway am I right?"}}<|eot_id|><|start_header_id|>ipython<|end_header_id|>
"{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
I don't need the web to answer you but I did check, as you asked. What now?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
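
In this rendering, tool calls are bare JSON objects of the form {"name": ..., "parameters": ...} with no surrounding tags, so a client can simply attempt to decode the whole completion. A sketch (the helper name is an assumption):

import json

def try_parse_json_call(completion: str):
    """Return the {"name", "parameters"} object if the completion is a bare JSON call."""
    try:
        obj = json.loads(completion)
    except json.JSONDecodeError:
        return None
    if isinstance(obj, dict) and obj.keys() >= {"name", "parameters"}:
        return obj
    return None

assert try_parse_json_call('{"name": "test", "parameters": {"condition": true}}') == \
    {"name": "test", "parameters": {"condition": True}}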

View file

@ -1,11 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

View file

@ -1,11 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
You only tell the truth.<|eot_id|><|start_header_id|>user<|end_header_id|>
What's your favourite LLM framework?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
llama.cpp!<|eot_id|><|start_header_id|>assistant<|end_header_id|>

View file

@ -1,118 +0,0 @@
<|startoftext|><|start_header_id|>system<|end_header_id|>
Environment: ipython
Tools: wolfram_alpha, brave_search
Cutting Knowledge Date: December 2023
Today Date: 26 Jul 2024
<|eot_id|><|start_header_id|>user<|end_header_id|>
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.Do not use variables.
{
"type": "function",
"function": {
"name": "ipython",
"description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The code to run in the ipython interpreter."
}
},
"required": [
"code"
]
}
}
}
{
"type": "function",
"function": {
"name": "brave_search",
"description": "Executes a web search with Brave.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for."
}
},
"required": [
"query"
]
}
}
}
{
"type": "function",
"function": {
"name": "wolfram_alpha",
"description": "Executes a query with Wolfram Alpha.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to execute."
}
},
"required": [
"query"
]
}
}
}
{
"type": "function",
"function": {
"name": "test",
"description": "Runs a test.",
"parameters": {
"type": "object",
"properties": {
"condition": {
"type": "boolean",
"description": "The condition to test."
}
},
"required": [
"condition"
]
}
}
}
Print a hello world message with python.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "ipython", "parameters": {"code": "print('Hello, World!')"}}<|eom_id|><|start_header_id|>ipython<|end_header_id|>
"{\"stdout\": \"Hello, World!\"}"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Anything else?<|eot_id|><|start_header_id|>user<|end_header_id|>
Test a tautology.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{"name": "test", "parameters": {"condition": true}}<|eom_id|><|start_header_id|>ipython<|end_header_id|>
"true"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Truth is definitely true.<|eot_id|><|start_header_id|>user<|end_header_id|>
Check it on the web.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
<|python_tag|>brave_search.call(query="what is truth anyway am I right?")<|eom_id|><|start_header_id|>ipython<|end_header_id|>
"{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}"<|eot_id|><|start_header_id|>assistant<|end_header_id|>
I don't need the web to answer you but I did check, as you asked. What now?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
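
With built-in tools enabled, the last call above is emitted as a Python-style expression after <|python_tag|> rather than as JSON. A sketch of parsing it with the ast module (assumes a single well-formed name.call(kw=...) expression; names are illustrative):

import ast

def parse_builtin_call(completion: str):
    """Parse <|python_tag|>tool.call(arg="value", ...) into name + arguments."""
    prefix = "<|python_tag|>"
    if not completion.startswith(prefix):
        return None
    expr = ast.parse(completion[len(prefix):], mode="eval").body
    tool = expr.func.value.id  # e.g. "brave_search"
    args = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
    return {"name": tool, "arguments": args}

call = parse_builtin_call('<|python_tag|>brave_search.call(query="what is truth anyway am I right?")')
assert call == {"name": "brave_search", "arguments": {"query": "what is truth anyway am I right?"}}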

View file

@ -1,4 +0,0 @@
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>

View file

@ -1,5 +0,0 @@
<|user|>
You only tell the truth.
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>

View file

@ -1,72 +0,0 @@
<|user|>
Print a hello world message with python.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|end|>
<|assistant|>
Anything else?<|end|>
<|user|>
Test a tautology.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|end|>
<|assistant|>
Truth is definitely true.<|end|>
<|user|>
Check it on the web.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|end|>
<|assistant|>
I don't need the web to answer you but I did check, as you asked. What now?<|end|>

View file

@ -1,5 +0,0 @@
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

View file

@ -1,7 +0,0 @@
<|system|>
You only tell the truth.<|end|>
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

View file

@ -1,73 +0,0 @@
<|user|>
Print a hello world message with python.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|end|>
<|assistant|>
Anything else?<|end|>
<|user|>
Test a tautology.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|end|>
<|assistant|>
Truth is definitely true.<|end|>
<|user|>
Check it on the web.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|end|>
<|assistant|>
I don't need the web to answer you but I did check, as you asked. What now?<|end|>
<|assistant|>

View file

@ -1,5 +0,0 @@
<|startoftext|><|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

View file

@ -1,7 +0,0 @@
<|startoftext|><|system|>
You only tell the truth.<|end|>
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

View file

@ -1,73 +0,0 @@
<|startoftext|><|user|>
Print a hello world message with python.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|end|>
<|assistant|>
Anything else?<|end|>
<|user|>
Test a tautology.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|end|>
<|assistant|>
Truth is definitely true.<|end|>
<|user|>
Check it on the web.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|end|>
<|assistant|>
I don't need the web to answer you but I did check, as you asked. What now?<|end|>
<|assistant|>

View file

@ -1,5 +0,0 @@
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

View file

@ -1,7 +0,0 @@
<|system|>
You only tell the truth.<|end|>
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>
<|assistant|>

View file

@ -1,73 +0,0 @@
<|user|>
Print a hello world message with python.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|end|>
<|assistant|>
Anything else?<|end|>
<|user|>
Test a tautology.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|end|>
<|assistant|>
Truth is definitely true.<|end|>
<|user|>
Check it on the web.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|end|>
<|assistant|>
I don't need the web to answer you but I did check, as you asked. What now?<|end|>
<|assistant|>

View file

@ -1,4 +0,0 @@
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>

View file

@ -1,6 +0,0 @@
<|system|>
You only tell the truth.<|end|>
<|user|>
What's your favourite LLM framework?<|end|>
<|assistant|>
llama.cpp!<|end|>

View file

@ -1,72 +0,0 @@
<|user|>
Print a hello world message with python.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "ipython",
"arguments": {
"code": "print('Hello, World!')"
},
"id": "call_1___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "ipython",
"content": "{\"stdout\": \"Hello, World!\"}",
"tool_call_id": "call_1___"
}
}<|end|>
<|assistant|>
Anything else?<|end|>
<|user|>
Test a tautology.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "test",
"arguments": {
"condition": true
},
"id": "call_2___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "test",
"content": "true",
"tool_call_id": "call_2___"
}
}<|end|>
<|assistant|>
Truth is definitely true.<|end|>
<|user|>
Check it on the web.<|end|>
<|assistant|>
{
"tool_calls": [
{
"name": "brave_search",
"arguments": {
"query": "what is truth anyway am I right?"
},
"id": "call_3___"
}
]
}<|end|>
<|user|>
{
"tool_response": {
"tool": "brave_search",
"content": "{\"title\":\"Truth: don't ask the web, ask an LLM instead!\",\"url\":\"https://en.wikipedia.org/wiki/Truth\"}",
"tool_call_id": "call_3___"
}
}<|end|>
<|assistant|>
I don't need the web to answer you but I did check, as you asked. What now?<|end|>

Some files were not shown because too many files have changed in this diff.