use "built-in" instead of "supported"

Xuan Son Nguyen 2024-12-02 13:41:09 +01:00
parent 47b0528ce9
commit 7f6e7570db
3 changed files with 5 additions and 5 deletions

@@ -990,8 +990,8 @@ extern "C" {
                                char * buf,
                                int32_t length);
 
-    // Get list of supported chat templates
-    int32_t llama_chat_supported_templates(const char ** output, size_t len);
+    // Get list of built-in chat templates
+    int32_t llama_chat_builtin_templates(const char ** output, size_t len);
 
     //
     // Sampling API
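
For context, a minimal sketch (not part of this commit) of how a caller might use the renamed function; it assumes, consistent with the test change further down, that the return value is the total number of built-in templates and that at most len entries are written:

#include <cstdint>
#include <cstdio>
#include <vector>
#include "llama.h"

int main() {
    // Over-allocate, then shrink to the count the library reports.
    std::vector<const char *> names(1024);
    int32_t n = llama_chat_builtin_templates(names.data(), names.size());
    if (n < 0) {
        return 1; // assumed error convention for a negative result
    }
    if ((size_t) n < names.size()) {
        names.resize(n);
    }
    for (const char * name : names) {
        printf("%s\n", name);
    }
    return 0;
}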

@@ -22370,7 +22370,7 @@ int32_t llama_chat_apply_template(
     return res;
 }
 
-int32_t llama_chat_supported_templates(const char ** output, size_t len) {
+int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
     auto it = LLM_CHAT_TEMPLATES.begin();
     for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
         output[i] = it->first.c_str();
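
The hunk cuts off before the function's return statement; judging from the test below (assert(res > 0) followed by resize(res)), it plausibly returns the total entry count of LLM_CHAT_TEMPLATES. A self-contained sketch of the same fill-and-report pattern, using a hypothetical stand-in map (the real one lives in llama.cpp and maps names to template enum values):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// Stand-in for the internal map in llama.cpp (hypothetical contents).
static const std::map<std::string, int> LLM_CHAT_TEMPLATES = {
    { "chatml", 0 }, { "llama2", 1 }, { "zephyr", 2 },
};

// Same shape as the committed function: fill up to `len` entries,
// report the total so the caller can tell whether the buffer sufficed.
static int32_t list_builtin_templates(const char ** output, size_t len) {
    auto it = LLM_CHAT_TEMPLATES.begin();
    for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
        // c_str() stays valid because the map has static storage duration.
        output[i] = it->first.c_str();
        it++;
    }
    return (int32_t) LLM_CHAT_TEMPLATES.size();
}

int main() {
    const char * names[8];
    int32_t n = list_builtin_templates(names, 8);
    for (int32_t i = 0; i < n && i < 8; i++) {
        printf("%s\n", names[i]);
    }
    return 0;
}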

@@ -135,10 +135,10 @@ int main(void) {
 
     // list all supported templates
     std::vector<const char *> supported_tmpl(1024);
-    res = llama_chat_supported_templates(supported_tmpl.data(), supported_tmpl.size());
+    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
     assert(res > 0);
     supported_tmpl.resize(res);
-    printf("Supported templates:\n");
+    printf("Built-in chat templates:\n");
     for (auto tmpl : supported_tmpl) {
         printf(" %s\n", tmpl);
     }