rename: common_chat_template[s]

ochafik 2025-01-20 23:42:07 +00:00
parent 8348c605ac
commit ee475d2f51
5 changed files with 17 additions and 17 deletions

@@ -1749,7 +1749,7 @@ bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) {
 }
 
 std::string common_chat_apply_template(
-        const llama_chat_template & tmpl,
+        const common_chat_template & tmpl,
         const std::vector<common_chat_msg> & msgs,
         bool add_ass,
         bool use_jinja) {
@@ -1791,7 +1791,7 @@ std::string common_chat_apply_template(
 }
 
 std::string common_chat_format_single(
-        const llama_chat_template & tmpl,
+        const common_chat_template & tmpl,
         const std::vector<common_chat_msg> & past_msg,
         const common_chat_msg & new_msg,
         bool add_ass,
@@ -1811,7 +1811,7 @@ std::string common_chat_format_single(
     return ss.str();
 }
 
-std::string common_chat_format_example(const llama_chat_template & tmpl, bool use_jinja) {
+std::string common_chat_format_example(const common_chat_template & tmpl, bool use_jinja) {
     std::vector<common_chat_msg> msgs = {
         {"system", "You are a helpful assistant"},
         {"user", "Hello"},
@@ -1821,7 +1821,7 @@ std::string common_chat_format_example(const llama_chat_template & tmpl, bool us
     return common_chat_apply_template(tmpl, msgs, true, use_jinja);
 }
 
-llama_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
+common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
 {
     auto vocab = llama_model_get_vocab(model);
     std::string default_template_src = chat_template_override;

@@ -611,26 +611,26 @@ namespace minja {
     class chat_template;
 }
 
-typedef minja::chat_template llama_chat_template;
+typedef minja::chat_template common_chat_template;
 
-struct llama_chat_templates {
+struct common_chat_templates {
     bool has_explicit_template; // Model had builtin template or template overridde was specified.
-    std::unique_ptr<llama_chat_template> template_default; // always set (defaults to chatml)
-    std::unique_ptr<llama_chat_template> template_tool_use;
+    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
+    std::unique_ptr<common_chat_template> template_tool_use;
 };
 
 // CPP wrapper for llama_chat_apply_template
 // If the built-in template is not supported, we default to chatml
 // If the custom "tmpl" is not supported, we throw an error
 std::string common_chat_apply_template(
-        const llama_chat_template & tmpl,
+        const common_chat_template & tmpl,
         const std::vector<common_chat_msg> & chat,
         bool add_ass,
         bool use_jinja);
 
 // Format single message, while taking into account the position of that message in chat history
 std::string common_chat_format_single(
-        const llama_chat_template & tmpl,
+        const common_chat_template & tmpl,
         const std::vector<common_chat_msg> & past_msg,
         const common_chat_msg & new_msg,
         bool add_ass,
@@ -638,9 +638,9 @@ std::string common_chat_format_single(
 // Returns an example of formatted chat
 std::string common_chat_format_example(
-        const llama_chat_template & tmpl, bool use_jinja);
+        const common_chat_template & tmpl, bool use_jinja);
 
-llama_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);
+common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);
 
 //
 // KV cache utils
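
For orientation, a minimal sketch of how the renamed API above is used together, relying only on the signatures shown in this diff; the include path, the example_usage function, and the message contents are illustrative assumptions, not part of this commit.

    // Sketch only: exercises the renamed declarations diffed above.
    #include "common.h"  // assumption: the header containing the declarations above

    #include <string>
    #include <vector>

    static void example_usage(const struct llama_model * model, bool use_jinja) {
        // Read the chat template(s) from model metadata; an empty override keeps the model's own.
        common_chat_templates templates = common_chat_templates_from_model(model, /* chat_template_override = */ "");

        std::vector<common_chat_msg> msgs = {
            {"system", "You are a helpful assistant"},
            {"user",   "Hello"},
        };

        // template_default is documented above as always set (defaults to chatml).
        std::string prompt  = common_chat_apply_template(*templates.template_default, msgs, /* add_ass = */ true, use_jinja);

        // Short rendered example, e.g. for logging at startup.
        std::string preview = common_chat_format_example(*templates.template_default, use_jinja);
        (void) prompt; (void) preview;
    }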

@@ -717,7 +717,7 @@ static void add_message(const char * role, const std::string & text, LlamaData &
 }
 
 // Function to apply the chat template and resize `formatted` if needed
-static int apply_chat_template(const llama_chat_template & tmpl, LlamaData & llama_data, const bool append, bool use_jinja) {
+static int apply_chat_template(const common_chat_template & tmpl, LlamaData & llama_data, const bool append, bool use_jinja) {
     if (use_jinja) {
         json messages = json::array();
         for (const auto & msg : llama_data.messages) {
@@ -893,7 +893,7 @@ static int generate_response(LlamaData & llama_data, const std::string & prompt,
 }
 
 // Helper function to apply the chat template and handle errors
-static int apply_chat_template_with_error_handling(const llama_chat_template & tmpl, LlamaData & llama_data, const bool append, int & output_length, bool use_jinja) {
+static int apply_chat_template_with_error_handling(const common_chat_template & tmpl, LlamaData & llama_data, const bool append, int & output_length, bool use_jinja) {
     const int new_len = apply_chat_template(tmpl, llama_data, append, use_jinja);
     if (new_len < 0) {
         printe("failed to apply the chat template\n");

@@ -1689,7 +1689,7 @@ struct server_context {
     // Necessary similarity of prompt for slot selection
     float slot_prompt_similarity = 0.0f;
 
-    llama_chat_templates chat_templates;
+    common_chat_templates chat_templates;
 
     ~server_context() {
         // Clear any sampling context

@@ -351,7 +351,7 @@ static llama_tokens format_infill(
 }
 
 // Format given chat. If tmpl is empty, we take the template from model metadata
-inline std::string format_chat(const llama_chat_template & tmpl, const std::vector<json> & messages) {
+inline std::string format_chat(const common_chat_template & tmpl, const std::vector<json> & messages) {
     std::vector<common_chat_msg> chat;
 
     for (size_t i = 0; i < messages.size(); ++i) {
@@ -580,7 +580,7 @@ static json oaicompat_completion_params_parse(const json & body) {
 
 static json oaicompat_completion_params_parse(
     const json & body, /* openai api json semantics */
-    const llama_chat_template & tmpl,
+    const common_chat_template & tmpl,
     bool use_jinja)
 {
     json llama_params;
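
Similarly, a hedged sketch of calling the two helpers from the last hunk; the request body, the json alias, and the way tmpl is obtained are assumptions for illustration, and only the two signatures shown above come from the diff.

    // Sketch only: shows the shape of calls into the helpers diffed above
    // (assumes the server utils header that declares them is included).
    #include <string>
    #include <vector>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;  // assumption: stand-in for the json alias used by the server code

    static void example_request(const common_chat_template & tmpl, bool use_jinja) {
        // OpenAI-style request body, as accepted by oaicompat_completion_params_parse().
        json body = {
            {"messages", json::array({
                { {"role", "system"}, {"content", "You are a helpful assistant"} },
                { {"role", "user"},   {"content", "Hello"} },
            })},
        };

        // Translate the OpenAI-style parameters into the server's internal parameter set.
        json llama_params = oaicompat_completion_params_parse(body, tmpl, use_jinja);

        // Or render the chat messages directly with the same template.
        std::vector<json> messages = body.at("messages").get<std::vector<json>>();
        std::string prompt = format_chat(tmpl, messages);
        (void) llama_params; (void) prompt;
    }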