fix gcc error + lint

ochafik 2024-09-26 19:18:40 +01:00
parent 2926089c5d
commit c88c932d98
2 changed files with 4 additions and 4 deletions

File 1 of 2:

@@ -9,7 +9,7 @@ using json = nlohmann::ordered_json;
 enum llama_tool_call_style {
-    Unknown,
+    UnknownToolCallStyle,
     Llama31,
     FunctionaryV3Llama3,
     FunctionaryV3Llama31,
@@ -20,7 +20,7 @@ class llama_chat_template {
   public:
   private:
-    llama_tool_call_style _tool_call_style = Unknown;
+    llama_tool_call_style _tool_call_style = UnknownToolCallStyle;
     bool _supports_tools = true;
     // Meta-Llama-3.1-8B-Instruct's template expects arguments to be an object.
     // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
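
Note on the rename: `llama_tool_call_style` is a plain (unscoped) enum, so its enumerators are injected into the enclosing scope, where a generic name like `Unknown` is one collision away from the kind of redefinition error or shadowing warning that this commit's "lint" fix presumably addresses. An illustrative sketch, not code from this repository (`other_state` is hypothetical):

    // Unscoped enums export their enumerators into the surrounding scope,
    // so a bare `Unknown` is fragile; a prefixed name is not.
    enum llama_tool_call_style {
        UnknownToolCallStyle,   // was `Unknown` before this commit
        Llama31,
    };

    // enum other_state { Unknown };  // with the old name, this second
    //                                // `Unknown` would fail to compile

    int main() {
        llama_tool_call_style s = UnknownToolCallStyle;  // visible without qualification
        return s == UnknownToolCallStyle ? 0 : 1;
    }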

File 2 of 2:

@@ -59,7 +59,7 @@ static T json_value(const json & body, const std::string & key, const T & defaul
 //
 // Format given chat. If tmpl is empty, we take the template from model metadata
-inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages, const json & tools, bool use_jinja) {
+inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
     std::vector<llama_chat_msg> chat;
     for (size_t i = 0; i < messages.size(); ++i) {
@@ -396,7 +396,7 @@ static json oaicompat_completion_params_parse(
         }
         llama_params["prompt"] = tmpl.apply(body.at("messages"), tools, /* add_generation_prompt= */ true);
     } else {
-        llama_params["prompt"] = format_chat(model, tmpl.chat_template(), body.at("messages"), tools, /* use_jinja= */ false);
+        llama_params["prompt"] = format_chat(model, tmpl.chat_template(), body.at("messages"));
     }
     // Handle "n" field
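
Note on the `format_chat` change: the non-Jinja path never read `tools` or `use_jinja`, and gcc's `-Wunused-parameter` (fatal under `-Werror`) rejects parameters that are named but never used, which is presumably the "gcc error" being fixed here. A reduced sketch of the before/after signatures (`format_chat_before`/`format_chat_after` are hypothetical stand-ins using plain strings in place of json):

    #include <string>
    #include <vector>

    // Before (reduced): `tools` and `use_jinja` are named but never read, so
    // `g++ -Wunused-parameter -Werror` fails with "unused parameter 'tools'".
    static std::string format_chat_before(const std::vector<std::string> & messages,
                                          const std::string & tools, bool use_jinja) {
        return messages.empty() ? "" : messages.front();
    }

    // After (reduced): the signature carries only what the function uses.
    static std::string format_chat_after(const std::vector<std::string> & messages) {
        return messages.empty() ? "" : messages.front();
    }

    int main() {
        const std::vector<std::string> msgs = { "hello" };
        return format_chat_after(msgs) == format_chat_before(msgs, "", false) ? 0 : 1;
    }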