rm unneeded vocab

Olivier Chafik 2025-02-03 19:59:50 +00:00
parent 7dc271fb37
commit c6214ee9d6
4 changed files with 10 additions and 11 deletions

View file

@@ -1911,9 +1911,9 @@ struct server_context {
             }});
             GGML_ASSERT(templates.template_default);
             try {
-                common_chat_params_init(*templates.template_default, inputs, vocab);
+                common_chat_params_init(*templates.template_default, inputs);
                 if (templates.template_tool_use) {
-                    common_chat_params_init(*templates.template_tool_use, inputs, vocab);
+                    common_chat_params_init(*templates.template_tool_use, inputs);
                 }
                 return true;
             } catch (const std::exception & e) {
@@ -4052,7 +4052,7 @@ int main(int argc, char ** argv) {
         }
 
         auto body = json::parse(req.body);
-        json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates, llama_model_get_vocab(ctx_server.model));
+        json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates);
 
         return handle_completions_impl(
             SERVER_TASK_TYPE_COMPLETION,
@@ -4065,7 +4065,7 @@ int main(int argc, char ** argv) {
     // same with handle_chat_completions, but without inference part
     const auto handle_apply_template = [&ctx_server, &params, &res_ok](const httplib::Request & req, httplib::Response & res) {
         auto body = json::parse(req.body);
-        json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates, llama_model_get_vocab(ctx_server.model));
+        json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates);
         res_ok(res, {{ "prompt", std::move(data.at("prompt")) }});
     };
 

View file

@@ -582,8 +582,7 @@ static json oaicompat_completion_params_parse(const json & body) {
 static json oaicompat_completion_params_parse(
     const json & body, /* openai api json semantics */
     bool use_jinja,
-    const common_chat_templates & chat_templates,
-    const llama_vocab * vocab)
+    const common_chat_templates & chat_templates)
 {
     json llama_params;
     const auto & tmpl = body.contains("tools") && chat_templates.template_tool_use
@@ -649,7 +648,7 @@ static json oaicompat_completion_params_parse(
     inputs.stream = stream;
     // TODO: support mixing schema w/ tools beyond generic format.
     inputs.json_schema = json_value(llama_params, "json_schema", json());
-    auto chat_params = common_chat_params_init(tmpl, inputs, vocab);
+    auto chat_params = common_chat_params_init(tmpl, inputs);
 
     llama_params["chat_format"] = static_cast<int>(chat_params.format);
     llama_params["prompt"] = chat_params.prompt;
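For context, a minimal sketch of the simplified call pattern after this commit. The inputs struct name, its fields, and the surrounding setup are assumptions based only on identifiers visible in this diff, not code taken from the repository:

    // Hypothetical usage sketch, not part of this commit: build the chat inputs
    // and render the prompt without threading a llama_vocab pointer through.
    common_chat_inputs inputs;                                  // assumed struct name
    inputs.stream      = false;
    inputs.json_schema = json();                                // no schema constraint

    // before: common_chat_params_init(tmpl, inputs, vocab);
    auto chat_params = common_chat_params_init(tmpl, inputs);   // vocab argument dropped

    llama_params["chat_format"] = static_cast<int>(chat_params.format);
    llama_params["prompt"]      = chat_params.prompt;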