fix /slots endpoint
parent 1881ffaf3e · commit 01da1ed9b6
2 changed files with 3 additions and 2 deletions
@@ -2233,6 +2233,7 @@ struct server_context {
     auto res = std::make_unique<server_task_result_metrics>();
     res->id = task.id;
+    res->slots_data = slots_data;
     res->n_idle_slots = n_idle_slots;
     res->n_processing_slots = n_processing_slots;
     res->n_tasks_deferred = queue_tasks.queue_tasks_deferred.size();
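The hunk above is the actual /slots fix: the metrics task result was built without its slots_data field, so the endpoint had no per-slot state to report. As a rough usage check, a client can query the endpoint against a running server; the sketch below uses cpp-httplib, and the host, port, and the expectation of a JSON array are assumptions, not something this commit pins down.

    // Hypothetical smoke test for the fixed endpoint (host, port and response
    // shape are assumptions; adjust to your server's settings).
    #include <httplib.h>
    #include <iostream>

    int main() {
        httplib::Client cli("127.0.0.1", 8080);
        // with slots_data populated, /slots should report one entry per slot
        if (auto res = cli.Get("/slots")) {
            std::cout << res->status << "\n" << res->body << std::endl;
        } else {
            std::cerr << "request failed" << std::endl;
        }
        return 0;
    }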
@@ -327,12 +327,12 @@ static std::string llama_get_chat_template(const struct llama_model * model) {
     std::string template_key = "tokenizer.chat_template";
     // call with NULL buffer to get the total size of the string
     int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
-    if (res < 0) {
+    if (res < 2) {
         return "";
     } else {
         std::vector<char> model_template(res, 0);
         llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
-        return std::string(model_template.data(), model_template.size());
+        return std::string(model_template.data(), model_template.size() - 1);
     }
 }
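The second hunk tightens the chat-template lookup around the two-call pattern its comment describes: probe with a NULL buffer to learn the size, then fill a buffer of that size. The sketch below restates that pattern as a standalone helper; the name read_meta_string is hypothetical, and the assumption that the reported size covers the terminating NUL (hence res < 2 meaning "missing or empty" and the final - 1) is inferred from the diff itself rather than from llama.h.

    // Hypothetical helper mirroring the fixed function; assumes, as the diff
    // implies, that the reported size includes the terminating NUL byte.
    #include <string>
    #include <vector>
    #include "llama.h"

    static std::string read_meta_string(const struct llama_model * model, const char * key) {
        // probe: NULL buffer, size 0 -> required size (negative if the key is absent)
        const int32_t len = llama_model_meta_val_str(model, key, NULL, 0);
        if (len < 2) {
            return ""; // missing key, or a value that is empty once the NUL is discounted
        }
        std::vector<char> buf(len, 0);
        // fill: second call writes the value into the buffer
        llama_model_meta_val_str(model, key, buf.data(), buf.size());
        // drop the trailing NUL so it does not end up inside the std::string
        return std::string(buf.data(), buf.size() - 1);
    }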