server : (refactor) no more json in server_task input (#10691)

* server : (refactor) no more json in server_task input

* add test for slots endpoint

* add tests for /props and /slots

* remove task inf_type

* fix CI by adding safe_json_to_str

* add "model_path" to /props

* update readme
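
The core idea of the refactor, as a minimal sketch (struct and field names below are illustrative, not the actual server code): instead of stashing the raw request body as a json blob inside server_task and re-parsing it at every use site, the HTTP handler parses the body once into plain typed fields:

    // hypothetical sketch, assuming the nlohmann::json-based server utils
    struct slot_params_sketch {
        int32_t n_predict   = -1;    // parsed once from "n_predict"
        float   temperature = 0.8f;  // parsed once from "temperature"
        std::vector<llama_logit_bias> logit_bias;
    };

    struct server_task_sketch {
        int id;
        slot_params_sketch params;   // typed fields replace a raw `json data` member
    };

Downstream code then reads task.params.temperature directly instead of re-extracting it from json, so malformed input is rejected at the HTTP boundary rather than deep inside the inference loop.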
Author: Xuan Son Nguyen, 2024-12-07 20:21:09 +01:00 (committed by GitHub)
parent d9c3ba2b77
commit 3573fa8e7b
6 changed files with 427 additions and 384 deletions

@@ -164,6 +164,9 @@ static std::vector<llama_tokens> tokenize_input_prompts(llama_context * ctx, con
     } else {
         throw std::runtime_error("\"prompt\" must be a string, a list of tokens, a list of mixed strings & tokens, or a list of prompts");
     }
+    if (result.empty()) {
+        throw std::runtime_error("\"prompt\" must not be empty");
+    }
     return result;
 }
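
In effect, an empty "prompt" now fails fast at tokenization time instead of producing a task with no input. A hedged sketch of how this surfaces (the trailing bool parameters of tokenize_input_prompts are assumed, since the hunk header above is truncated):

    try {
        auto prompts = tokenize_input_prompts(ctx, json::array(), /*add_special=*/true, /*parse_special=*/true);
    } catch (const std::runtime_error & e) {
        // e.what() == "\"prompt\" must not be empty"
    }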
@@ -496,8 +499,6 @@ static json oaicompat_completion_params_parse(
     const std::string & chat_template) {
     json llama_params;

-    llama_params["__oaicompat"] = true;
-
     // Apply chat template to the list of messages
     llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));
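
For context, format_chat (unchanged by this hunk) renders the OpenAI-style "messages" array through the model's chat template into a single flat prompt string; roughly, for a ChatML-style template:

    // [{"role":"user","content":"Hi"}] becomes, approximately:
    // "<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n"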
@@ -648,3 +649,18 @@ static json format_detokenized_response(const std::string & content) {
         {"content", content}
     };
 }
+
+static json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) {
+    json data = json::array();
+    for (const auto & lb : logit_bias) {
+        data.push_back(json{
+            {"bias", lb.bias},
+            {"token", lb.token},
+        });
+    }
+    return data;
+}
+
+static std::string safe_json_to_str(json data) {
+    return data.dump(-1, ' ', false, json::error_handler_t::replace);
+}
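
Two notes on the new helpers, with hedged usage sketches. format_logit_bias serializes the typed llama_logit_bias entries (token first, bias second, per llama.h) back to JSON for responses:

    std::vector<llama_logit_bias> lb = {
        { /*token=*/15043, /*bias=*/ 1.5f },
        { /*token=*/50256, /*bias=*/-2.0f },
    };
    // format_logit_bias(lb).dump() -> [{"bias":1.5,"token":15043},{"bias":-2.0,"token":50256}]

safe_json_to_str addresses the CI fix mentioned above: json::dump with the default strict error handler throws (nlohmann type_error.316) when a string field holds invalid UTF-8, which can happen when a generated token splits a multi-byte sequence. error_handler_t::replace substitutes U+FFFD instead, so serializing a response never aborts:

    json j = {{"content", std::string("\xC3\x28")}};  // invalid UTF-8 byte sequence
    // j.dump() would throw json::type_error (error 316) here
    std::string s = safe_json_to_str(j);              // content holds U+FFFD in place of the bad byte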