Merge pull request #7 from anon998/logging-reuse

Reuse format_generation_settings for logging.
Randall Fitzgerald 2023-05-31 17:08:12 -04:00 committed by GitHub
commit f2e1130901

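For context before the diff: the change builds the generation settings once as a JSON object and dumps that object wherever a log line is needed, instead of maintaining a parallel hand-written fprintf format string. Below is a minimal, standalone sketch of that pattern, not the server's actual code; the struct and field names are hypothetical, and the only assumption is the same single-header JSON library the server example uses (nlohmann::json).

// Sketch only: one JSON "settings" object reused for logging, the same idea
// the diff applies to format_generation_settings(). Names here are made up.
#include <cstdio>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

struct sampling_settings {
    int   seed  = -1;
    float temp  = 0.8f;
    int   top_k = 40;
    float top_p = 0.95f;
};

// Single source of truth: serialize the settings once...
static json format_settings(const sampling_settings &s) {
    return json{
        { "seed",  s.seed },
        { "temp",  s.temp },
        { "top_k", s.top_k },
        { "top_p", s.top_p },
    };
}

int main() {
    sampling_settings s;
    // ...and reuse the same object for every log line (or API response),
    // instead of keeping a long fprintf format string in sync by hand.
    fprintf(stderr, "/completion parameters: %s\n",
            format_settings(s).dump(4, ' ', false,
                                    json::error_handler_t::replace).c_str());
    return 0;
}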

@@ -507,6 +507,31 @@ bool server_params_parse(int argc, char **argv, server_params &sparams, gpt_para
     return true;
 }
 
+json format_generation_settings(llama_server_context &llama) {
+    const bool ignore_eos = -INFINITY == llama.params.logit_bias[llama_token_eos()];
+    return json {
+        { "seed", llama.params.seed },
+        { "temp", llama.params.temp },
+        { "top_k", llama.params.top_k },
+        { "top_p", llama.params.top_p },
+        { "tfs_z", llama.params.tfs_z },
+        { "typical_p", llama.params.typical_p },
+        { "repeat_last_n", llama.params.repeat_last_n },
+        { "repeat_penalty", llama.params.repeat_penalty },
+        { "presence_penalty", llama.params.presence_penalty },
+        { "frequency_penalty", llama.params.frequency_penalty },
+        { "mirostat", llama.params.mirostat },
+        { "mirostat_tau", llama.params.mirostat_tau },
+        { "mirostat_eta", llama.params.mirostat_eta },
+        { "penalize_nl", llama.params.penalize_nl },
+        { "stop", llama.params.antiprompt },
+        { "n_predict", llama.params.n_predict },
+        { "n_keep", llama.params.n_keep },
+        { "ignore_eos", ignore_eos },
+        { "stream", llama.stream },
+    };
+}
+
 bool parse_options_completion(json body, llama_server_context& llama, Response &res)
 {
     gpt_params default_params;
@@ -608,112 +633,27 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         res.status = 400;
         return false;
     }
+    llama.params.antiprompt.clear();
     if (!body["stop"].is_null()) {
-        llama.params.antiprompt = body["stop"].get<std::vector<std::string>>();
-    } else {
-        llama.params.antiprompt.clear();
+        const auto stop = body["stop"].get<std::vector<std::string>>();
+        std::copy_if(stop.begin(), stop.end(),
+                     std::back_inserter(llama.params.antiprompt),
+                     [](const std::string &str) { return !str.empty(); });
     }
     if (llama.verbose) {
-        std::string tmp_stop =
-            std::accumulate(llama.params.antiprompt.begin(), llama.params.antiprompt.end(),
-                            std::string{}, [](std::string a, std::string b) {
-                                return a + (a != "" ? ", \"" : "\"") + b + "\"";
-                            });
+        json tmp = format_generation_settings(llama);
         fprintf(stderr,
                 "-------------------------\n"
-                "/completion parameters: {\n"
-                " stream: %d,\n"
-                " ignore_eos: %d,\n"
-                " frequency_penalty: %f,\n"
-                " mirostat: %d,\n"
-                " mirostat_eta: %f,\n"
-                " mirostat_tau: %f,\n"
-                " n_keep: %d,\n"
-                " n_predict: %d,\n"
-                " penalize_nl: %d,\n"
-                " presence_penalty: %f,\n"
-                " repeat_last_n: %d,\n"
-                " repeat_penalty: %f,\n"
-                " seed: %d,\n"
-                " stop: [%s],\n"
-                " temperature: %f,\n"
-                " tfs_z: %f,\n"
-                " top_k: %d,\n"
-                " top_p: %f,\n"
-                " typical_p: %f,\n"
-                "}\nPROMPT[%s]\n",
-                llama.stream, -INFINITY == llama.params.logit_bias[llama_token_eos()],
-                llama.params.frequency_penalty, llama.params.mirostat,
-                llama.params.mirostat_eta, llama.params.mirostat_tau, llama.params.n_keep,
-                llama.params.n_predict, llama.params.penalize_nl,
-                llama.params.presence_penalty, llama.params.repeat_last_n,
-                llama.params.repeat_penalty, llama.params.seed, tmp_stop.c_str(),
-                llama.params.temp, llama.params.tfs_z, llama.params.top_k, llama.params.top_p,
-                llama.params.typical_p, llama.params.prompt.c_str());
+                "/completion parameters: %s\n"
+                "PROMPT[%s]\n",
+                tmp.dump(4, ' ', false, json::error_handler_t::replace).c_str(),
+                llama.params.prompt.c_str());
     }
     return true;
 }
 
-json format_generation_settings(const llama_server_context& llama) {
-    return json {
-        { "seed", llama.params.seed },
-        { "temp", llama.params.temp },
-        { "top_k", llama.params.top_k },
-        { "top_p", llama.params.top_p },
-        { "tfs_z", llama.params.tfs_z },
-        { "typical_p", llama.params.typical_p },
-        { "repeat_last_n", llama.params.repeat_last_n },
-        { "repeat_penalty", llama.params.repeat_penalty },
-        { "presence_penalty", llama.params.presence_penalty },
-        { "frequency_penalty", llama.params.frequency_penalty },
-        { "mirostat", llama.params.mirostat },
-        { "mirostat_tau", llama.params.mirostat_tau },
-        { "mirostat_eta", llama.params.mirostat_eta },
-        { "penalize_nl", llama.params.penalize_nl }
-    };
-}
-
-std::string log(const Request &req, const Response &res)
-{
-    std::string s;
-    s += "============ REQUEST ===========\n";
-    s += "< ";
-    s += req.method;
-    s += " ";
-    s += req.path;
-    s += " ";
-    s += req.version;
-    s += "\n";
-    if (!req.body.empty()) {
-        std::string line;
-        std::istringstream stream(req.body);
-        while (std::getline(stream, line)) {
-            s += "< " + line + "\n";
-        }
-    }
-    s += "------------ RESPONSE ------------\n> ";
-    s += res.version;
-    s += " ";
-    s += std::to_string(res.status);
-    s += "\n";
-    if (!res.body.empty()) {
-        std::string line;
-        std::istringstream stream(res.body);
-        while (std::getline(stream, line)) {
-            s += "> " + line + "\n";
-        }
-    }
-    return s;
-}
-
 int main(int argc, char **argv)
 {
     llama_init_backend();
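One detail the new format_generation_settings relies on: nlohmann::json serializes llama.params.antiprompt (a std::vector<std::string>) directly as a JSON array, which is why the manually accumulated tmp_stop string can be dropped. A minimal standalone sketch of that behavior, assuming only nlohmann::json and illustrative stop strings:

// Sketch: a std::vector<std::string> becomes a JSON array automatically,
// the mechanism behind the new { "stop", llama.params.antiprompt } entry.
#include <cstdio>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
    std::vector<std::string> stop = { "\n###", "User:" };  // example values
    json j = { { "stop", stop } };                         // vector -> JSON array
    fprintf(stderr, "%s\n", j.dump().c_str());             // {"stop":["\n###","User:"]}
    return 0;
}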