fix linter warnings + make variables const

anon 2023-06-13 14:28:52 -03:00
parent 575cf23862
commit 7df316b728
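
The changes are mechanical cleanups: NULL becomes nullptr, locals that are never reassigned become const, json/ordered_json parameters are passed by const reference, redundant = "" initializers are dropped, and pointer arithmetic on c_str() is replaced with substr(). As a rough illustration only (not part of the commit; the helper name log_event below is hypothetical and does not exist in server.cpp), a standalone sketch of these patterns:

// Standalone illustration of the cleanups applied by this commit.
// The helper name log_event is hypothetical and not part of server.cpp.
#include <cstdio>
#include <ctime>
#include <string>

// Pass potentially large objects by const reference instead of by value,
// as the new server_log(..., const nlohmann::ordered_json & extra) does.
static void log_event(const std::string & level, const std::string & message) {
    // nullptr instead of NULL, and const for locals that are never reassigned.
    const std::time_t now = std::time(nullptr);
    const std::string line =
        "[" + std::to_string(static_cast<long long>(now)) + "] " + level + ": " + message;
    std::fprintf(stdout, "%.*s\n", (int)line.size(), line.data());
    std::fflush(stdout);
}

int main() {
    // Default construction instead of std::string s = "";
    std::string generated_text;
    generated_text += "hello world";

    // substr() on the string instead of pointer arithmetic on c_str().
    const size_t pos = 6;
    const std::string tail = generated_text.substr(pos);

    log_event("INFO", "tail is '" + tail + "'");
    return 0;
}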


@@ -67,9 +67,10 @@ static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
     return ret;
 }
 
-static void server_log(const char * level, const char * function, int line, const char * message, nlohmann::ordered_json extra) {
+static void server_log(const char * level, const char * function, int line,
+                       const char * message, const nlohmann::ordered_json & extra) {
     nlohmann::ordered_json log {
-        { "timestamp", time(NULL) },
+        { "timestamp", time(nullptr) },
         { "level", level },
         { "function", function },
         { "line", line },
@@ -80,7 +81,7 @@ static void server_log(const char * level, const char * function, int line, cons
         log.merge_patch(extra);
     }
 
-    std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
+    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
     fprintf(stdout, "%.*s\n", (int)str.size(), str.data());
     fflush(stdout);
 }
@@ -105,7 +106,7 @@ static bool server_verbose = false;
 
 struct llama_server_context {
     bool stream = false;
     bool has_next_token = false;
-    std::string generated_text = "";
+    std::string generated_text;
     size_t num_tokens_predicted = 0;
     size_t n_past = 0;
@@ -150,7 +151,7 @@ struct llama_server_context {
     bool loadModel(const gpt_params & params_) {
         params = params_;
         ctx = llama_init_from_gpt_params(params);
-        if (ctx == NULL) {
+        if (ctx == nullptr) {
            LOG_ERROR("unable to load model", { { "model", params_.model } });
            return false;
        }
@@ -267,7 +268,9 @@ struct llama_server_context {
        const float mirostat_tau = params.mirostat_tau;
        const float mirostat_eta = params.mirostat_eta;
        const bool penalize_nl = params.penalize_nl;
-       llama_token id = 0; {
+
+       llama_token id = 0;
+       {
            auto * logits = llama_get_logits(ctx);
            auto n_vocab = llama_n_vocab(ctx);
@@ -370,9 +373,9 @@ struct llama_server_context {
    }
 
    std::string doCompletion() {
-       llama_token token = nextToken();
+       const llama_token token = nextToken();
 
-       std::string token_text = token == -1 ? "" : llama_token_to_str(ctx, token);
+       const std::string token_text = token == -1 ? "" : llama_token_to_str(ctx, token);
        generated_text += token_text;
 
        if (multibyte_pending > 0) {
@@ -546,12 +549,12 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
            std::vector<std::string> split_arg{ it, {} };
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
-           for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
-               if (i < split_arg.size()) {
-                   params.tensor_split[i] = std::stof(split_arg[i]);
+           for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device) {
+               if (i_device < split_arg.size()) {
+                   params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else {
-                   params.tensor_split[i] = 0.0f;
+                   params.tensor_split[i_device] = 0.0f;
                }
            }
 #else
@@ -663,7 +666,7 @@ static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
    };
 }
 
-bool parse_options_completion(json body, llama_server_context & llama) {
+static void parse_options_completion(const json & body, llama_server_context & llama) {
    gpt_params default_params;
 
    llama.stream = body.value("stream", false);
@@ -691,7 +694,7 @@ bool parse_options_completion(json body, llama_server_context & llama) {
    }
 
    if (body["logit_bias"].is_array()) {
-       int n_vocab = llama_n_vocab(llama.ctx);
+       const int n_vocab = llama_n_vocab(llama.ctx);
        for (const auto & el : body["logit_bias"]) {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer()) {
                llama_token tok = el[0].get<llama_token>();
@@ -715,8 +718,6 @@ bool parse_options_completion(json body, llama_server_context & llama) {
    }
 
    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
-
-   return true;
 }
 
 static void log_server_request(const Request & req, const Response & res) {
@@ -773,13 +774,10 @@ int main(int argc, char ** argv) {
    });
 
    svr.Post("/completion", [&llama](const Request & req, Response & res) {
        llama.rewind();
        llama_reset_timings(llama.ctx);
 
-       if (!parse_options_completion(json::parse(req.body), llama)) {
-           return;
-       }
+       parse_options_completion(json::parse(req.body), llama);
 
        llama.loadPrompt();
        llama.beginCompletion();
@@ -802,7 +800,7 @@ int main(int argc, char ** argv) {
                    llama.generated_text.end());
            }
 
-           json data = format_final_response(llama, llama.generated_text);
+           const json data = format_final_response(llama, llama.generated_text);
 
            llama_print_timings(llama.ctx);
@@ -820,7 +818,7 @@ int main(int argc, char ** argv) {
                size_t pos = std::min(sent_count, llama.generated_text.size());
-               const char* str_test = llama.generated_text.c_str() + pos;
+               const std::string str_test = llama.generated_text.substr(pos);
                size_t stop_pos =
                    llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                if (stop_pos != std::string::npos) {
@@ -833,18 +831,15 @@ int main(int argc, char ** argv) {
                                              STOP_PARTIAL);
                }
 
-               std::string to_send = llama.generated_text.substr(pos, stop_pos);
+               const std::string to_send = llama.generated_text.substr(pos, stop_pos);
                sent_count += to_send.size();
 
-               json data;
-               if (llama.has_next_token) {
-                   data = format_partial_response(to_send);
-               } else {
+               const json data = llama.has_next_token
+                   ? format_partial_response(to_send)
                    // Generation is done, send extra information.
-                   data = format_final_response(llama, to_send);
-               }
+                   : format_final_response(llama, to_send);
 
-               std::string str =
+               const std::string str =
                    "data: " +
                    data.dump(-1, ' ', false, json::error_handler_t::replace) +
                    "\n\n";
@@ -873,10 +868,10 @@ int main(int argc, char ** argv) {
    });
 
    svr.Post("/tokenize", [&llama](const Request & req, Response & res) {
-       json body = json::parse(req.body);
-       std::string content = body["content"].get<std::string>();
-       std::vector<llama_token> tokens = ::llama_tokenize(llama.ctx, content, false);
-       json data = format_tokenizer_response(tokens);
+       const json body = json::parse(req.body);
+       const std::string content = body["content"].get<std::string>();
+       const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
+       const json data = format_tokenizer_response(tokens);
 
        return res.set_content(data.dump(), "application/json");
    });