From a8a9f1968956ebd65a44ed460d015e4cf60c1d65 Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 05:57:20 -0300
Subject: [PATCH] small fixes

---
 examples/server/server.cpp | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index fcdc38e8a..d195fb167 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -137,8 +137,6 @@ struct llama_server_context
     void beginCompletion()
     {
         // number of tokens to keep when resetting context
-
-
         n_remain = params.n_predict;
         llama_set_rng_seed(ctx, params.seed);
     }
@@ -192,9 +190,8 @@ struct llama_server_context
         auto n_vocab = llama_n_vocab(ctx);

         // Apply params.logit_bias map
-        for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++)
-        {
-            logits[it->first] += it->second;
+        for (const auto &it : params.logit_bias) {
+            logits[it.first] += it.second;
         }

         std::vector<llama_token_data> candidates;
@@ -271,7 +268,7 @@ struct llama_server_context
             return result;
         }

-        has_next_token = params.n_predict == -1 ? true : n_remain != 0;
+        has_next_token = params.n_predict == -1 || n_remain != 0;

         return result;
     }
@@ -330,7 +327,7 @@ struct llama_server_context
     std::vector<float> embedding(std::string content, int threads) {
         content.insert(0, 1, ' ');
         std::vector<llama_token> tokens = ::llama_tokenize(ctx, content, true);
-        if (tokens.size() > 0)
+        if (!tokens.empty())
         {
             if (llama_eval(ctx, tokens.data(), tokens.size(), 0, threads))
             {
@@ -340,7 +337,7 @@ struct llama_server_context
             }
         }
         const int n_embd = llama_n_embd(ctx);
-        const auto embeddings = llama_get_embeddings(ctx);
+        auto *const embeddings = llama_get_embeddings(ctx);
         std::vector<float> embeddings_(embeddings, embeddings + n_embd);
         return embeddings_;
     }
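
Note on the logit_bias hunk: params.logit_bias is an unordered_map from token
id to bias, so each element of the range-based for is a pair accessed with
.first/.second rather than the iterator's ->first/->second. A minimal
standalone sketch of the same pattern follows; the types mirror llama.cpp's
(llama_token was an int typedef at the time), but main() and the sample values
are illustrative, not the server's actual code:

#include <cstdio>
#include <unordered_map>
#include <vector>

using llama_token = int; // mirrors llama.cpp's typedef

int main() {
    std::vector<float> logits = {0.1f, 2.0f, -0.5f, 1.3f};
    std::unordered_map<llama_token, float> logit_bias = {
        {1, -100.0f}, // effectively ban token 1
        {3, 2.5f},    // boost token 3
    };

    // Same shape as the patched loop: iterate the map by const reference
    // and add each bias to the corresponding logit.
    for (const auto &it : logit_bias) {
        logits[it.first] += it.second;
    }

    for (size_t i = 0; i < logits.size(); i++) {
        printf("token %zu: %.2f\n", i, logits[i]);
    }
    return 0;
}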