From 05a5a485b8e41737058df1815b33e2043ad1677c Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 17:52:04 -0300
Subject: [PATCH] make help text load faster

---
 examples/server/server.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index ecae8ecc3..9b653a2f6 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -756,8 +756,6 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
 
 int main(int argc, char **argv)
 {
-    llama_init_backend();
-
     // own arguments required by this example
     gpt_params params;
     server_params sparams;
@@ -775,6 +773,8 @@ int main(int argc, char **argv)
         params.model_alias = params.model;
     }
 
+    llama_init_backend();
+
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
     fprintf(stderr, "system_info: n_threads = %d / %d | %s\n\n", params.n_threads,
             std::thread::hardware_concurrency(), llama_print_system_info());
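
For context, not part of the patch itself: the change defers llama_init_backend() until after the command-line arguments have been parsed, so an invocation that only prints usage text (or fails to parse) exits before paying the backend initialization cost. Below is a minimal standalone sketch of that ordering, with a hypothetical expensive_backend_init() and parse_args() standing in for llama_init_backend() and the server's argument parsing.

    // Standalone illustration only; expensive_backend_init() and parse_args()
    // are hypothetical stand-ins, not the real server.cpp functions.
    #include <cstdio>
    #include <cstring>

    static void expensive_backend_init() {
        // In the real server this is llama_init_backend(), which can take a while.
        fprintf(stderr, "backend initialized\n");
    }

    static bool parse_args(int argc, char **argv, bool &show_help) {
        for (int i = 1; i < argc; i++) {
            if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
                show_help = true;
            }
        }
        return true;
    }

    int main(int argc, char **argv) {
        bool show_help = false;
        if (!parse_args(argc, argv, show_help)) {
            return 1;             // bad arguments: exit before any heavy init
        }
        if (show_help) {
            printf("usage: %s [options]\n", argv[0]);
            return 0;             // help text prints immediately
        }

        expensive_backend_init(); // heavy initialization only for real runs
        // ... rest of the program ...
        return 0;
    }

The general pattern is the same as in the patch: handle --help and argument errors first, then run any expensive one-time setup.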