From 3ff27d30e3da54d8e3e44374f47dcc9b67656d41 Mon Sep 17 00:00:00 2001
From: digiwombat
Date: Fri, 2 Jun 2023 09:20:53 -0400
Subject: [PATCH] Fixed up a few things in embedding mode.

---
 examples/server/server.cpp | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 038fd16c8..334bf88c5 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -958,23 +958,34 @@ int main(int argc, char **argv)
     svr.Post("/embedding", [&llama](const Request &req, Response &res)
              {
+        json data;
         if(!llama.params.embedding) {
             std::vector<float> empty;
-            json data = {
-                {"embedding", empty}};
-            fprintf(stderr, "[llama-server] : You need enable embedding mode adding: --embedding option\n");
+            data = {
+                    {"embedding", empty},
+                    {"error", "Server is not in embedding mode."} };
+            fprintf(stderr, "[llama-server] : You need to enable embedding mode by adding --embedding when launching the server.\n");
             return res.set_content(data.dump(llama.json_indent), "application/json");
         }
         json body = json::parse(req.body);
-        std::string content = body["content"].get<std::string>();
-        int threads = body["threads"].get<int>();
-        json data = {
-            {"embedding", llama.embedding(content, threads) } };
+        if (body["content"].is_null()) {
+            std::vector<float> empty;
+            data = {
+                    {"embedding", empty},
+                    {"error", "The embedding content was not set."} };
+            fprintf(stderr, "[llama-server] : The embedding content was not set.\n");
+        }
+        else
+        {
+            std::string content = body["content"].get<std::string>();
+            data = {
+                {"embedding", llama.embedding(content, llama.params.n_threads) } };
+        }
         return res.set_content(data.dump(llama.json_indent), "application/json");
     });
 
     if(params.embedding) {
-        fprintf(stderr, "NOTE: Mode embedding enabled. Completion function doesn't work in this mode.\n");
+        fprintf(stderr, "NOTE: Embedding mode enabled. Completion is disabled in this mode.\n");
     }
 
     svr.set_logger([](const Request& req, const Response& res)
                    {