From cbd853eda989269140778df147c1430369bc89c1 Mon Sep 17 00:00:00 2001
From: HanishKVC
Date: Thu, 23 May 2024 18:51:36 +0530
Subject: [PATCH] SimpleChat:ChatRequestOptions: max_tokens

Sometimes, based on the user's query, the AI model may get into a
runaway kind of generation with repetitions and the like, so add
max_tokens to try and limit this runaway behaviour, if possible.
---
 examples/server/public_simplechat/simplechat.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/server/public_simplechat/simplechat.js b/examples/server/public_simplechat/simplechat.js
index 2ddab0ec2..a93a89105 100644
--- a/examples/server/public_simplechat/simplechat.js
+++ b/examples/server/public_simplechat/simplechat.js
@@ -34,7 +34,8 @@ let gUsageMsg = `
 
 // Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
 let gChatRequestOptions = {
-    "temperature": 0.7
+    "temperature": 0.7,
+    "max_tokens": 2048
 };
 
 class SimpleChat {
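
For context, below is a minimal sketch of how gChatRequestOptions, including
the new max_tokens cap, would typically be spread into the JSON body sent to
an OpenAI-compatible chat completions endpoint. The helper name
requestChatCompletion, the baseUrl parameter, and the /chat/completions path
are illustrative assumptions and are not part of this patch.

    // Global options merged into every chat request; max_tokens bounds the
    // number of tokens the model may generate, limiting runaway output.
    let gChatRequestOptions = {
        "temperature": 0.7,
        "max_tokens": 2048
    };

    // Hypothetical helper: POST the chat messages plus the request options
    // to the server and return the parsed JSON response.
    async function requestChatCompletion(baseUrl, messages) {
        let body = { "messages": messages, ...gChatRequestOptions };
        let resp = await fetch(`${baseUrl}/chat/completions`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify(body),
        });
        return await resp.json();
    }

Note that max_tokens only caps the length of a single response; it does not
prevent repetition within that response, so it bounds rather than cures the
runaway behaviour described in the commit message.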