diff --git a/examples/server/public_simplechat/readme.md b/examples/server/public_simplechat/readme.md
index 6781e3567..40f4a0ffa 100644
--- a/examples/server/public_simplechat/readme.md
+++ b/examples/server/public_simplechat/readme.md
@@ -11,6 +11,9 @@ in a simple way with minimal code from a common code base. Inturn additionally i
 multiple independent back and forth chatting to an extent, with the ai llm model at a basic level, with their
 own system prompts.
 
+This allows seeing the generated text / ai-model response either in oneshot at the end, after it is fully generated,
+or as it is being generated, in a streamed manner from the server/ai-model.
+
 The UI follows a responsive web design so that the layout can adapt to available display space in a usable
 enough manner, in general.
 
@@ -58,6 +61,7 @@ Once inside
   * chat (default) vs completion mode
   * try trim garbage in response or not
   * amount of chat history in the context sent to server/ai-model
+  * oneshot or streamed mode.
 
 * In completion mode
   * logic by default doesnt insert any role specific "ROLE: " prefix wrt each role's message.
diff --git a/examples/server/public_simplechat/simplechat.js b/examples/server/public_simplechat/simplechat.js
index b416f7383..f9418578a 100644
--- a/examples/server/public_simplechat/simplechat.js
+++ b/examples/server/public_simplechat/simplechat.js
@@ -150,6 +150,7 @@ class SimpleChat {
                 gMe.show_info(div);
             }
         }
+        return last;
     }
 
     /**
@@ -316,8 +317,10 @@ class SimpleChat {
      * Handle the multipart response from server/ai-model
      * @param {Response} resp
      * @param {string} apiEP
+     * @param {HTMLDivElement} elDiv
      */
-    async handle_response_multipart(resp, apiEP) {
+    async handle_response_multipart(resp, apiEP, elDiv) {
+        let elP = ui.el_create_append_p("", elDiv);
         if (!resp.body) {
             throw Error("ERRR:SimpleChat:SC:ReadJsonEarly:No body...");
         }
@@ -343,6 +346,8 @@ class SimpleChat {
                     gotBody += this.response_extract_stream(curJson, apiEP);
                 }
             }
+            elP.innerText = gotBody;
+            elP.scrollIntoView(false);
             if (done) {
                 break;
             }
@@ -367,15 +372,16 @@ class SimpleChat {
      * Also take care of the optional garbage trimming.
      * @param {Response} resp
      * @param {string} apiEP
+     * @param {HTMLDivElement} elDiv
      */
-    async handle_response(resp, apiEP) {
+    async handle_response(resp, apiEP, elDiv) {
         let theResp = {
             assistant: "",
             trimmed: "",
         }
         let origMsg;
         if (gMe.bStream) {
-            origMsg = await this.handle_response_multipart(resp, apiEP);
+            origMsg = await this.handle_response_multipart(resp, apiEP, elDiv);
         } else {
             origMsg = await this.handle_response_oneshot(resp, apiEP);
         }
@@ -546,7 +552,7 @@ class MultiChatUI {
             body: theBody,
         });
 
-        let theResp = await chat.handle_response(resp, apiEP);
+        let theResp = await chat.handle_response(resp, apiEP, this.elDivChat);
         chat.add(Roles.Assistant, theResp.assistant);
         if (chatId == this.curChatId) {
             chat.show(this.elDivChat);
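
For reference, the core idea behind the streamed mode added above is to read the fetch Response body incrementally and refresh a DOM element as each chunk arrives, instead of waiting for the full reply. The following is a minimal standalone sketch of that pattern, not the code from simplechat.js itself: it assumes a llama-server style endpoint at a hypothetical base url, an SSE style stream of `data: {...}` lines carrying a `content` field, and the helper name `stream_completion_into` is made up for illustration.

```js
// Minimal sketch: stream a /completions response into a div, updating the
// shown text as chunks arrive. Endpoint path, json fields and defaults are
// assumptions for illustration, not taken from simplechat.js.
async function stream_completion_into(elDiv, prompt, baseUrl = "http://127.0.0.1:8080") {
    let elP = document.createElement("p");
    elDiv.appendChild(elP);
    let resp = await fetch(`${baseUrl}/completions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ prompt: prompt, stream: true }),
    });
    if (!resp.body) {
        throw Error("No response body to stream from");
    }
    let reader = resp.body.getReader();
    let decoder = new TextDecoder("utf-8");
    let pending = "";  // partial line carried over between chunks
    let gotText = "";  // accumulated generated text
    while (true) {
        let { value, done } = await reader.read();
        if (value) {
            pending += decoder.decode(value, { stream: true });
            let lines = pending.split("\n");
            pending = lines.pop() ?? "";  // keep any incomplete trailing line
            for (let line of lines) {
                line = line.trim();
                if (!line.startsWith("data:")) {
                    continue;
                }
                let payload = line.substring("data:".length).trim();
                if ((payload === "") || (payload === "[DONE]")) {
                    continue;
                }
                let curJson = JSON.parse(payload);
                gotText += curJson["content"] ?? "";
            }
            // show whatever has been generated so far
            elP.innerText = gotText;
            elP.scrollIntoView(false);
        }
        if (done) {
            break;
        }
    }
    return gotText;
}
```

In the actual change above, the equivalent loop instead hands each parsed chunk to `response_extract_stream(curJson, apiEP)`, so the same code path can service both the /completions and /chat/completions streaming formats, and the accumulated text is still returned to `handle_response` for the usual optional garbage trimming.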