SimpleChat: Rename to apiRequestOptions from chatRequestOptions
So that it is not wrongly assumed that these request options apply only to the /chat/completions endpoint. They are used for both endpoints, so the rename matches the semantics better.
parent e4aeafc54f
commit bc336248bc
2 changed files with 24 additions and 24 deletions
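The rename matches how the options are actually consumed: whichever endpoint is chosen, the same set of fields gets merged into the outgoing request body (see request_jsonstr_extend in the diff below). A minimal sketch of that flow, assuming a hypothetical build_request_body helper that is not part of the codebase:

    // Sketch only: the same shared options feed the body for either endpoint.
    function build_request_body(baseBody, apiRequestOptions, bStream) {
        let obj = {...baseBody};                 // endpoint specific part (messages or prompt)
        for (let k in apiRequestOptions) {
            obj[k] = apiRequestOptions[k];       // model, temperature, max_tokens, ...
        }
        if (bStream) {
            obj["stream"] = true;
        }
        return obj;
    }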
@@ -179,15 +179,15 @@ It is attached to the document object. Some of these can also be updated using t
 The histogram/freq based trimming logic is currently tuned for english language wrt its
 is-it-a-alpabetic|numeral-char regex match logic.
 
-chatRequestOptions - maintains the list of options/fields to send along with chat request,
+apiRequestOptions - maintains the list of options/fields to send along with api request,
 irrespective of whether /chat/completions or /completions endpoint.
 
 If you want to add additional options/fields to send to the server/ai-model, and or
 modify the existing options value or remove them, for now you can update this global var
 using browser's development-tools/console.
 
-For string, numeric and boolean fields in chatRequestOptions, including even those added by a
-user at runtime by directly modifying gMe.chatRequestOptions, setting ui entries will be auto
+For string, numeric and boolean fields in apiRequestOptions, including even those added by a
+user at runtime by directly modifying gMe.apiRequestOptions, setting ui entries will be auto
 created.
 
 cache_prompt option supported by example/server is allowed to be controlled by user, so that
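Since these options live on the global gMe object, they can also be adjusted on the fly from the browser's devtools console, as the readme text above describes. For example (temperature and max_tokens are among the defaults shown later in this diff; top_p stands in for a user-added field):

    // In the browser devtools console, after the page has loaded:
    gMe.apiRequestOptions["temperature"] = 0.4;  // adjust an existing field
    gMe.apiRequestOptions["max_tokens"] = 256;   // cap the generated response length
    gMe.apiRequestOptions["top_p"] = 0.9;        // a newly added field also gets an auto-created ui entry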
@@ -212,10 +212,10 @@ It is attached to the document object. Some of these can also be updated using t
 >0 : Send the latest chat history from the latest system prompt, limited to specified cnt.
 
 
-By using gMe's iRecentUserMsgCnt and chatRequestOptions.max_tokens one can try to control the
-implications of loading of the ai-model's context window by chat history, wrt chat response to
-some extent in a simple crude way. You may also want to control the context size enabled when
-the server loads ai-model, on the server end.
+By using gMe's iRecentUserMsgCnt and apiRequestOptions.max_tokens/n_predict one can try to control
+the implications of loading of the ai-model's context window by chat history, wrt chat response to
+some extent in a simple crude way. You may also want to control the context size enabled when the
+server loads ai-model, on the server end.
 
 
 Sometimes the browser may be stuborn with caching of the file, so your updates to html/css/js
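As the updated paragraph above suggests, pairing iRecentUserMsgCnt with apiRequestOptions.max_tokens (or n_predict) is a crude way to bound how much of the model's context window gets consumed per request. For example, from the devtools console (values are arbitrary):

    gMe.iRecentUserMsgCnt = 2;                   // send only the latest chat history, limited to this many user msgs
    gMe.apiRequestOptions["max_tokens"] = 512;   // bound the generated response; n_predict is the
                                                 // equivalent knob wrt the /completions endpoint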
@@ -252,12 +252,12 @@ also be started with a model context size of 1k or more, to be on safe side.
 internal n_predict, for now add the same here on the client side, maybe later add max_tokens
 to /completions endpoint handling code on server side.
 
-NOTE: One may want to experiment with frequency/presence penalty fields in chatRequestOptions
-wrt the set of fields sent to server along with the user query. To check how the model behaves
+NOTE: One may want to experiment with frequency/presence penalty fields in apiRequestOptions
+wrt the set of fields sent to server along with the user query, to check how the model behaves
 wrt repeatations in general in the generated text response.
 
 A end-user can change these behaviour by editing gMe from browser's devel-tool/console or by
-using the providing settings ui.
+using the provided settings ui (for settings exposed through the ui).
 
 
 ### OpenAi / Equivalent API WebService
@@ -268,7 +268,7 @@ for a minimal chatting experimentation by setting the below.
 * the baseUrl in settings ui
   * https://api.openai.com/v1 or similar
 
-* Wrt request body - gMe.chatRequestOptions
+* Wrt request body - gMe.apiRequestOptions
   * model (settings ui)
   * any additional fields if required in future
 
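Hooking the client up to OpenAI or an equivalent hosted service therefore only touches the settings ui and gMe. For instance (the Authorization value is a placeholder; the baseUrl itself is set through the settings ui, and the headers default appears in the class Me diff below):

    // From the settings ui, or equivalently from the devtools console:
    gMe.apiRequestOptions["model"] = "gpt-3.5-turbo";        // model in settings ui
    gMe.headers["Authorization"] = "Bearer OPENAI_API_KEY";  // substitute a real key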
@@ -222,8 +222,8 @@ class SimpleChat {
      * @param {Object} obj
      */
     request_jsonstr_extend(obj) {
-        for(let k in gMe.chatRequestOptions) {
-            obj[k] = gMe.chatRequestOptions[k];
+        for(let k in gMe.apiRequestOptions) {
+            obj[k] = gMe.apiRequestOptions[k];
         }
         if (gMe.bStream) {
             obj["stream"] = true;
@@ -740,7 +740,7 @@ class Me {
             "Authorization": "", // Authorization: Bearer OPENAI_API_KEY
         }
         // Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
-        this.chatRequestOptions = {
+        this.apiRequestOptions = {
             "model": "gpt-3.5-turbo",
             "temperature": 0.7,
             "max_tokens": 1024,
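With the defaults above merged in by request_jsonstr_extend and streaming enabled, the body sent to the server ends up roughly like the following (the messages part is specific to /chat/completions, /completions carries a prompt string instead; the content shown is purely illustrative):

    let body = {
        "messages": [ { "role": "user", "content": "Hello there" } ],
        "model": "gpt-3.5-turbo",
        "temperature": 0.7,
        "max_tokens": 1024,
        "stream": true,
    };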
@@ -813,40 +813,40 @@ class Me {
 
         }
 
-        ui.el_create_append_p(`chatRequestOptions:${JSON.stringify(this.chatRequestOptions, null, " - ")}`, elDiv);
+        ui.el_create_append_p(`apiRequestOptions:${JSON.stringify(this.apiRequestOptions, null, " - ")}`, elDiv);
         ui.el_create_append_p(`headers:${JSON.stringify(this.headers, null, " - ")}`, elDiv);
 
     }
 
     /**
-     * Auto create ui input elements for fields in ChatRequestOptions
+     * Auto create ui input elements for fields in apiRequestOptions
      * Currently supports text and number field types.
      * @param {HTMLDivElement} elDiv
      */
-    show_settings_chatrequestoptions(elDiv) {
+    show_settings_apirequestoptions(elDiv) {
         let typeDict = {
             "string": "text",
             "number": "number",
         };
         let fs = document.createElement("fieldset");
         let legend = document.createElement("legend");
-        legend.innerText = "ChatRequestOptions";
+        legend.innerText = "ApiRequestOptions";
         fs.appendChild(legend);
         elDiv.appendChild(fs);
-        for(const k in this.chatRequestOptions) {
-            let val = this.chatRequestOptions[k];
+        for(const k in this.apiRequestOptions) {
+            let val = this.apiRequestOptions[k];
             let type = typeof(val);
             if (((type == "string") || (type == "number"))) {
-                let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.chatRequestOptions[k], (val)=>{
+                let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.apiRequestOptions[k], (val)=>{
                     if (type == "number") {
                         val = Number(val);
                     }
-                    this.chatRequestOptions[k] = val;
+                    this.apiRequestOptions[k] = val;
                 });
                 fs.appendChild(inp.div);
             } else if (type == "boolean") {
                 let bbtn = ui.el_creatediv_boolbutton(`Set{k}`, k, {true: "true", false: "false"}, val, (userVal)=>{
-                    this.chatRequestOptions[k] = userVal;
+                    this.apiRequestOptions[k] = userVal;
                 });
                 fs.appendChild(bbtn.div);
             }
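Because show_settings_apirequestoptions simply walks whatever fields exist on this.apiRequestOptions, experimenting with the penalty fields mentioned in the readme NOTE above needs no ui code changes; adding the field is enough for an input to be auto-created the next time settings are shown (values below are just examples):

    // From the devtools console, or as extra defaults in the Me constructor:
    gMe.apiRequestOptions["frequency_penalty"] = 0.8;   // a number input is auto-created for this
    gMe.apiRequestOptions["presence_penalty"] = 0.0;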
@@ -880,7 +880,7 @@ class Me {
         });
         elDiv.appendChild(bb.div);
 
-        this.show_settings_chatrequestoptions(elDiv);
+        this.show_settings_apirequestoptions(elDiv);
 
         let sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
             this.apiEP = ApiEP.Type[val];