fix toggle state localStorage

parent c2badb4697
commit 87bcbbb6c2
1 changed file with 317 additions and 301 deletions
@@ -452,8 +452,20 @@
 const handleToggleChange = (e) => {
   const isChecked = e.target.checked;
   session.value = { ...session.value, type: isChecked ? 'completion' : 'chat' };
+  localStorage.setItem('toggleState', isChecked);
 }
 //
+const loadToggleState = () => {
+  const storedState = localStorage.getItem('toggleState');
+  if (storedState !== null) {
+    const isChecked = storedState === 'true';
+    document.getElementById('toggle').checked = isChecked;
+    session.value = { ...session.value, type: isChecked ? 'completion' : 'chat' };
+  }
+}
+//
+document.addEventListener('DOMContentLoaded', loadToggleState);
+//
 //
 // function to update the prompt format
 function updatePromptFormat(e) {
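The hunk above persists the Chat/Complete toggle across page reloads via localStorage. A minimal, self-contained sketch of the same pattern (hypothetical, assuming only a checkbox with id "toggle"; not the commit's code verbatim):

document.addEventListener('DOMContentLoaded', () => {
  const toggle = document.getElementById('toggle');         // hypothetical checkbox element
  const stored = localStorage.getItem('toggleState');       // localStorage stores strings, not booleans
  if (stored !== null) {
    toggle.checked = (stored === 'true');                   // restore the saved state on load
  }
  toggle.addEventListener('change', (e) => {
    localStorage.setItem('toggleState', e.target.checked);  // saved as the string "true"/"false"
  });
});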
@@ -464,16 +476,16 @@ function updatePromptFormat(e) {
       ...promptFormats[promptFormat]
     };
   } else {
-    // llama.cpp's default setting
+    // Use vicuna as llama.cpp's default setting, since it's most common
     session.value = {
       ...session.value,
-      template: "{{prompt}}\n\n{{history}}\n{{char}}:",
-      historyTemplate: "{{name}}: {{message}}",
-      char: "Assistant",
-      user: "User"
+      template: "{{prompt}}\n{{history}}{{char}}",
+      historyTemplate: "{{name}}: {{message}}\n",
+      char: "ASSISTANT",
+      user: "USER"
     };
   }
-  console.log('Updated session value:', session.value); //
+  console.log('Updated session value:', session.value);
 }
 //
 //
@@ -492,7 +504,7 @@ function addEventListenersWhenAvailable() {
     themeSelector.addEventListener('change', function(event) {
       // event-handler-code...
     });
-    // placeholder event listeners here
+    // placeholder event listeners
   } else {
     // if the element is not there yet, wait ahead
     requestAnimationFrame(addEventListenersWhenAvailable);
@@ -534,316 +546,320 @@ function updateSystemPrompt(e) {



 const ConfigForm = (props) => {
   const updateSession = (el) => session.value = { ...session.value, [el.target.name]: el.target.value }
   const updateParams = (el) => params.value = { ...params.value, [el.target.name]: el.target.value }
   const updateParamsFloat = (el) => params.value = { ...params.value, [el.target.name]: parseFloat(el.target.value) }
   const updateParamsInt = (el) => params.value = { ...params.value, [el.target.name]: Math.floor(parseFloat(el.target.value)) }
   const updateParamsBool = (el) => params.value = { ...params.value, [el.target.name]: el.target.checked }

   const grammarJsonSchemaPropOrder = signal('')
   const updateGrammarJsonSchemaPropOrder = (el) => grammarJsonSchemaPropOrder.value = el.target.value
   const convertJSONSchemaGrammar = async () => {
     try {
       let schema = JSON.parse(params.value.grammar)
       const converter = new SchemaConverter({
         prop_order: grammarJsonSchemaPropOrder.value
           .split(',')
           .reduce((acc, cur, i) => ({ ...acc, [cur.trim()]: i }), {}),
         allow_fetch: true,
       })
       schema = await converter.resolveRefs(schema, 'input')
       converter.visit(schema, '')
       params.value = {
         ...params.value,
         grammar: converter.formatGrammar(),
       }
     } catch (e) {
       alert(`Convert failed: ${e.message}`)
     }
   }

   const FloatField = ({ label, title, max, min, name, step, value }) => {
     return html`
       <div>
         <label for="${name}"><span title="${title}">${label}</span></label>
         <input type="range" id="${name}" min="${min}" max="${max}" step="${step}" name="${name}" value="${value}" oninput=${updateParamsFloat} title="${title}" />
         <span id="${name}-value">${value}</span>
       </div>
     `
   };

   const IntField = ({ label, title, max, min, step, name, value }) => {
     return html`
       <div>
         <label for="${name}"><span title="${title}">${label}</span></label>
         <input type="range" id="${name}" min="${min}" max="${max}" step="${step}" name="${name}" value="${value}" oninput=${updateParamsInt} title="${title}" />
         <span id="${name}-value">${value}</span>
       </div>
     `
   };

   const BoolField = ({ label, title, name, value }) => {
     return html`
       <div>
         <label for="${name}"><span title="${title}">${label}</span></label>
         <input type="checkbox" id="${name}" name="${name}" checked="${value}" onclick=${updateParamsBool} title="${title}" />
       </div>
     `
   };

   const userTemplateReset = (e) => {
     e.preventDefault();
     userTemplateResetToDefaultAndApply()
   }

   const UserTemplateResetButton = () => {
     if (selectedUserTemplate.value.name == 'default') {
       return html`
         <button class="reset-button" id="id_reset" onclick="${userTemplateReset}">Reset</button>
       `
     }

     return html`
       <div class="button-container">
         <button class="reset-button" title="Caution: This resets the entire form." onclick="${userTemplateReset}">Reset</button>
       </div>
     `
   };

   useEffect(() => {
     // autosave template on every change
     userTemplateAutosave()
   }, [session.value, params.value])

   const GrammarControl = () => (
     html`
       <div>
         <div class="grammar">
           <label for="template"></label>
           <textarea id="grammar" name="grammar" placeholder="Use GBNF or JSON-Scheme + Converter" value="${params.value.grammar}" rows=4 oninput=${updateParams}/>
         </div>
         <div class="grammar-columns">
           <div class="json-schema-controls">
             <input type="text" name="prop-order" placeholder="Order: prop1,prop2,prop3" oninput=${updateGrammarJsonSchemaPropOrder} />
             <button type="button" class="button-grammar" onclick=${convertJSONSchemaGrammar}>Convert JSON-Scheme</button>
           </div>
         </div>
       </div>
     `
   );

   const PromptControlFieldSet = () => (
     html`
       <fieldset>
         <div class="input-container">
           <label for="prompt" class="input-label">System</label>
           <textarea
             id="prompt"
             class="persistent-input"
             name="prompt"
-            placeholder="The following models do not support System Prompt by design: OpenChat, Orion, Phi-3, Starling"
+            placeholder="The following models do not support System Prompt by design: OpenChat, Orion, Phi-3, Starling, Yi-6/9/34B-Chat"
             value="${session.value.prompt}"
             oninput=${updateSession}
           ></textarea>
         </div>
       </fieldset>
     `
   );

   const ChatConfigForm = () => (
     html`
       <fieldset class="dropdowns">
         <div>
           <select id="promptFormat" name="promptFormat" onchange=${updatePromptFormat}>
             <option value="default">Prompt Style</option>
             <option value=""></option>
             <optgroup label="Common Prompt-Styles">
               <option value="alpaca">Alpaca</option>
               <option value="chatml">ChatML</option>
+              <option value="commandr">Command R/+</option>
               <option value="llama2">Llama 2</option>
               <option value="llama3">Llama 3</option>
               <option value="phi3">Phi-3</option>
               <option value="openchat">OpenChat/Starling</option>
               <option value="vicuna">Vicuna</option>
               <option value=""></option>
             </optgroup>
             <optgroup label="More Prompt-Styles">
               <option value="vicuna">Airoboros L2</option>
               <option value="vicuna">BakLLaVA-1</option>
-              <option value="codeCherryPop">Code Cherry Pop</option>
+              <option value="alpaca">Code Cherry Pop</option>
               <option value="deepseekCoder">Deepseek Coder</option>
               <option value="chatml">Dolphin Mistral</option>
               <option value="chatml">evolvedSeeker 1.3B</option>
-              <option value="goliath120b">Goliath 120B</option>
-              <option value="jordan">Jordan</option>
-              <option value="llava">LLaVA</option>
-              <option value="leoHessianai">Leo Hessianai</option>
-              <option value="leoMistral">Leo Mistral</option>
-              <option value="marx">Marx</option>
+              <option value="vicuna">Goliath 120B</option>
+              <option value="vicuna">Jordan</option>
+              <option value="vicuna">LLaVA</option>
+              <option value="chatml">Leo Hessianai</option>
+              <option value="vicuna">Leo Mistral</option>
+              <option value="vicuna">Marx</option>
               <option value="med42">Med42</option>
-              <option value="metaMath">MetaMath</option>
-              <option value="mistralInstruct">Mistral Instruct</option>
-              <option value="mistralOpenOrca">Mistral 7B OpenOrca</option>
-              <option value="mythomax">MythoMax</option>
+              <option value="alpaca">MetaMath</option>
+              <option value="llama2">Mistral Instruct</option>
+              <option value="chatml">Mistral 7B OpenOrca</option>
+              <option value="alpaca">MythoMax</option>
               <option value="neuralchat">Neural Chat</option>
-              <option value="nousCapybara">Nous Capybara</option>
+              <option value="vicuna">Nous Capybara</option>
               <option value="nousHermes">Nous Hermes</option>
               <option value="openchatMath">OpenChat Math</option>
-              <option value="openhermes2Mistral">OpenHermes 2.5-Mistral</option>
-              <option value="orcamini">Orca Mini v3</option>
+              <option value="chatml">OpenHermes 2.5-Mistral</option>
+              <option value="alpaca">Orca Mini v3</option>
               <option value="orion">Orion</option>
-              <option value="sauerkrautLM">SauerkrautLM</option>
-              <option value="samantha">Samantha</option>
-              <option value="samanthaMistral">Samantha Mistral</option>
-              <option value="scarlett">Scarlett</option>
+              <option value="vicuna">Samantha</option>
+              <option value="chatml">Samantha Mistral</option>
+              <option value="sauerkrautLM">SauerkrautLM</option>
+              <option value="vicuna">Scarlett</option>
               <option value="starlingCode">Starling Coding</option>
-              <option value="sydney">Sydney</option>
-              <option value="synthia">Synthia</option>
-              <option value="tess">Tess</option>
-              <option value="yi34b">Yi-34B</option>
+              <option value="alpaca">Sydney</option>
+              <option value="vicuna">Synthia</option>
+              <option value="vicuna">Tess</option>
+              <option value="yi34b">Yi-6/9/34B-Chat</option>
               <option value="zephyr">Zephyr</option>
               <option value=""></option>
             </optgroup>
           </select>
           <select id="SystemPrompt" name="SystemPrompt" onchange=${updateSystemPrompt}>
             <option value="default">System Prompt</option>
             <option value="empty">None</option>
             <option value="airoboros">Airoboros</option>
             <option value="alpaca">Alpaca</option>
             <option value="atlas">Atlas</option>
             <option value="atlas_de">Atlas - DE</option>
             <option value="cot">Chain of Tought</option>
+            <option value="commandrempty">Command R/+ (empty)</option>
+            <option value="commandrexample">Command R/+ (example)</option>
             <option value="deduce">Critical Thinking</option>
             <option value="deepseekcoder">Deepseek Coder</option>
             <option value="jordan">Jordan</option>
             <option value="leomistral">Leo Mistral</option>
             <option value="med42">Med42</option>
             <option value="migeltot">Migel's Tree of Thought</option>
             <option value="mistralopenorca">Mistral OpenOrca</option>
             <option value="orcamini">Orca Mini</option>
             <option value="samantha">Samantha</option>
             <option value="sauerkraut">Sauerkraut</option>
             <option value="scarlett">Scarlett</option>
             <option value="synthia">Synthia</option>
+            <option value="vicuna">Vicuna</option>
           </select>
           <!--<select id="systemLanguage" name="systemLanguage">-->
           <!--<option value="default">English</option>-->
           <!--<option value="DE">German</option>-->
           <!--<option value="placeholderLanguage">Placeholder</option>-->
           <!--</select>-->
         </div>
       </fieldset>
       ${PromptControlFieldSet()}
       <fieldset>
         <details open>
           <summary><span class="summary-title" id="id_prompt-style">Prompt Style</span></summary>
           <fieldset class="names">
             <div>
               <label for="user" id="id_user-name">User ID</label>
               <input type="text" id="user" name="user" value="${session.value.user}" oninput=${updateSession} />
             </div>
             <div>
               <label for="bot" id="id_bot-name">AI ID</label>
               <input type="text" id="bot" name="char" value="${session.value.char}" oninput=${updateSession} />
             </div>
           </fieldset>
           <div class="two-columns">
             <div>
               <div class="input-container">
                 <label for="template" class="input-label-sec" id_prompt-template>Prompt Template</label>
                 <textarea id="template" class="persistent-input-sec" name="template" value="${session.value.template}" rows=6 oninput=${updateSession}/>
               </div>
             </div>
             <div>
               <div class="input-container">
                 <label for="template" class="input-label-sec" id="id_history-template">Chat History</label>
                 <textarea id="history-template" class="persistent-input-sec" name="historyTemplate" value="${session.value.historyTemplate}" rows=1 oninput=${updateSession}/>
               </div>
             </div>
           </div>
         </details>
         <details>
           <summary><span class="summary-title" id="id_grammar-title" id_grammar-title>Grammar</span></summary>
           ${GrammarControl()}
         </details>

       </fieldset>
     `
   );

   const CompletionConfigForm = () => (
     html`
       ${PromptControlFieldSet()}
       <fieldset>
         <details>
           <summary><span class="summary-title" id="id_grammar-title" id_grammar-title>Grammar</span></summary>
           ${GrammarControl()}
         </details>
       </fieldset>
     `
   );
   // todo toggle button et api field et reset button in one nice row
   return html`
     <form>
       <fieldset class="two">
         <input type="checkbox" id="toggle" class="toggleCheckbox" onchange=${handleToggleChange} />
         <label for="toggle" class="toggleContainer">
           <div id="id_toggle-label-chat">Chat</div>
           <div id="id_toggle-label-complete">Complete</div>
         </label>
         <fieldset>

           <input type="text" id="api_key" class="apiKey" name="api_key" value="${params.value.api_key}" placeholder="Enter API key" oninput=${updateParams} />
         </fieldset>

         <${UserTemplateResetButton}/>
       </fieldset>

       ${session.value.type === 'chat' ? ChatConfigForm() : CompletionConfigForm()}

       <fieldset class="params">
         ${IntField({ label: "Prediction", title: "Set the maximum number of tokens to predict when generating text. Note: May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. The value -1 means infinity. Default is 358", max: 2048, min: -1, step: 16, name: "n_predict", value: params.value.n_predict, })}
         ${FloatField({ label: "Min-P sampling", title: "The minimum probability for a token to be considered, relative to the probability of the most likely token. Note that it's good practice to disable all other samplers aside from temperature when using min-p. It is also recommenend to go this approach. Default is 0.05 – But consider higher values like ~ 0.4 for non-English text generation. The value 1.0 means disabled", max: 1.0, min: 0.0, name: "min_p", step: 0.01, value: params.value.min_p })}
         ${FloatField({ label: "Repetition Penalty", title: "Control the repetition of token sequences in the generated text. Default is 1.1", max: 2.0, min: 0.0, name: "repeat_penalty", step: 0.01, value: params.value.repeat_penalty })}
         ${FloatField({ label: "Temperature", title: "This will adjust the overall randomness of the generated text. It is the most common sampler. Default is 0.8 but consider using lower values for more factual texts or for non-English text generation", max: 2.0, min: 0.0, name: "temperature", step: 0.01, value: params.value.temperature })}
       </fieldset>

       <details>
         <summary><span class="summary-title">Further Options</span></summary>
         <fieldset class="params">
           ${IntField({ label: "Top-K", title: "Limits the selection of the next token to the K most probable tokens. 1 means no randomness = greedy sampling. If set to 0, it means the entire vocabulary size is considered.", max: 100, min: 0, step: 1, name: "top_k", value: params.value.top_k })}
           ${IntField({ label: "Penalize Last N", title: "The last n tokens that are taken into account to penalise repetitions. A value of 0 means that this function is deactivated and -1 means that the entire size of the context is taken into account.", max: 2048, min: 0, step: 16, name: "repeat_last_n", value: params.value.repeat_last_n })}
           ${FloatField({ label: "Top-P", title: "Limits the selection of the next token to a subset of tokens whose combined probability reaches a threshold value P = top-P. If set to 1, it means the entire vocabulary size is considered.", max: 1.0, min: 0.0, name: "top_p", step: 0.01, value: params.value.top_p })}
           ${FloatField({ label: "Presence Penalty", title: "A penalty that is applied if certain tokens appear repeatedly in the generated text. A higher value leads to fewer repetitions.", max: 1.0, min: 0.0, name: "presence_penalty", step: 0.01, value: params.value.presence_penalty })}
           ${FloatField({ label: "TFS-Z", title: "Activates tail-free sampling, a method used to limit the prediction of tokens that are too frequent. The parameter z controls the strength of this limitation. A value of 1.0 means that this function is deactivated.", max: 1.0, min: 0.0, name: "tfs_z", step: 0.01, value: params.value.tfs_z })}
           ${FloatField({ label: "Frequency Penalty", title: "A penalty that is applied based on the frequency with which certain tokens occur in the training data set. A higher value results in rare tokens being favoured.", max: 1.0, min: 0.0, name: "frequency_penalty", step: 0.01, value: params.value.frequency_penalty })}
           ${FloatField({ label: "Typical-P", title: "Activates local typical sampling, a method used to limit the prediction of tokens that are atypical in the current context. The parameter p controls the strength of this limitation. A value of 1.0 means that this function is deactivated.", max: 1.0, min: 0.0, name: "typical_p", step: 0.01, value: params.value.typical_p })}
           ${IntField({ label: "Min Keep", title: "If greater than 0, samplers are forced to return N possible tokens at minimum. Default is 0", max: 10, min: 0, name: "min_keep", value: params.value.min_keep })}
         </fieldset>

         <hr style="height: 1px; background-color: #ececf1; border: none;" />

         <fieldset class="three">
           <label title="The Mirostat sampling method is an algorithm used in natural language processing to improve the quality and coherence of the generated texts. It is an at-runtime-adaptive method that aims to keep the entropy or surprise of a text within a desired range."><input type="radio" name="mirostat" value="0" checked=${params.value.mirostat == 0} oninput=${updateParamsInt} /> Mirostat off</label>
           <label title="Mirostat version 1 was developed to adjust the probability of predictions so that the surprise in the text remains constant. This means that the algorithm tries to maintain a balance between predictable and surprising words so that the text is neither too monotonous nor too chaotic. V1 is recommended for longer writings, creative texts, etc."><input type="radio" name="mirostat" value="1" checked=${params.value.mirostat == 1} oninput=${updateParamsInt} /> Mirostat v1</label>
           <label title="Mirostat version 2 builds on the idea of V1 but brings some improvements. V2 is recommended as a general purpose algorithm since it offers more precise control over entropy and reacts more quickly to unwanted deviations. As a result, the generated texts appear even more consistent and coherent, especially for everday life conversations."><input type="radio" name="mirostat" value="2" checked=${params.value.mirostat == 2} oninput=${updateParamsInt} /> Mirostat v2</label>
         </fieldset>
         <fieldset class="params">
           ${FloatField({ label: "Entropy tau", title: "Tau controls the desired level of entropy (or 'surprise') in the text. A low tau (e.g. 0.5) would mean that a text is very predictable, but will also be very coherent. A high tau (e.g. 8.0) would mean that the text is very creative and surprising, but may also be difficult to follow because unlikely words will occur frequently.", max: 10.0, min: 0.0, name: "mirostat_tau", step: 0.01, value: params.value.mirostat_tau })}
           ${FloatField({ label: "Learning-rate eta", title: "Eta determines how quickly the Mirostat algorithm adjusts its predictions to achieve the desired entropy. A learning rate that is too high can cause the algorithm to react too quickly and possibly become unstable, because the algorithm will try to maintain a balance between surprises and precision in the context of only a few words. In this way, 'the common thread' could be lost. Whereas a learning rate that is too low means that the algorithm reacts too slowly and a red thread becomes a heavy goods train that takes a long time to come to a halt and change a 'topic station'.", max: 1.0, min: 0.0, name: "mirostat_eta", step: 0.01, value: params.value.mirostat_eta })}
         </fieldset>

         <hr style="height: 1px; background-color: #ececf1; border: none;" />

         <fieldset class="params">
           ${IntField({ label: "Show Probabilities", title: "If greater than 0, the response also contains the probabilities of top N tokens for each generated token given the sampling settings. The tokens will be colored in gradient from green to red depending on their probabilities. Note that for temperature 0 the tokens are sampled greedily but token probabilities are still being calculated via a simple softmax of the logits without considering any other sampler settings. Defaults to 0", max: 10, min: 0, step: 1, name: "n_probs", value: params.value.n_probs })}
         </fieldset>
       </details>
     </form>
   `
 }

 // todo - beautify apikey section with css

 const probColor = (p) => {