export llama_timings as struct and expose them in server

Author: Tobias Lütke
Date:   2023-07-04 21:52:04 -04:00
parent c19daa4eb5
commit efa86bf2a6
7 changed files with 1170 additions and 1001 deletions

examples/server/public/completion.js

@@ -5,6 +5,8 @@ const paramDefaults = {
     stop: ["</s>"]
 };
 
+let generation_settings = null;
+
 /**
  * This function completes the input text using a llama dictionary.
  * @param {object} params - The parameters for the completion request.
@@ -66,6 +68,9 @@ export const llamaComplete = async (params, controller, callback) => {
 
             // if we got a stop token from server, we will break here
             if (result.data.stop) {
+                if(result.data.generation_settings) {
+                    generation_settings = result.data.generation_settings;
+                }
                 break;
             }
         }
@@ -79,3 +84,11 @@ export const llamaComplete = async (params, controller, callback) => {
 
     return content;
 }
+
+export const llamaModelInfo = async () => {
+    if (!generation_settings) {
+        generation_settings = await fetch("/model.json").then(r => r.json());
+    }
+    return generation_settings;
+}
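For reference, a minimal usage sketch of the new export (not part of this commit). Only llamaModelInfo, llamaComplete, the /model.json endpoint, and the data.stop / data.generation_settings / data.timings fields appear in this diff; the AbortController argument and the exact shape of each streamed chunk are assumptions about the surrounding file:

    import { llamaComplete, llamaModelInfo } from '/completion.js';

    // the first call fetches /model.json; later calls return the cached settings
    const settings = await llamaModelInfo();
    console.log('generation settings:', settings);

    // stream a completion and log the timing stats the server now attaches
    await llamaComplete({ prompt: 'Hello' }, new AbortController(), (result) => {
        if (result.data.timings) {
            console.log(`${result.data.timings.predicted_per_second.toFixed(2)} tokens per second`);
        }
    });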

examples/server/public/index.html

@@ -6,7 +6,6 @@
     <title>llama.cpp - chat</title>
 
     <style>
-
       body {
         background-color: #fff;
         color: #000;
@@ -22,10 +21,6 @@
         height: 100%;
       }
 
-      header, footer {
-        text-align: center;
-      }
-
       main {
         margin: 3px;
         display: flex;
@@ -99,6 +94,15 @@
         margin: 0.5em 0;
         display: block;
       }
 
+      header, footer {
+        text-align: center;
+      }
+
+      footer {
+        font-size: 80%;
+        color: #888;
+      }
+
     </style>
     <script type="module">
@@ -109,7 +113,7 @@
       import { llamaComplete } from '/completion.js';
 
       const session = signal({
-        prompt: "This is a conversation between user and llama, a friendly chatbot. respond in markdown.",
+        prompt: "This is a conversation between user and llama, a friendly chatbot. respond in simple markdown.",
         template: "{{prompt}}\n\n{{history}}\n{{char}}:",
         historyTemplate: "{{name}}: {{message}}",
         transcript: [],
@@ -118,15 +122,6 @@
         user: "User",
       })
 
-      const transcriptUpdate = (transcript) => {
-        session.value = {
-          ...session.value,
-          transcript
-        }
-      }
-
-      const chatStarted = computed(() => session.value.transcript.length > 0)
-
       const params = signal({
         n_predict: 400,
         temperature: 0.7,
@@ -136,8 +131,18 @@
         top_p: 0.5,
       })
 
+      const llamaStats = signal(null)
+
       const controller = signal(null)
       const generating = computed(() => controller.value == null )
+      const chatStarted = computed(() => session.value.transcript.length > 0)
+
+      const transcriptUpdate = (transcript) => {
+        session.value = {
+          ...session.value,
+          transcript
+        }
+      }
 
       // simple template replace
       const template = (str, extraSettings) => {
@@ -181,7 +186,11 @@
           transcriptUpdate([...history, ["{{char}}", currentMessage]])
 
           if (data.stop) {
-            console.log("-->", data, ' response was:', currentMessage, 'transcript state:', session.value.transcript);
+            console.log("Completion finished: '", currentMessage, "', summary: ", data);
           }
+
+          if (data.timings) {
+            llamaStats.value = data.timings;
+          }
         })
@@ -219,13 +228,12 @@
       return html`
         <form onsubmit=${submit}>
           <div>
-            <textarea type="text" rows=2 onkeypress=${enterSubmits} value="${message}" oninput=${(e) => message.value = e.target.value} placeholder="Say something..."/>
+            <textarea type="text" rows=2 onkeypress=${enterSubmits} value="${message}" oninput=${(e) => message.value = e.target.value} placeholder="Say something..."/>
           </div>
           <div class="right">
-            <button type="submit" disabled=${!generating.value} >Send</button>
-            <button onclick=${stop} disabled=${generating}>Stop</button>
-            <button onclick=${reset}>Reset</button>
+            <button type="submit" disabled=${!generating.value} >Send</button>
+            <button onclick=${stop} disabled=${generating}>Stop</button>
+            <button onclick=${reset}>Reset</button>
           </div>
         </form>
       `
@@ -243,7 +251,7 @@
       }, [messages])
 
       const chatLine = ([user, msg]) => {
-        return html`<p key=${msg}><strong>${template(user)}:</strong> <${Markdown} text=${template(msg)} /></p>`
+        return html`<p key=${msg}><strong>${template(user)}:</strong> <${Markdownish} text=${template(msg)} /></p>`
       };
 
       return html`
@@ -313,39 +321,52 @@
           </form>
         `
       }
 
-      const Markdown = (params) => {
-        const md = params.text
-          .replace(/^#{1,6} (.*)$/gim, '<h3>$1</h3>')
-          .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
-          .replace(/__(.*?)__/g, '<strong>$1</strong>')
-          .replace(/\*(.*?)\*/g, '<em>$1</em>')
-          .replace(/_(.*?)_/g, '<em>$1</em>')
-          .replace(/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>')
-          .replace(/`(.*?)`/g, '<code>$1</code>')
-          .replace(/\n/gim, '<br />');
-        return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
-      };
+      // poor mans markdown replacement
+      const Markdownish = (params) => {
+        const md = params.text
+          .replace(/^#{1,6} (.*)$/gim, '<h3>$1</h3>')
+          .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
+          .replace(/__(.*?)__/g, '<strong>$1</strong>')
+          .replace(/\*(.*?)\*/g, '<em>$1</em>')
+          .replace(/_(.*?)_/g, '<em>$1</em>')
+          .replace(/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>')
+          .replace(/`(.*?)`/g, '<code>$1</code>')
+          .replace(/\n/gim, '<br />');
+        return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
+      };
+
+      const ModelGenerationInfo = (params) => {
+        if (!llamaStats.value) {
+          return html`<span/>`
+        }
+        return html`
+          <span>
+            ${llamaStats.value.predicted_per_token_ms.toFixed()}ms per token, ${llamaStats.value.predicted_per_second.toFixed(2)} tokens per second
+          </span>
+        `
+      }
 
       function App(props) {
         return html`
-          <div id="container">
-            <header>
-              <h1>llama.cpp</h1>
-            </header>
+          <div id="container">
+            <header>
+              <h1>llama.cpp</h1>
+            </header>
 
-            <main id="content">
-              <${chatStarted.value ? ChatLog : ConfigForm} />
-            </main>
+            <main id="content">
+              <${chatStarted.value ? ChatLog : ConfigForm} />
+            </main>
 
-            <footer id="write">
-              <${MessageInput} />
-            </footer>
+            <section id="write">
+              <${MessageInput} />
+            </section>
 
-            <footer>
-              <p>Powered by <a href="https://github.com/ggerganov/llama.cpp">llama.cpp</a> and <a href="https://ggml.ai">ggml.ai</a></p>
-            </footer>
-          </div>
+            <footer>
+              <p><${ModelGenerationInfo} /></p>
+              <p>Powered by <a href="https://github.com/ggerganov/llama.cpp">llama.cpp</a> and <a href="https://ggml.ai">ggml.ai</a>.</p>
+            </footer>
+          </div>
         `;
       }
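The ModelGenerationInfo component above reads just two fields of the timings object the server now returns, predicted_per_token_ms and predicted_per_second; the rest of the payload's shape is not shown in this diff. As a sketch, the same formatting logic as a plain function:

    // mirrors what ModelGenerationInfo renders, for use outside the component
    const formatTimings = (timings) => {
        if (!timings) {
            return '';
        }
        return `${timings.predicted_per_token_ms.toFixed()}ms per token, ` +
            `${timings.predicted_per_second.toFixed(2)} tokens per second`;
    };

    // formatTimings({ predicted_per_token_ms: 42.7, predicted_per_second: 23.4 })
    // -> "43ms per token, 23.40 tokens per second"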