Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Expose generation timings from server & update completions.js #2116

Merged
merged 4 commits into from
Jul 5, 2023
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
392 changes: 210 additions & 182 deletions examples/server/completion.js.hpp

Large diffs are not rendered by default.

1,556 changes: 805 additions & 751 deletions examples/server/index.html.hpp

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions examples/server/public/completion.js
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,9 @@ export async function* llama(prompt, params = {}, config = {}) {

// if we got a stop token from server, we will break here
if (result.data.stop) {
if (result.data.generation_settings) {
generation_settings = result.data.generation_settings;
}
break;
}
}
Expand Down
123 changes: 72 additions & 51 deletions examples/server/public/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
<title>llama.cpp - chat</title>

<style>

body {
background-color: #fff;
color: #000;
Expand All @@ -22,10 +21,6 @@
height: 100%;
}

header, footer {
text-align: center;
}

main {
margin: 3px;
display: flex;
Expand Down Expand Up @@ -99,6 +94,15 @@
margin: 0.5em 0;
display: block;
}

header, footer {
text-align: center;
}

footer {
font-size: 80%;
color: #888;
}
</style>

<script type="module">
Expand All @@ -109,7 +113,7 @@
import { llama } from '/completion.js';

const session = signal({
prompt: "This is a conversation between user and llama, a friendly chatbot. respond in markdown.",
prompt: "This is a conversation between user and llama, a friendly chatbot. respond in simple markdown.",
template: "{{prompt}}\n\n{{history}}\n{{char}}:",
historyTemplate: "{{name}}: {{message}}",
transcript: [],
Expand All @@ -118,15 +122,6 @@
user: "User",
})

const transcriptUpdate = (transcript) => {
session.value = {
...session.value,
transcript
}
}

const chatStarted = computed(() => session.value.transcript.length > 0)

const params = signal({
n_predict: 400,
temperature: 0.7,
Expand All @@ -136,8 +131,18 @@
top_p: 0.5,
})

// Latest generation timing stats reported by the server
// (populated from `data.timings` when a completion finishes; null until then).
const llamaStats = signal(null)
// Presumably holds the in-flight request's AbortController — TODO confirm against `stop`/`runLlama`.
const controller = signal(null)

// NOTE(review): this computed is true when controller.value is null, i.e. when NO
// request is active — the name reads inverted relative to its later use as
// `disabled=${!generating.value}` on the Send button; verify intent.
const generating = computed(() => controller.value == null )
// True once at least one chat message exists; switches the UI from ConfigForm to ChatLog.
const chatStarted = computed(() => session.value.transcript.length > 0)

// Replace the chat transcript inside the session signal, preserving every
// other session field (prompt, templates, names) untouched.
const transcriptUpdate = (transcript) => {
  const current = session.value;
  session.value = Object.assign({}, current, { transcript });
};

// simple template replace
const template = (str, extraSettings) => {
Expand Down Expand Up @@ -181,7 +186,11 @@
transcriptUpdate([...history, ["{{char}}", currentMessage]])

if (data.stop) {
console.log("-->", data, ' response was:', currentMessage, 'transcript state:', session.value.transcript);
console.log("Completion finished: '", currentMessage, "', summary: ", data);
}

if (data.timings) {
llamaStats.value = data.timings;
}
}

Expand Down Expand Up @@ -219,13 +228,12 @@
return html`
<form onsubmit=${submit}>
<div>
<textarea type="text" rows=2 onkeypress=${enterSubmits} value="${message}" oninput=${(e) => message.value = e.target.value} placeholder="Say something..."/>

<textarea type="text" rows=2 onkeypress=${enterSubmits} value="${message}" oninput=${(e) => message.value = e.target.value} placeholder="Say something..."/>
</div>
<div class="right">
<button type="submit" disabled=${!generating.value} >Send</button>
<button onclick=${stop} disabled=${generating}>Stop</button>
<button onclick=${reset}>Reset</button>
<button type="submit" disabled=${!generating.value} >Send</button>
<button onclick=${stop} disabled=${generating}>Stop</button>
<button onclick=${reset}>Reset</button>
</div>
</form>
`
Expand All @@ -243,7 +251,7 @@
}, [messages])

const chatLine = ([user, msg]) => {
return html`<p key=${msg}><strong>${template(user)}:</strong> <${Markdown} text=${template(msg)} /></p>`
return html`<p key=${msg}><strong>${template(user)}:</strong> <${Markdownish} text=${template(msg)} /></p>`
};

return html`
Expand Down Expand Up @@ -313,39 +321,52 @@
</form>
`
}
// Minimal regex-based markdown-to-HTML converter (removed side of this diff;
// superseded by an identically-implemented replacement renamed `Markdownish`).
// Handles headings (all rendered as <h3>), bold, italics, fenced and inline
// code, and newline-to-<br /> conversion — nothing else.
// NOTE(review): the result is injected as raw HTML via dangerouslySetInnerHTML
// with no sanitization; model output reaches the DOM unescaped — verify this
// is acceptable for this local-server UI.
const Markdown = (params) => {
const md = params.text
.replace(/^#{1,6} (.*)$/gim, '<h3>$1</h3>')
.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
.replace(/__(.*?)__/g, '<strong>$1</strong>')
.replace(/\*(.*?)\*/g, '<em>$1</em>')
.replace(/_(.*?)_/g, '<em>$1</em>')
.replace(/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>')
.replace(/`(.*?)`/g, '<code>$1</code>')
.replace(/\n/gim, '<br />');
return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
};
// Poor man's markdown replacement: a fixed, ordered list of regex rewrite
// rules applied to the raw text, then injected as HTML.
const Markdownish = (params) => {
  // Order matters: ** / __ must run before * / _, and fenced code before inline.
  const rules = [
    [/^#{1,6} (.*)$/gim, '<h3>$1</h3>'],
    [/\*\*(.*?)\*\*/g, '<strong>$1</strong>'],
    [/__(.*?)__/g, '<strong>$1</strong>'],
    [/\*(.*?)\*/g, '<em>$1</em>'],
    [/_(.*?)_/g, '<em>$1</em>'],
    [/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>'],
    [/`(.*?)`/g, '<code>$1</code>'],
    [/\n/gim, '<br />'],
  ];
  const md = rules.reduce((text, [pattern, replacement]) => text.replace(pattern, replacement), params.text);
  // NOTE: raw, unsanitized HTML insertion — same contract as before.
  return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
};

// Footer widget showing per-token latency and throughput from the last
// completion; renders an empty <span/> until stats have arrived.
const ModelGenerationInfo = (params) => {
  const stats = llamaStats.value;
  if (!stats) {
    return html`<span/>`;
  }
  return html`
<span>
${stats.predicted_per_token_ms.toFixed()}ms per token, ${stats.predicted_per_second.toFixed(2)} tokens per second
</span>
`;
};

function App(props) {

return html`
<div id="container">
<header>
<h1>llama.cpp</h1>
</header>

<main id="content">
<${chatStarted.value ? ChatLog : ConfigForm} />
</main>

<footer id="write">
<${MessageInput} />
</footer>

<footer>
<p>Powered by <a href="https://github.com/ggerganov/llama.cpp">llama.cpp</a> and <a href="https://ggml.ai">ggml.ai</a></p>
</footer>
</div>
<div id="container">
<header>
<h1>llama.cpp</h1>
</header>

<main id="content">
<${chatStarted.value ? ChatLog : ConfigForm} />
</main>

<section id="write">
<${MessageInput} />
</section>

<footer>
<p><${ModelGenerationInfo} /></p>
<p>Powered by <a href="https://github.com/ggerganov/llama.cpp">llama.cpp</a> and <a href="https://ggml.ai">ggml.ai</a>.</p>
</footer>
</div>
`;
}

Expand Down
Loading