clearer UI text, add timings to completion log

This commit is contained in:
psugihara 2023-12-28 10:25:58 -08:00
parent cc8cc3face
commit e3a1eb41a0
4 changed files with 26 additions and 7 deletions

View file

@@ -1,5 +1,7 @@
import Foundation
// To use this in your own project, add llama.cpp as a swift package dependency
// and uncomment this import line.
// import llama
enum LlamaError: Error {

View file

@@ -4,6 +4,7 @@ import Foundation
class LlamaState: ObservableObject {
@Published var messageLog = ""
@Published var cacheCleared = false
let NS_PER_S = 1_000_000_000.0
private var llamaContext: LlamaContext?
private var defaultModelUrl: URL? {
@@ -25,7 +26,7 @@ class LlamaState: ObservableObject {
llamaContext = try LlamaContext.create_context(path: modelUrl.path())
messageLog += "Loaded model \(modelUrl.lastPathComponent)\n"
} else {
messageLog += "Select a model from the list\n"
messageLog += "Load a model from the list below\n"
}
}
@@ -34,15 +35,29 @@ class LlamaState: ObservableObject {
return
}
let t_start = DispatchTime.now().uptimeNanoseconds
await llamaContext.completion_init(text: text)
let t_heat_end = DispatchTime.now().uptimeNanoseconds
let t_heat = Double(t_heat_end - t_start) / NS_PER_S
messageLog += "\(text)"
while await llamaContext.n_cur < llamaContext.n_len {
let result = await llamaContext.completion_loop()
messageLog += "\(result)"
}
let t_end = DispatchTime.now().uptimeNanoseconds
let t_generation = Double(t_end - t_heat_end) / NS_PER_S
let tokens_per_second = Double(await llamaContext.n_len) / t_generation
await llamaContext.clear()
messageLog += "\n\ndone\n"
messageLog += """
\n
Done
Heat up took \(t_heat)s
Generated \(tokens_per_second) t/s\n
"""
}
func bench() async {
@@ -56,10 +71,10 @@ class LlamaState: ObservableObject {
messageLog += await llamaContext.model_info() + "\n"
let t_start = DispatchTime.now().uptimeNanoseconds
await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up
let _ = await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up
let t_end = DispatchTime.now().uptimeNanoseconds
let t_heat = Double(t_end - t_start) / 1_000_000_000.0
let t_heat = Double(t_end - t_start) / NS_PER_S
messageLog += "Heat up time: \(t_heat) seconds, please wait...\n"
// if more than 5 seconds, then we're probably running on a slow device

View file

@@ -63,7 +63,6 @@ struct ContentView: View {
modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true",
filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf"
)
.padding(.top, 4)
DownloadButton(
llamaState: llamaState,
@@ -104,7 +103,10 @@ struct ContentView: View {
ContentView.cleanupModelCaches()
llamaState.cacheCleared = true
}
}.font(.system(size: 12))
}
.padding(.top, 4)
.font(.system(size: 12))
.frame(maxWidth: .infinity, alignment: .leading)
}
.padding()
}

View file

@@ -93,7 +93,7 @@ struct DownloadButton: View {
print("Error: \(err.localizedDescription)")
}
}) {
Text("\(modelName) (Downloaded)")
Text("Load \(modelName)")
}
} else {
Text("Unknown status")