diff --git a/example/src/App.tsx b/example/src/App.tsx
index 52ad9c5c..731de7d7 100644
--- a/example/src/App.tsx
+++ b/example/src/App.tsx
@@ -176,9 +176,7 @@ export default function App() {
           addSystemMessage(`Heat up time: ${tHeat}ms`)
 
           addSystemMessage('Benchmarking the model...')
-          const result = await context.bench(512, 128, 1, 3)
-
-          const [
+          const {
             modelDesc,
             modelSize,
             modelNParams,
@@ -186,7 +184,7 @@ export default function App() {
             ppStd,
             tgAvg,
             tgStd,
-          ] = JSON.parse(result)
+          } = await context.bench(512, 128, 1, 3)
 
           const size = `${(modelSize / 1024.0 / 1024.0 / 1024.0).toFixed(2)} GiB`
           const nParams = `${(modelNParams / 1e9).toFixed(2)}B`
@@ -209,7 +207,7 @@ export default function App() {
           return
         case '/save-session':
           context.saveSession(`${dirs.DocumentDir}/llama-session.bin`).then(tokensSaved => {
-            console.log('Session saved:', result)
+            console.log('Session tokens saved:', tokensSaved)
             addSystemMessage(`Session saved! ${tokensSaved} tokens saved.`)
           }).catch(e => {
             console.log('Session save failed:', e)
diff --git a/src/index.ts b/src/index.ts
index 87a289ad..cc7843a1 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -40,6 +40,16 @@ export type ContextParams = NativeContextParams
 
 export type CompletionParams = Omit<NativeCompletionParams, 'emit_partial_completion'>
 
+export type BenchResult = {
+  modelDesc: string
+  modelSize: number
+  modelNParams: number
+  ppAvg: number
+  ppStd: number
+  tgAvg: number
+  tgStd: number
+}
+
 export class LlamaContext {
   id: number
 
@@ -116,8 +126,26 @@ export class LlamaContext {
     return RNLlama.embedding(this.id, text)
  }
 
-  bench(pp: number, tg: number, pl: number, nr: number): Promise<string> {
-    return RNLlama.bench(this.id, pp, tg, pl, nr)
+  async bench(pp: number, tg: number, pl: number, nr: number): Promise<BenchResult> {
+    const result = await RNLlama.bench(this.id, pp, tg, pl, nr)
+    const [
+      modelDesc,
+      modelSize,
+      modelNParams,
+      ppAvg,
+      ppStd,
+      tgAvg,
+      tgStd,
+    ] = JSON.parse(result)
+    return {
+      modelDesc,
+      modelSize,
+      modelNParams,
+      ppAvg,
+      ppStd,
+      tgAvg,
+      tgStd,
+    }
   }
 
   async release(): Promise<void> {