llama.rn
Ƭ **BenchResult**: `Object`

| Name | Type |
| --- | --- |
| `modelDesc` | `string` |
| `modelNParams` | `number` |
| `modelSize` | `number` |
| `ppAvg` | `number` |
| `ppStd` | `number` |
| `tgAvg` | `number` |
| `tgStd` | `number` |
Ƭ **CompletionParams**: `Omit<NativeCompletionParams, "emit_partial_completion" | "prompt">` & { `messages?`: `RNLlamaOAICompatibleMessage`[] ; `prompt?`: `string` }
Ƭ **ContextParams**: `NativeContextParams`
Ƭ **TokenData**: `Object`

| Name | Type |
| --- | --- |
| `completion_probabilities?` | `NativeCompletionTokenProb`[] |
| `token` | `string` |
▸ **convertJsonSchemaToGrammar**(`«destructured»`): `string` \| `Promise<string>`

**Parameters**

| Name | Type |
| --- | --- |
| `«destructured»` | `Object` |
| › `allowFetch?` | `boolean` |
| › `dotall?` | `boolean` |
| › `propOrder?` | `PropOrder` |
| › `schema` | `any` |

**Returns**

`string` \| `Promise<string>`
▸ **initLlama**(`«destructured»`, `onProgress?`): `Promise<LlamaContext>`

**Parameters**

| Name | Type |
| --- | --- |
| `«destructured»` | `NativeContextParams` |
| `onProgress?` | (`progress`: `number`) => `void` |

**Returns**

`Promise<LlamaContext>`
▸ **releaseAllLlama**(): `Promise<void>`

**Returns**

`Promise<void>`
▸ **setContextLimit**(`limit`): `Promise<void>`

**Parameters**

| Name | Type |
| --- | --- |
| `limit` | `number` |

**Returns**

`Promise<void>`