Class: ReplicateLLM
Replicate LLM implementation.
Extends
Constructors
new ReplicateLLM()
new ReplicateLLM(init?): ReplicateLLM
Parameters
• init?: Partial<ReplicateLLM> & object
Returns
Overrides
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:115
Properties
chatStrategy
chatStrategy: ReplicateChatStrategy
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:109
maxTokens?
optional maxTokens: number
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:112
model
model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:108
replicateSession
replicateSession: ReplicateSession
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:113
temperature
temperature: number
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:110
topP
topP: number
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:111
Accessors
metadata
get metadata(): object
Returns
object
contextWindow
contextWindow: number
maxTokens
maxTokens: undefined | number
model
model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
temperature
temperature: number
tokenizer
tokenizer: undefined = undefined
topP
topP: number
Overrides
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:140
Methods
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk>>
Get a chat response from the LLM
Parameters
• params: LLMChatParamsStreaming<object, object>
Returns
Promise<AsyncIterable<ChatResponseChunk>>
Overrides
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:307
chat(params)
chat(params): Promise<ChatResponse<object>>
Get a chat response from the LLM
Parameters
• params: LLMChatParamsNonStreaming<object, object>
Returns
Promise<ChatResponse<object>>
Overrides
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:310
complete()
complete(params)
complete(params): Promise<AsyncIterable<CompletionResponse>>
Get a prompt completion from the LLM
Parameters
• params: LLMCompletionParamsStreaming
Returns
Promise<AsyncIterable<CompletionResponse>>
Inherited from
Defined in
packages/core/dist/llms/index.d.ts:168
complete(params)
complete(params): Promise<CompletionResponse>
Get a prompt completion from the LLM
Parameters
• params: LLMCompletionParamsNonStreaming
Returns
Promise<CompletionResponse>
Inherited from
Defined in
packages/core/dist/llms/index.d.ts:169
mapMessageTypeA16Z()
mapMessageTypeA16Z(messageType): string
Parameters
• messageType: MessageType
Returns
string
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:217
mapMessagesToPrompt()
mapMessagesToPrompt(messages): object
Parameters
• messages: ChatMessage[]
Returns
object
prompt
prompt: string
systemPrompt
systemPrompt: undefined | MessageContent
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:151
mapMessagesToPromptA16Z()
mapMessagesToPromptA16Z(messages): object
Parameters
• messages: ChatMessage[]
Returns
object
prompt
prompt: string
systemPrompt
systemPrompt: undefined = undefined
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:203
mapMessagesToPromptLlama3()
mapMessagesToPromptLlama3(messages): object
Parameters
• messages: ChatMessage[]
Returns
object
prompt
prompt: string
systemPrompt
systemPrompt: undefined = undefined
Defined in
packages/llamaindex/src/llm/replicate_ai.ts:177
mapMessagesToPromptMeta()
mapMessagesToPromptMeta(messages, opts?): object
Parameters
• messages: ChatMessage[]
• opts?
• opts.replicate4Bit?: boolean
• opts.withBos?: boolean
• opts.withNewlines?: boolean
Returns
object
prompt
prompt: string
systemPrompt
systemPrompt: undefined | MessageContent