// outputParseStage.js
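// Two small examples of StructuredOutputParser with LangChain.js and a local Ollama
// model: each builds a prompt -> LLM -> parser pipeline that returns a plain JS object.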
import {StructuredOutputParser} from "langchain/output_parsers";
import {RunnableSequence} from "@langchain/core/runnables";
import {PromptTemplate} from "@langchain/core/prompts";
import {Ollama} from "@langchain/community/llms/ollama";
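
// Example 1: ask a factual question and have the model's free-text reply
// parsed into an { answer, source } object.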
const main = async function () {
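    // Field names and natural-language descriptions that define the expected JSON shape.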
    const parser = StructuredOutputParser.fromNamesAndDescriptions({
        answer: "answer to the user's question",
        source: "source used to answer the user's question"
    });
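    // Pipe the filled-in prompt into a local Ollama model ("mistral") and parse its raw text output.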
    const chain = RunnableSequence.from([
        PromptTemplate.fromTemplate(
            "Answer the user's question as best as possible.\n{format_instructions}\n{question}"
        ),
        new Ollama({ temperature: 0, model: "mistral" }),
        parser
    ]);
console.log("Waiting for response...")
const response = await chain.invoke({
question: "What is the capital of Brazil?",
format_instructions: parser.getFormatInstructions()
})
console.log(response)
}
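
// main() is defined but never invoked; uncomment the next line to run the first example too.
// main();

// Example 2: the same structured-output pattern with a domain-specific persona
// prompt and three output fields (answer, reason, accuracy).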
const iceCreamStructuredOutput = async function () {
    const template = "You are an ice cream specialist who answers users' questions about ice cream.\n{format_instructions}\n{question}";
    const parser = StructuredOutputParser.fromNamesAndDescriptions({
        answer: "The answer to the user's question",
        reason: "The reasoning behind the answer",
        accuracy: "How accurate the answer is estimated to be"
    });
    const chain = RunnableSequence.from([
        PromptTemplate.fromTemplate(template),
        new Ollama({ temperature: 0, model: "mistral" }),
        parser
    ]);
    console.log(await chain.invoke({
        question: "What is the best ice cream flavour?",
        format_instructions: parser.getFormatInstructions()
    }));
};

iceCreamStructuredOutput();
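
// To run (assuming a local Ollama server with the mistral model pulled, e.g. `ollama pull mistral`):
//   node outputParseStage.js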