From 49153ea9c87265b4094f431fc15d43f585048834 Mon Sep 17 00:00:00 2001
From: Tomas Dvorak
Date: Fri, 10 Nov 2023 12:27:35 +0100
Subject: [PATCH] feat(langchain): add chat example file

Signed-off-by: Tomas Dvorak
---
 examples/langchain/llm-chat.ts |  6 ++---
 examples/langchain/llm.ts      | 49 ++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+), 3 deletions(-)
 create mode 100644 examples/langchain/llm.ts

diff --git a/examples/langchain/llm-chat.ts b/examples/langchain/llm-chat.ts
index 1e5ebe5..b7b68e3 100644
--- a/examples/langchain/llm-chat.ts
+++ b/examples/langchain/llm-chat.ts
@@ -1,4 +1,4 @@
-import { HumanChatMessage } from 'langchain/schema';
+import { HumanMessage } from 'langchain/schema';
 
 import { GenAIChatModel } from '../../src/langchain/llm-chat.js';
 
@@ -31,7 +31,7 @@ const makeClient = (stream?: boolean) =>
   const chat = makeClient();
 
   const response = await chat.call([
-    new HumanChatMessage(
+    new HumanMessage(
       'What is a good name for a company that makes colorful socks?',
     ),
   ]);
@@ -43,7 +43,7 @@ const makeClient = (stream?: boolean) =>
   // Streaming
   const chat = makeClient(true);
 
-  await chat.call([new HumanChatMessage('Tell me a joke.')], undefined, [
+  await chat.call([new HumanMessage('Tell me a joke.')], undefined, [
     {
       handleLLMNewToken(token) {
         console.log(token);
diff --git a/examples/langchain/llm.ts b/examples/langchain/llm.ts
new file mode 100644
index 0000000..cb46ef8
--- /dev/null
+++ b/examples/langchain/llm.ts
@@ -0,0 +1,49 @@
+import { GenAIModel } from '../../src/langchain/index.js';
+
+const makeClient = (stream?: boolean) =>
+  new GenAIModel({
+    modelId: 'google/flan-t5-xl',
+    stream,
+    configuration: {
+      endpoint: process.env.ENDPOINT,
+      apiKey: process.env.API_KEY,
+    },
+    parameters: {
+      decoding_method: 'greedy',
+      min_new_tokens: 5,
+      max_new_tokens: 25,
+      repetition_penalty: 1.5,
+    },
+  });
+
+{
+  // Basic
+  console.info('---Single Input Example---');
+  const model = makeClient();
+
+  const prompt = 'What is a good name for a company that makes colorful socks?';
+  console.info(`Request: ${prompt}`);
+  const response = await model.call(prompt);
+  console.log(`Response: ${response}`);
+}
+
+{
+  console.info('---Multiple Inputs Example---');
+  const model = makeClient();
+
+  const prompts = ['What is IBM?', 'What is WatsonX?'];
+  console.info('Request prompts:', prompts);
+  const response = await model.generate(prompts);
+  console.info('Response:', response);
+}
+
+{
+  console.info('---Streaming Example---');
+  const chat = makeClient(true);
+
+  const prompt = 'What is a molecule?';
+  console.info(`Request: ${prompt}`);
+  for await (const token of await chat.stream(prompt)) {
+    console.info(`Received token: ${token}`);
+  }
+}