Commit

feat(langchain): add chat example file
Signed-off-by: Tomas Dvorak <[email protected]>
Tomas2D committed Nov 10, 2023
1 parent 912c01c commit 49153ea
Showing 2 changed files with 52 additions and 3 deletions.
6 changes: 3 additions & 3 deletions examples/langchain/llm-chat.ts
@@ -1,4 +1,4 @@
-import { HumanChatMessage } from 'langchain/schema';
+import { HumanMessage } from 'langchain/schema';

import { GenAIChatModel } from '../../src/langchain/llm-chat.js';

@@ -31,7 +31,7 @@ const makeClient = (stream?: boolean) =>
const chat = makeClient();

const response = await chat.call([
-  new HumanChatMessage(
+  new HumanMessage(
    'What is a good name for a company that makes colorful socks?',
  ),
]);
@@ -43,7 +43,7 @@ const makeClient = (stream?: boolean) =>
// Streaming
const chat = makeClient(true);

-await chat.call([new HumanChatMessage('Tell me a joke.')], undefined, [
+await chat.call([new HumanMessage('Tell me a joke.')], undefined, [
  {
    handleLLMNewToken(token) {
      console.log(token);
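
For reference, here is roughly how the updated llm-chat.ts fits together after this change, which tracks LangChain's rename of HumanChatMessage to HumanMessage. This is a minimal sketch, not the full file: the GenAIChatModel constructor options and the model id are assumptions modeled on the GenAIModel options in the new llm.ts below, since the hunks above elide the makeClient body.

import { HumanMessage } from 'langchain/schema';

import { GenAIChatModel } from '../../src/langchain/llm-chat.js';

// Assumed constructor shape, mirroring GenAIModel in llm.ts below;
// the real makeClient body is not shown in the diff above.
const makeClient = (stream?: boolean) =>
  new GenAIChatModel({
    modelId: 'some-chat-model-id', // hypothetical placeholder
    stream,
    configuration: {
      endpoint: process.env.ENDPOINT,
      apiKey: process.env.API_KEY,
    },
  });

// Basic call: pass an array of messages, get a response back.
const chat = makeClient();
const response = await chat.call([
  new HumanMessage('What is a good name for a company that makes colorful socks?'),
]);
console.log(response);

// Streaming call: tokens arrive through the callback handler array.
const streamingChat = makeClient(true);
await streamingChat.call([new HumanMessage('Tell me a joke.')], undefined, [
  {
    handleLLMNewToken(token) {
      console.log(token);
    },
  },
]);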
49 changes: 49 additions & 0 deletions examples/langchain/llm.ts
@@ -0,0 +1,49 @@
import { GenAIModel } from '../../src/langchain/index.js';

const makeClient = (stream?: boolean) =>
  new GenAIModel({
    modelId: 'google/flan-t5-xl',
    stream,
    configuration: {
      endpoint: process.env.ENDPOINT,
      apiKey: process.env.API_KEY,
    },
    parameters: {
      decoding_method: 'greedy',
      min_new_tokens: 5,
      max_new_tokens: 25,
      repetition_penalty: 1.5,
    },
  });

{
  // Basic
  console.info('---Single Input Example---');
  const model = makeClient();

  const prompt = 'What is a good name for a company that makes colorful socks?';
  console.info(`Request: ${prompt}`);
  const response = await model.call(prompt);
  console.log(`Response: ${response}`);
}

{
  console.info('---Multiple Inputs Example---');
  const model = makeClient();

  const prompts = ['What is IBM?', 'What is WatsonX?'];
  console.info('Request prompts:', prompts);
  const response = await model.generate(prompts);
  console.info('Response:', response);
}

{
  console.info('---Streaming Example---');
  const chat = makeClient(true);

  const prompt = 'What is a molecule?';
  console.info(`Request: ${prompt}`);
  for await (const token of await chat.stream(prompt)) {
    console.info(`Received token: ${token}`);
  }
}
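
One note on the Multiple Inputs example above: model.generate() returns a LangChain LLMResult rather than a plain string, so logging the response prints the whole result object. A minimal sketch of pulling out just the texts, assuming the standard LLMResult shape (one inner array of candidate generations per prompt, each entry carrying a text field):

const model = makeClient();
const result = await model.generate(['What is IBM?', 'What is WatsonX?']);

// generations[i] holds the candidates for prompt i; take the first.
result.generations.forEach((candidates, i) => {
  console.info(`Prompt ${i}: ${candidates[0].text.trim()}`);
});

Both example files read ENDPOINT and API_KEY from the environment, so those must be set before running either script.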
