chore: migrate to langchain v0.1.11 #69

Merged · 4 commits · Jan 11, 2024
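This PR moves the SDK's LangChain integration from the legacy monolithic `langchain` package to the split-out `@langchain/core` (≥ 0.1.11). The recurring changes across the diff: import paths move to `@langchain/core/*`, the deprecated `call()` method becomes `invoke()`, callbacks shift from a positional argument into the options object, and `LLMChain` is replaced by LCEL `.pipe()` composition. A minimal sketch of the calling-convention change (model construction as in the README below):

```typescript
// Before (langchain 0.0.x): callbacks passed positionally
// const response = await model.call('Hello', undefined, [handlers]);

// After (@langchain/core 0.1.x): a single options object
const response = await model.invoke('Hello', {
  callbacks: [{ handleLLMNewToken: (token: string) => console.log(token) }],
});
```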
README.md (12 additions, 10 deletions)

@@ -188,7 +188,7 @@ const model = new GenAIModel({
#### Basic usage

```typescript
-const response = await model.call(
+const response = await model.invoke(
  'What would be a good company name for a company that makes colorful socks?',
);

@@ -198,7 +198,7 @@ console.log(response); // Fantasy Sockery
#### LLM Chain + Prompt Template

```typescript
-import { PromptTemplate } from 'langchain/prompts';
+import { PromptTemplate } from '@langchain/core/prompts';
import { LLMChain } from 'langchain/chains';

const prompt = new PromptTemplate({
@@ -230,20 +230,22 @@ const model = new GenAIModel({
},
});

-await model.call('Tell me a joke.', undefined, [
-  {
-    handleLLMNewToken(token: string) {
-      console.log(token);
-    },
-  },
-]);
+await model.invoke('Tell me a joke.', {
+  callbacks: [
+    {
+      handleLLMNewToken(token) {
+        console.log(token);
+      },
+    },
+  ],
+});
```
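Aside: with `@langchain/core` ≥ 0.1, anything extending `BaseLLM` also inherits a `.stream()` method, so token-by-token output no longer strictly requires callbacks. A minimal sketch, assuming `GenAIModel` inherits it unchanged:

```typescript
// Each chunk is a string fragment of the generated text.
const stream = await model.stream('Tell me a joke.');
for await (const chunk of stream) {
  console.log(chunk);
}
```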

#### Chat support

```typescript
import { GenAIChatModel } from '@ibm-generative-ai/node-sdk/langchain';
-import { SystemMessage, HumanMessage } from 'langchain/schema';
+import { SystemMessage, HumanMessage } from '@langchain/core/messages';

const client = new GenAIChatModel({
modelId: 'eleutherai/gpt-neox-20b',
@@ -285,7 +287,7 @@ This can be done via helper classes provided within our SDK.

```typescript
import { GenAIPromptTemplate } from '@ibm-generative-ai/node-sdk/langchain';
-import { PromptTemplate } from 'langchain/prompts';
+import { PromptTemplate } from '@langchain/core/prompts';

// Converting the LangChain Prompt Template (f-string) to GenAI Prompt Template
const promptTemplate = GenAIPromptTemplate.fromLangChain(
examples/langchain/llm-chat.ts (10 additions, 8 deletions)

@@ -1,4 +1,4 @@
-import { HumanMessage } from 'langchain/schema';
+import { HumanMessage } from '@langchain/core/messages';

import { GenAIChatModel } from '../../src/langchain/llm-chat.js';

@@ -30,7 +30,7 @@ const makeClient = (stream?: boolean) =>
// Basic
const chat = makeClient();

-const response = await chat.call([
+const response = await chat.invoke([
new HumanMessage(
'What is a good name for a company that makes colorful socks?',
),
@@ -43,11 +43,13 @@ const makeClient = (stream?: boolean) =>
// Streaming
const chat = makeClient(true);

-await chat.call([new HumanMessage('Tell me a joke.')], undefined, [
-  {
-    handleLLMNewToken(token) {
-      console.log(token);
-    },
-  },
-]);
+await chat.invoke([new HumanMessage('Tell me a joke.')], {
+  callbacks: [
+    {
+      handleLLMNewToken(token) {
+        console.log(token);
+      },
+    },
+  ],
+});
}
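One behavioural detail to keep in mind when porting (an assumption based on `@langchain/core`'s `BaseChatModel`, not something this diff changes): `invoke()` on a chat model resolves to a message object rather than a plain string, so the reply text lives on `.content`:

```typescript
// Sketch: reading the reply from the message returned by a chat model.
const message = await chat.invoke([new HumanMessage('Hi!')]);
console.log(message.content);
```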
examples/langchain/llm.ts (1 addition, 1 deletion)

@@ -23,7 +23,7 @@ const makeClient = (stream?: boolean) =>

const prompt = 'What is a good name for a company that makes colorful socks?';
console.info(`Request: ${prompt}`);
-const response = await model.call(prompt);
+const response = await model.invoke(prompt);
console.log(`Response: ${response}`);
}

examples/langchain/prompt-templates.ts (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
-import { PromptTemplate } from 'langchain/prompts';
+import { PromptTemplate } from '@langchain/core/prompts';

import { GenAIPromptTemplate } from '../../src/langchain/index.js';

package.json (2 additions, 2 deletions)

@@ -64,11 +64,12 @@
"example:chat": "yarn run example:run examples/chat.ts"
},
"peerDependencies": {
"langchain": ">=0.0.155"
"@langchain/core": ">=0.1.11"
},
"devDependencies": {
"@commitlint/cli": "^18.0.0",
"@commitlint/config-conventional": "^18.0.0",
"@langchain/core": "^0.1.11",
"@types/lodash": "^4.14.200",
"@types/node": "^20.8.8",
"@types/promise-retry": "^1.1.5",
@@ -83,7 +84,6 @@
"eslint-plugin-import": "^2.29.0",
"husky": "^8.0.3",
"jest-extended": "^4.0.2",
"langchain": "^0.0.171",
"lint-staged": "^15.0.2",
"lodash": "^4.17.21",
"msw": "^1.3.2",
src/langchain/llm-chat.ts (7 additions, 4 deletions)

@@ -1,11 +1,14 @@
-import { BaseChatModel, BaseChatModelParams } from 'langchain/chat_models/base';
+import {
+  BaseChatModel,
+  BaseChatModelParams,
+} from '@langchain/core/language_models/chat_models';
import {
  BaseMessage,
-  ChatResult,
  MessageType,
  SystemMessage,
-} from 'langchain/schema';
-import { CallbackManagerForLLMRun } from 'langchain/callbacks';
+} from '@langchain/core/messages';
+import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+import { ChatResult } from '@langchain/core/outputs';

import { InvalidInputError } from '../errors.js';
import { GenerateOutput } from '../client/types.js';
src/langchain/llm.ts (4 additions, 4 deletions)

@@ -1,7 +1,7 @@
-import { BaseLLM, BaseLLMParams } from 'langchain/llms/base';
-import { CallbackManagerForLLMRun } from 'langchain/callbacks';
-import type { LLMResult, Generation } from 'langchain/schema';
-import { GenerationChunk } from 'langchain/schema';
+import { BaseLLM, BaseLLMParams } from '@langchain/core/language_models/llms';
+import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+import type { LLMResult, Generation } from '@langchain/core/outputs';
+import { GenerationChunk } from '@langchain/core/outputs';

import { Client, Configuration } from '../client/client.js';
import {
src/langchain/prompt-template.test.ts (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
-import { PromptTemplate } from 'langchain/prompts';
+import { PromptTemplate } from '@langchain/core/prompts';

import { InvalidInputError } from '../errors.js';

src/langchain/prompt-template.ts (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
-import { PromptTemplate as LangChainPromptTemplate } from 'langchain/prompts';
+import { PromptTemplate as LangChainPromptTemplate } from '@langchain/core/prompts';

import type { PromptTemplateOutput as PromptTemplate } from '../client/types.js';
import { InvalidInputError } from '../errors.js';
src/tests/e2e/client.test.ts (2 additions, 2 deletions)

@@ -106,9 +106,9 @@ describe('client', () => {

test('should correctly process moderation chunks during streaming', async () => {
const stream = makeValidStream({
+  min_new_tokens: 1,
+  max_new_tokens: 5,
  moderations: {
-    min_new_tokens: 1,
-    max_new_tokens: 5,
hap: {
input: true,
threshold: 0.01,
src/tests/e2e/langchain/llm-chat.test.ts (7 additions, 10 deletions)

@@ -1,4 +1,4 @@
-import { HumanMessage, SystemMessage } from 'langchain/schema';
+import { HumanMessage, SystemMessage } from '@langchain/core/messages';

import { GenAIChatModel } from '../../../langchain/index.js';
import { describeIf } from '../../utils.js';
@@ -46,7 +46,7 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
test('should handle single question', async () => {
const chat = makeClient();

-const response = await chat.call([
+const response = await chat.invoke([
new HumanMessage(
'What is a good name for a company that makes colorful socks?',
),
@@ -57,7 +57,7 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
test('should handle question with additional hint', async () => {
const chat = makeClient();

-const response = await chat.call([
+const response = await chat.invoke([
new SystemMessage(SYSTEM_MESSAGE),
new HumanMessage('I love programming.'),
]);
@@ -94,14 +94,11 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
tokens.push(token);
});

-const output = await chat.call(
+const output = await chat.invoke(
  [new HumanMessage('Tell me a joke.')],
-  undefined,
-  [
-    {
-      handleLLMNewToken: handleNewToken,
-    },
-  ],
+  {
+    callbacks: [{ handleLLMNewToken: handleNewToken }],
+  },
);

expect(handleNewToken).toHaveBeenCalled();
src/tests/e2e/langchain/llm.test.ts (15 additions, 12 deletions)

@@ -1,5 +1,5 @@
-import { PromptTemplate } from 'langchain/prompts';
-import { LLMChain } from 'langchain/chains';
+import { PromptTemplate } from '@langchain/core/prompts';
+import { StringOutputParser } from '@langchain/core/output_parsers';

import { GenAIModel } from '../../../langchain/llm.js';

@@ -39,14 +39,14 @@ describe('Langchain', () => {
test.skip('should handle empty modelId', async () => {
const client = makeClient();

-const data = await client.call('Who are you?');
+const data = await client.invoke('Who are you?');
expectIsString(data);
}, 15_000);

test('should return correct response for a single input', async () => {
const client = makeClient('google/flan-ul2');

-const data = await client.call('Hello, World');
+const data = await client.invoke('Hello, World');
expectIsString(data);
}, 15_000);

@@ -97,7 +97,7 @@ describe('Langchain', () => {
const model = makeClient('google/flan-ul2');

await expect(
-  model.call('Hello, World', { timeout: 10 }),
+  model.invoke('Hello, World', { timeout: 10 }),
).rejects.toThrow();
});

@@ -109,11 +109,13 @@ describe('Langchain', () => {
tokens.push(token);
});

-const output = await client.call('Tell me a joke.', undefined, [
-  {
-    handleLLMNewToken: handleNewToken,
-  },
-]);
+const output = await client.invoke('Tell me a joke.', {
+  callbacks: [
+    {
+      handleLLMNewToken: handleNewToken,
+    },
+  ],
+});

expect(handleNewToken).toHaveBeenCalled();
expectIsString(output);
@@ -129,9 +131,10 @@ describe('Langchain', () => {
template: 'What is a good name for a company that makes {product}?',
inputVariables: ['product'],
});
+const outputParser = new StringOutputParser();

-const chain = new LLMChain({ llm: model, prompt: prompt });
-const { text } = await chain.call({ product: 'colorful socks' });
+const chain = prompt.pipe(model).pipe(outputParser);
+const text = await chain.invoke({ product: 'colorful socks' });
expectIsString(text);
}, 20_000);
});
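Pieced together, the LCEL composition that replaces `LLMChain` in this test reads as follows (a sketch only; `model` is assumed to come from the test's `makeClient()` helper):

```typescript
import { PromptTemplate } from '@langchain/core/prompts';
import { StringOutputParser } from '@langchain/core/output_parsers';

// Prompt -> model -> string parser, composed with Runnable.pipe().
const prompt = PromptTemplate.fromTemplate(
  'What is a good name for a company that makes {product}?',
);
const chain = prompt.pipe(model).pipe(new StringOutputParser());

// invoke() on the composed chain resolves to a plain string.
const text = await chain.invoke({ product: 'colorful socks' });
```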