diff --git a/README.md b/README.md
index d5def39..da4058c 100644
--- a/README.md
+++ b/README.md
@@ -243,6 +243,7 @@ await model.call('Tell me a joke.', undefined, [
 
 ```typescript
 import { GenAIChatModel } from '@ibm-generative-ai/node-sdk/langchain';
+import { SystemMessage, HumanMessage } from 'langchain/schema';
 
 const client = new GenAIChatModel({
   modelId: 'eleutherai/gpt-neox-20b',
@@ -268,13 +269,13 @@ const client = new GenAIChatModel({
 });
 
 const response = await client.call([
-  new SystemChatMessage(
+  new SystemMessage(
     'You are a helpful assistant that translates English to Spanish.',
   ),
-  new HumanChatMessage('I love programming.'),
+  new HumanMessage('I love programming.'),
 ]);
 
-console.info(response.text); // "Me encanta la programación."
+console.info(response.content); // "Me encanta la programación."
 ```
 
 #### Prompt Templates (GenAI x LangChain)
diff --git a/src/langchain/llm-chat.ts b/src/langchain/llm-chat.ts
index 9fb0758..8073fcb 100644
--- a/src/langchain/llm-chat.ts
+++ b/src/langchain/llm-chat.ts
@@ -74,7 +74,7 @@ export class GenAIChatModel extends BaseChatModel {
             `Unsupported message type "${msg._getType()}"`,
           );
         }
-        return `${type.stopSequence}${msg.text}`;
+        return `${type.stopSequence}${msg.content}`;
       })
       .join('\n')
       .concat(this.#rolesMapping.system.stopSequence);
diff --git a/src/tests/e2e/langchain/llm-chat.test.ts b/src/tests/e2e/langchain/llm-chat.test.ts
index 2309f4c..48e74b3 100644
--- a/src/tests/e2e/langchain/llm-chat.test.ts
+++ b/src/tests/e2e/langchain/llm-chat.test.ts
@@ -1,4 +1,4 @@
-import { HumanChatMessage, SystemChatMessage } from 'langchain/schema';
+import { HumanMessage, SystemMessage } from 'langchain/schema';
 import { GenAIChatModel } from '../../../langchain/index.js';
 
 import { describeIf } from '../../utils.js';
@@ -47,21 +47,21 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
       const chat = makeClient();
 
       const response = await chat.call([
-        new HumanChatMessage(
+        new HumanMessage(
           'What is a good name for a company that makes colorful socks?',
         ),
       ]);
-      expectIsNonEmptyString(response.text);
+      expectIsNonEmptyString(response.content);
     });
 
     test('should handle question with additional hint', async () => {
       const chat = makeClient();
 
       const response = await chat.call([
-        new SystemChatMessage(SYSTEM_MESSAGE),
-        new HumanChatMessage('I love programming.'),
+        new SystemMessage(SYSTEM_MESSAGE),
+        new HumanMessage('I love programming.'),
       ]);
-      expectIsNonEmptyString(response.text);
+      expectIsNonEmptyString(response.content);
     });
 
     test('should handle multiple questions', async () => {
@@ -69,12 +69,12 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
 
       const response = await chat.generate([
         [
-          new SystemChatMessage(SYSTEM_MESSAGE),
-          new HumanChatMessage('I love programming.'),
+          new SystemMessage(SYSTEM_MESSAGE),
+          new HumanMessage('I love programming.'),
         ],
         [
-          new SystemChatMessage(SYSTEM_MESSAGE),
-          new HumanChatMessage('I love artificial intelligence.'),
+          new SystemMessage(SYSTEM_MESSAGE),
+          new HumanMessage('I love artificial intelligence.'),
         ],
       ]);
 
@@ -95,7 +95,7 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
       });
 
       const output = await chat.call(
-        [new HumanChatMessage('Tell me a joke.')],
+        [new HumanMessage('Tell me a joke.')],
         undefined,
         [
           {
@@ -105,8 +105,8 @@ describeIf(process.env.RUN_LANGCHAIN_CHAT_TESTS === 'true')(
       );
 
       expect(handleNewToken).toHaveBeenCalled();
-      expectIsNonEmptyString(output.text);
-      expect(tokens.join('')).toStrictEqual(output.text);
+      expectIsNonEmptyString(output.content);
+      expect(tokens.join('')).toStrictEqual(output.content);
     });
   });
 },