From da9c4feebb640d70a977700d60116d3d50f3751c Mon Sep 17 00:00:00 2001
From: wyh
Date: Mon, 2 Dec 2024 16:56:23 +0800
Subject: [PATCH] feat: o1 models now support streaming

---
 package-lock.json                    | 17 ++++++++++-------
 package.json                         |  1 +
 src/LLMProviders/chainManager.ts     |  2 +-
 src/LLMProviders/chatModelManager.ts | 18 ++++--------------
 4 files changed, 16 insertions(+), 22 deletions(-)

diff --git a/package-lock.json b/package-lock.json
index 5e3d6d6a..8e7c892c 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -17,6 +17,7 @@
         "@langchain/core": "^0.3.3",
         "@langchain/google-genai": "^0.1.2",
         "@langchain/groq": "^0.1.2",
+        "@langchain/openai": "^0.3.14",
         "@orama/orama": "^3.0.0-rc-2",
         "@radix-ui/react-dropdown-menu": "^2.1.2",
         "@radix-ui/react-tooltip": "^1.1.3",
@@ -3819,12 +3820,13 @@
       }
     },
     "node_modules/@langchain/openai": {
-      "version": "0.3.2",
-      "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.3.2.tgz",
-      "integrity": "sha512-p513TVHkZ+mMV4dGloprPFKaukOuOZxyPXY/IWReQK34c1dpnywmjrXg8ydcnfncNbq+kJ/kKe671NK9bic4WA==",
+      "version": "0.3.14",
+      "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.3.14.tgz",
+      "integrity": "sha512-lNWjUo1tbvsss45IF7UQtMu1NJ6oUKvhgPYWXnX9f/d6OmuLu7D99HQ3Y88vLcUo9XjjOy417olYHignMduMjA==",
+      "license": "MIT",
       "dependencies": {
         "js-tiktoken": "^1.0.12",
-        "openai": "^4.57.3",
+        "openai": "^4.71.0",
         "zod": "^3.22.4",
         "zod-to-json-schema": "^3.22.3"
       },
@@ -13545,9 +13547,10 @@
       "integrity": "sha512-Fvw+Jemq5fjjyWz6CpKx6w9s7xxqo3+JCyM0WXWeCSOboZ8ABkyvP8ID4CZuChA/wxSx+XSJmdOm8rGVyJ1hdQ=="
     },
     "node_modules/openai": {
-      "version": "4.65.0",
-      "resolved": "https://registry.npmjs.org/openai/-/openai-4.65.0.tgz",
-      "integrity": "sha512-LfA4KUBpH/8rA3vjCQ74LZtdK/8wx9W6Qxq8MHqEdImPsN1XPQ2ompIuJWkKS6kXt5Cs5i8Eb65IIo4M7U+yeQ==",
+      "version": "4.73.1",
+      "resolved": "https://registry.npmjs.org/openai/-/openai-4.73.1.tgz",
+      "integrity": "sha512-nWImDJBcUsqrhy7yJScXB4+iqjzbUEgzfA3un/6UnHFdwWhjX24oztj69Ped/njABfOdLcO/F7CeWTI5dt8Xmg==",
+      "license": "Apache-2.0",
       "dependencies": {
         "@types/node": "^18.11.18",
         "@types/node-fetch": "^2.6.4",
diff --git a/package.json b/package.json
index 25b5f8a9..c9600501 100644
--- a/package.json
+++ b/package.json
@@ -69,6 +69,7 @@
     "@langchain/core": "^0.3.3",
     "@langchain/google-genai": "^0.1.2",
     "@langchain/groq": "^0.1.2",
+    "@langchain/openai": "^0.3.14",
     "@orama/orama": "^3.0.0-rc-2",
     "@radix-ui/react-dropdown-menu": "^2.1.2",
     "@radix-ui/react-tooltip": "^1.1.3",
diff --git a/src/LLMProviders/chainManager.ts b/src/LLMProviders/chainManager.ts
index 2afb2ac1..5ac7ebd4 100644
--- a/src/LLMProviders/chainManager.ts
+++ b/src/LLMProviders/chainManager.ts
@@ -276,7 +276,7 @@ export default class ChainManager {
     if (isO1Model) {
       // Temporary fix: o1-xx models need the systemMessage converted to an aiMessage
       effectivePrompt = ChatPromptTemplate.fromMessages([
-        [AI_SENDER, this.getLangChainParams().systemMessage || ""],
+        [AI_SENDER, getSystemPrompt() || ""],
         effectivePrompt,
       ]);
     }
diff --git a/src/LLMProviders/chatModelManager.ts b/src/LLMProviders/chatModelManager.ts
index 77b22c5e..62fc57fd 100644
--- a/src/LLMProviders/chatModelManager.ts
+++ b/src/LLMProviders/chatModelManager.ts
@@ -68,9 +68,6 @@ export default class ChatModelManager {
   }
 
   private getModelConfig(customModel: CustomModel): ModelConfig {
-    const decrypt = (key: string) => this.encryptionService.getDecryptedKey(key);
-    const params = this.getLangChainParams();
-
     const settings = getSettings();
 
     // Check if the model starts with "o1"
@@ -97,7 +94,7 @@ export default class ChatModelManager {
       },
       // @ts-ignore
       openAIOrgId: getDecryptedKey(settings.openAIOrgId),
-      ...this.handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature, true),
+      ...this.handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature),
     },
     [ChatModelProviders.ANTHROPIC]: {
       anthropicApiKey: getDecryptedKey(customModel.apiKey || settings.anthropicApiKey),
@@ -118,7 +115,7 @@ export default class ChatModelManager {
         baseURL: customModel.baseUrl,
         fetch: customModel.enableCors ? safeFetch : undefined,
       },
-      ...this.handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature, true),
+      ...this.handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature),
     },
     [ChatModelProviders.COHEREAI]: {
       apiKey: getDecryptedKey(customModel.apiKey || settings.cohereApiKey),
@@ -183,7 +180,7 @@ export default class ChatModelManager {
         fetch: customModel.enableCors ? safeFetch : undefined,
         dangerouslyAllowBrowser: true,
       },
-      ...this.handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature, true),
+      ...this.handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature),
     },
   };
 
@@ -193,22 +190,15 @@ export default class ChatModelManager {
     return { ...baseConfig, ...selectedProviderConfig };
   }
 
-  private handleOpenAIExtraArgs(
-    isO1Model: boolean,
-    maxTokens: number,
-    temperature: number,
-    streaming: boolean
-  ) {
+  private handleOpenAIExtraArgs(isO1Model: boolean, maxTokens: number, temperature: number) {
     return isO1Model
       ? {
           maxCompletionTokens: maxTokens,
           temperature: 1,
-          streaming: false,
         }
       : {
           maxTokens: maxTokens,
           temperature: temperature,
-          streaming: streaming,
         };
   }
 
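Reviewer note: the behavioral core of this patch is handleOpenAIExtraArgs, which no longer accepts or forces a streaming flag, so o1 models now pick up whatever streaming setting the caller configures. Below is a minimal standalone sketch of the patched logic; the Settings interface and the buildModelConfig wrapper are illustrative stand-ins for this repo's getModelConfig plumbing, not code from the diff.

// Patched helper, as in the diff: o1 models take maxCompletionTokens and a
// fixed temperature of 1, and streaming is no longer pinned to false for them.
function handleOpenAIExtraArgs(isO1Model: boolean, maxTokens: number, temperature: number) {
  return isO1Model
    ? {
        maxCompletionTokens: maxTokens, // o1 models use this instead of maxTokens
        temperature: 1, // o1 models only accept the default temperature
      }
    : {
        maxTokens: maxTokens,
        temperature: temperature,
      };
}

// Hypothetical caller mirroring how getModelConfig spreads the helper's
// result into a provider config object.
interface Settings {
  maxTokens: number;
  temperature: number;
}

function buildModelConfig(modelName: string, settings: Settings) {
  const isO1Model = modelName.startsWith("o1"); // same prefix check the diff relies on
  return {
    modelName,
    ...handleOpenAIExtraArgs(isO1Model, settings.maxTokens, settings.temperature),
  };
}

console.log(buildModelConfig("o1-preview", { maxTokens: 4096, temperature: 0.7 }));
// -> { modelName: 'o1-preview', maxCompletionTokens: 4096, temperature: 1 }
// Note the absence of streaming: false, which the old helper injected here.

Because the streaming key is gone from the spread, any streaming flag set elsewhere in the model config now applies to o1 models as well, which is what makes the upgraded @langchain/openai streaming support take effect.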