diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts
index 894b1ae4c04..20f6caefa8d 100644
--- a/app/api/alibaba.ts
+++ b/app/api/alibaba.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Alibaba as string,
diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts
index 7a44443710f..b96637b2c8c 100644
--- a/app/api/anthropic.ts
+++ b/app/api/anthropic.ts
@@ -9,7 +9,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
@@ -122,7 +122,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Anthropic as string,
diff --git a/app/api/baidu.ts b/app/api/baidu.ts
index 0408b43c5bc..0f4e05ee86c 100644
--- a/app/api/baidu.ts
+++ b/app/api/baidu.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
import { getAccessToken } from "@/app/utils/baidu";
const serverConfig = getServerSideConfig();
@@ -104,7 +104,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Baidu as string,
diff --git a/app/api/bytedance.ts b/app/api/bytedance.ts
index cb65b106109..51b39ceb7cb 100644
--- a/app/api/bytedance.ts
+++ b/app/api/bytedance.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ByteDance as string,
diff --git a/app/api/common.ts b/app/api/common.ts
index 495a12ccdbb..b7e41fa2647 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -2,7 +2,7 @@ import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
-import { getModelProvider, isModelAvailableInServer } from "../utils/model";
+import { getModelProvider, isModelNotavailableInServer } from "../utils/model";
const serverConfig = getServerSideConfig();
@@ -118,15 +118,14 @@ export async function requestOpenai(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
- ServiceProvider.OpenAI as string,
- ) ||
- isModelAvailableInServer(
- serverConfig.customModels,
- jsonBody?.model as string,
- ServiceProvider.Azure as string,
+ [
+ ServiceProvider.OpenAI,
+ ServiceProvider.Azure,
+ jsonBody?.model as string, // support provider-unspecified model
+ ],
)
) {
return NextResponse.json(
diff --git a/app/api/glm.ts b/app/api/glm.ts
index 3625b9f7bf9..8431c5db5b0 100644
--- a/app/api/glm.ts
+++ b/app/api/glm.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ChatGLM as string,
diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts
index 8b8227dce1f..6624f74e9ab 100644
--- a/app/api/iflytek.ts
+++ b/app/api/iflytek.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
// iflytek
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Iflytek as string,
diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts
index 5bf4807e3e6..792d14d3334 100644
--- a/app/api/moonshot.ts
+++ b/app/api/moonshot.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Moonshot as string,
diff --git a/app/api/xai.ts b/app/api/xai.ts
index a4ee8b39731..4aad5e5fb3e 100644
--- a/app/api/xai.ts
+++ b/app/api/xai.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.XAI as string,
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index a7bce4fc2d0..5ca8e1071a7 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -60,9 +60,18 @@ export class GeminiProApi implements LLMApi {
extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res);
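+ // Gemini may split a single reply across several parts; gather all non-empty text instead of only the first part.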
+ const getTextFromParts = (parts: any[]) => {
+ if (!Array.isArray(parts)) return "";
+
+ return parts
+ .map((part) => part?.text || "")
+ .filter((text) => text.trim() !== "")
+ .join("\n\n");
+ };
+
return (
- res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
- res?.at(0)?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+ getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
+ getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message ||
""
);
@@ -223,7 +232,10 @@ export class GeminiProApi implements LLMApi {
},
});
}
- return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text;
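+ // Join the text of every part so multi-part streaming chunks are no longer truncated to the first part.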
+ return chunkJson?.candidates
+ ?.at(0)
+ ?.content.parts?.map((part: { text: string }) => part.text)
+ .join("\n\n");
},
// processToolMessage, include tool_calls message and tool call results
(
diff --git a/app/components/sidebar.tsx b/app/components/sidebar.tsx
index 7173cb3ef01..c52fde8d2fd 100644
--- a/app/components/sidebar.tsx
+++ b/app/components/sidebar.tsx
@@ -23,7 +23,6 @@ import {
MIN_SIDEBAR_WIDTH,
NARROW_SIDEBAR_WIDTH,
Path,
- PLUGINS,
REPO_URL,
} from "../constant";
@@ -33,6 +32,12 @@ import dynamic from "next/dynamic";
import { showConfirm, Selector } from "./ui-lib";
import clsx from "clsx";
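+// Destinations listed in the sidebar "Discovery" selector.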
+const DISCOVERY = [
+ { name: Locale.Plugin.Name, path: Path.Plugins },
+ { name: "Stable Diffusion", path: Path.Sd },
+ { name: Locale.SearchChat.Page.Title, path: Path.SearchChat },
+];
+
const ChatList = dynamic(async () => (await import("./chat-list")).ChatList, {
loading: () => null,
});
@@ -449,7 +454,7 @@ export function SideBarTail(props: {
export function SideBar(props: { className?: string }) {
useHotKey();
const { onDragStart, shouldNarrow } = useDragSideBar();
- const [showPluginSelector, setShowPluginSelector] = useState(false);
+ const [showDiscoverySelector, setshowDiscoverySelector] = useState(false);
const navigate = useNavigate();
const config = useAppConfig();
const chatStore = useChatStore();
@@ -484,21 +489,21 @@ export function SideBar(props: { className?: string }) {
<IconButton
icon={<DiscoveryIcon />}
text={shouldNarrow ? undefined : Locale.Discovery.Name}
className={styles["sidebar-bar-button"]}
- onClick={() => setShowPluginSelector(true)}
+ onClick={() => setshowDiscoverySelector(true)}
shadow
/>
- {showPluginSelector && (
+ {showDiscoverySelector && (
<Selector
items={[
- ...PLUGINS.map((item) => {
+ ...DISCOVERY.map((item) => {
return {
title: item.name,
value: item.path,
};
}),
]}
- onClose={() => setShowPluginSelector(false)}
+ onClose={() => setshowDiscoverySelector(false)}
onSelection={(s) => {
navigate(s[0], { state: { fromHome: true } });
}}
diff --git a/app/locales/cn.ts b/app/locales/cn.ts
index f101c180c61..36f4202d8bb 100644
--- a/app/locales/cn.ts
+++ b/app/locales/cn.ts
@@ -180,7 +180,7 @@ const cn = {
},
},
Lang: {
- Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
+ Name: "Language", // 注意:如果要添加新的翻译,请不要翻译此值,将它保留为 `Language`
All: "所有语言",
},
Avatar: "头像",
@@ -634,7 +634,7 @@ const cn = {
Sysmessage: "你是一个助手",
},
SearchChat: {
- Name: "搜索",
+ Name: "搜索聊天记录",
Page: {
Title: "搜索聊天记录",
Search: "输入搜索关键词",
diff --git a/app/locales/tw.ts b/app/locales/tw.ts
index fcc0fc5941d..6ac513d81c4 100644
--- a/app/locales/tw.ts
+++ b/app/locales/tw.ts
@@ -485,7 +485,7 @@ const tw = {
},
},
SearchChat: {
- Name: "搜尋",
+ Name: "搜尋聊天記錄",
Page: {
Title: "搜尋聊天記錄",
Search: "輸入搜尋關鍵詞",
diff --git a/app/utils/model.ts b/app/utils/model.ts
index a1b7df1b61e..a1a38a2f81c 100644
--- a/app/utils/model.ts
+++ b/app/utils/model.ts
@@ -202,3 +202,52 @@ export function isModelAvailableInServer(
const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
return modelTable[fullName]?.available === false;
}
+
+/**
+ * Check if the model name is a GPT-4 related model
+ *
+ * @param modelName The name of the model to check
+ * @returns True if the model is a GPT-4 related model (excluding gpt-4o-mini)
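+ *
+ * @example
+ * isGPT4Model("gpt-4-turbo"); // true
+ * isGPT4Model("gpt-4o-mini"); // false (explicitly excluded)
+ * isGPT4Model("o1-preview"); // true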
+ */
+export function isGPT4Model(modelName: string): boolean {
+ return (
+ (modelName.startsWith("gpt-4") ||
+ modelName.startsWith("chatgpt-4o") ||
+ modelName.startsWith("o1")) &&
+ !modelName.startsWith("gpt-4o-mini")
+ );
+}
+
+/**
+ * Checks whether a model is unavailable on every one of the specified providers on the server.
+ *
+ * @param {string} customModels - A string of custom models, comma-separated.
+ * @param {string} modelName - The name of the model to check.
+ * @param {string|string[]} providerNames - A string or array of provider names to check against.
+ *
+ * @returns {boolean} True if the model is not available on any of the specified providers, false otherwise.
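+ *
+ * @example
+ * // Matches the accompanying unit tests; assumes DISABLE_GPT4 is unset:
+ * isModelNotavailableInServer("", "gpt-4", "OpenAI"); // false (available by default)
+ * isModelNotavailableInServer("-all,gpt-4o-mini", "gpt-4", "OpenAI"); // true (disabled by "-all")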
+ */
+export function isModelNotavailableInServer(
+ customModels: string,
+ modelName: string,
+ providerNames: string | string[],
+): boolean {
+ // Check DISABLE_GPT4 environment variable
+ if (
+ process.env.DISABLE_GPT4 === "1" &&
+ isGPT4Model(modelName.toLowerCase())
+ ) {
+ return true;
+ }
+
+ const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
+
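+ // Normalize to an array; the model counts as available if any listed provider enables it.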
+ const providerNamesArray = Array.isArray(providerNames)
+ ? providerNames
+ : [providerNames];
+ for (const providerName of providerNamesArray) {
+ const fullName = `${modelName}@${providerName.toLowerCase()}`;
+ if (modelTable?.[fullName]?.available === true) return false;
+ }
+ return true;
+}
diff --git a/test/model-available.test.ts b/test/model-available.test.ts
new file mode 100644
index 00000000000..5c9fa9977d2
--- /dev/null
+++ b/test/model-available.test.ts
@@ -0,0 +1,80 @@
+import { isModelNotavailableInServer } from "../app/utils/model";
+
+describe("isModelNotavailableInServer", () => {
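+ // Note: a return value of true means the server should reject the model.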
+ test("test model will return false, which means the model is available", () => {
+ const customModels = "";
+ const modelName = "gpt-4";
+ const providerNames = "OpenAI";
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(false);
+ });
+
+ test("test model will return true when model is not available in custom models", () => {
+ const customModels = "-all,gpt-4o-mini";
+ const modelName = "gpt-4";
+ const providerNames = "OpenAI";
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(true);
+ });
+
+ test("should respect DISABLE_GPT4 setting", () => {
+ process.env.DISABLE_GPT4 = "1";
+ const result = isModelNotavailableInServer("", "gpt-4", "OpenAI");
+ expect(result).toBe(true);
+ });
+
+ test("should handle empty provider names", () => {
+ const result = isModelNotavailableInServer("-all,gpt-4", "gpt-4", "");
+ expect(result).toBe(true);
+ });
+
+ test("should be case insensitive for model names", () => {
+ const result = isModelNotavailableInServer("-all,GPT-4", "gpt-4", "OpenAI");
+ expect(result).toBe(true);
+ });
+
+ test("support passing multiple providers, model unavailable on one of the providers will return true", () => {
+ const customModels = "-all,gpt-4@google";
+ const modelName = "gpt-4";
+ const providerNames = ["OpenAI", "Azure"];
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(true);
+ });
+
+ // FIXME: this test case is broken and needs fixing
+ // test("support passing multiple providers, model available on one of the providers will return false", () => {
+ // const customModels = "-all,gpt-4@google";
+ // const modelName = "gpt-4";
+ // const providerNames = ["OpenAI", "Google"];
+ // const result = isModelNotavailableInServer(
+ // customModels,
+ // modelName,
+ // providerNames,
+ // );
+ // expect(result).toBe(false);
+ // });
+
+ test("test custom model without setting provider", () => {
+ const customModels = "-all,mistral-large";
+ const modelName = "mistral-large";
+ const providerNames = modelName;
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(false);
+ });
+});