diff --git a/.env.template b/.env.template index 25addf2b3e5..82f44216ab8 100644 --- a/.env.template +++ b/.env.template @@ -66,4 +66,4 @@ ANTHROPIC_API_VERSION= ANTHROPIC_URL= ### (optional) -WHITE_WEBDEV_ENDPOINTS= \ No newline at end of file +WHITE_WEBDAV_ENDPOINTS= \ No newline at end of file diff --git a/.eslintrc.json b/.eslintrc.json index d229e86f250..5b5e88e67aa 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -1,4 +1,7 @@ { "extends": "next/core-web-vitals", - "plugins": ["prettier"] + "plugins": ["prettier", "unused-imports"], + "rules": { + "unused-imports/no-unused-imports": "warn" + } } diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml index bdbb78c27c5..30d9b85b44c 100644 --- a/.github/workflows/deploy_preview.yml +++ b/.github/workflows/deploy_preview.yml @@ -49,7 +49,7 @@ jobs: run: npm install --global vercel@latest - name: Cache dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-npm with: path: ~/.npm diff --git a/README.md b/README.md index 56e7f9435dd..bf7c3059413 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,18 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 +[![Saas][Saas-image]][saas-url] [![Web][Web-image]][web-url] [![Windows][Windows-image]][download-url] [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) -[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[saas-url]: https://nextchat.dev/chat?utm_source=readme +[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge [web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge @@ -30,6 +33,8 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) +[](https://monica.im/?utm=nxcrp) + ## Enterprise Edition @@ -89,12 +94,13 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] Desktop App with tauri - [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): 
llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. - [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) -- [x] Plugins: support artifacts, network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - - [x] artifacts - - [ ] network search, network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) +- [ ] local knowledge base ## What's New +- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 Now supports Artifacts & SD - 🚀 v2.10.1 support Google Gemini Pro model. - 🚀 v2.9.11 you can use azure endpoint now. @@ -125,12 +131,13 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] 使用 tauri 打包桌面应用 - [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) - [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) -- [x] 插件机制,支持 artifacts,联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - - [x] artifacts - - [ ] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] 插件机制,支持`联网搜索`、`计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [ ] 本地知识库 ## 最新动态 +- 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 现在支持 Artifacts & SD 了。 - 🚀 v2.10.1 现在支持 Gemini Pro 模型。 - 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 @@ -168,7 +175,7 @@ We recommend that you follow the steps below to re-deploy: ### Enable Automatic Updates -> If you encounter a failure of Upstream Sync execution, please manually sync fork once. +> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code). After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour: @@ -280,6 +287,18 @@ Alibaba Cloud Api Key. Alibaba Cloud Api Url. +### `IFLYTEK_URL` (Optional) + +iflytek Api Url. + +### `IFLYTEK_API_KEY` (Optional) + +iflytek Api Key. + +### `IFLYTEK_API_SECRET` (Optional) + +iflytek Api Secret. 
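+
+For reference, `IFLYTEK_API_KEY` and `IFLYTEK_API_SECRET` are combined into a single `apiKey:apiSecret` credential on the server. A minimal sketch mirroring the `app/api/auth.ts` change in this PR (the helper name is illustrative, not part of the codebase):
+
+```typescript
+// Sketch: how the two iFLYTEK values become one bearer credential,
+// mirroring the ModelProvider.Iflytek case added to app/api/auth.ts.
+function iflytekSystemKey(apiKey: string, apiSecret: string): string {
+  return `${apiKey}:${apiSecret}`;
+}
+```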
+ ### `HIDE_USER_API_KEY` (optional) > Default: Empty @@ -324,7 +343,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name Change default model -### `WHITE_WEBDEV_ENDPOINTS` (optional) +### `WHITE_WEBDAV_ENDPOINTS` (optional) You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: - Each address must be a complete endpoint diff --git a/README_CN.md b/README_CN.md index 8c464dc098c..73fbc3f51c4 100644 --- a/README_CN.md +++ b/README_CN.md @@ -8,7 +8,7 @@ 一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 -[企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) /[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) [Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -54,7 +54,7 @@ ### 打开自动更新 -> 如果你遇到了 Upstream Sync 执行错误,请手动 Sync Fork 一次! +> 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)! 当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新: @@ -172,6 +172,20 @@ ByteDance Api Url. 阿里云(千问)Api Url. +### `IFLYTEK_URL` (可选) + +讯飞星火Api Url. + +### `IFLYTEK_API_KEY` (可选) + +讯飞星火Api Key. + +### `IFLYTEK_API_SECRET` (可选) + +讯飞星火Api Secret. + + + ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 @@ -188,7 +202,7 @@ ByteDance Api Url. 如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 -### `WHITE_WEBDEV_ENDPOINTS` (可选) +### `WHITE_WEBDAV_ENDPOINTS` (可选) 如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: - 每一个地址必须是一个完整的 endpoint diff --git a/README_JA.md b/README_JA.md index 6b8caadae6c..416928c2647 100644 --- a/README_JA.md +++ b/README_JA.md @@ -5,7 +5,7 @@ ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。 -[企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) [Zeaburでデプロイ](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Zeaburでデプロイ](https://zeabur.com/templates/ZBUEFA) [Gitpodで開く](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -54,7 +54,7 @@ ### 自動更新を開く -> Upstream Sync の実行エラーが発生した場合は、手動で Sync Fork してください! +> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください! 
プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります: @@ -193,7 +193,7 @@ ByteDance API の URL。 リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。 -### `WHITE_WEBDEV_ENDPOINTS` (オプション) +### `WHITE_WEBDAV_ENDPOINTS` (オプション) アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件: - 各アドレスは完全なエンドポイントでなければなりません。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 6d028ac364d..dffb3e9daa4 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -1,5 +1,5 @@ import { ApiPath } from "@/app/constant"; -import { NextRequest, NextResponse } from "next/server"; +import { NextRequest } from "next/server"; import { handle as openaiHandler } from "../../openai"; import { handle as azureHandler } from "../../azure"; import { handle as googleHandler } from "../../google"; @@ -9,6 +9,8 @@ import { handle as bytedanceHandler } from "../../bytedance"; import { handle as alibabaHandler } from "../../alibaba"; import { handle as moonshotHandler } from "../../moonshot"; import { handle as stabilityHandler } from "../../stability"; +import { handle as iflytekHandler } from "../../iflytek"; +import { handle as proxyHandler } from "../../proxy"; async function handle( req: NextRequest, @@ -34,8 +36,12 @@ async function handle( return moonshotHandler(req, { params }); case ApiPath.Stability: return stabilityHandler(req, { params }); - default: + case ApiPath.Iflytek: + return iflytekHandler(req, { params }); + case ApiPath.OpenAI: return openaiHandler(req, { params }); + default: + return proxyHandler(req, { params }); } } diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts index 675d9f301aa..894b1ae4c04 100644 --- a/app/api/alibaba.ts +++ b/app/api/alibaba.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Alibaba, ALIBABA_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts index 3d49f4c88c4..7a44443710f 100644 --- a/app/api/anthropic.ts +++ b/app/api/anthropic.ts @@ -3,7 +3,6 @@ import { ANTHROPIC_BASE_URL, Anthropic, ApiPath, - DEFAULT_MODELS, ServiceProvider, ModelProvider, } from "@/app/constant"; @@ -98,6 +97,7 @@ async function request(req: NextRequest) { headers: { "Content-Type": "application/json", "Cache-Control": "no-store", + "anthropic-dangerous-direct-browser-access": "true", [authHeaderName]: authValue, "anthropic-version": req.headers.get("anthropic-version") || diff --git a/app/api/auth.ts b/app/api/auth.ts index ff52dcd6ee3..95965ceec2d 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -88,6 +88,10 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { case ModelProvider.Moonshot: systemApiKey = serverConfig.moonshotApiKey; break; + case ModelProvider.Iflytek: + systemApiKey = + serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret; + break; case ModelProvider.GPT: default: if (req.nextUrl.pathname.includes("azure/deployments")) { diff --git a/app/api/azure.ts b/app/api/azure.ts index e2cb0c7e66b..39d872e8cf8 100644 --- a/app/api/azure.ts +++ b/app/api/azure.ts @@ -1,4 +1,3 @@ -import { 
getServerSideConfig } from "@/app/config/server"; import { ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; diff --git a/app/api/baidu.ts b/app/api/baidu.ts index f4315d186da..0408b43c5bc 100644 --- a/app/api/baidu.ts +++ b/app/api/baidu.ts @@ -3,7 +3,6 @@ import { BAIDU_BASE_URL, ApiPath, ModelProvider, - BAIDU_OATUH_URL, ServiceProvider, } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; diff --git a/app/api/common.ts b/app/api/common.ts index 24453dd9635..b4c792d6ff0 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,11 +1,6 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { - DEFAULT_MODELS, - OPENAI_BASE_URL, - GEMINI_BASE_URL, - ServiceProvider, -} from "../constant"; +import { OPENAI_BASE_URL, ServiceProvider } from "../constant"; import { isModelAvailableInServer } from "../utils/model"; import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; @@ -32,10 +27,7 @@ export async function requestOpenai(req: NextRequest) { authHeaderName = "Authorization"; } - let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( - "/api/openai/", - "", - ); + let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", ""); let baseUrl = (isAzure ? serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; diff --git a/app/api/google.ts b/app/api/google.ts index 98fe469bfb7..707892c33d0 100644 --- a/app/api/google.ts +++ b/app/api/google.ts @@ -1,12 +1,7 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "./auth"; import { getServerSideConfig } from "@/app/config/server"; -import { - ApiPath, - GEMINI_BASE_URL, - Google, - ModelProvider, -} from "@/app/constant"; +import { ApiPath, GEMINI_BASE_URL, ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; const serverConfig = getServerSideConfig(); @@ -28,7 +23,8 @@ export async function handle( }); } - const bearToken = req.headers.get("Authorization") ?? ""; + const bearToken = + req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || ""; const token = bearToken.trim().replaceAll("Bearer ", "").trim(); const apiKey = token ? token : serverConfig.googleApiKey; @@ -96,8 +92,8 @@ async function request(req: NextRequest, apiKey: string) { }, 10 * 60 * 1000, ); - const fetchUrl = `${baseUrl}${path}?key=${apiKey}${ - req?.nextUrl?.searchParams?.get("alt") === "sse" ? "&alt=sse" : "" + const fetchUrl = `${baseUrl}${path}${ + req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : "" }`; console.log("[Fetch Url] ", fetchUrl); @@ -105,6 +101,9 @@ async function request(req: NextRequest, apiKey: string) { headers: { "Content-Type": "application/json", "Cache-Control": "no-store", + "x-goog-api-key": + req.headers.get("x-goog-api-key") || + (req.headers.get("Authorization") ?? 
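+          // no x-goog-api-key header? fall back to Authorization, stripping any "Bearer " prefix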
"").replace("Bearer ", ""), }, method: req.method, body: req.body, diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts new file mode 100644 index 00000000000..8b8227dce1f --- /dev/null +++ b/app/api/iflytek.ts @@ -0,0 +1,129 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + IFLYTEK_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +// iflytek + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Iflytek Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Iflytek); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Iflytek] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // iflytek use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, ""); + + let baseUrl = serverConfig.iflytekUrl || IFLYTEK_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Iflytek as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Iflytek] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts index 247dd618321..5bf4807e3e6 100644 --- a/app/api/moonshot.ts +++ b/app/api/moonshot.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Moonshot, MOONSHOT_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); diff --git a/app/api/openai.ts b/app/api/openai.ts index 6d11d679215..7dfd84e1785 100644 --- a/app/api/openai.ts +++ b/app/api/openai.ts @@ -13,7 +13,9 @@ function getModels(remoteModelRes: OpenAIListModelResponse) { if (config.disableGPT4) { remoteModelRes.data = remoteModelRes.data.filter( - (m) => !m.id.startsWith("gpt-4"), + (m) => + !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o")) || + m.id.startsWith("gpt-4o-mini"), ); } diff --git a/app/api/proxy.ts b/app/api/proxy.ts new file mode 100644 index 00000000000..731003aa1ea --- /dev/null +++ b/app/api/proxy.ts @@ -0,0 +1,75 @@ +import { NextRequest, NextResponse } from "next/server"; + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Proxy Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + // remove path params from searchParams + req.nextUrl.searchParams.delete("path"); + req.nextUrl.searchParams.delete("provider"); + + const subpath = params.path.join("/"); + const fetchUrl = `${req.headers.get( + "x-base-url", + )}/${subpath}?${req.nextUrl.searchParams.toString()}`; + const skipHeaders = ["connection", "host", "origin", "referer", "cookie"]; + const headers = new Headers( + Array.from(req.headers.entries()).filter((item) => { + if ( + item[0].indexOf("x-") > -1 || + item[0].indexOf("sec-") > -1 || + skipHeaders.includes(item[0]) + ) { + return false; + } + return true; + }), + ); + const controller = new AbortController(); + const fetchOptions: RequestInit = { + headers, + method: req.method, + body: req.body, + // to fix #2485: 
https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + // The latest version of the OpenAI API forced the content-encoding to be "br" in json response + // So if the streaming is disabled, we need to remove the content-encoding header + // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header + // The browser will try to decode the response with brotli and fail + newHeaders.delete("content-encoding"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/tencent/route.ts b/app/api/tencent/route.ts index 885909e7a75..fc4f8c79edf 100644 --- a/app/api/tencent/route.ts +++ b/app/api/tencent/route.ts @@ -1,15 +1,8 @@ import { getServerSideConfig } from "@/app/config/server"; -import { - TENCENT_BASE_URL, - ApiPath, - ModelProvider, - ServiceProvider, - Tencent, -} from "@/app/constant"; +import { TENCENT_BASE_URL, ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; import { getHeader } from "@/app/utils/tencent"; const serverConfig = getServerSideConfig(); diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 1f58a884fe3..bb7743bda40 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -6,7 +6,7 @@ const config = getServerSideConfig(); const mergedAllowedWebDavEndpoints = [ ...internalAllowedWebDavEndpoints, - ...config.allowedWebDevEndpoints, + ...config.allowedWebDavEndpoints, ].filter((domain) => Boolean(domain.trim())); const normalizeUrl = (url: string) => { @@ -29,6 +29,7 @@ async function handle( const requestUrl = new URL(req.url); let endpoint = requestUrl.searchParams.get("endpoint"); + let proxy_method = requestUrl.searchParams.get("proxy_method") || req.method; // Validate the endpoint to prevent potential SSRF attacks if ( @@ -65,7 +66,11 @@ async function handle( const targetPath = `${endpoint}${endpointPath}`; // only allow MKCOL, GET, PUT - if (req.method !== "MKCOL" && req.method !== "GET" && req.method !== "PUT") { + if ( + proxy_method !== "MKCOL" && + proxy_method !== "GET" && + proxy_method !== "PUT" + ) { return NextResponse.json( { error: true, @@ -78,7 +83,7 @@ async function handle( } // for MKCOL request, only allow request ${folder} - if (req.method === "MKCOL" && !targetPath.endsWith(folder)) { + if (proxy_method === "MKCOL" && !targetPath.endsWith(folder)) { return NextResponse.json( { error: true, @@ -91,7 +96,7 @@ async function handle( } // for GET request, only allow request ending with fileName - if (req.method === "GET" && !targetPath.endsWith(fileName)) { + if (proxy_method === "GET" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -104,7 +109,7 @@ async function handle( } // for PUT request, only allow 
request ending with fileName - if (req.method === "PUT" && !targetPath.endsWith(fileName)) { + if (proxy_method === "PUT" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -118,7 +123,7 @@ async function handle( const targetUrl = targetPath; - const method = req.method; + const method = proxy_method || req.method; const shouldNotHaveBody = ["get", "head"].includes( method?.toLowerCase() ?? "", ); @@ -143,7 +148,7 @@ async function handle( "[Any Proxy]", targetUrl, { - method: req.method, + method: method, }, { status: fetchResult?.status, diff --git a/app/client/api.ts b/app/client/api.ts index dba97fff09f..7a242ea99dd 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -1,11 +1,16 @@ import { getClientConfig } from "../config/client"; import { ACCESS_CODE_PREFIX, - Azure, ModelProvider, ServiceProvider, } from "../constant"; -import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; +import { + ChatMessageTool, + ChatMessage, + ModelType, + useAccessStore, + useChatStore, +} from "../store"; import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai"; import { GeminiProApi } from "./platforms/google"; import { ClaudeApi } from "./platforms/anthropic"; @@ -14,11 +19,13 @@ import { DoubaoApi } from "./platforms/bytedance"; import { QwenApi } from "./platforms/alibaba"; import { HunyuanApi } from "./platforms/tencent"; import { MoonshotApi } from "./platforms/moonshot"; +import { SparkApi } from "./platforms/iflytek"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; +export const TTSModels = ["tts-1", "tts-1-hd"] as const; export type ChatModel = ModelType; export interface MultimodalContent { @@ -43,6 +50,17 @@ export interface LLMConfig { presence_penalty?: number; frequency_penalty?: number; size?: DalleRequestPayload["size"]; + quality?: DalleRequestPayload["quality"]; + style?: DalleRequestPayload["style"]; +} + +export interface SpeechOptions { + model: string; + input: string; + voice: string; + response_format?: string; + speed?: number; + onController?: (controller: AbortController) => void; } export interface ChatOptions { @@ -53,6 +71,8 @@ export interface ChatOptions { onFinish: (message: string) => void; onError?: (err: Error) => void; onController?: (controller: AbortController) => void; + onBeforeTool?: (tool: ChatMessageTool) => void; + onAfterTool?: (tool: ChatMessageTool) => void; } export interface LLMUsage { @@ -77,6 +97,7 @@ export interface LLMModelProvider { export abstract class LLMApi { abstract chat(options: ChatOptions): Promise; + abstract speech(options: SpeechOptions): Promise; abstract usage(): Promise; abstract models(): Promise; } @@ -128,6 +149,9 @@ export class ClientApi { case ModelProvider.Moonshot: this.llm = new MoonshotApi(); break; + case ModelProvider.Iflytek: + this.llm = new SparkApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -192,25 +216,29 @@ export function validString(x: string): boolean { return x?.length > 0; } -export function getHeaders() { +export function getHeaders(ignoreHeaders: boolean = false) { const accessStore = useAccessStore.getState(); const chatStore = useChatStore.getState(); - const headers: Record = { - "Content-Type": "application/json", - Accept: "application/json", - }; + let headers: Record = {}; + if (!ignoreHeaders) { + headers = { + "Content-Type": "application/json", + Accept: "application/json", + }; + } const 
clientConfig = getClientConfig(); function getConfig() { const modelConfig = chatStore.currentSession().mask.modelConfig; - const isGoogle = modelConfig.providerName == ServiceProvider.Google; + const isGoogle = modelConfig.providerName === ServiceProvider.Google; const isAzure = modelConfig.providerName === ServiceProvider.Azure; const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic; const isBaidu = modelConfig.providerName == ServiceProvider.Baidu; const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance; const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; + const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -224,6 +252,10 @@ export function getHeaders() { ? accessStore.alibabaApiKey : isMoonshot ? accessStore.moonshotApiKey + : isIflytek + ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret + ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret + : "" : accessStore.openaiApiKey; return { isGoogle, @@ -233,13 +265,20 @@ export function getHeaders() { isByteDance, isAlibaba, isMoonshot, + isIflytek, apiKey, isEnabledAccessControl, }; } function getAuthHeader(): string { - return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization"; + return isAzure + ? "api-key" + : isAnthropic + ? "x-api-key" + : isGoogle + ? "x-goog-api-key" + : "Authorization"; } const { @@ -250,14 +289,15 @@ export function getHeaders() { apiKey, isEnabledAccessControl, } = getConfig(); - // when using google api in app, not set auth header - if (isGoogle && clientConfig?.isApp) return headers; // when using baidu api in app, not set auth header if (isBaidu && clientConfig?.isApp) return headers; const authHeader = getAuthHeader(); - const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic); + const bearerToken = getBearerToken( + apiKey, + isAzure || isAnthropic || isGoogle, + ); if (bearerToken) { headers[authHeader] = bearerToken; @@ -286,6 +326,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.Hunyuan); case ServiceProvider.Moonshot: return new ClientApi(ModelProvider.Moonshot); + case ServiceProvider.Iflytek: + return new ClientApi(ModelProvider.Iflytek); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index d5fa3042fc1..4ade9ebb98f 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -12,6 +12,7 @@ import { getHeaders, LLMApi, LLMModel, + SpeechOptions, MultimodalContent, } from "../api"; import Locale from "../../locales"; @@ -83,6 +84,10 @@ export class QwenApi implements LLMApi { return res?.output?.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index b079ba1ada2..7826838a61e 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,18 +1,18 @@ -import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { Anthropic, ApiPath } from "@/app/constant"; +import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api"; +import { + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, + ChatMessageTool, +} from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; - -import Locale from "../../locales"; -import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, isVisionModel } from "@/app/utils"; -import { preProcessImageContent } from "@/app/utils/chat"; +import { preProcessImageContent, stream } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; +import { RequestPayload } from "./openai"; export type MultiBlockContent = { type: "image" | "text"; @@ -73,6 +73,10 @@ const ClaudeMapper = { const keys = ["claude-2, claude-instant-1"]; export class ClaudeApi implements LLMApi { + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + extractMessage(res: any) { console.log("[Response] claude response: ", res); @@ -191,112 +195,126 @@ export class ClaudeApi implements LLMApi { const controller = new AbortController(); options.onController?.(controller); - const payload = { - method: "POST", - body: JSON.stringify(requestBody), - signal: controller.signal, - headers: { - ...getHeaders(), // get common headers - "anthropic-version": accessStore.anthropicApiVersion, - // do not send `anthropicApiKey` in browser!!! 
- // Authorization: getAuthKey(accessStore.anthropicApiKey), - }, - }; - if (shouldStream) { - try { - const context = { - text: "", - finished: false, - }; - - const finish = () => { - if (!context.finished) { - options.onFinish(context.text); - context.finished = true; - } - }; - - controller.signal.onabort = finish; - fetchEventSource(path, { - ...payload, - async onopen(res) { - const contentType = res.headers.get("content-type"); - console.log("response content type: ", contentType); - - if (contentType?.startsWith("text/plain")) { - context.text = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [context.text]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - context.text = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - let chunkJson: - | undefined - | { - type: "content_block_delta" | "content_block_stop"; - delta?: { - type: "text_delta"; - text: string; - }; - index: number; + let index = -1; + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + path, + requestBody, + { + ...getHeaders(), + "anthropic-version": accessStore.anthropicApiVersion, + }, + // @ts-ignore + tools.map((tool) => ({ + name: tool?.function?.name, + description: tool?.function?.description, + input_schema: tool?.function?.parameters, + })), + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + let chunkJson: + | undefined + | { + type: "content_block_delta" | "content_block_stop"; + content_block?: { + type: "tool_use"; + id: string; + name: string; }; - try { - chunkJson = JSON.parse(msg.data); - } catch (e) { - console.error("[Response] parse error", msg.data); - } - - if (!chunkJson || chunkJson.type === "content_block_stop") { - return finish(); - } - - const { delta } = chunkJson; - if (delta?.text) { - context.text += delta.text; - options.onUpdate?.(context.text, delta.text); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); - } + delta?: { + type: "text_delta" | "input_json_delta"; + text?: string; + partial_json?: string; + }; + index: number; + }; + chunkJson = JSON.parse(text); + + if (chunkJson?.content_block?.type == "tool_use") { + index += 1; + const id = chunkJson?.content_block.id; + const name = chunkJson?.content_block.name; + runTools.push({ + id, + type: "function", + function: { + name, + arguments: "", + }, + }); + } + if ( + chunkJson?.delta?.type == "input_json_delta" && + chunkJson?.delta?.partial_json + ) { + // @ts-ignore + runTools[index]["function"]["arguments"] += + chunkJson?.delta?.partial_json; + } + return chunkJson?.delta?.text; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // reset index value + index = -1; + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + 
requestPayload?.messages?.length, + 0, + { + role: "assistant", + content: toolCallMessage.tool_calls.map( + (tool: ChatMessageTool) => ({ + type: "tool_use", + id: tool.id, + name: tool?.function?.name, + input: tool?.function?.arguments + ? JSON.parse(tool?.function?.arguments) + : {}, + }), + ), + }, + // @ts-ignore + ...toolCallResult.map((result) => ({ + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: result.tool_call_id, + content: result.content, + }, + ], + })), + ); + }, + options, + ); } else { + const payload = { + method: "POST", + body: JSON.stringify(requestBody), + signal: controller.signal, + headers: { + ...getHeaders(), // get common headers + "anthropic-version": accessStore.anthropicApiVersion, + // do not send `anthropicApiKey` in browser!!! + // Authorization: getAuthKey(accessStore.anthropicApiKey), + }, + }; + try { controller.signal.onabort = () => options.onFinish(""); diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 188b78bf963..c360417c602 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -14,6 +14,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -75,18 +76,30 @@ export class ErnieApi implements LLMApi { return [baseUrl, path].join("/"); } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ - role: v.role, + // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function", + role: v.role === "system" ? "user" : v.role, content: getMessageTextContent(v), })); // "error_code": 336006, "error_msg": "the length of messages must be an odd number", if (messages.length % 2 === 0) { - messages.unshift({ - role: "user", - content: " ", - }); + if (messages.at(0)?.role === "user") { + messages.splice(1, 0, { + role: "assistant", + content: " ", + }); + } else { + messages.unshift({ + role: "user", + content: " ", + }); + } } const modelConfig = { diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 7677cafe12b..a6e2d426ee3 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -13,6 +13,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -77,6 +78,10 @@ export class DoubaoApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 12d8846357a..3c2607271bf 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,5 +1,12 @@ import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + SpeechOptions, +} from "../api"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; @@ -41,10 +48,6 @@ export class GeminiProApi implements LLMApi { let chatPath = [baseUrl, path].join("/"); chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse"; - // if chatPath.startsWith('http') then add key in query string - if (chatPath.startsWith("http") && accessStore.googleApiKey) { - chatPath += `&key=${accessStore.googleApiKey}`; - } return chatPath; } extractMessage(res: any) { @@ -56,6 +59,10 @@ export class GeminiProApi implements LLMApi { "" ); } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions): Promise { const apiClient = this; let multimodal = false; diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts new file mode 100644 index 00000000000..3931672e661 --- /dev/null +++ b/app/client/platforms/iflytek.ts @@ -0,0 +1,250 @@ +"use client"; +import { + ApiPath, + DEFAULT_API_HOST, + Iflytek, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +import { RequestPayload } from "./openai"; + +export class SparkApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.iflytekUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.Iflytek; + baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + console.log("[Request] Spark payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Iflytek.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // Make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // Animate response text to make it look smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // Start animation + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log("[Spark] request response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + // Handle different error scenarios + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + extraInfo = Locale.Error.Unauthorized; + } + + options.onError?.( + new Error( + `Request failed with status ${res.status}: ${extraInfo}`, + ), + ); + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + + if (delta) { 
+ remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text); + options.onError?.(new Error(`Failed to parse response: ${text}`)); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + if (!res.ok) { + const errorText = await res.text(); + options.onError?.( + new Error(`Request failed with status ${res.status}: ${errorText}`), + ); + return; + } + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index 7d257ccb2e6..6b197974571 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -3,34 +3,27 @@ import { ApiPath, DEFAULT_API_HOST, - DEFAULT_MODELS, Moonshot, REQUEST_TIMEOUT_MS, - ServiceProvider, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { collectModelsWithDefaultModel } from "@/app/utils/model"; -import { preProcessImageContent } from "@/app/utils/chat"; -import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; - +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; import { ChatOptions, getHeaders, LLMApi, LLMModel, - LLMUsage, - MultimodalContent, + SpeechOptions, } from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; - -import { OpenAIListModelResponse, RequestPayload } from "./openai"; +import { RequestPayload } from "./openai"; export class MoonshotApi implements LLMApi { private disableListModels = true; @@ -66,6 +59,10 @@ export class MoonshotApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { @@ -116,115 +113,66 @@ export class MoonshotApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); - } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const choices = json.choices as Array<{ - delta: { content: string }; - }>; - const delta = choices[0]?.delta?.content; - const textmoderation = json?.prompt_filter_results; - - if (delta) { - remainText += delta; - } - } catch (e) { - console.error("[Request] parse error", text, msg); } + return 
choices[0]?.delta?.content; }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); }, - openWhenHidden: true, - }); + options, + ); } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 98c6f406afa..0a8d6203ae5 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -9,15 +9,22 @@ import { REQUEST_TIMEOUT_MS, ServiceProvider, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { + ChatMessageTool, + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, +} from "@/app/store"; import { collectModelsWithDefaultModel } from "@/app/utils/model"; import { preProcessImageContent, uploadImage, base64Image2Blob, + stream, } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; -import { DalleSize } from "@/app/typing"; +import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing"; import { ChatOptions, @@ -26,17 +33,12 @@ import { LLMModel, LLMUsage, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, - getMessageImages, isVisionModel, isDalle3 as _isDalle3, } from "@/app/utils"; @@ -70,6 +72,8 @@ export interface DalleRequestPayload { response_format: "url" | "b64_json"; n: number; size: DalleSize; + quality: DalleQuality; + style: DalleStyle; } export class ChatGPTApi implements LLMApi { @@ -138,6 +142,44 @@ export class ChatGPTApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
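       // fall back to the raw response when there is no chat-style message content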
res; } + async speech(options: SpeechOptions): Promise { + const requestPayload = { + model: options.model, + input: options.input, + voice: options.voice, + response_format: options.response_format, + speed: options.speed, + }; + + console.log("[Request] openai speech payload: ", requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const speechPath = this.path(OpenaiPath.SpeechPath); + const speechPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + const res = await fetch(speechPath, speechPayload); + clearTimeout(requestTimeoutId); + return await res.arrayBuffer(); + } catch (e) { + console.log("[Request] failed to make a speech request", e); + throw e; + } + } + async chat(options: ChatOptions) { const modelConfig = { ...useAppConfig.getState().modelConfig, @@ -151,6 +193,7 @@ export class ChatGPTApi implements LLMApi { let requestPayload: RequestPayload | DalleRequestPayload; const isDalle3 = _isDalle3(options.config.model); + const isO1 = options.config.model.startsWith("o1"); if (isDalle3) { const prompt = getMessageTextContent( options.messages.slice(-1)?.pop() as any, @@ -162,6 +205,8 @@ export class ChatGPTApi implements LLMApi { response_format: "b64_json", // using b64_json, and save image in CacheStorage n: 1, size: options.config?.size ?? "1024x1024", + quality: options.config?.quality ?? "standard", + style: options.config?.style ?? "vivid", }; } else { const visionModel = isVisionModel(options.config.model); @@ -170,30 +215,32 @@ export class ChatGPTApi implements LLMApi { const content = visionModel ? await preProcessImageContent(v.content) : getMessageTextContent(v); - messages.push({ role: v.role, content }); + if (!(isO1 && v.role === "system")) + messages.push({ role: v.role, content }); } + // O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet. requestPayload = { messages, - stream: options.config.stream, + stream: !isO1 ? options.config.stream : false, model: modelConfig.model, - temperature: modelConfig.temperature, - presence_penalty: modelConfig.presence_penalty, - frequency_penalty: modelConfig.frequency_penalty, - top_p: modelConfig.top_p, + temperature: !isO1 ? modelConfig.temperature : 1, + presence_penalty: !isO1 ? modelConfig.presence_penalty : 0, + frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0, + top_p: !isO1 ? modelConfig.top_p : 1, // max_tokens: Math.max(modelConfig.max_tokens, 1024), // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. }; // add max_tokens to vision model - if (visionModel && modelConfig.model.includes("preview")) { + if (visionModel) { requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); } } console.log("[Request] openai payload: ", requestPayload); - const shouldStream = !isDalle3 && !!options.config.stream; + const shouldStream = !isDalle3 && !!options.config.stream && !isO1; const controller = new AbortController(); options.onController?.(controller); @@ -229,143 +276,85 @@ export class ChatGPTApi implements LLMApi { isDalle3 ? 
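          // dalle3 requests go to the images endpoint, everything else to chat completions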
OpenaiPath.ImagePath : OpenaiPath.ChatPath, ); } - const chatPayload = { - method: "POST", - body: JSON.stringify(requestPayload), - signal: controller.signal, - headers: getHeaders(), - }; - - // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. - ); - if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); - } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const choices = json.choices as Array<{ - delta: { content: string }; - }>; - const delta = choices[0]?.delta?.content; - const textmoderation = json?.prompt_filter_results; - - if (delta) { - remainText += delta; - } - - if ( - textmoderation && - textmoderation.length > 0 && - ServiceProvider.Azure - ) { - const contentFilterResults = - textmoderation[0]?.content_filter_results; - console.log( - `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, - contentFilterResults, - ); + let index = -1; + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + // console.log("getAsTools", tools, funcs); + stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const id = 
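The `parseSSE` callback accumulates streamed `tool_calls` deltas by convention: a delta that carries an `id` opens a new tool call, and deltas without an `id` append argument fragments to the most recent call. A self-contained sketch of that accumulation — unlike the diff, this version guards the continuation case instead of using `@ts-ignore`:

```ts
type ToolCallDelta = {
  id?: string;
  type?: string;
  function?: { name?: string; arguments?: string };
};

type RunningToolCall = {
  id: string;
  type?: string;
  function: { name: string; arguments: string };
};

// Fold a sequence of per-chunk tool_calls deltas into complete tool calls.
function accumulateToolCalls(chunks: ToolCallDelta[][]): RunningToolCall[] {
  const runTools: RunningToolCall[] = [];
  let index = -1;
  for (const chunk of chunks) {
    const call = chunk[0];
    if (!call) continue;
    if (call.id) {
      // a new tool call begins
      index += 1;
      runTools.push({
        id: call.id,
        type: call.type,
        function: {
          name: call.function?.name ?? "",
          arguments: call.function?.arguments ?? "",
        },
      });
    } else if (index >= 0) {
      // continuation chunk: append streamed argument text to the current call
      runTools[index].function.arguments += call.function?.arguments ?? "";
    }
  }
  return runTools;
}
```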
tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + index += 1; + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; } - } catch (e) { - console.error("[Request] parse error", text, msg); } + return choices[0]?.delta?.content; }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // reset index value + index = -1; + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); }, - openWhenHidden: true, - }); + options, + ); } else { + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. + ); + const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); @@ -457,7 +446,9 @@ export class ChatGPTApi implements LLMApi { }); const resJson = (await res.json()) as OpenAIListModelResponse; - const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-")); + const chatModels = resJson.data?.filter( + (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"), + ); console.log("[Models]", chatModels); if (!chatModels) { diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index e9e49d3f0b0..3e8f1a45957 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -8,6 +8,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -89,10 +90,15 @@ export class HunyuanApi implements LLMApi { return res.Choices?.at(0)?.Message?.Content ?? ""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const visionModel = isVisionModel(options.config.model); - const messages = options.messages.map((v) => ({ - role: v.role, + const messages = options.messages.map((v, index) => ({ + // "Messages 中 system 角色必须位于列表的最开始" + role: index !== 0 && v.role === "system" ? "user" : v.role, content: visionModel ? 
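The Chinese comment in the tencent.ts hunk below quotes the Hunyuan API constraint that a `system` role must appear only at the very beginning of `Messages`, so any later system message is downgraded to `user` before the request is built. A minimal sketch of that remapping:

```ts
type ChatMessage = { role: "system" | "user" | "assistant"; content: string };

// Hunyuan requires the system role to sit at the very start of Messages,
// so system messages at any later position are remapped to "user".
function remapRoles(messages: ChatMessage[]): ChatMessage[] {
  return messages.map((v, index) => ({
    ...v,
    role: index !== 0 && v.role === "system" ? "user" : v.role,
  }));
}
```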
v.content : getMessageTextContent(v), })); diff --git a/app/command.ts b/app/command.ts index bea4e06f381..aec73ef53d6 100644 --- a/app/command.ts +++ b/app/command.ts @@ -38,6 +38,7 @@ interface ChatCommands { next?: Command; prev?: Command; clear?: Command; + fork?: Command; del?: Command; } diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx index 326891e736a..ce187fbcb2c 100644 --- a/app/components/artifacts.tsx +++ b/app/components/artifacts.tsx @@ -1,6 +1,12 @@ -import { useEffect, useState, useRef, useMemo } from "react"; +import { + useEffect, + useState, + useRef, + useMemo, + forwardRef, + useImperativeHandle, +} from "react"; import { useParams } from "react-router"; -import { useWindowSize } from "@/app/utils"; import { IconButton } from "./button"; import { nanoid } from "nanoid"; import ExportIcon from "../icons/share.svg"; @@ -8,6 +14,7 @@ import CopyIcon from "../icons/copy.svg"; import DownloadIcon from "../icons/download.svg"; import GithubIcon from "../icons/github.svg"; import LoadingButtonIcon from "../icons/loading.svg"; +import ReloadButtonIcon from "../icons/reload.svg"; import Locale from "../locales"; import { Modal, showToast } from "./ui-lib"; import { copyToClipboard, downloadAs } from "../utils"; @@ -15,73 +22,89 @@ import { Path, ApiPath, REPO_URL } from "@/app/constant"; import { Loading } from "./home"; import styles from "./artifacts.module.scss"; -export function HTMLPreview(props: { +type HTMLPreviewProps = { code: string; autoHeight?: boolean; height?: number | string; onLoad?: (title?: string) => void; -}) { - const ref = useRef(null); - const frameId = useRef(nanoid()); - const [iframeHeight, setIframeHeight] = useState(600); - const [title, setTitle] = useState(""); - /* - * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an - * 1. using srcdoc - * 2. using src with dataurl: - * easy to share - * length limit (Data URIs cannot be larger than 32,768 characters.) - */ +}; - useEffect(() => { - const handleMessage = (e: any) => { - const { id, height, title } = e.data; - setTitle(title); - if (id == frameId.current) { - setIframeHeight(height); - } - }; - window.addEventListener("message", handleMessage); - return () => { - window.removeEventListener("message", handleMessage); - }; - }, []); +export type HTMLPreviewHander = { + reload: () => void; +}; - const height = useMemo(() => { - if (!props.autoHeight) return props.height || 600; - if (typeof props.height === "string") { - return props.height; - } - const parentHeight = props.height || 600; - return iframeHeight + 40 > parentHeight ? parentHeight : iframeHeight + 40; - }, [props.autoHeight, props.height, iframeHeight]); +export const HTMLPreview = forwardRef( + function HTMLPreview(props, ref) { + const iframeRef = useRef(null); + const [frameId, setFrameId] = useState(nanoid()); + const [iframeHeight, setIframeHeight] = useState(600); + const [title, setTitle] = useState(""); + /* + * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an + * 1. using srcdoc + * 2. using src with dataurl: + * easy to share + * length limit (Data URIs cannot be larger than 32,768 characters.) 
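The artifacts.tsx refactor below converts `HTMLPreview` to `forwardRef` and exposes an imperative `reload()` handle via `useImperativeHandle`; reloading works by generating a fresh frame id. A minimal sketch of the pattern, assuming the iframe is keyed on that id so a new id forces a remount (the real component wires up `srcDoc` injection and height tracking as well):

```tsx
import { forwardRef, useImperativeHandle, useState } from "react";
import { nanoid } from "nanoid";

export type PreviewHandle = { reload: () => void };

const Preview = forwardRef<PreviewHandle, { code: string }>(
  function Preview(props, ref) {
    const [frameId, setFrameId] = useState(nanoid());

    // Expose reload() to the parent: a new id -> new key -> iframe remounts.
    useImperativeHandle(ref, () => ({
      reload: () => setFrameId(nanoid()),
    }));

    return <iframe key={frameId} srcDoc={props.code} />;
  },
);
```

A parent can then hold `const previewRef = useRef<PreviewHandle>(null)` and call `previewRef.current?.reload()` from a toolbar button (the new `ReloadButtonIcon` import suggests exactly this use).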
+ */ - const srcDoc = useMemo(() => { - const script = ``; - if (props.code.includes("")) { - props.code.replace("", "" + script); - } - return props.code + script; - }, [props.code]); + useEffect(() => { + const handleMessage = (e: any) => { + const { id, height, title } = e.data; + setTitle(title); + if (id == frameId) { + setIframeHeight(height); + } + }; + window.addEventListener("message", handleMessage); + return () => { + window.removeEventListener("message", handleMessage); + }; + }, [frameId]); - const handleOnLoad = () => { - if (props?.onLoad) { - props.onLoad(title); - } - }; + useImperativeHandle(ref, () => ({ + reload: () => { + setFrameId(nanoid()); + }, + })); - return ( -
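The `message` effect above implements a small iframe-to-parent protocol: the embedded document is assumed to post `{ id, height, title }`, and the parent applies a height only when the `id` matches its own `frameId`, so multiple previews do not interfere. A sketch of that handler as a standalone factory (`makeMessageHandler` and `FrameMessage` are illustrative names, not part of the codebase):

```ts
type FrameMessage = { id?: string; height?: number; title?: string };

// Build a window "message" handler scoped to one preview frame.
function makeMessageHandler(
  frameId: string,
  onHeight: (height: number) => void,
  onTitle: (title: string) => void,
) {
  return (e: MessageEvent<FrameMessage>) => {
    const { id, height, title } = e.data;
    if (title !== undefined) onTitle(title);
    // ignore messages posted by other frames
    if (id === frameId && typeof height === "number") onHeight(height);
  };
}
```

Registered as `window.addEventListener("message", makeMessageHandler(frameId, setIframeHeight, setTitle))`, this mirrors the effect body in the diff, which re-subscribes whenever `frameId` changes.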