diff --git a/.eslintignore b/.eslintignore deleted file mode 100644 index 08975255475..00000000000 --- a/.eslintignore +++ /dev/null @@ -1 +0,0 @@ -public/serviceWorker.js \ No newline at end of file diff --git a/.eslintrc.json b/.eslintrc.json deleted file mode 100644 index 5b5e88e67aa..00000000000 --- a/.eslintrc.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "extends": "next/core-web-vitals", - "plugins": ["prettier", "unused-imports"], - "rules": { - "unused-imports/no-unused-imports": "warn" - } -} diff --git a/.github/ISSUE_TEMPLATE/1_bug_report.yml b/.github/ISSUE_TEMPLATE/1_bug_report.yml index b576629e30b..1786df6d8aa 100644 --- a/.github/ISSUE_TEMPLATE/1_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/1_bug_report.yml @@ -1,80 +1,80 @@ -name: '🐛 Bug Report' -description: 'Report an bug' +name: 🐛 Bug Report +description: Report an bug title: '[Bug] ' -labels: ['bug'] +labels: [bug] body: - - type: dropdown - attributes: - label: '📦 Deployment Method' - multiple: true - options: - - 'Official installation package' - - 'Vercel' - - 'Zeabur' - - 'Sealos' - - 'Netlify' - - 'Docker' - - 'Other' - validations: - required: true - - type: input - attributes: - label: '📌 Version' - validations: - required: true - - - type: dropdown - attributes: - label: '💻 Operating System' - multiple: true - options: - - 'Windows' - - 'macOS' - - 'Ubuntu' - - 'Other Linux' - - 'iOS' - - 'iPad OS' - - 'Android' - - 'Other' - validations: - required: true - - type: input - attributes: - label: '📌 System Version' - validations: - required: true - - type: dropdown - attributes: - label: '🌐 Browser' - multiple: true - options: - - 'Chrome' - - 'Edge' - - 'Safari' - - 'Firefox' - - 'Other' - validations: - required: true - - type: input - attributes: - label: '📌 Browser Version' - validations: - required: true - - type: textarea - attributes: - label: '🐛 Bug Description' - description: A clear and concise description of the bug, if the above option is `Other`, please also explain in detail. 
- validations: - required: true - - type: textarea - attributes: - label: '📷 Recurrence Steps' - description: A clear and concise description of how to recurrence. - - type: textarea - attributes: - label: '🚦 Expected Behavior' - description: A clear and concise description of what you expected to happen. - - type: textarea - attributes: - label: '📝 Additional Information' - description: If your problem needs further explanation, or if the issue you're seeing cannot be reproduced in a gist, please add more information here. \ No newline at end of file + - type: dropdown + attributes: + label: 📦 Deployment Method + multiple: true + options: + - Official installation package + - Vercel + - Zeabur + - Sealos + - Netlify + - Docker + - Other + validations: + required: true + - type: input + attributes: + label: 📌 Version + validations: + required: true + + - type: dropdown + attributes: + label: 💻 Operating System + multiple: true + options: + - Windows + - macOS + - Ubuntu + - Other Linux + - iOS + - iPad OS + - Android + - Other + validations: + required: true + - type: input + attributes: + label: 📌 System Version + validations: + required: true + - type: dropdown + attributes: + label: 🌐 Browser + multiple: true + options: + - Chrome + - Edge + - Safari + - Firefox + - Other + validations: + required: true + - type: input + attributes: + label: 📌 Browser Version + validations: + required: true + - type: textarea + attributes: + label: 🐛 Bug Description + description: A clear and concise description of the bug, if the above option is `Other`, please also explain in detail. + validations: + required: true + - type: textarea + attributes: + label: 📷 Recurrence Steps + description: A clear and concise description of how to recurrence. + - type: textarea + attributes: + label: 🚦 Expected Behavior + description: A clear and concise description of what you expected to happen. 
+ - type: textarea + attributes: + label: 📝 Additional Information + description: If your problem needs further explanation, or if the issue you're seeing cannot be reproduced in a gist, please add more information here. diff --git a/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml b/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml index 1977237deea..f8085b69c46 100644 --- a/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml +++ b/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml @@ -1,80 +1,80 @@ -name: '🐛 反馈缺陷' -description: '反馈一个问题/缺陷' +name: 🐛 反馈缺陷 +description: 反馈一个问题/缺陷 title: '[Bug] ' -labels: ['bug'] +labels: [bug] body: - - type: dropdown - attributes: - label: '📦 部署方式' - multiple: true - options: - - '官方安装包' - - 'Vercel' - - 'Zeabur' - - 'Sealos' - - 'Netlify' - - 'Docker' - - 'Other' - validations: - required: true - - type: input - attributes: - label: '📌 软件版本' - validations: - required: true + - type: dropdown + attributes: + label: 📦 部署方式 + multiple: true + options: + - 官方安装包 + - Vercel + - Zeabur + - Sealos + - Netlify + - Docker + - Other + validations: + required: true + - type: input + attributes: + label: 📌 软件版本 + validations: + required: true - - type: dropdown - attributes: - label: '💻 系统环境' - multiple: true - options: - - 'Windows' - - 'macOS' - - 'Ubuntu' - - 'Other Linux' - - 'iOS' - - 'iPad OS' - - 'Android' - - 'Other' - validations: - required: true - - type: input - attributes: - label: '📌 系统版本' - validations: - required: true - - type: dropdown - attributes: - label: '🌐 浏览器' - multiple: true - options: - - 'Chrome' - - 'Edge' - - 'Safari' - - 'Firefox' - - 'Other' - validations: - required: true - - type: input - attributes: - label: '📌 浏览器版本' - validations: - required: true - - type: textarea - attributes: - label: '🐛 问题描述' - description: 请提供一个清晰且简洁的问题描述,若上述选项为`Other`,也请详细说明。 - validations: - required: true - - type: textarea - attributes: - label: '📷 复现步骤' - description: 请提供一个清晰且简洁的描述,说明如何复现问题。 - - type: textarea - attributes: - label: '🚦 期望结果' - description: 
请提供一个清晰且简洁的描述,说明您期望发生什么。 - - type: textarea - attributes: - label: '📝 补充信息' - description: 如果您的问题需要进一步说明,或者您遇到的问题无法在一个简单的示例中复现,请在这里添加更多信息。 \ No newline at end of file + - type: dropdown + attributes: + label: 💻 系统环境 + multiple: true + options: + - Windows + - macOS + - Ubuntu + - Other Linux + - iOS + - iPad OS + - Android + - Other + validations: + required: true + - type: input + attributes: + label: 📌 系统版本 + validations: + required: true + - type: dropdown + attributes: + label: 🌐 浏览器 + multiple: true + options: + - Chrome + - Edge + - Safari + - Firefox + - Other + validations: + required: true + - type: input + attributes: + label: 📌 浏览器版本 + validations: + required: true + - type: textarea + attributes: + label: 🐛 问题描述 + description: 请提供一个清晰且简洁的问题描述,若上述选项为`Other`,也请详细说明。 + validations: + required: true + - type: textarea + attributes: + label: 📷 复现步骤 + description: 请提供一个清晰且简洁的描述,说明如何复现问题。 + - type: textarea + attributes: + label: 🚦 期望结果 + description: 请提供一个清晰且简洁的描述,说明您期望发生什么。 + - type: textarea + attributes: + label: 📝 补充信息 + description: 如果您的问题需要进一步说明,或者您遇到的问题无法在一个简单的示例中复现,请在这里添加更多信息。 diff --git a/.github/ISSUE_TEMPLATE/2_feature_request.yml b/.github/ISSUE_TEMPLATE/2_feature_request.yml index 8576e8a83e2..1743ab30eb6 100644 --- a/.github/ISSUE_TEMPLATE/2_feature_request.yml +++ b/.github/ISSUE_TEMPLATE/2_feature_request.yml @@ -1,21 +1,21 @@ -name: '🌠 Feature Request' -description: 'Suggest an idea' +name: 🌠 Feature Request +description: Suggest an idea title: '[Feature Request] ' -labels: ['enhancement'] +labels: [enhancement] body: - - type: textarea - attributes: - label: '🥰 Feature Description' - description: Please add a clear and concise description of the problem you are seeking to solve with this feature request. - validations: - required: true - - type: textarea - attributes: - label: '🧐 Proposed Solution' - description: Describe the solution you'd like in a clear and concise manner. 
- validations: - required: true - - type: textarea - attributes: - label: '📝 Additional Information' - description: Add any other context about the problem here. \ No newline at end of file + - type: textarea + attributes: + label: 🥰 Feature Description + description: Please add a clear and concise description of the problem you are seeking to solve with this feature request. + validations: + required: true + - type: textarea + attributes: + label: 🧐 Proposed Solution + description: Describe the solution you'd like in a clear and concise manner. + validations: + required: true + - type: textarea + attributes: + label: 📝 Additional Information + description: Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml b/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml index c7a3cc3707a..29cb1582fbb 100644 --- a/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml +++ b/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml @@ -1,21 +1,21 @@ -name: '🌠 功能需求' -description: '提出需求或建议' +name: 🌠 功能需求 +description: 提出需求或建议 title: '[Feature Request] ' -labels: ['enhancement'] +labels: [enhancement] body: - - type: textarea - attributes: - label: '🥰 需求描述' - description: 请添加一个清晰且简洁的问题描述,阐述您希望通过这个功能需求解决的问题。 - validations: - required: true - - type: textarea - attributes: - label: '🧐 解决方案' - description: 请清晰且简洁地描述您想要的解决方案。 - validations: - required: true - - type: textarea - attributes: - label: '📝 补充信息' - description: 在这里添加关于问题的任何其他背景信息。 \ No newline at end of file + - type: textarea + attributes: + label: 🥰 需求描述 + description: 请添加一个清晰且简洁的问题描述,阐述您希望通过这个功能需求解决的问题。 + validations: + required: true + - type: textarea + attributes: + label: 🧐 解决方案 + description: 请清晰且简洁地描述您想要的解决方案。 + validations: + required: true + - type: textarea + attributes: + label: 📝 补充信息 + description: 在这里添加关于问题的任何其他背景信息。 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3c4c9080324..24f4a498842 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md 
+++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,27 +2,27 @@ -- [ ] feat -- [ ] fix -- [ ] refactor -- [ ] perf -- [ ] style -- [ ] test -- [ ] docs -- [ ] ci -- [ ] chore -- [ ] build +- [ ] feat +- [ ] fix +- [ ] refactor +- [ ] perf +- [ ] style +- [ ] test +- [ ] docs +- [ ] ci +- [ ] chore +- [ ] build #### 🔀 变更说明 | Description of Change - #### 📝 补充信息 | Additional Information - diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3a3cce5763e..26087ec1fcf 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ version: 2 updates: - - package-ecosystem: "npm" # See documentation for possible values - directory: "/" # Location of package manifests - schedule: - interval: "weekly" + - package-ecosystem: npm # See documentation for possible values + directory: / # Location of package manifests + schedule: + interval: weekly diff --git a/.github/workflows/app.yml b/.github/workflows/app.yml deleted file mode 100644 index 7e74cf04595..00000000000 --- a/.github/workflows/app.yml +++ /dev/null @@ -1,110 +0,0 @@ -name: Release App - -on: - workflow_dispatch: - release: - types: [published] - -jobs: - create-release: - permissions: - contents: write - runs-on: ubuntu-latest - outputs: - release_id: ${{ steps.create-release.outputs.result }} - - steps: - - uses: actions/checkout@v3 - - name: setup node - uses: actions/setup-node@v3 - with: - node-version: 18 - - name: get version - run: echo "PACKAGE_VERSION=$(node -p "require('./src-tauri/tauri.conf.json').package.version")" >> $GITHUB_ENV - - name: create release - id: create-release - uses: actions/github-script@v6 - with: - script: | - const { data } = await github.rest.repos.getLatestRelease({ - owner: context.repo.owner, - repo: context.repo.repo, - }) - return data.id - - build-tauri: - needs: create-release - permissions: - contents: write - strategy: - fail-fast: false - matrix: - config: - - os: ubuntu-latest - arch: x86_64 - rust_target: x86_64-unknown-linux-gnu - - os: 
macos-latest - arch: aarch64 - rust_target: x86_64-apple-darwin,aarch64-apple-darwin - - os: windows-latest - arch: x86_64 - rust_target: x86_64-pc-windows-msvc - - runs-on: ${{ matrix.config.os }} - steps: - - uses: actions/checkout@v3 - - name: setup node - uses: actions/setup-node@v3 - with: - node-version: 18 - cache: 'yarn' - - name: install Rust stable - uses: dtolnay/rust-toolchain@stable - with: - targets: ${{ matrix.config.rust_target }} - - uses: Swatinem/rust-cache@v2 - with: - key: ${{ matrix.config.os }} - - name: install dependencies (ubuntu only) - if: matrix.config.os == 'ubuntu-latest' - run: | - sudo apt-get update - sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.0-dev libappindicator3-dev librsvg2-dev patchelf - - name: install frontend dependencies - run: yarn install # change this to npm or pnpm depending on which one you use - - uses: tauri-apps/tauri-action@v0 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }} - TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }} - APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }} - APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }} - APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }} - APPLE_ID: ${{ secrets.APPLE_ID }} - APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }} - APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }} - with: - releaseId: ${{ needs.create-release.outputs.release_id }} - args: ${{ matrix.config.os == 'macos-latest' && '--target universal-apple-darwin' || '' }} - - publish-release: - permissions: - contents: write - runs-on: ubuntu-latest - needs: [create-release, build-tauri] - - steps: - - name: publish release - id: publish-release - uses: actions/github-script@v6 - env: - release_id: ${{ needs.create-release.outputs.release_id }} - with: - script: | - github.rest.repos.updateRelease({ - owner: context.repo.owner, - repo: context.repo.repo, - release_id: process.env.release_id, - draft: false, - 
prerelease: false - }) diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml deleted file mode 100644 index b988452433b..00000000000 --- a/.github/workflows/deploy_preview.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: VercelPreviewDeployment - -on: - pull_request_target: - types: - - review_requested - -env: - VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }} - VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} - VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} - VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} - VERCEL_PR_DOMAIN_SUFFIX: ${{ secrets.VERCEL_PR_DOMAIN_SUFFIX }} - -permissions: - contents: read - statuses: write - pull-requests: write - -jobs: - deploy-preview: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Extract branch name - shell: bash - run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_OUTPUT" - id: extract_branch - - - name: Hash branch name - uses: pplanel/hash-calculator-action@v1.3.1 - id: hash_branch - with: - input: ${{ steps.extract_branch.outputs.branch }} - method: MD5 - - - name: Set Environment Variables - id: set_env - if: github.event_name == 'pull_request_target' - run: | - echo "VERCEL_ALIAS_DOMAIN=${{ github.event.pull_request.number }}-${{ github.workflow }}.${VERCEL_PR_DOMAIN_SUFFIX}" >> $GITHUB_OUTPUT - - - name: Install Vercel CLI - run: npm install --global vercel@latest - - - name: Cache dependencies - uses: actions/cache@v4 - id: cache-npm - with: - path: ~/.npm - key: npm-${{ hashFiles('package-lock.json') }} - restore-keys: npm- - - - name: Pull Vercel Environment Information - run: vercel pull --yes --environment=preview --token=${VERCEL_TOKEN} - - - name: Deploy Project Artifacts to Vercel - id: vercel - env: - META_TAG: ${{ steps.hash_branch.outputs.digest }}-${{ github.run_number }}-${{ github.run_attempt}} - run: | - set -e - vercel pull --yes --environment=preview --token=${VERCEL_TOKEN} - 
vercel build --token=${VERCEL_TOKEN} - vercel deploy --prebuilt --archive=tgz --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }} - - DEFAULT_URL=$(vercel ls --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }}) - ALIAS_URL=$(vercel alias set ${DEFAULT_URL} ${{ steps.set_env.outputs.VERCEL_ALIAS_DOMAIN }} --token=${VERCEL_TOKEN} --scope ${VERCEL_TEAM}| awk '{print $3}') - - echo "New preview URL: ${DEFAULT_URL}" - echo "New alias URL: ${ALIAS_URL}" - echo "VERCEL_URL=${ALIAS_URL}" >> "$GITHUB_OUTPUT" - - - uses: mshick/add-pr-comment@v2 - with: - message: | - Your build has completed! - - [Preview deployment](${{ steps.vercel.outputs.VERCEL_URL }}) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8ac96f19356..848628898b6 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,52 +1,45 @@ name: Publish Docker image on: - workflow_dispatch: - release: - types: [published] + workflow_dispatch: + release: + types: [published] jobs: - push_to_registry: - name: Push Docker image to Docker Hub - runs-on: ubuntu-latest - steps: - - - name: Check out the repo - uses: actions/checkout@v3 - - - name: Log in to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v4 - with: - images: yidadaa/chatgpt-next-web - tags: | - type=raw,value=latest - type=ref,event=tag - - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + push_to_registry: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v3 + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - - name: Build and push 
Docker image - uses: docker/build-push-action@v4 - with: - context: . - platforms: linux/amd64,linux/arm64 - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max - + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: yidadaa/chatgpt-next-web + tags: | + type=raw,value=latest + type=ref,event=tag + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/issue-translator.yml b/.github/workflows/issue-translator.yml index 560f66d3443..5b0ab510ef6 100644 --- a/.github/workflows/issue-translator.yml +++ b/.github/workflows/issue-translator.yml @@ -1,15 +1,15 @@ name: Issue Translator -on: - issue_comment: - types: [created] - issues: - types: [opened] +on: + issue_comment: + types: [created] + issues: + types: [opened] jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: usthe/issues-translate-action@v2.7 - with: - IS_MODIFY_TITLE: false - CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. + build: + runs-on: ubuntu-latest + steps: + - uses: usthe/issues-translate-action@v2.7 + with: + IS_MODIFY_TITLE: false + CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 
diff --git a/.github/workflows/release-notes.js b/.github/workflows/release-notes.js new file mode 100755 index 00000000000..028e1a0dc6b --- /dev/null +++ b/.github/workflows/release-notes.js @@ -0,0 +1,25 @@ +#!/usr/bin/env node + +import { readFileSync, writeFileSync } from 'node:fs'; +import process from 'node:process'; + +const tag = process.argv[2].replace('v', ''); +const log = readFileSync('./CHANGELOG.md', { encoding: 'utf-8' }).split('\n'); +let result = ''; +let inScope = false; +const regex = new RegExp(`^#+ \\[${tag}`); +for (let i = 0; i < log.length; i++) { + if (regex.test(log[i])) { + inScope = true; + result += log[i]; + continue; + } + if (inScope && /^#+ \[/.test(log[i])) { + inScope = false; + break; + } + if (inScope) { + result += `\n${log[i]}`; + } +} +writeFileSync(`notes-v${tag}.md`, result); diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml new file mode 100644 index 00000000000..adbefa5de6c --- /dev/null +++ b/.github/workflows/release-tag.yml @@ -0,0 +1,37 @@ +on: + push: + tags: + - 'v*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 + +name: Create Release + +# https://docs.github.com/en/actions/learn-github-actions/contexts#env-context +env: + RELEASE_VERSION: '' + +jobs: + build: + name: Create Release + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@master + + - name: Get the release version from the tag + shell: bash + if: env.RELEASE_VERSION == '' + run: | + echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + + - name: Generate Release Notes + run: | + ./.github/workflows/release-notes.js ${{ env.RELEASE_VERSION }} + cat notes-${{ env.RELEASE_VERSION }}.md + + - name: Create Release for Tag + id: release_tag + uses: softprops/action-gh-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + body_path: notes-${{ env.RELEASE_VERSION }}.md diff --git a/.github/workflows/remove_deploy_preview.yml b/.github/workflows/remove_deploy_preview.yml deleted file mode 100644 index 4846cda2d6a..00000000000 --- a/.github/workflows/remove_deploy_preview.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Removedeploypreview - -permissions: - contents: read - statuses: write - pull-requests: write - -env: - VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} - VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} - VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} - -on: - pull_request_target: - types: - - closed - -jobs: - delete-deployments: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Extract branch name - shell: bash - run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT - id: extract_branch - - - name: Hash branch name - uses: pplanel/hash-calculator-action@v1.3.1 - id: hash_branch - with: - input: ${{ steps.extract_branch.outputs.branch }} - method: MD5 - - - name: Call the delete-deployment-preview.sh script - env: - META_TAG: ${{ steps.hash_branch.outputs.digest }} - run: | - bash ./scripts/delete-deployment-preview.sh diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml 
index e04e30adbd6..59439adefe2 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -1,40 +1,40 @@ name: Upstream Sync permissions: - contents: write + contents: write on: - schedule: - - cron: "0 0 * * *" # every day - workflow_dispatch: + schedule: + - cron: '0 0 * * *' # every day + workflow_dispatch: jobs: - sync_latest_from_upstream: - name: Sync latest commits from upstream repo - runs-on: ubuntu-latest - if: ${{ github.event.repository.fork }} + sync_latest_from_upstream: + name: Sync latest commits from upstream repo + runs-on: ubuntu-latest + if: ${{ github.event.repository.fork }} - steps: - # Step 1: run a standard checkout action - - name: Checkout target repo - uses: actions/checkout@v3 + steps: + # Step 1: run a standard checkout action + - name: Checkout target repo + uses: actions/checkout@v3 - # Step 2: run the sync action - - name: Sync upstream changes - id: sync - uses: aormsby/Fork-Sync-With-Upstream-action@v3.4 - with: - upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web - upstream_sync_branch: main - target_sync_branch: main - target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set + # Step 2: run the sync action + - name: Sync upstream changes + id: sync + uses: aormsby/Fork-Sync-With-Upstream-action@v3.4 + with: + upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web + upstream_sync_branch: main + target_sync_branch: sync-main + target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set - # Set test_mode true to run tests instead of the true action!! - test_mode: false + # Set test_mode true to run tests instead of the true action!! 
+ test_mode: false - - name: Sync check - if: failure() - run: | - echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,详细教程请查看:https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0" - echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates" - exit 1 + - name: Sync check + if: failure() + run: | + echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,详细教程请查看:https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0" + echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. 
Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates" + exit 1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index faf7205d9cb..00000000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Run Tests - -on: - push: - branches: - - main - tags: - - "!*" - pull_request: - types: - - review_requested - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Node.js - uses: actions/setup-node@v3 - with: - node-version: 18 - cache: "yarn" - - - name: Cache node_modules - uses: actions/cache@v4 - with: - path: node_modules - key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }} - restore-keys: | - ${{ runner.os }}-node_modules- - - - name: Install dependencies - run: yarn install - - - name: Run Jest tests - run: yarn test:ci diff --git a/.gitignore b/.gitignore index 2ff556f646e..f0d3fcbf15b 100644 --- a/.gitignore +++ b/.gitignore @@ -36,7 +36,6 @@ yarn-error.log* next-env.d.ts dev -.vscode .idea # docker-compose env files diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index d81f2dab15c..00000000000 --- a/.gitpod.yml +++ /dev/null @@ -1,11 +0,0 @@ -# This configuration file was automatically generated by Gitpod. -# Please adjust to your needs (see https://www.gitpod.io/docs/introduction/learn-gitpod/gitpod-yaml) -# and commit this file to your remote git repository to share the goodness with others. 
- -# Learn more from ready-to-use templates: https://www.gitpod.io/docs/introduction/getting-started/quickstart - -tasks: - - init: yarn install && yarn run dev - command: yarn run dev - - diff --git a/.lintstagedrc.json b/.lintstagedrc.json deleted file mode 100644 index 58784bad829..00000000000 --- a/.lintstagedrc.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "./app/**/*.{js,ts,jsx,tsx,json,html,css,md}": [ - "eslint --fix", - "prettier --write" - ] -} diff --git a/.npmrc b/.npmrc new file mode 100644 index 00000000000..5bf3c616309 --- /dev/null +++ b/.npmrc @@ -0,0 +1,3 @@ +registry=https://registry.npmmirror.com +auto-install-peers=true +shamefully-hoist=true diff --git a/.prettierrc.js b/.prettierrc.js deleted file mode 100644 index 95cc75ffaec..00000000000 --- a/.prettierrc.js +++ /dev/null @@ -1,10 +0,0 @@ -module.exports = { - printWidth: 80, - tabWidth: 2, - useTabs: false, - semi: true, - singleQuote: false, - trailingComma: 'all', - bracketSpacing: true, - arrowParens: 'always', -}; diff --git a/.vscode/README.md b/.vscode/README.md new file mode 100644 index 00000000000..aa508f35c61 --- /dev/null +++ b/.vscode/README.md @@ -0,0 +1,15 @@ +## VSCode + +### 1. 扩展安装 + +> 以下为项目初始化必须要安装的扩展,若有冲突的扩展项,请先卸载掉或者去掉全局配置 + +1. `ESLint` # JS 及 CSS 文件格式化 +2. `Git History` +3. `Git History Diff` +4. `Pretier - Code formatter` # JSON 文件格式化 +5. `vscode-wxml` # 提供 wxml 语法支持 +6. `vscode-icons` # 文件显示图标,开发体验优化用到 +7. `wxml` # 微信小程序 wxml 格式化 +8. `TODO Highlight` # TODO 高亮展示 +9. 
`Todo Tree` # 显示待办任务列表 diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000000..07362ec78b7 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,41 @@ +{ + "window.title": "${activeEditorMedium}${separator}${rootName}", + + // Enable the ESlint flat config support + "eslint.useFlatConfig": true, + + // Disable the default formatter, use eslint instead + "prettier.enable": false, + "editor.formatOnSave": false, + + // Auto fix + "editor.codeActionsOnSave": { + "source.fixAll.eslint": "explicit", + "source.organizeImports": "never" + }, + + // Enable eslint for all supported languages + "eslint.validate": [ + "javascript", + "javascriptreact", + "typescript", + "typescriptreact", + "vue", + "html", + "markdown", + "json", + "jsonc", + "yaml", + "toml", + "xml", + "gql", + "graphql", + "astro", + "svelte", + "css", + "less", + "scss", + "pcss", + "postcss" + ] +} diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000000..85b9639e2b7 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,748 @@ +# 1.0.0 (2024-12-20) + + +### Bug Fixes + +* Fix memory leak issue by adding fetch request timeout ([2b912c6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2b912c683455f51b93537c7a67ab525172153f2a)) +* [[#5308](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5308)] gemini对话总结 ([7eda14f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7eda14f13882be635c9e6e5b8077617df8c5339b)) +* [[#5574](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5574)] 文档错误 ([c0c8cdb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c0c8cdbbf37fdde5df0fba4adf6fce477dded75b)) +* [#10](https://github.com/oceanopen/ChatGPT-Next-Web/issues/10) replace export icon ([3136d6d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3136d6d3fd945f672f134c6534b391dd9d853261)) +* [#1094](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1094) try to disable zoom on ios safari 
([2deb5cb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2deb5cbc9eefb29a3b96a0eed98f6305355157b7)) +* [#1124](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1124) mask model config does not works ([9f3188f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9f3188fe45d9d5c14abcb4d0a98b3b7a0718f1fe)) +* [#1126](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1126) can not select prompt ([1aaf4ae](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1aaf4ae5bc30309de7e1d8aea1df0fe413e11c45)) +* [#1130](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1130) [#1131](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1131) delete right session ([c37885e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c37885e743f02f7102816f0c96f86c124f3d8b1e)) +* [#1147](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1147) edit mask after creating a new mask ([b2fc7d4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b2fc7d476a51cd0ed757bfbf3dd15d2a8673bebf)) +* [#1154](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1154) wrong date range when query usage ([0209ace](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0209ace221c1f2ba4a0bda096b25bad15573c218)) +* [#1201](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1201) wont close prompt list when blur ([c1b6828](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c1b6828ed42c3d978edd98f475758ea39c68a9e3)) +* [#1210](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1210) change default lang to en ([328ecd1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/328ecd1cfb74d06bc42cf0430b1e629230c3de0a)) +* [#1233](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1233) detect api key with custom prefix ([c2e79d2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c2e79d22d2edb61c966dc81fa563bd766d03fb09)) +* [#1237](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1237) can not delete cloned mask 
([40223e6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/40223e6b3fde309dbfab8e3a087b7ac96a91d6b1)) +* [#1251](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1251) use google fonts mirror ([4b9d753](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4b9d753254af48a77848d0d1e7fcbd7af5b34f0e)) +* [#1273](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1273) overlap detecting ([fe8e3f2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fe8e3f2bcfcc703ea9dec7c3d85be7d7c8a833ba)) +* [#1294](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1294) fallback while mermaid render fails ([c394b21](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c394b214231508c25843fa37534ced3b9232a2b8)) +* [#1307](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1307) empty messages ([1f2ef1c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1f2ef1cdb714500b500c1ff207d580c73fe53ba3)) +* [#1359](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1359) empty line wrap count ([9b1f251](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9b1f25140e861b72a3b783d52ca7f42e6bd966b2)) +* [#1363](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1363) session index after deleting ([6d9abf1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6d9abf11b8a3c92a946c55af660332b06cbba822)) +* [#1401](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1401) try to disable zoom ([36adfe8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/36adfe87fb965120a208df907a609ec235437d06)) +* [#1423](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1423) should not scroll right when dragging side bar items ([6da3aab](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6da3aab046d85825cf195bf2074465471f7fc481)) +* [#1444](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1444) async load google fonts ([03163d6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/03163d6a61856dbe52f156d89da80a2ce9f7cb79)) +* 
[#1498](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1498) missing text caused by streaming ([aed6b34](https://github.com/oceanopen/ChatGPT-Next-Web/commit/aed6b349507dce2bdca77756db52bca88db268a9)) +* [#1509](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1509) openai url split ([8b0cf7d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8b0cf7d248bd3582c619f9337f711076caa75532)) +* [#1533](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1533) handle non-stream type ([e00652c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e00652ce86c5ac09192de255e5a8863651d7a73e)) +* [#1571](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1571) [#1578](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1578) handle more error code ([30676d1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/30676d118f4b6e699472c07b8ca1609202fd7535)) +* [#159](https://github.com/oceanopen/ChatGPT-Next-Web/issues/159) temperature should range 0 - 2 ([380f818](https://github.com/oceanopen/ChatGPT-Next-Web/commit/380f818285d2a0add330d50fe4df4c08e4819649)) +* [#1611](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1611) show corret message when can not query usage ([a524a60](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a524a60c463c7c8f151bb7d2e7c5d28662edbef5)) +* [#1612](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1612) fill empty message with a placeholder ([6cf2fa0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6cf2fa02e59b776bf91039b821d557296e9bc0aa)) +* [#1612](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1612) infinite loading ([af497c9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/af497c96ec066abe93ac05433382283acc3ccf93)) +* [#1668](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1668) should not summarize twice ([58f726c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/58f726c6023795ae8fe82a2c114dbcea3985bffa)) +* [#1681](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1681) replace svg icons 
with png icons ([ec61a5b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ec61a5b32d15da4eda81d2c11dc489318e1a255d)) +* [#1685](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1685) clear context index should be recoverable ([57514e9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/57514e91b630213f3795dec4731e82864cf74981)) +* [#1688](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1688) wrong clear context index ([e9642c7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e9642c750547d608dfa3cc9d8cdd26b8205b4c7e)) +* [#1711](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1711) input range style in mobile screen ([4ca34e0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4ca34e04368420cf97626d1b9803f9b7d647190e)) +* [#1746](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1746) Can't modify immutable headers. ([d533895](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d5338956371707e9825da741cdcab83bdf6b5525)) +* [#1771](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1771) should not lose chat context when sumindex > n - count ([db63a5a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/db63a5a67020e399f16b47a3e541506df645ec3f)) +* [#1815](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1815) refuse to serve when disable gpt4 ([37da759](https://github.com/oceanopen/ChatGPT-Next-Web/commit/37da759fd53f9284148a5d7376223649dc5b8eae)) +* [#182](https://github.com/oceanopen/ChatGPT-Next-Web/issues/182) prompt cannot be selected ([b3fdf3e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b3fdf3efecadf015d04ab4fb8a2f60d58bd4d444)) +* [#185](https://github.com/oceanopen/ChatGPT-Next-Web/issues/185) input and select align center ([7827b40](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7827b40f1798b65d84d3f59d351d35d55784e89a)) +* [#1931](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1931) try to fix cors issues 
([0fb775d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0fb775d71a0fac5ce4aa802bc3eb0b066c12ed7b)) +* [#1954](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1954) lazy render bugs ([184a0b9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/184a0b94811c5a697351388021bad03d62d98105)) +* [#1982](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1982) should not fullscreen on standlone build ([47c546f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/47c546fafa313549cf3885c1f6a4170ad6d5192d)) +* [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) use shift+enter to wrap lines when submit key is enter ([547ef55](https://github.com/oceanopen/ChatGPT-Next-Web/commit/547ef5565e6af4ba5292381a7ccc78e19b7f0ebf)) +* [#203](https://github.com/oceanopen/ChatGPT-Next-Web/issues/203) pwa installation problem ([1602879](https://github.com/oceanopen/ChatGPT-Next-Web/commit/16028795f91bb65c84362475b977271ac0df3243)) +* [#2055](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2055) should render mermaid completely ([3c38b9c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3c38b9c93b412afc614badf6a175af521ee6877a)) +* [#2061](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2061) RequestInit TypeError ([9c05d13](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9c05d136f5241a6c8ee16d09ee060840f6b3cfb0)) +* [#2087](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2087) chat box font size use global config ([fa9ceb5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fa9ceb587503d1597754f9a0f4a5ffbe41447b85)) +* [#2111](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2111) make "Attached Messages Count" working ([91d8f9d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/91d8f9d73e24d90c668b2dcc98b2eb9df98eac5b)) +* [#2135](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2135) disable cmd + arrow to change session ([058e289](https://github.com/oceanopen/ChatGPT-Next-Web/commit/058e28911a0698a8c7dbf1710a99b332535cc0be)) 
+* [#2149](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2149) try to fix chat action button style ([5b1d45c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5b1d45c1a9e4f2a5b65c35d96b678496a978557c)) +* [#2149](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2149) try to fix chat action button style ([bce7489](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bce74890dca87dbdab5f5171ecfb791a1f8c55be)) +* [#2149](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2149) try to fix ChatAction style on ios mobile ([d5c33a1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d5c33a11839d45990a0da0d015de445bf0b59789)) +* [#2195](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2195) correct macos icon size ([829df56](https://github.com/oceanopen/ChatGPT-Next-Web/commit/829df567339cb7f749da98ef15be085d9a541426)) +* [#2208](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2208) use global settings button dose not work ([ee55f87](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ee55f8790ed25cb0a105a086ce32f884089864b6)) +* [#2221](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2221) user prompts in front of all prompts ([5963459](https://github.com/oceanopen/ChatGPT-Next-Web/commit/59634594994bfc00facf4ea7b6160a4e2ed1f49e)) +* [#2230](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2230) hide chat actions for context prompts ([0bc2c71](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0bc2c71b0c906c1e70f5e557e2f742bcabb8ef17)) +* [#2252](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2252) polyfill composing for old safari browsers ([1197521](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1197521921f98e92e7c89b91dbcbb6b981908ec6)) +* [#2261](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2261) default enable gpt-4 models ([3863cfe](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3863cfe78648885163c8326d9fb47db5658ca751)) +* [#2280](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2280) auto-detect 
models from 'list/models' ([28c4577](https://github.com/oceanopen/ChatGPT-Next-Web/commit/28c457730afc838f6cd153c3dc789b70f3a0b761)) +* [#229](https://github.com/oceanopen/ChatGPT-Next-Web/issues/229) disable light code theme ([bf50eba](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bf50ebac945ea31113dadb7ac9118929897dc4ef)) +* [#2295](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2295) use correct methods to migrate state ([90d8f31](https://github.com/oceanopen/ChatGPT-Next-Web/commit/90d8f3117f787584e54b250c0914d09b8617dc09)) +* [#23](https://github.com/oceanopen/ChatGPT-Next-Web/issues/23) errors when dev on windows ([a5b3998](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a5b39983047e86c71d9fccf925708863cf9fcb44)) +* [#2303](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2303) should select custom models ([09b05cd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/09b05cde7fef0ceea087511f1d498b3975782941)) +* [#2308](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2308) improve chat actions ([ca29558](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ca295588c426001489d00907c1a255db00436d1a)) +* [#2336](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2336) resending message should delete origional messages ([c00a63e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c00a63e4c3a01efd0e8cb099f87811f062ad7aaf)) +* [#2367](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2367) do not copy in async callback after sharing to ShareGPT ([30473ec](https://github.com/oceanopen/ChatGPT-Next-Web/commit/30473ec41e68842bf0eed03f9a308ca8aaa551b5)) +* [#2393](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2393) try to fix chat list lag ([1357608](https://github.com/oceanopen/ChatGPT-Next-Web/commit/13576087f4806946ee0f93b44de6482ba010705e)) +* [#244](https://github.com/oceanopen/ChatGPT-Next-Web/issues/244) better scroll ux 
([7599ae3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7599ae385be260c10a3e6b784c22484b636c8576)) +* [#244](https://github.com/oceanopen/ChatGPT-Next-Web/issues/244) optimize polyfill ([37587f6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/37587f6f717eb5092f1c5e5fb5eabedd40f12c94)) +* [#2485](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2485) one-time-use body ([b14c5cd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b14c5cd89c760ac81b555c0b4eb061c34cae6978)) +* [#2514](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2514) should not clear the message after editing message ([129e7af](https://github.com/oceanopen/ChatGPT-Next-Web/commit/129e7afc160c5118d363ad10c9f937b4c6a78d40)) +* [#253](https://github.com/oceanopen/ChatGPT-Next-Web/issues/253) [#356](https://github.com/oceanopen/ChatGPT-Next-Web/issues/356) auto scroll ux ([c978de2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c978de2c1097a5fdf048a2f9ab28f9dbd3334449)) +* [#2564](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2564) should not clear message when error ([4ab9141](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4ab9141429ba170308443284bd06c84dac027788)) +* [#2566](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2566) click avatar to edit context messages ([840277f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/840277f5846ab13eaec0f3848ebd86d3a4ade410)) +* [#2594](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2594) trim the / ([0b7de6f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0b7de6f7b2fc0043631607dd880e810605b312a9)) +* [#2614](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2614) better rtl detecting algo ([a496bc5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a496bc5a6387a8c25364dec7b78df96058639643)) +* [#2615](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2615) scrollbar jitter under certain message counts 
([db5c7ab](https://github.com/oceanopen/ChatGPT-Next-Web/commit/db5c7aba788c5f0a1a347f7d68baa5f0b1c5f516)) +* [#2672](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2672) should use correct resend index ([e114221](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e1142216eca8c91701457a2a85cbe45d1e7c3ec9)) +* [#2699](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2699) remove double quotes in readme ([d8b6ebf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d8b6ebf6cbcfcad7865f51e4a75e912a9aa87d8f)) +* [#277](https://github.com/oceanopen/ChatGPT-Next-Web/issues/277) no cache for credit query ([e5aa72a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e5aa72af7688c5b596934ceb7f1f65be96f8cc63)) +* [#2817](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2817) min-height for landscape orientation on mobile phone ([61ca60c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/61ca60c550295c75e3e3feb8061455d298c27501)) +* [#2820](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2820) try to fix 520 error code ([adb860b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/adb860b4646c0c7548a059c5a8e8b3349ebdeca8)) +* [#2841](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2841) dollar sign conflict with latex math ([a0cd939](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a0cd939bfd560621b854b7533fa0b28a329dfa75)) +* [#289](https://github.com/oceanopen/ChatGPT-Next-Web/issues/289) [#367](https://github.com/oceanopen/ChatGPT-Next-Web/issues/367) [#353](https://github.com/oceanopen/ChatGPT-Next-Web/issues/353) [#369](https://github.com/oceanopen/ChatGPT-Next-Web/issues/369) provide more error message info ([b44caee](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b44caeeefb9f90baa69fac1d76201447f7930e98)) +* [#289](https://github.com/oceanopen/ChatGPT-Next-Web/issues/289) use highlight.js instead of prism ([4f0108b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4f0108b0eaa3fb1f06e3227c7f3ae9d22306621a)) +* 
[#2981](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2981) full screen button not works ([f54db69](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f54db695af55ea925369950be9b1b8988461544b)) +* [#3016](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3016) disable sidebar transition on ios ([6e52d14](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6e52d14180345446abf61e933368eccbebad8694)) +* [#305](https://github.com/oceanopen/ChatGPT-Next-Web/issues/305) disable double click to copy on pc ([0385f6e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0385f6ede919117e7278cd64fe01f7d688805059)) +* [#3152](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3152) system prompt should be injected ([836bf83](https://github.com/oceanopen/ChatGPT-Next-Web/commit/836bf836d37b0d704eab132b7849447214b93d8c)) +* [#3174](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3174) should prompt to confirm to delete chat ([fbc0236](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fbc02367484416a98d20b86d9994d019869d78a8)) +* [#3186](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3186) enable max_tokens in chat payload ([d0a1d91](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d0a1d910d4dae62351ae0273562cc6067e3e6ed9)) +* [#3189](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3189) should correct math eq in exporter ([b52e237](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b52e237044bdc0ddf0427dfb363486da10810973)) +* [#3192](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3192) use smaller max_tokens as default ([87e3d66](https://github.com/oceanopen/ChatGPT-Next-Web/commit/87e3d663a2955f7344f214b355f8a8d03032ea65)) +* [#3196](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3196) 3.5-turbo-1106 should use old cutoff date ([3b3ebda](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3b3ebda34bc5def7e7b72f9a3a7dcca2fa0c0aac)) +* [#3207](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3207) ensure corner case 
([cb140e4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cb140e482f522b5add2f31b42d80eda471764335)) +* [#3241](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3241) should not ensure openai url non-empty ([d033168](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d033168d80b54636e306d6a38e604482f3999486)) +* [#3275](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3275) refuse on server side if hide user api key ([9876a1a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9876a1aeca71610841af5585d7baeba3126a8df9)) +* [#34](https://github.com/oceanopen/ChatGPT-Next-Web/issues/34) only auto scroll when textbox is focused ([1e89fe1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1e89fe14ac08a87249a917b4630671c52d4f03f5)) +* [#366](https://github.com/oceanopen/ChatGPT-Next-Web/issues/366) use fallback copy ([7386565](https://github.com/oceanopen/ChatGPT-Next-Web/commit/73865651a0095885713b61ae36e8e900158b332c)) +* [#367](https://github.com/oceanopen/ChatGPT-Next-Web/issues/367) failed to fetch account usage ([7b5af27](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7b5af271d501b2c8d85f438dfa358913b8da81ac)) +* [#38](https://github.com/oceanopen/ChatGPT-Next-Web/issues/38) high resolution favicon ([a5ec152](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a5ec15236ad13b971bafb2684bf2c127b27ef3dd)) +* [#384](https://github.com/oceanopen/ChatGPT-Next-Web/issues/384) improve scroll ([0e784c5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0e784c50ad11079d7af5537c0e9cc28bf84c7ac9)) +* [#397](https://github.com/oceanopen/ChatGPT-Next-Web/issues/397) [#373](https://github.com/oceanopen/ChatGPT-Next-Web/issues/373) Array.prototype.at polyfill errors ([5c75b6c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5c75b6c784c3b99ec849288e83f4345ad40621d1)) +* [#410](https://github.com/oceanopen/ChatGPT-Next-Web/issues/410) can not stop response 
([8e560d2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8e560d2b2eec503ae43685a5a23f0c726eb9ae58)) +* [#418](https://github.com/oceanopen/ChatGPT-Next-Web/issues/418) valid model config ([4e644cf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4e644cfca70914371586e8761fe63791c7a6b04e)) +* [#4240](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4240) remove tip when 0 context ([4b8288a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4b8288a2b2c8cbd6567fea0b729537b1696b54af)) +* [#439](https://github.com/oceanopen/ChatGPT-Next-Web/issues/439) context prompt input with textarea ([0e77177](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0e77177a607260bbb982ea2446ca85bc4c3a466e)) +* [#451](https://github.com/oceanopen/ChatGPT-Next-Web/issues/451) override default model config ([dce2546](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dce2546f5f99df85810ced575c1a1c9cbc178781)) +* [#463](https://github.com/oceanopen/ChatGPT-Next-Web/issues/463) add subscrption total amount ([acfe6ee](https://github.com/oceanopen/ChatGPT-Next-Web/commit/acfe6eec18ea33ed0a65f8653199b220cdccff55)) +* [#5](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5) crash if code block cannot be highlighted ([e55520e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e55520e93cf554aca9f43bcfdb0e77490f1fdaf2)) +* [#507](https://github.com/oceanopen/ChatGPT-Next-Web/issues/507) break cjk chars in stream mode ([7aee53e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7aee53ea05494ef55412a1e2745a8a9ee8d497d8)) +* [#512](https://github.com/oceanopen/ChatGPT-Next-Web/issues/512) Mobile renaming should not return to chat list ([806587c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/806587c8eae4ffa21805bc29e83f7ce85ca4682a)) +* [#513](https://github.com/oceanopen/ChatGPT-Next-Web/issues/513) show toast after copying ([f3dbe5a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f3dbe5a25116bc9487edd5165cf8cbe442655264)) +* 
[#522](https://github.com/oceanopen/ChatGPT-Next-Web/issues/522) resizable side bar ([6ae61c5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6ae61c5357644675e162b4a2c2d90b53c58b91a8)) +* [#528](https://github.com/oceanopen/ChatGPT-Next-Web/issues/528) wont send max_tokens ([45c8de4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/45c8de42b9a6269f1dcea5f95902f932c81cdc51)) +* [#537](https://github.com/oceanopen/ChatGPT-Next-Web/issues/537) delete chat button style ([6420f61](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6420f615662be17e27f83caa3058606261e0db71)) +* [#5429](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5429) Anthropic authentication_error CORS ([9a5a3d4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9a5a3d4ce4e6b1c7210fc1b9d9e78231d4e2b3a8)) +* [#5450](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5450) ([269d064](https://github.com/oceanopen/ChatGPT-Next-Web/commit/269d064e0a7b7b3690cc9aa0f3204960f1bee912)) +* [#5486](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5486) plugin样式优化 ([0e210cf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0e210cf8de4b4a5c75acd8684b706a840ca947ba)) +* [#559](https://github.com/oceanopen/ChatGPT-Next-Web/issues/559) custom input ui style ([85bf4ac](https://github.com/oceanopen/ChatGPT-Next-Web/commit/85bf4ac0770d525046d3de9509ec80cd06bc5336)) +* [#589](https://github.com/oceanopen/ChatGPT-Next-Web/issues/589) improve unauthorized tips ([0e05733](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0e05733bbb9ebe3ee40f23edf41531ea6d4f8d70)) +* [#613](https://github.com/oceanopen/ChatGPT-Next-Web/issues/613) show all prompts when input / ([637cda5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/637cda5b4cfc4bc4841b86b99a3d3e9e2fc76f73)) +* [#641](https://github.com/oceanopen/ChatGPT-Next-Web/issues/641) delete wrong chat list ([4a49226](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4a492264a164fb9f771025fde466a389d1e0e624)) +* 
[#648](https://github.com/oceanopen/ChatGPT-Next-Web/issues/648) password input style ([174c745](https://github.com/oceanopen/ChatGPT-Next-Web/commit/174c745279f7f27b2283318695060184468641ab)) +* [#676](https://github.com/oceanopen/ChatGPT-Next-Web/issues/676) docker override old proxy files ([8df8ee8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8df8ee8936505f19bfbb59e550df5dca47253f49)) +* [#7](https://github.com/oceanopen/ChatGPT-Next-Web/issues/7) disable light code theme ([fb2d281](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb2d281aac7c51c932bdb4fbb47f2dbecdba45e8)) +* [#751](https://github.com/oceanopen/ChatGPT-Next-Web/issues/751) do not cache request ([8f5c289](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8f5c28981877c3428b29fb08c36a3c15117c873d)) +* [#804](https://github.com/oceanopen/ChatGPT-Next-Web/issues/804) disable auto scroll in textarea ([fb14785](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb14785cadf2055818bd4ff9c6064b59e53c2700)) +* [#829](https://github.com/oceanopen/ChatGPT-Next-Web/issues/829) filter empty prompt ([ea3e8a7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ea3e8a7459db28ca201aada341e54137e43cebb4)) +* [#832](https://github.com/oceanopen/ChatGPT-Next-Web/issues/832) update nextjs version to 13.3.0 ([124938e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/124938ecc9d0e015ed1a0cd3185395fec34de08d)) +* [#853](https://github.com/oceanopen/ChatGPT-Next-Web/issues/853) fetch duplex errors ([cc053b1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cc053b148d6487c83a2dd647059e0cfa7314fe16)) +* [#866](https://github.com/oceanopen/ChatGPT-Next-Web/issues/866) remove unused retry messages ([525a2ff](https://github.com/oceanopen/ChatGPT-Next-Web/commit/525a2ff9a7e9b1be79a15972f138d092b71bf4de)) +* [#915](https://github.com/oceanopen/ChatGPT-Next-Web/issues/915) allow send 0 history messages 
([072a35b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/072a35b4ee1940fb23264731038403c563638150)) +* [#930](https://github.com/oceanopen/ChatGPT-Next-Web/issues/930) wont show delete for first message ([2390da1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2390da11651c80bd3e0fd3935063614a5694aa02)) +* [#963](https://github.com/oceanopen/ChatGPT-Next-Web/issues/963) config not work ([ae479f4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ae479f4a92d1f5a20cfd5265a932bc329a029d58)) +* [#965](https://github.com/oceanopen/ChatGPT-Next-Web/issues/965) improve loading animation ([ab82636](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ab826363ea4d585becb70d53778d45c0aa312403)) +* *.scss *.svg types ([9146b98](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9146b98285800c09666f7a439ac9fbbfa041e741)) +* **#5378:** default plugin ids to empty array ([db58ca6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/db58ca6c1d59dc6410c1fa55116926a6ec5fb1c6)), closes [#5378](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5378) +* **#65:** fix unknown git commit id ([efaf659](https://github.com/oceanopen/ChatGPT-Next-Web/commit/efaf6590ef5ef46174b7e9a90d63b4b8bf806b78)) +* 1. anthropic client using common getHeaders; 2. 
always using `Authorization` header send access code ([37e2517](https://github.com/oceanopen/ChatGPT-Next-Web/commit/37e2517dac850aef0bec0430f02356402b8610d8)) +* 代码块嵌入小代码块时渲染错误 ([e562165](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e56216549efe58c1b734f5094eb77bfaa6654c69)) +* 兼容不同浏览器的input range兼容 ([d921084](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d92108453f20c6b5807daeed0f1e84ab9ee62a5b)) +* 修复查看全部按钮导致artifacts失效 ([cd49c12](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cd49c12181bbcdd099827855075457f14ecccbac)) +* 修复多余的查看全部 ([8b67536](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8b67536c2313b03d19e28896292d6b81ad90247b)) +* 修复在手机浏览器高度样式问题 ([5f7a264](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5f7a264e52d8369df89842c3c362ff9e338216bf)) +* 修改是否应该注入System Prompt的判断规则为根据设置项 ([1513881](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1513881eed064768da907a52d76ae869d771fd09)) +* 修改InjectSystemPrompts.SubTitle使其更符合系统行为 ([2930ba0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2930ba0457777319b05ea305956f86ebcc87a6a7)) +* a few typos ([fee38b8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fee38b8d1397cd0db6acdb169cd1d799663c7921)) +* action ubuntu version ([5ce53db](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5ce53dbcf4c2a4189efaac8a0fde08bed7fe9e46)) +* add localnet to proxychains config ([de000a8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/de000a8b4e207dbcc4711fdc9040c966344af9d3)) +* add max_tokens when using vision model ([#4157](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4157)) ([08fa227](https://github.com/oceanopen/ChatGPT-Next-Web/commit/08fa22749aea8f497811f684bd9c7ef68d698666)) +* add media query to theme-color, fix auto theme not updating theme-color ([30ff915](https://github.com/oceanopen/ChatGPT-Next-Web/commit/30ff915e9db9fa766e871118ff36f77818ec0d9c)) +* add support to http scheme. 
([#3985](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3985)) ([47ae874](https://github.com/oceanopen/ChatGPT-Next-Web/commit/47ae874e4d0d2554a0079119c77bcc1ef9afe649)) +* add webdav request filter ([038fa3b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/038fa3b301794050ec7e59325aa00f25b3ce3257)) +* adjust presence_penalty step 0.1 ([4d45c07](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4d45c07bf2096e9f12c142c010e3893c905d35f1)) +* adjust upstash api ([9a84972](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9a8497299d11706f096a4fc10ff0ab5af43465c7)) +* alignment of mobile settings page title. ([b003a37](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b003a374b83aa0c508e01453818b7d1e6766a550)) +* allow to import a single mask ([f6c268d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f6c268dc1e3afad9448801f47bcec8b4cc81ef91)) +* anthropic client using common getHeaders ([d65ddea](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d65ddead11ad9e14a6b7eb522c5e2fceb6e5df53)) +* apiClient ([f3e3f08](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f3e3f083774ab01db558a213a0b180fe995ad2c4)) +* artifact render error ([#5306](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5306)) ([4ec6b06](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4ec6b067e7964f055bd003fe3696477e23b2bd39)) +* auto grow textarea ([13035ec](https://github.com/oceanopen/ChatGPT-Next-Web/commit/13035ecb0d3ed2e28855dcdd92f770ef2d8efa27)) +* auto migrate proxy config ([066ca9e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/066ca9e552f5f455bb9456994361c6ac9e08297c)) +* auto scroll on enter ([802ea20](https://github.com/oceanopen/ChatGPT-Next-Web/commit/802ea20ec4bc4c5cd2acb3a5de2ac4c6a1096694)) +* autoscroll conflict ([4269775](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4269775665760a514a978cdc6363e8440143bbfe)) +* avoiding not operation for custom models ([#4010](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4010)) 
([9d5801f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9d5801fb5ff21893c6dfc0417ab65eb99ccc0fdc)) +* azure summary ([3da717d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3da717d9fcb43134336d0105b8e794699edbf559)) +* baidu error_code 336006 ([b667eff](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b667eff6bdbafe0e131b077e03f863317cf5ef45)) +* baidu error_code 336006 ([54fdf40](https://github.com/oceanopen/ChatGPT-Next-Web/commit/54fdf40f5a60dbf9b4161094a559b0efe7270af8)) +* baidu error_code 336006 ([9ab45c3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9ab45c396919d37221521a737f41b5591c52c856)) +* baidu error_code 336006 ([d0e296a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d0e296adf82ffd169554ef8ab97f1005dabd0bbb)) +* botMessage reply date ([85cdcab](https://github.com/oceanopen/ChatGPT-Next-Web/commit/85cdcab850cadbbd346d38b34603e3eb00e3e715)) +* bug ([ed5aea0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ed5aea0521797841981919fa3c1ebb6340c35168)) +* bug [#1240](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1240) ([6e20031](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6e20031dcef62d8a5cabe7e343e9abb2aa6e11b7)) +* bug [#1413](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1413) cmd/alt/ctrl should be checked for arrowUp events ([7bf74c6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7bf74c6a5d07e5746a1299b61a3cac1bd08ec416)) +* bug [#1413](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1413) input '/' when clicking icon to open prompt modal ([cbb50c1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cbb50c14e1fd8513d3b89cf958a12e5499a1cd01)) +* bug [#1662](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1662) ([23f6c2e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/23f6c2e8c90cdbe33854e0428f4175350cb406f3)) +* bugs ([2a1c05a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2a1c05a0283539275b98387a2fe203301f122873)) +* build error 
([3fb3895](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3fb389551ba5284be77734be47b7595c9c425967)) +* build errors ([b4b11a4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b4b11a470f7123ed6619a224f8cd0ad92381a3b5)) +* build errors ([8ec9278](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8ec927844c85deb1b99424ec2d7623096ac500b8)) +* built-in plugin dalle3 error [#5787](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5787) ([2b0f2e5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2b0f2e5f9d064bc8107414b0c2e7efe61c03cdef)) +* change matching pattern ([8645214](https://github.com/oceanopen/ChatGPT-Next-Web/commit/86452146540a224a3242238dd07964a26b8df246)) +* chat history with memory ([4d97c26](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4d97c269ff792627b8ac9517c359a60ea8b993e0)) +* **chat-item:** selected chat-item showing border in other pages ([943a270](https://github.com/oceanopen/ChatGPT-Next-Web/commit/943a2707d2976bfab8ecd2258bc629396de18775)) +* cicd , add pull_request_target ([6206ceb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6206ceb49b26b987adcde5e0715cc83adf500e21)) +* cicd, alias domain env name ([ecbab75](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ecbab75a25a347f8990db7e902cb900cb7ebb990)) +* cicd, checkout sha ([3fa55f9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3fa55f9022b20fb3b31d14d82cc712d6fa470449)) +* cicd, delete velcel pre ([15d25df](https://github.com/oceanopen/ChatGPT-Next-Web/commit/15d25df2458ff9160eff5184158aea3bfa6571d7)) +* cicd, delete vercel pre ([fae82a3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fae82a30e7b127427199cfe2b2fc2a1f095a2414)) +* cicd, delete vercel pre ([#3910](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3910)) ([32bcdb8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/32bcdb8982bfe12b9f26b3f8414a88ead7ca943e)) +* cicd, remove workflow_dispatch 
([148c32a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/148c32a38383dcd4bd8927a75eb97105fab23806)) +* cicd, vercel alias domain ([ba3e7e7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ba3e7e79741289c9eddee533828aa40c1e9e940a)) +* cicd, vercel domain suffix secret ([43631a3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/43631a32718bfa287dfcb10e95087b0ef5a2d3ca)) +* clear btn should display in correct place ([523d553](https://github.com/oceanopen/ChatGPT-Next-Web/commit/523d553daca12455f6d90ac075dacb5daffb9b96)) +* click the prompt button to hide hints when it's already shown ([ec19b86](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ec19b86ade04857bf339b26c853f27b0ba2688a8)) +* code copy button position ([55f3724](https://github.com/oceanopen/ChatGPT-Next-Web/commit/55f37248f7dc7be70a30daa39f902e3dfc7adf17)) +* code highlight styles ([eb531d4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eb531d4524e7bbf2aa018e8cedef73c4927749aa)) +* commit id as version id ([2f2e0b6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2f2e0b6762826cc2bb0ae05b928d0f7d0920bbdd)) +* compile erros ([701a6e4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/701a6e413fffe49cd5e23ee035db986d0015582b)) +* correct typo in variable name from ALLOWD_PATH to ALLOWED_PATH ([cd75461](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cd75461f9e215f6e3140e36359d138dc096abe99)) +* crash caused by filter config ([dea3d26](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dea3d26335a8303daf97a476f9139202f7f3b00b)) +* css ([7f1b44b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7f1b44befe8449f767968f545742049ff90a089b)) +* css on display chat names ([f8ef627](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f8ef6278a5b325820f3c9ad14e7aba6c5eb6c367)) +* dark theme css ([61245e3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/61245e3d7e41064bc9b5a431848489a3d82c2ef5)) +* default enable artifacts 
([715d1dc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/715d1dc02f4de09c0dbfa35b801ad075b0d79414)) +* default is forced to set gpt-3.5-turbo if no server default model have been set ([36a0c7b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/36a0c7b8a3ab0c0b138940af7ec2efaf94aadcaf)) +* default model ([c6ebd6e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c6ebd6e73cbc58bbd752eeab22a3b029985d2e57)) +* Determine if Tencent is authorized ([1102ef6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1102ef6e6b4d68309f5f7ec22157bc2f62fedb05)) +* dialog height ([752c083](https://github.com/oceanopen/ChatGPT-Next-Web/commit/752c083905d642b964b87fcf599faf4a6f4f8f2a)) +* distinguish PC/Mobile behavior on auto-scroll ([dd20c36](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dd20c36a557b37726ff74635fdef9f7fef535c4c)) +* docker access code setting missing ([eb72c83](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eb72c83b7e71007ed2bd7f06409a39bdbb727fb0)) +* docker build ([e7e39ba](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e7e39ba56e65e7c359fabbad5fdc67a952889af7)) +* empty response ([4a8e85c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4a8e85c28a293c765ce73af6afb34aaa4840290e)) +* enable `enableInjectSystemPrompts` attribute for old sessions ([fd058cc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fd058cc6937d2d1647f07d4d440c68d60cae9f50)) +* Enter key bug ([6c82f80](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6c82f804aeeec6f8ae117d1341cea5ccc34a4b8a)) +* enter key cannot select prompt when using enter key to submit ([cf775e3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cf775e3487db1b519dd6a48dfc67eac46765372c)) +* error ([10d7a64](https://github.com/oceanopen/ChatGPT-Next-Web/commit/10d7a64f8869e1b35cc2e296d111431f2a00945d)) +* error in windows ([3038dfd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3038dfdb278c3fa4bdc5e1aa6ce12aa7f1e8eae8)) +* false window style 
([76a6341](https://github.com/oceanopen/ChatGPT-Next-Web/commit/76a6341c7bf52de30e49b02b1f4cb4195755e044)) +* fix [#82](https://github.com/oceanopen/ChatGPT-Next-Web/issues/82), close sidebar after new session ([684a3c4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/684a3c41efb1ec4f975aec365ed8e9bffbb4159c)) +* fix add api auth ([4169431](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4169431f2c5d78345de7704dda4872d7d5e7790f)) +* fix bug in generating wrong gemini request url ([26c2598](https://github.com/oceanopen/ChatGPT-Next-Web/commit/26c2598f56b82b0b1082aeb05b58aacdb2de0cd9)) +* fix gemini issue when using app ([#4013](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4013)) ([bca7424](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bca74241e636086b633ce39d10804f31437278f3)) +* fix gemini pro streaming api duplicated issue ([#3721](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3721)) ([a80502f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a80502f7db80a1cfa0814b213b9444be24e3ac7c)) +* fix history message count ([fea4f56](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fea4f561b4c175c6f5c1fcc842e31a475132591b)) +* fix issue https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/3616 ([5af68ac](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5af68ac545902e80465235051c39f068baaf9160)) +* fix llm models field ([ae0d68c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ae0d68c27e4f7f65d9467e724561fab3e924400d)) +* fix removing bearer header ([f5ed160](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f5ed1604aa0b3b60a8fcac1cecb03f75a0a65cdb)) +* fix server token fetch policy ([7d9a213](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7d9a2132cbdafa896ca6523a284d38ec880328b2)) +* fix the different colors on mobile ([785372a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/785372ad73b6691717fb699125bd62fbdc5f078b)) +* fix the method to detect vision model 
([43e5dc2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/43e5dc22920c60bf87fc1b78bf95c441356bb1d8)) +* fix type errors ([45798f9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/45798f993d3ae852206398b25ef4fda4f642f412)) +* fix upstash sync issue ([6aaf83f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6aaf83f3c211b3efea63d20f39a58f0c1ab6fa17)) +* fix using different model ([5c63825](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5c638251f866e51d629c5e25cbe1ee11433c08f6)) +* fix webdav sync issue ([99aa064](https://github.com/oceanopen/ChatGPT-Next-Web/commit/99aa064319991b6ee53eb9c75bcfeb5a6b0188cb)) +* Fixed an issue where the sample of the reply content was displayed out of order ([8498cad](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8498cadae8f394c680be6addf35a489e75d33954)) +* Fixed the issue that WebDAV synchronization could not check the status and failed during the first backup ([716899c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/716899c030646402fd5562496a12e2cd385d169d)) +* format package ([461154b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/461154bb039793b3086a4697d031f9a85a1c3b26)) +* get real-ip instead of vercel edge network ip ([c282433](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c282433095d7b34bc31e2704cf12c27ca77c5381)) +* give o1 some time to think twice ([03fa580](https://github.com/oceanopen/ChatGPT-Next-Web/commit/03fa580a558374c80485b6b36f9b1aad810f2df4)) +* glm chatpath ([adf7d82](https://github.com/oceanopen/ChatGPT-Next-Web/commit/adf7d8200b63ba9e389c3df2b801b82a272a85bf)) +* header title overflow ([909e2ab](https://github.com/oceanopen/ChatGPT-Next-Web/commit/909e2ab60f276270a958d3d38bd79b9f65ff8bc3)) +* hide actions when loading ([3a3999d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3a3999d73ae2939c3e397c3b5ffa403e6cb2ed59)) +* hide actions when loading ([7a5c35b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7a5c35baf3e5102c6cc9859589a10af6c911480c)) +* hide 
toast on cancel session deletion on mobile ([71d9fbc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/71d9fbc36771108e32761f1ec0f35a4fcd1bae22)) +* historyMessageCount ([12f342f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/12f342f01589a1a458d16601c47d617ebe124659)) +* hot fix for data migration ([6419ce3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6419ce345f4d3a317227fe2197a34b9a6864afca)) +* hydrated ([ed9aae5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ed9aae531e0191d8b7fcbe594e0dc4e6176450da)) +* hydrated for indexedDB ([886ffc0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/886ffc0af89b2bf09c8a1af16648b00a629b584e)) +* i18n ([819238a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/819238acaf5114329168f2c95da74d747795daa1)) +* i18n ([c5168c2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c5168c213257d44ab8b637dc267a194000c76ea7)) +* i18n ([e9f90a4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e9f90a4d82edbb446aedaef7ae27984d21b870d4)) +* iframe bg ([3f9f556](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3f9f556e1c32c70ac34fd0ed8b3fd5fca73d539d)) +* import language issue ([916d764](https://github.com/oceanopen/ChatGPT-Next-Web/commit/916d764477611dc7043b7f29d610b8ffba72be0d)) +* import typing error ([cc86923](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cc86923fd578d3f9182df6d1cac86a6e97b30259)) +* improve scroll ([56ba8a6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/56ba8a65e06937b7ba1a072cb959e0d3626c32d7)) +* innerHTML may leads to script execution ([7ed8517](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7ed85177719f035367bbc186ddf7ee3684624647)) +* input-range style ([f3ab6b2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f3ab6b27c97bb0c876b544530ac961458334227b)) +* layout styles ([7a1bcac](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7a1bcac8bf87e2e1db019b6cfc4533c74bb8b61a)) +* light theme code highlight 
([6155a19](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6155a190ac91ea66a444b605567e3ee49b6ffced)) +* locales => Locale ([8cb72d8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8cb72d84526343146eca3a0f608b111d884321f1)) +* **locales:** type error in pt.ts ([6527074](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6527074cdea14d1ba0506e611a300ea3d155acfb)) +* Logical corrections & syntax errors ([e33ad07](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e33ad07e1684977ea0cca5642d3928aa9d7696f9)) +* make env PROXY_URL avaliable in Docker container. ([bf3bc3c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bf3bc3c7e92c58d19886343cec14f859ae4d95b3)) +* mask download not working ([ba0753c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ba0753c4180447f53d0bb5657169053c4151d4f1)) +* mask json ([d6089e6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d6089e6309c27af7f84d3cf5510fb68574cde2e1)) +* math overflow styles ([5f7856c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5f7856cc31982fa2feadf7209dd19dba57157da6)) +* MessageSelectorWarning ([10ea9bf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/10ea9bf1e39d982fce208da2925200ec88371409)) +* middleware match error ([53e30e2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/53e30e20db87f6e1a295e392c4a483b48b8246bd)) +* migrate modelConfig state ([f4c99c9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f4c99c9cf719ebedad984f07b6dd7a141427aca0)) +* migrated mask object key ([a4d0128](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a4d012828c55b308e2ea0c57a250d0ac3709d02c)) +* minor fix ([1c017b8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1c017b8ee9119f0829fe59fc8ef0867d47ab21d8)) +* minor fix ([48dc2c2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/48dc2c2295662677a8b3ce6e5d238c68b42f7fd2)) +* mobile scroll problem ([1afca0b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1afca0b28acb0f4e9ea60809355be8897c779e11)) +* mobile textarea 
autofocus ui error ([71f119c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/71f119c9e8b545a27683f935c3ce36cf56c913ec)) +* model version field name ([5440116](https://github.com/oceanopen/ChatGPT-Next-Web/commit/54401162bd3a3141d37d245d95ae0c8de9756731)) +* navigation between settings and home ([2badfbd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2badfbd61932f5c495a734a9086d4c2e8e1e7d50)) +* new session should insert at top ([4f10b9a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4f10b9a60faa94ccb9de440e65a0310192496b29)) +* onfinish responseRes ([44fc5b5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/44fc5b5cbf44b7a362a916fbc3b1c3a34cc8e7cb)) +* persisted available models ard not be update after source code have been updated ([9e1e0a7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9e1e0a72521cc84ef74499195f3734850b9ccd13)) +* prevent title update on invalid message response ([e8581c8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e8581c8f3ce9d72296abec1c5a2c002e3679723c)) +* prevent users from setting a extremly short history that resulting in no content being sent for the title summary ([fa48ace](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fa48ace39badb237728188482550ae5bb8f0e47a)) +* proxy api request ([eec1dd6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eec1dd6448124830e82b59f51489f42b6d56e9fa)) +* raw.split is not a function ([725054c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/725054c7d569f4d7b63def9b3bd3bb1b70cd3bda)) +* remove corsFetch ([eebc334](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eebc334e02e9f5d9f83203c97fbf4622a9141d0a)) +* remove scroll anchor height ([45bf2c3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/45bf2c3d2590b7c6ae43ebeaaffd13e4c489ca72)) +* remove slection range when user blured ([21aa015](https://github.com/oceanopen/ChatGPT-Next-Web/commit/21aa015a79f909e5602f779c1ae50cb4d404710c)) +* remove the content-encoding header 
([dd6e799](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dd6e79922a455862d6ae5c0a9469680d5c9e1d90)) +* remove the content-encoding header ([bf84269](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bf8426952079e9a116ad0e273c73612f0fa00e77)) +* remove the visual model judgment method that checks if the model name contains 'preview' from the openai api to prevent models like o1-preview from being classified as visual models ([6bb01bc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6bb01bc5643bc69c0f068dd5c051312937491217)) +* replace '&' with concurrently for cross-platform compatibility ([04a4e1b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/04a4e1b39afb066fc2ce62cbdf93e793660357c4)) +* request errors ([8e4fc83](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8e4fc83d3bf58496e5ed791aa5b9f548f79c7dfd)) +* request timeout errors ([e2a4b3d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e2a4b3d45c8119b0921cb2f0e1ac7ec4eb08fe2a)) +* resolve hydration error ([fb06fb8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb06fb8c38f8578597e28c861824ad5e0004c34e)) +* Resolve markdown link issue ([aeda752](https://github.com/oceanopen/ChatGPT-Next-Web/commit/aeda7520fea361474c2177539d203a75226af358)) +* return bearer header when using openai ([19137b7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/19137b79bcf17d1b1be01740dd5ed0238c784680)) +* revert gpt-4-turbo-preview detection ([5df8b1d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5df8b1d183ffc657b44f51d280d994da672f1103)) +* right click ([19c7a84](https://github.com/oceanopen/ChatGPT-Next-Web/commit/19c7a84548b55aa348e009611d4ac766e6b23af0)) +* row count logic ([a80dcaa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a80dcaa1c37db621bac15dad198e3054cbae6af1)) +* safaLocalStorage ([992c3a5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/992c3a5d3a9fd08ecc46a26d12b91f9a1fd87c1a)) +* scroll ux on ios device 
([a2baad9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a2baad9c7fb731f80329d94b3eddd81de88ad934)) +* **scroll:** scroll after click submit button ([a2807c9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a2807c9815d88febad341e23b55f553e73234c27)) +* sd image preview modal size ([dd10301](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dd1030139bcc0108ca1dceec1bb2baafb0da776b)) +* sd mobile ([6b98b14](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6b98b141791ba2c8b3fd1861ba001280b6f5be23)) +* sd3 model default select ([74b915a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/74b915a7907f8f6964b3adbd068f65182d0f3409)) +* selector css ([ebaeb5a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ebaeb5a0d5cb2fa514b2529b015ce7c99f13de15)) +* selector z-index bug ([c10e838](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c10e8382a9a5530e12e0ba14471bc4e81b3145fd)) +* send button covering the text in the textarea ([c47e900](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c47e90004ad7b34f8d70b9bb8cf1cac17219d23c)) +* set openWhenHidden to be true ([94a2104](https://github.com/oceanopen/ChatGPT-Next-Web/commit/94a2104b55a71529feb852670abce83fc5175083)) +* sharegpt roles ([d275e32](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d275e32e70db5a1747593d8f5b9c52e0ab6c9083)) +* should not tight border in desktop app ([eae7d62](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eae7d6260f6d0968a59a07576bd86937b12a673a)) +* shouldstream is not depend on iso1 ([d0dce65](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d0dce654bffead1971600a5feb313d9079800254)) +* show Loading Icon when checking repo update ([96545bd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/96545bd523aa430c77a1133ae15871ef09f94a89)) +* show Vitenamese in it's own language ([a0e192b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a0e192b6e4ed5717e8bcd9ec787a012c73e0a9e2)) +* sidebar style 
([9961b51](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9961b513cc0bfa1db8e1865af4099fdd9b78c15d)) +* sidebar title style ([99f3160](https://github.com/oceanopen/ChatGPT-Next-Web/commit/99f3160aa26dde331999c0397547ae154e60d7c8)) +* solve navigator undefined && merge from main ([00d45e7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/00d45e7cc43c0bd2025cad9fd9e954a57487f888)) +* style typo error in home.module.scss ([c1cc3d1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c1cc3d1d1fbf176e118db780ec0e4789e96a7388)) +* styles and mobile ux ([653a740](https://github.com/oceanopen/ChatGPT-Next-Web/commit/653a740f0f76a50769e19da13538dee3b9d7ffbd)) +* styles and store version number ([76db385](https://github.com/oceanopen/ChatGPT-Next-Web/commit/76db385d6dd8ce74e2faac864de6ef5258fae9d8)) +* styles on .user-prompt-buttons ([4a82a91](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4a82a91f2d3370785cbeeae76c6b0ddded1f1147)) +* support custom api endpoint ([#4016](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4016)) ([b8f0822](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b8f0822214b2f957e55fc6a4b4f404d2685c2735)) +* taskbar color follow([#54](https://github.com/oceanopen/ChatGPT-Next-Web/issues/54)) ([cd73c3a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cd73c3a7cb062e59bfee60f421be8a3a508bf286)) +* tauri auto updater url ([1e8d476](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1e8d4763bb8470b65f61c4716b3ded351332d3be)) +* temperature -> top_p ([0a2af93](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0a2af9335c9cc465fda02e5e371adef3fbf2f28a)) +* tencent InvalidParameter error ([f900283](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f900283b0975de9d88270244e3131d6d0f188eeb)) +* tencent InvalidParameter error ([d7e2ee6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d7e2ee63d87bb713231e11d3ff2dabb3b1904e0c)) +* the display format of json 
([6bbdaf7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6bbdaf7ab0499dbb8554173b175caf440f2b424d)) +* the position of top-action buttons ([846e323](https://github.com/oceanopen/ChatGPT-Next-Web/commit/846e3238404fef87175ad3533385bb59a2ef8afb)) +* the theme-color selector ([170936a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/170936a96ef9b59ac8a95d0201c34ef6d9438644)) +* The width of the sidebar changes abruptly by dragging it multiple times over and over again (bouncing) ([48e6087](https://github.com/oceanopen/ChatGPT-Next-Web/commit/48e6087b1be1562c50de3b4aa648445df5510539)) +* tight border on mobile style ([1b140a1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1b140a1ed33f2fa35dbd4551563a29cb6aa9d155)) +* transcription headers ([318e098](https://github.com/oceanopen/ChatGPT-Next-Web/commit/318e0989a2c28ae323d3f00d8256a7e48169e4a6)) +* try catch errors ([ca679e8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ca679e86b4d26ff30a6ea56387956402fdbe8c1c)) +* ts error ([88cd3ac](https://github.com/oceanopen/ChatGPT-Next-Web/commit/88cd3ac122cfe0a93f3c87e441d9e1e59c8bfb33)) +* ts error ([4988d2e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4988d2ee26f5cd65b128dae8924942c54a9da3ee)) +* ts error ([4d75b23](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4d75b23ed1b41a042e28805e46ad2b5c8111cc3d)) +* ts error ([8ac9141](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8ac9141a29b049a851c51ea3c65f08d18cfd8ce6)) +* ts error ([45306bb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/45306bbb6c3574d93438abca0f79fc493ece21df)) +* ts error ([72d6f97](https://github.com/oceanopen/ChatGPT-Next-Web/commit/72d6f970241e9390c9c4027f49718ff8afe593dd)) +* ts type ([7237d33](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7237d33be38c1b51fc867d047e9f599429bd8eec)) +* type error ([7804182](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7804182d0d027f630497c911652cd877ea0cc30a)) +* typescript error 
([4b9697e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4b9697e3365ab4bcfdd6f51a4b461088c7a4b8f9)) +* typo ([2f2aefd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2f2aefd48ec77e51bd7d230f9bcd466860918a48)) +* typo ([1761289](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1761289716aba1e6c6745d7e313dd837e463b4ee)) +* typo ([79f58f5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/79f58f5c6ad61e321c24c039e8e17607bd8d0397)) +* typo ([bd85d9a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bd85d9a36a4ce22ea708d393b634997edaec558a)) +* typo ([#3871](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3871)) ([b25a054](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b25a0545f5b348bbef81f4fe8d41695c3fc10d94)) +* typo IMPRTANT -> IMPORTANT ([b357e2e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b357e2ecef6f42c4cec433ec20a0dea3c73072c0)) +* typo PresencePenlty -> PresencePenalty ([44874fb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/44874fb5e0307f46fdd7a2260d635ede7901a713)) +* typo reqestTimeoutId -> requestTimeoutId ([06534fa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/06534fa0aee6ce92ea8fefb26ecf3dc4dec2d3e0)) +* **typo:** ngnix -> nginx ([cf4f928](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cf4f928b256a800e84778feb98dd2794d1e8cb80)) +* typos ([dd047fd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dd047fd58f055ef6573773a7b818a26609cba957)) +* update google url description ([#3719](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3719)) ([eade013](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eade013138d1555027615aea05fe051971e3ec13)) +* update package version ([27828d9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/27828d9ca86112c1d179c906197ed3158c9a9f68)) +* update yarn.lock file ([166329a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/166329abeed6ce01932c391c2e13ba793556f606)) +* updateCurrentSession => updateTargetSession 
([c4e19db](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c4e19dbc59e59b71c81cf33600f7a2be235b0ccc)) +* updating the array using push in zustand does not actually trigger component updates ([1d42e95](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1d42e955fc60365477ec9e4b38077dc5c6676924)) +* updating the array using push in zustand does not actually trigger component updates ([e636d48](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e636d486f5b08fd14473f31b6c9e33ad92fe723a)) +* uploading loading ([f2d2622](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f2d2622172fa8b081f5e44f7c3655ffcb4969ed6)) +* use current session id to trigger rerender ([1d14a99](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1d14a991eedb17a492d6e840de71567c8a6884a7)) +* use Select component ([99317f7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/99317f759bf3eba58b89264049236c331fbced16)) +* use tauri fetch ([deb1e76](https://github.com/oceanopen/ChatGPT-Next-Web/commit/deb1e76c41ec156450db10872f88f84d2865d450)) +* useAccessStore filter spaces ([322eb66](https://github.com/oceanopen/ChatGPT-Next-Web/commit/322eb66fdf6a342e615b1d648a141b111a428207)) +* useEffect hooks ([e5b4cb2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e5b4cb28fef9f7e1f9b130ed0c0a9ca07927adb8)) +* **utils:** 修复复制问题 ([0af5536](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0af55366f1443e66ad1e74852af9ee0ebaf47165)) +* validate the url to avoid SSRF ([9fb8fbc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9fb8fbcc65c29c74473a13715c05725e2b49065d)) +* vision model dalle3 ([2d3f7c9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2d3f7c922f5a3e52da30f45b67a74f0df908e147)) +* webdav check httpcode list ([86f42d5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/86f42d56f28b725006e60dbf2ae875917feb3a3f)) +* Width changes abruptly when dragging the sidebar (jumps) 
([3687016](https://github.com/oceanopen/ChatGPT-Next-Web/commit/368701610f039241eeb0fda27db28803b607527e)) +* wont show auth popup when receiving a 401 http code ([600b181](https://github.com/oceanopen/ChatGPT-Next-Web/commit/600b1814a1b982e6faca151afff0518b15884c79)) + + +### Features + +* (1) fix issues/4335 and issues/4518 ([fb8b8d2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb8b8d28da3174e134dc2551f1a97f2fdab27d1d)) +* [[#5714](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5714)] 支持GLM ([d357b45](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d357b45e84eb773c2e0c142d0d849c4f20be2975)) +* [#1000](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1000) client-side only and desktop app ([a023308](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a023308d52a14e1d43d51558dec61dc1253064cc)) +* [#1000](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1000) ready to support client-side only ([50cd33d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/50cd33dbb2992066dae8c51c2da6ef4781e4500a)) +* [#112](https://github.com/oceanopen/ChatGPT-Next-Web/issues/112) add edit chat title ([45088a3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/45088a3e0658beac56251ff2d4cebc8dc2c5becc)) +* [#1303](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1303) improve long text input ux and mobile modal ([1b19fdf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1b19fdfe11ecf33ff881593df1cbd7bdd27ae275)) +* [#138](https://github.com/oceanopen/ChatGPT-Next-Web/issues/138) add context prompt, close [#330](https://github.com/oceanopen/ChatGPT-Next-Web/issues/330) [#321](https://github.com/oceanopen/ChatGPT-Next-Web/issues/321) ([b85245e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b85245e317d7fc2f48dacb9a1d65eef034502cb4)) +* [#1640](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1640) support free gpt endpoint ([203ac09](https://github.com/oceanopen/ChatGPT-Next-Web/commit/203ac0970d7af35d4ae92dc93776b76cf74182aa)) +* 
[#170](https://github.com/oceanopen/ChatGPT-Next-Web/issues/170) auto scroll after retrying ([08f3c70](https://github.com/oceanopen/ChatGPT-Next-Web/commit/08f3c7026d07bcf28d278dd482d6ac30b8fe3fe4)) +* [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) [#8](https://github.com/oceanopen/ChatGPT-Next-Web/issues/8) add stop and retry button ([86507fa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/86507fa569334a43c7d9e7b40add815c665eae4a)) +* [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) add access control by ([2c899cf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2c899cf00eb729cc4aad2a13a74d2cabea9e7200)) +* [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) add prompt hints ([6782e65](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6782e65fdf6ea7f79ead3c6907eacf110d097402)) +* [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) add prompt list ([7d5e742](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7d5e742ea61be97da4d4cff9ca69528ee171d216)) +* [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) trying to add stop response button ([806e7b0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/806e7b09c1460691171679e4f4a8cfc40e024823)) +* [#2144](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2144) improve input template ([5f0cda8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5f0cda829f91fa1e2ff3b02825fa233c97e1b944)) +* [#2308](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2308) improve chat actions ux ([b55b01c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b55b01cb13ac3ab96d0c621c94b2968424825d2f)) +* [#2330](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2330) disable /list/models ([15e063e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/15e063e1b5202ba0e1f9784fb584ec150e5b5240)) +* [#24](https://github.com/oceanopen/ChatGPT-Next-Web/issues/24) docker publish actions 
([0463b35](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0463b350d843af268d6738987d3a8a4e8f4db030)) +* [#27](https://github.com/oceanopen/ChatGPT-Next-Web/issues/27) add docker image publish actions ([cc1a1d4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cc1a1d4f3ca95398d5b50f1cac299ef5d39dbe52)) +* [#3224](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3224) auto switch to first avaliable model ([be97749](https://github.com/oceanopen/ChatGPT-Next-Web/commit/be9774943bc17e30111ccf6ec1eb8242e61f3fa1)) +* [#499](https://github.com/oceanopen/ChatGPT-Next-Web/issues/499) revert delete session ([5952064](https://github.com/oceanopen/ChatGPT-Next-Web/commit/595206436231727659fde77239b6c1e668b0d879)) +* [#577](https://github.com/oceanopen/ChatGPT-Next-Web/issues/577) maximum / minimium icon ([eae5a8a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/eae5a8a2e6722d233856d0a2958e2f894c0202d2)) +* [#9](https://github.com/oceanopen/ChatGPT-Next-Web/issues/9) add copy code button ([e57bd51](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e57bd5180939f4f134c5a3fb47db7f7203ad6f4a)) +* [#920](https://github.com/oceanopen/ChatGPT-Next-Web/issues/920) migrate id to nanoid ([8e4743e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8e4743e7191f59b72496c9dbdae3b580c2b37d24)) +* [WIP] support webdav ([1dd75b6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1dd75b63de6745ece9de5df57663af751698c82d)) +* #close 1789 add user input template ([be597a5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/be597a551d38dc83f0febb3a4c6a424e128555b1)) +* 1. using cache storage store image data; 2. 
get base64image before chat to api [#5013](https://github.com/oceanopen/ChatGPT-Next-Web/issues/5013) ([287fa0a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/287fa0a39cf07767630a9cd744f32929357a9aee)) +* 1)upload image with type 'heic' 2)change the empty message to ';' for models 3) ([c10447d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c10447df79b6e3300f65885f94472e435f53c03f)) +* 补充文档 ([7fcfbc3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7fcfbc372921e85fb957dbe6cab35843d54a3872)) +* 翻译InjectSystemPrompts配置项为其他语言 ([f59235b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f59235bd5ac49d1da28e87ed678c7c0f0a6a90a9)) +* 去掉不必要的文件 ([7139671](https://github.com/oceanopen/ChatGPT-Next-Web/commit/71396717d2c6109df48baabc5d54985e09baeb7f)) +* 全局设置是否启用artifacts ([6c8143b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6c8143b7de54724ce8e7e3d1d40bd2052cce25e3)) +* 新增阿里系模型代码配置 ([86ffa1e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/86ffa1e6430b0a34893665bb284130c1f144e399)) +* 暂不支持 tauri ([b2303d8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b2303d81e1480626c90f997c49cd4db0f87994ea)) +* 中文版锚点 ([28cedb1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/28cedb14935384af0feb1cffdcf6eafbc917dd4c)) +* add "Hide_Balance_Query" environment variable ([c05de45](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c05de45d9918fe7be4b2afb9070c706e951f3df4)) +* add analytics ([76f851b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/76f851bfa6ab3d5f9c310eaa78ae0e82ef938b2b)) +* add app dev mode ([80d5bfd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/80d5bfd7c0cffaebe0eb7dd1790babb186447344)) +* add app logo and ([6264c02](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6264c02543cfbcda249e3a848cdeecc230157b4b)) +* add auth tip ([e4fda6c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e4fda6cacfbaf863dcd54dcbabcb0d93088019f7)) +* add autoGenerateTitle option 
([20a508e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/20a508e2d6e16252e44f6a9cbb07dd5c195b6fc3)) +* add basic ui ([d49b2aa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d49b2aa2c312306573ba18b1950de5267f0ee98e)) +* add calcTextareaHeight.js from element-ui ([bce020f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bce020fc8e701dfcb229d26581e6c03e0ed308c3)) +* add calcTextareaHeight.ts ([de740ec](https://github.com/oceanopen/ChatGPT-Next-Web/commit/de740ec57fe9c4b11c8b81396c1ef00906aa5bc7)) +* add check update ([29de957](https://github.com/oceanopen/ChatGPT-Next-Web/commit/29de95739511cadffbe147fdbbeaea898344598e)) +* add claude and bard ([cdf0311](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cdf0311d270d5808efca2c9ba07c593a7ec57d41)) +* add confirm tips when deleting conversation on pc ([4dc1e02](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4dc1e025e1eba7eb2dd9153897774ea7dd44eb8c)) +* add dark theme support ([14d50f1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/14d50f116774bb134f628a86f72a9663c65cbc22)) +* add docker proxy ([c8be5e4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c8be5e42671e534e3382db67f80560442086772c)) +* add Dockerfile for docker deployment support ([8d0d087](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8d0d08725d9bed14f6aea3cb17923ee24f4cac40)) +* add error tip ([065f015](https://github.com/oceanopen/ChatGPT-Next-Web/commit/065f015f7b87b65f522c913f95958c4f3392b97d)) +* add export to .md button ([bab470d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bab470d000c2bb18df3d2d1c8b43f110b5a3c9b6)) +* add favicon ([9912762](https://github.com/oceanopen/ChatGPT-Next-Web/commit/99127621575f7231c30c5f18e716ae22a846f64c)) +* add font size setting ([f979822](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f979822508b3289237383082a72471f8aa5b0939)) +* Add frequency_penalty request parameter 
([72cbb15](https://github.com/oceanopen/ChatGPT-Next-Web/commit/72cbb156ae78d2390a4238c5e71b6d0b8850e27f)) +* add gemini flash into vision model list ([4789a7f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4789a7f6a93cb7c271755a201d04523de246bbec)) +* add getClientApi method ([5e0657c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5e0657ce556bbf04cce22bb451ff9349def6b04b)) +* add google api safety setting ([7498680](https://github.com/oceanopen/ChatGPT-Next-Web/commit/74986803db5241392f4044e9493661113e955ee0)) +* add i18n for mask ([c7c58ef](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c7c58ef0317c9823af28935a5b3a3a1d5b471e39)) +* add indexDB ([492b55c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/492b55c8939593f9eddef084f99e14a5d4a5033b)) +* add indexDB ([4060e36](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4060e367ad90be23b9a94c241e2251d952520ea4)) +* add lint-staged ([e648a59](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e648a59b1f6babd49e41256b81bd728f0ef91112)) +* add lodash-es ([a17df03](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a17df037afadcf7b61264686e9993555eed44e6f)) +* add log ([49fc752](https://github.com/oceanopen/ChatGPT-Next-Web/commit/49fc75235a568ce44af735838ae4028e632689fa)) +* add mask crud ([a7a8aad](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a7a8aad9bc584f3bac0aa27eb8d295381939995b)) +* add mask page ([ffa7302](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ffa73025716774b88c685ef21c6a2e6d137b597f)) +* add mask screen ([aeb9862](https://github.com/oceanopen/ChatGPT-Next-Web/commit/aeb986243c2460792ab4605d4fba223f6d8f98ab)) +* add max icon for modals ([9e6617e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9e6617e3ca251260943ce0ebc15f2fff1022df26)) +* add mobile support ([1fae774](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1fae774bb26aa0f7f516a6d5eddfc74081a0e710)) +* add model and time info to prompts 
([cb55ce0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cb55ce084ce86cc71db12aa4e4e1f82f696e27af)) +* add model config to settings ([2f112ec](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2f112ecc54ca330de42c3996f12ea9b7b406055f)) +* add model name ([fb5fc13](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb5fc13f720da18ee808d493f7198bcae5259d79)) +* add multi-model support ([5610f42](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5610f423d06a735737f833cbba7b182687f37ef8)) +* add nynorsk option ([113bd24](https://github.com/oceanopen/ChatGPT-Next-Web/commit/113bd24796eba568c7263843cc9365d02f65747f)) +* add o1 model ([71df415](https://github.com/oceanopen/ChatGPT-Next-Web/commit/71df415b140fec2b2754fd4cf99a38a6f38dacc2)) +* add one-key setup script ([3b6f93a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3b6f93afdf79368de81181aafc75a960613ed21a)) +* add optimize textarea height when inputing ([2a79d35](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2a79d356674236e4b4e345ae02236d52fd609f73)) +* add plugin entry selection ([fa6ebad](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fa6ebadc7b78cb023dc15705207ce2d180298edf)) +* add proxy for docker ([1bb7b4a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1bb7b4a6536884eb2eb9826a2a40751e224bb0b3)) +* add PWA support ([5593c06](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5593c067c4a576f35ff11e40c2c0e98352a74db5)) +* add PWA support ([689b7ba](https://github.com/oceanopen/ChatGPT-Next-Web/commit/689b7bab2692a9fe0271d1392819ba9b7ff59239)) +* add PWA support ([bdf17fa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bdf17fafff02733c76b8977b2c19bb87db0864d1)) +* add robots.txt ([306f085](https://github.com/oceanopen/ChatGPT-Next-Web/commit/306f0850e925bd75201085341eb6700dac8a4ca2)) +* add SD page switching ([d214811](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d21481173e5c8eeb89024216acb164930ba31175)) +* add session config modal 
([7345639](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7345639af33aede885afe6828a0969cf1f9a4a2d)) +* add setting to hide builtin masks ([74fa065](https://github.com/oceanopen/ChatGPT-Next-Web/commit/74fa065266687921e83446358018d7d84ab6fd78)) +* add settings ui ([a9940cb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a9940cb05e74f5fea50509511441654538a8118b)) +* add shortcut key ([f219515](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f2195154f6a94e3ac324465c1adc6150180a186e)) +* add side bar mask entry ([59edcc3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/59edcc3e2e5c95cce0961f90d1337bb7b3bbfba8)) +* Add Stability API server relay sending ([2b01538](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2b0153807cf6294ea7b8bce9f2f4b58a71c94be4)) +* add stream support ([74dd619](https://github.com/oceanopen/ChatGPT-Next-Web/commit/74dd6194d8048cb9264f47f2a2b2a7790112c599)) +* add support for iFLYTEK Spark API (接入讯飞星火模型) ([b2c1644](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b2c1644d69929ce4073d458b0eb4cf7d416e22ed)) +* add switch of send preview bubble ([1db2100](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1db210097c431fa460aea5b8a1bb697fb0f2db6d)) +* add tight border layout ([ff0cf2f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ff0cf2f9dc9c013f1bf88798e4a2a0210821007f)) +* add top p config ([8230326](https://github.com/oceanopen/ChatGPT-Next-Web/commit/823032617dfd9928544f38c928085b9b41ba8691)) +* add tts stt ([2f410fc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2f410fc09f62e67c32ac6142e99937d3e8f29601)) +* add type for import ([442a529](https://github.com/oceanopen/ChatGPT-Next-Web/commit/442a529a725c0cf6a780c93f17b02f8742251558)) +* add typings for metadata ([e1243f3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e1243f3d5946d0ac385e35a0f9dd67b3361bfaea)) +* add upstash redis cloud sync 
([83fed42](https://github.com/oceanopen/ChatGPT-Next-Web/commit/83fed429971fcc758ada9af12d52a2936b537456)) +* add voice action ([f86b220](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f86b220c922a9209e99e2a3647e97ab72f47de3d)) +* add webdav support ([6f83fbd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6f83fbd21278c90cd978108abe54291c38ec10d7)) +* adding iOS Webapp support ([dd80c45](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dd80c4563ddc4b40232fb823405ddd1f31c8f4de)) +* align chat page title center on mobile screen ([82ec447](https://github.com/oceanopen/ChatGPT-Next-Web/commit/82ec4474c2fb37b1fc558e6006159a670017310f)) +* allow send image only ([7d55a6d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7d55a6d0e441bddaf9870c9adfa88f1f72c600a5)) +* allow to disable chunk building by setting DISABLE_CHUNK=1 ([463251d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/463251dcc1953a6d0565129320fdc0258c90c5f8)) +* animate streaming response to make more smooth ([536ace8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/536ace8e10553c6101308ec09f2fa65bc84d2416)) +* artifacts style ([c27ef6f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c27ef6ffbf94be6bab2f6ba7cc9237b1125627a2)) +* artifacts style ([21ef9a4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/21ef9a4567ec4f61a4d0db26f0e23815bb0f7924)) +* audio to message ([a494152](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a4941521d0973943bbd0abba86dc7295b444f2b5)) +* **auth:** xg feature ([1980f43](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1980f43b9f634f658be05fcf5601d461a5435029)) +* auto detach scrolling ([410a22d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/410a22dc634816b13848977d037506fbe2ad4957)) +* auto fill upstash backup name ([f1e7db6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f1e7db6a88611a62a6ef6446c768ab16bd943173)) +* better animation speed 
([f248593](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f2485931d9b3680234f4816f4526759c8d4b741e)) +* bugfix ([a1493bf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a1493bfb4e9efe0a2e12917ab861bbf2321dbd7d)) +* bump version ([b972a0d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b972a0d0817e612fe2a1cba398c338bcec7573e6)) +* bump version ([3a007e4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3a007e4f3d8d0ac7be8a8bf08f962101589b1e3c)) +* bump version ([aec3c5d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/aec3c5d6cc598282e1f35b9e1de5081190a9c378)) +* bump version ([aacd26c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/aacd26c7db7d87953d14f3c0cf841cf422a2e027)) +* bump version ([#4009](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4009)) ([887bec0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/887bec019a654aee647aad095b7db0ab34266589)) +* bump version ([#4015](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4015)) ([0869455](https://github.com/oceanopen/ChatGPT-Next-Web/commit/08694556128fa65688e69a77da723b2d986c5063)) +* bump version ([#4133](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4133)) ([bc1794f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bc1794fb4af5229c4811008ad247b57bbf091334)) +* bump version code ([b2e8a1e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b2e8a1eaa202c29378a83ef4d48ca5c39efc3689)) +* call claude api not in credential 'include' mode ([63f9063](https://github.com/oceanopen/ChatGPT-Next-Web/commit/63f9063255f150a53160d401e3965e4cff0a38eb)) +* call claude api not in credential 'include' mode ([6dad353](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6dad353e1c940b33c2a243b70b9a604af3a8f794)) +* check usage throttle ([fdc8278](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fdc8278b90cdcacc8859df4740752a58d8829d8b)) +* clean codes ([3cb4315](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3cb4315193d60ca0bd79aca49628045254967b01)) +* clear 
indexDB ([0b75894](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0b758941a4104ee6fdcb58431ac7ebc5c69f2323)) +* clear session only ([506cdbc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/506cdbc83c83feeabf6c427418ce04916bd3a8d6)) +* close [#1055](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1055) cmd/alt/ctrl + arrow up/down to switch window ([2b7f72d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2b7f72deec7dec5ccbe5583c10e81af7cf136808)) +* close [#1072](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1072) share mask as link ([5c8be2a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5c8be2a8f68d74ae1cb72c51beb5b0d46f73ea77)) +* close [#118](https://github.com/oceanopen/ChatGPT-Next-Web/issues/118) add stop all button ([dc3883e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dc3883ed1aa8bc4c7b25216f52774a4a860623e4)) +* close [#1301](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1301) support message actions ([2223013](https://github.com/oceanopen/ChatGPT-Next-Web/commit/222301307fd13ec2ed9828cc0dc4f8b2e309c0d6)) +* close [#1382](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1382) only clear memory btn in chat config ([05b1b8b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/05b1b8b2407b41f3c4ee3dc75bee030b603a4489)) +* close [#1415](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1415) clear context button ([a19d238](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a19d23848314e8539b40d9fb26544777d53d17df)) +* close [#1478](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1478) new chat use global config as default ([b1ba3df](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b1ba3df989781b557f8963a83a5c285b65ef8ecc)) +* close [#1615](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1615) pin messages to contextual prompts ([7893693](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7893693706af25227770f53555085f98b00afb06)) +* close 
[#1626](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1626) hide context prompts in mask config ([6d8c7ba](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6d8c7ba1403248e1d3c01515d58824bff74bc826)) +* close [#1762](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1762) add hover text for chat input actions ([88df4a2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/88df4a2223beb86d8c9d4fe0285732152f0b372a)) +* close [#1960](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1960) add gpt-3.5-turbo-16k-0613 ([8590750](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8590750e4c883a79d9462f23fd21b32b13ab4c04)) +* close [#1994](https://github.com/oceanopen/ChatGPT-Next-Web/issues/1994) add clipboard write api ([98afd55](https://github.com/oceanopen/ChatGPT-Next-Web/commit/98afd5516b697d3a8cafe12e9aeac09aba79e45c)) +* close [#2](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2) add check account balance ([447dec9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/447dec9444c61f6caf23008a17bd7ad5e2e445c5)) +* close [#2013](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2013) add switch model button to chat actions ([5d06fa2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5d06fa217cfde63439ef2c5b09cc71703e7d7f90)) +* close [#2025](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2025) just use a smaller to-bottom threshold ([93c666b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/93c666b03d9aae799290fca28a75d73c1e185511)) +* close [#2136](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2136) click avatar to edit message ([b044e27](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b044e274aa0ae8eb450042cfe31be2f201c8a754)) +* close [#2141](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2141) danger zone ([6c3d4a1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6c3d4a11cc703a6f3c50b74ccfaaa7f4ce76cd97)) +* close [#2175](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2175) use default api host 
if endpoint is empty ([0140f77](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0140f771c6a23256bf171b3edcf2f7fd810b5794)) +* close [#2187](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2187) improve chat actions ux ([3937dad](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3937dad6a6a8d9c4cc2c4a7a23705eb1931332d6)) +* close [#2190](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2190) improve app auto updater ([be48346](https://github.com/oceanopen/ChatGPT-Next-Web/commit/be4834688d635ac29c0e1a98a48eab7aab4ecbe4)) +* close [#2192](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2192) use /list/models to get model ids ([4131fcc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4131fccbe0c77832aa496825e9362a78797234ad)) +* close [#2194](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2194) add macos arm support ([475158a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/475158a145246a715925eebaf6bac1f25f93aedd)) +* close [#2266](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2266) use modal to switch model ([0373b2c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0373b2c9dd646c288e7027fcd3e93a9fecf94658)) +* close [#2267](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2267) display a modal to export image ([6c6a2d0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6c6a2d08db4b8f74ded430c93125ffbc8f1d0eaf)) +* close [#2294](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2294) add documents for adding a new translation ([6014b76](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6014b765f4d42585cd91d07887cc27fd64ae2880)) +* close [#2303](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2303) add custom model name config ([98ac7ee](https://github.com/oceanopen/ChatGPT-Next-Web/commit/98ac7ee277b17a60f8d4926e26887ba72926ff37)) +* close [#2376](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2376) add babel polyfill 
([af5f67d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/af5f67d459185c77d1edefec4fe06bc36dd06e6a)) +* close [#2430](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2430) add a simple user manual ([4904612](https://github.com/oceanopen/ChatGPT-Next-Web/commit/49046125235d11f85ee0dc81f2424f2cde91f1eb)) +* close [#2445](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2445) switch to mit license ([0198c5b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0198c5b7811fff550f0c0014e4781f3c94dd0ebc)) +* close [#2447](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2447) pre-fill key/code/url ([e5f6133](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e5f6133127894b68498de0a4d38741bccdba68f1)) +* close [#2449](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2449) edit / insert / delete messages modal ([7c2fa9f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7c2fa9f8a4c9b04d534e9bea946fa3e909369240)) +* close [#2545](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2545) improve lazy load message list ([203067c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/203067c936b6f2e3375ee79041c33dafacfc0653)) +* close [#2580](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2580) only use 3.5 to summarize when not using custom models ([3bd76b9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3bd76b9156627116b8bbcf038e08e35d84438447)) +* close [#2583](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2583) do not summarize with gpt-4 ([769c2f9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/769c2f9f49b1fd0d0e8e30b3bf579805c6259b7b)) +* close [#2618](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2618) use correct html lang attr ([e8e01aa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e8e01aa60d559fb7654b0f5e9521aa637e3d0b22)) +* close [#2621](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2621) use better default api url 
([ae82269](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ae8226907ff03100cafd45ba5d648d2a62f77fef)) +* close [#2638](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2638) hide auth page and use better unauth tips ([ed62c87](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ed62c871567e9c5781f742932b0e0521833cded0)) +* close [#2752](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2752) auto re-fill unfinished input ([885f2a3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/885f2a32260b93adfbf58818913ba25ddac28d94)) +* close [#2754](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2754) add import/export to file ([38f6956](https://github.com/oceanopen/ChatGPT-Next-Web/commit/38f6956e71a3d582b24e67ee93d263fcc7367725)) +* close [#2848](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2848) click drag icon to toggle sidebar width ([d713d01](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d713d016000e09f245fc9496bd9864293aaa95c0)) +* close [#2908](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2908) allow to disable parse settings from link ([c5ca278](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c5ca278253456c7d65bcd877b0ca40da0b8026eb)) +* close [#291](https://github.com/oceanopen/ChatGPT-Next-Web/issues/291) gpt-4 model uses black icon ([7e8973c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7e8973c9ffba853b46ea9d795b1a05e81828ed37)) +* close [#2954](https://github.com/oceanopen/ChatGPT-Next-Web/issues/2954) chat summary should be copyable ([8c0ba1a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8c0ba1aee24f2f076c48e89a5e872466684afc85)) +* close [#3031](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3031) user can set larger font size ([65c4a0c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/65c4a0c319c2264dcd5236d944fe7f541ef16441)) +* close [#3187](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3187) use CUSTOM_MODELS to control model list 
([d93f05f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d93f05f51163488525b3957bedfa0ed8a6167b8c)) +* close [#3222](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3222) share message list should start from clear context index ([be6d45e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/be6d45e49f1df90daba4625117b95903189891c2)) +* close [#3300](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3300) support multiple api keys ([6aade62](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6aade62ce2f131caeaefc18689fea502ec1a3966)) +* close [#3301](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3301) enable or disable default models with -all / +all ([dc7159a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/dc7159a4504682f6bfad104d5d03168412c550f1)) +* close [#3304](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3304) use `=` instead of `:` to map model name in CUSTOM_MODELS ([45b88eb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/45b88ebb2a720c62d60e63a873004d3cd9734801)) +* close [#380](https://github.com/oceanopen/ChatGPT-Next-Web/issues/380) collapse side bar ([82ad057](https://github.com/oceanopen/ChatGPT-Next-Web/commit/82ad0573be93b0ee43f9cc52b865ea8878988dfa)) +* close [#427](https://github.com/oceanopen/ChatGPT-Next-Web/issues/427) add OPENAI_ORG_ID ([e3d2dd7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e3d2dd72794aa3d2b63c477231d54b0df62111e6)) +* close [#444](https://github.com/oceanopen/ChatGPT-Next-Web/issues/444) use env var to disable gpt-4 ([7e8def5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7e8def50aa9e4c95464a21b021a707a0ccd28ec3)) +* close [#469](https://github.com/oceanopen/ChatGPT-Next-Web/issues/469) support reset session and do not send memory ([c2b37f8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c2b37f811bcfb41001dab787f11e493aba45b9a3)) +* close [#539](https://github.com/oceanopen/ChatGPT-Next-Web/issues/539) add delete message button 
([12d4081](https://github.com/oceanopen/ChatGPT-Next-Web/commit/12d4081311f22ee2b9de30292b1be8aa5c69e6dd)) +* close [#580](https://github.com/oceanopen/ChatGPT-Next-Web/issues/580) export messages as image ([4dad7f2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4dad7f2ab621eaea55a841fbb41d2d4775c4f78f)) +* close [#628](https://github.com/oceanopen/ChatGPT-Next-Web/issues/628) add chat commands ([ae1ef32](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ae1ef3215b45ae373044f0cba370307279d1ff7a)) +* close [#663](https://github.com/oceanopen/ChatGPT-Next-Web/issues/663) allow disable user api key input ([074bd9f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/074bd9f045005d626a8c0aea686b45fca9c81150)) +* close [#680](https://github.com/oceanopen/ChatGPT-Next-Web/issues/680) lazy load markdown dom ([d790b0b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d790b0b372c0ff2236b24a57f83f9e59a8b76914)) +* close [#680](https://github.com/oceanopen/ChatGPT-Next-Web/issues/680) lazy rendering markdown ([8363cdd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8363cdd9faee5ad56e90586e51f582061d506404)) +* close [#741](https://github.com/oceanopen/ChatGPT-Next-Web/issues/741) add auth page ([ebbd012](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ebbd0128f17aa80e3685311c49f6553a1d71de20)) +* close [#782](https://github.com/oceanopen/ChatGPT-Next-Web/issues/782) select prompt with arrow down / up ([58eadd6](https://github.com/oceanopen/ChatGPT-Next-Web/commit/58eadd6d7bbcb31fa774d4ade75853bd4bb8ccc5)) +* close [#813](https://github.com/oceanopen/ChatGPT-Next-Web/issues/813) log user ip time ([bd69c8f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bd69c8f5dd90bef9290c20a321a638a949b929b5)) +* close [#864](https://github.com/oceanopen/ChatGPT-Next-Web/issues/864) improve long term history ([d75b7d4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d75b7d49b83362583a09884654bbbcd81f0f08ce)) +* close 
[#887](https://github.com/oceanopen/ChatGPT-Next-Web/issues/887) import masks ([596c9b1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/596c9b1d274d0d89ece5772b4c8efce233f1ab0d)) +* close [#928](https://github.com/oceanopen/ChatGPT-Next-Web/issues/928) summarize with gpt-3.5 ([06d5031](https://github.com/oceanopen/ChatGPT-Next-Web/commit/06d503152bcba1ad9175441709d7e5c3044eea0a)) +* close [#928](https://github.com/oceanopen/ChatGPT-Next-Web/issues/928) summarize with gpt3.5 ([209a727](https://github.com/oceanopen/ChatGPT-Next-Web/commit/209a727fe92d9dac8e33c49a83efef702c661a7e)) +* close [#935](https://github.com/oceanopen/ChatGPT-Next-Web/issues/935) add azure support ([b7ffca0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b7ffca031ebda555c373783820056541307ceba0)) +* close [#951](https://github.com/oceanopen/ChatGPT-Next-Web/issues/951) support mermaid ([d88da1f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d88da1f6ab0eb1d2ed5e8fb5686f6937f35c408e)) +* close [#976](https://github.com/oceanopen/ChatGPT-Next-Web/issues/976) esc to close modal ([e1ce1f2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e1ce1f2f4002abbb0cb86cf688957457e92afb90)) +* configs about app client ([ef7617d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ef7617d545417fe10b3094530a62c59694063d6b)) +* CUSTOM_MODELS support mapper ([a5a1f2e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a5a1f2e8ad781e0c82a6f775746286477d806545)) +* default disable balance query ([638fdd8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/638fdd8c3e48837d4f060cca5bc73241d9bd9071)) +* delete returned models in modals function of ClaudeApi instance ([0fbb560](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0fbb560e906f04e3bad1af43eba51a7e5b97e3ca)) +* disable auto focus on mobile screen ([3e63f6b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3e63f6ba345a2598e0d1e3ccf4feec9c4679ff18)) +* disable auto-scroll on ios device 
([caec012](https://github.com/oceanopen/ChatGPT-Next-Web/commit/caec01269afb06a015c2fa5e35655f0aafc1d100)) +* discovery icon ([fd441d9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fd441d9303b9d77bf21e9e93c19e31f4e36b3b7f)) +* display line break hints in enter mode ([974c455](https://github.com/oceanopen/ChatGPT-Next-Web/commit/974c455bf9a20f4595dcb30d03e7247a43688250)) +* **dnd:** add drag and drop feature ([301cbbf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/301cbbfdfbf5eed665756d9619ae6b5ad5a65e97)) +* drag and drop in contextual prompts ([fb98050](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb98050d9f8ea593377aa48bd2f612b212602d61)) +* dynamic config ([d6e6dd0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d6e6dd09f06ed2797bfe5b9ea4803213179bed97)) +* edit session title button ([47a2911](https://github.com/oceanopen/ChatGPT-Next-Web/commit/47a2911ee2cd229feb14967a10d02148d2ae2913)) +* enable drag area for tauri apps ([698be66](https://github.com/oceanopen/ChatGPT-Next-Web/commit/698be6671c0fe1c5b5c46c6fe9209191420b202e)) +* finish basic functions ([2c9baa4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2c9baa4e2c27d29b4d6dacd91e0c16efec92eb9a)) +* fix 1)the property named 'role' of the first message must be 'user' 2)if default summarize model 'gpt-3.5-turbo' is blocked, use currentModel instead 3)if apiurl&apikey set by location, useCustomConfig would be opened ([b3e856d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b3e856df1d0aa00038f0e4048c209ce0c7def444)) +* fix illegal exports in app/api/anthropic/[...path]/route.ts ([5446d8d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5446d8d4a2a71c7e983af1538b25ed4ca7192483)) +* fix no max_tokens in payload when calling openai vision model ([9b982b4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9b982b408d28fddbc90c2d3e3390653e4f2889b4)) +* fix system prompt 
([d508127](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d50812745211f6ef043a7fad8d50f3178e5a2290)) +* fix the logics of client joining webdav url ([fd8d0a1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fd8d0a1746adc3c337ba9bb9dcefe525d7a19d40)) +* fix the logics of client joining webdav url ([55d7014](https://github.com/oceanopen/ChatGPT-Next-Web/commit/55d70143018d6b285c1d7ae57fd16ceb27f815a2)) +* fix webdav 逻辑 ([ee15c14](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ee15c140499ca222bd1f5d08526de9f251c89374)) +* fix webdav 逻辑2 ([b72d7fb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b72d7fbeda8fa9cb8f020b1dea6188075a92a3bf)) +* googleApiKey & anthropicApiKey support setting multi-key ([864529c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/864529cbf61925f3b85cfa698613c766efd93436)) +* handle non-stream response ([5f2745c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5f2745c32a7c5735712e45636a2d19801dee0c55)) +* Hot keys: Escape to close settings, Up Arrow to get last input ([58b956f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/58b956f7cc6f8b93d5a84fa60f91cc347a6962f7)) +* i18n refactor and style adjustment ([fb32770](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fb327704866231748827d4b580851e18b797f49a)) +* import remarkBreaks plugin ([2c5420a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2c5420ab9e8cdb86bda5b14cbdae35d4653d054e)) +* improve auto scroll ux and edit model title ([b5ef552](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b5ef552c253bfc7e1a13b0a44ddea4d5a907deb3)) +* improve chat commands ([6caf791](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6caf79121b7fa28c02400b7d26c260628cc61058)) +* improve ChatAction ux ([cbabb93](https://github.com/oceanopen/ChatGPT-Next-Web/commit/cbabb9392c6a2f07235f9765061d7620391ec3ff)) +* improve components structure ([038e6df](https://github.com/oceanopen/ChatGPT-Next-Web/commit/038e6df8f0f553e33cdc37317cec0221b835fece)) +* improve dnd 
icon ([3ddedc9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3ddedc903e4e10f9d88cd31fadf39440712d741a)) +* improve mask ui ([717c123](https://github.com/oceanopen/ChatGPT-Next-Web/commit/717c123b82e5b20e27b1bd29849ba4f64bbd9b6b)) +* improve mask ui ([132f6c8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/132f6c842073d354f7c88fc06fa18d705a978717)) +* improve message item buttons style ([38c8ee8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/38c8ee8cd2b9689ad7499a99cc79edf2374c17dd)) +* improve mobile style ([64e331a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/64e331a3e3b7948f7da81437a573cc64d94293ba)) +* improve model selector ui ([48a6cdd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/48a6cdd50a0c2739989ffeef94c9584650187ad7)) +* Improve SD list data and API integration ([a16725a](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a16725ac178007ebbc7597f3e7bf8b93ec1940ea)) +* Improve setting.model selector ([84a7afc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/84a7afcd948743fa8c69b712d812ad6fbd73c5db)) +* improve svg viewer ([b718285](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b718285125879382aaa5fe6508b7809186f6b906)) +* Improve the data input and submission acquisition of SD parameter panel ([7fde932](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7fde9327a2ef0d9664276855f735ec6715063045)) +* jest ([1ef2aa3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1ef2aa35e910dcc587094909dc5ff114d2252c93)) +* just disable all ngnix buffer ([6410aa2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6410aa214e7bf05b1a5bd2d0157378c81d5c7088)) +* language support traditional chinese ([307be40](https://github.com/oceanopen/ChatGPT-Next-Web/commit/307be405ac2da05c3f0ac8da57d47d4107362a1e)) +* **mac:** add sign config, fix arm64 build ([#4008](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4008)) 
([d0463b2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d0463b2089cddbd828639220cb7d0c04cc8b7e5e)) +* manual refresh for title ([fc27441](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fc274415619f0714f11cd888f2069930a2b693a5)) +* migrate state from v1 to v2 ([30040a0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/30040a0366222cd63b12b2e66fa96bb43a66737e)) +* mobile chat overscroll-behavior none ([6d62ab4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6d62ab4257eaba1088e712d1798d32f7872c48bc)) +* modify some propmt in DEFAULT_INPUT_TEMPLATE about expressing latex ([02b0e79](https://github.com/oceanopen/ChatGPT-Next-Web/commit/02b0e79ba371e9de9da9095a288b902a3c8a4f0a)) +* move sd config to store ([82e6fd7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/82e6fd7bb5498e6bd758d927a30a9bfea6e6ba80)) +* new chat message actions style ([25ce6af](https://github.com/oceanopen/ChatGPT-Next-Web/commit/25ce6af36e141d5274adbf4258b70e0d5435a618)) +* new chat-item avatar ([a3ca8ea](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a3ca8ea5c458a8453c21095b65c88305125243ab)) +* new token count function ([76fdd04](https://github.com/oceanopen/ChatGPT-Next-Web/commit/76fdd047e7a9427dee18785d1cf60cc0e0999554)) +* now support gpt-4 model ([b57663b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b57663bf02d445fd100a82d0557cbd354506c0d8)) +* now user can choose their own summarize model ([93bc2f5](https://github.com/oceanopen/ChatGPT-Next-Web/commit/93bc2f5870976a17ce9deacd29816022f5036c52)) +* Optimize code ([df33139](https://github.com/oceanopen/ChatGPT-Next-Web/commit/df3313971dd3e66abcf7dafbabc48f1630add8d2)) +* Optimize document ([908ce3b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/908ce3bbd988c45dea10b552ede34cd051c99de5)) +* Optimize document ([4cb0655](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4cb0655192281765fea2ef73e6bd620a961d1f70)) +* optimize getHeaders 
([700b06f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/700b06f9c5cc396b54aacebe0741e7d23ba56266)) +* optimize getHeaders ([b58bbf8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b58bbf8eb49bcf2fc461f818097c853a1c0ac652)) +* optimize loading screen ([9398b34](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9398b34b5c248c3d6e7f2c79824e133fd666895d)) +* optimize usage display ([bb30fdf](https://github.com/oceanopen/ChatGPT-Next-Web/commit/bb30fdfa1735835b5d51b317d9e3ae0f0d52de30)) +* Optimize var names ([b175132](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b175132854e5710d6635f8f58b9a690cd04a66e1)) +* parse response message ([768decd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/768decde9370f6eecd83f65b6974b8af3a9cb792)) +* partial locale type ([50cfbaa](https://github.com/oceanopen/ChatGPT-Next-Web/commit/50cfbaaab535041e765473205137443f6bd3d9a1)) +* prevent browser to invoke basic auth popup ([c2b36cd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c2b36cdffaa0b418bc22588c637f5fcde6fc9ef5)) +* prod/dev env settings ([144eb68](https://github.com/oceanopen/ChatGPT-Next-Web/commit/144eb684143db99abb0cbe12263369844f76cb9d)) +* qwen ([9bdd37b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9bdd37bb631198f8c75b995b47ba87a1e6639c14)) +* reactive isMobileScreen ([55281ed](https://github.com/oceanopen/ChatGPT-Next-Web/commit/55281ed5485d67d1d8ae555114a828cc3bcc8f48)) +* realtime chat ui ([d544eea](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d544eead3818f69413de20c25c5f3578439b7a4d)) +* realtime config ([e44ebe3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e44ebe3f0eda9ab6f08dc6a58601e333dd46101b)) +* reduce first load js size from 500kb to 85kb ([ce5abac](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ce5abac9fbb35999c577ba20621433dcc22c276a)) +* remove debug code ([6cb296f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6cb296f952ec50b1ff56add63ee1989d96aae822)) +* remove duplicate Input 
Template ([264da67](https://github.com/oceanopen/ChatGPT-Next-Web/commit/264da6798ca74ca51290d9c1281ee324d9a8628e)) +* remove empty memoryPrompt in ChatMessages ([0aa807d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0aa807df190e1d08fc368a337e6d3651410c1993)) +* replace window.confirm with showConfirm ([3298961](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3298961748ec331669e8e34d8e33b585d439c032)) +* replace window.prompt with showPrompt ([ea6926c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/ea6926cad3de64173d39717444e42aad62c68d1a)) +* right-click to copy message to user input ([50b1f7d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/50b1f7db12fce10eeab7a83d25a9abe423f21809)) +* roles must alternate between user and assistant in claude, so add a fake assistant message between two user messages ([86b5c55](https://github.com/oceanopen/ChatGPT-Next-Web/commit/86b5c5585523c042a0a2ab451a5bfa50dd95872c)) +* run test before build ([1287e39](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1287e39cc65d1e0339ec39afbccd8d4526bee9d9)) +* scrollable mask lists in new-chat page ([f14b413](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f14b413b7c94a477ce3644953a3df2b4ace666bf)) +* scrollbar width change ([32d05c9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/32d05c9855ae1c3d6c049f3501ab57f1e9de0990)) +* scrolling effect when switching chat windows ([93c9974](https://github.com/oceanopen/ChatGPT-Next-Web/commit/93c9974019002b57d8184c23d70f68390be316c0)) +* sd setting ([3935c72](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3935c725c9741b5fda2b74ca79fef5c968983842)) +* session-level model config ([4cdb2f0](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4cdb2f0fa37c9e97dd4dafe490955a57a5940370)) +* settings command dev done ([15e5958](https://github.com/oceanopen/ChatGPT-Next-Web/commit/15e595837be45d0fa2f5a429840950345801b7f9)) +* setUserInput with onDoubleClickCapture in mobile phone 
([e68aaf2](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e68aaf24f13d5b4b6931bb1edd0b57ab67bbf9fa)) +* share to ShareGPT ([3e65ef3](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3e65ef3beaa6550763feb1f3bcb9d4e59c2f0a07)) +* Solve the problem of using openai interface protocol for user-defined claude model & add some famous webdav endpoints ([79f3424](https://github.com/oceanopen/ChatGPT-Next-Web/commit/79f342439af8e4c8835c32398b58098acd6bd3dc)) +* some en masks ([de77551](https://github.com/oceanopen/ChatGPT-Next-Web/commit/de775511d02b8f165a58c461f4da4b8c98a85a0d)) +* sort model by name ([54a5332](https://github.com/oceanopen/ChatGPT-Next-Web/commit/54a53328341af2d07db19e56db5febdaac225a87)) +* stop all stale messages ([736c66f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/736c66f46a03ba13329bf030fadfa85e604e23b1)) +* **SubmitKey:** add MetaEnter option ([d822f33](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d822f333c2e7291b21217e7fa3933adbd773aa47)) +* Support a way to define default model by adding DEFAULT_MODEL env. 
([c96e4b7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c96e4b79667cc3335bf5ee225914f43b5918c62f)) +* support baidu model ([785d374](https://github.com/oceanopen/ChatGPT-Next-Web/commit/785d3748e10c6c2fa5b21129aa8e35905876a171)) +* support code highlight and markdown gfm ([f5aef31](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f5aef317df98ffb32de3b5ad2739799bf8e74c6c)) +* support compress chat history ([c133cae](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c133cae04b7427723c34028803684288018374da)) +* support custom gemini pro params ([7c3dfb7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7c3dfb7bae37a7d0412a8696393b6189cf2a42cb)) +* support env var DEFAULT_INPUT_TEMPLATE to custom default template for preprocessing user inputs ([9d7ce20](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9d7ce207b689d2636465da8088a1d96c1275d27a)) +* support env var DEFAULT_INPUT_TEMPLATE to custom default template for preprocessing user inputs ([2d1f0c9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2d1f0c9f5760b726153c347ef3f6b6bffcd439a5)) +* support fast chatgpt mobile models ([7f13a8d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7f13a8d2bce33dfe0aa34a2fd29e4824dc6a3fba)) +* support gemini flash ([6612550](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6612550c064a68dbc8772c182228d7428b562fd7)) +* support history message count to zero ([0c9add7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0c9add79884156e778863849bba95dd50c2b5fb0)) +* support i18n ([7cd170b](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7cd170b933b4be3e85ebe689dd397e6549bbeff7)) +* support model: claude-3-5-sonnet-20240620 ([4640060](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4640060891c85b6619cdaf7b7ee4c0cfc4404170)) +* support more http status check for webdav ([23eb773](https://github.com/oceanopen/ChatGPT-Next-Web/commit/23eb7732d7011ce9476ab6309c92509e094fca81)) +* support more user-friendly scrolling 
([19facc7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/19facc7c85a0e509b5d4ca1eaa98782f29477c9a)) +* support safari appleWebApp ([6446692](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6446692db04b612ab243d19c88c09c3cbee52c29)) +* support set api key from client side ([20f2f61](https://github.com/oceanopen/ChatGPT-Next-Web/commit/20f2f61349d68b3720623d37803fe968868c834a)) +* support setting up GTM ([31d9d2e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/31d9d2efcd5cb366cb17e18d09198c0c627c5542)) +* support streaming for Gemini Pro ([#3688](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3688)) ([5cf58d9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5cf58d94466604cb53a6b026f477827baf12f012)) +* support using user api key ([df66eef](https://github.com/oceanopen/ChatGPT-Next-Web/commit/df66eef919a3eda0569c94b7ab79523aa3957968)) +* support vercel speed insight ([#3686](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3686)) ([406530c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/406530ca69d9f6bd1159e1ff8bde98ec0a3306e2)) +* supports the display of line breaks in Markdown ([b94607f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b94607f636789701612334b48d58a381b2cd06f7)) +* swap name and displayName for bytedance in custom models ([1caa61f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1caa61f4c0e8d35bfff2dd670925f8c1ceb8267a)) +* textarea with adaptive height ([3656c84](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3656c8458fa955570dff2e0d6cb076e3e5a8e7e9)) +* try catch indexedDB error ([7b6fe66](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7b6fe66f2a1a7f227f7116b72f9dd4e10207cd44)) +* try catch indexedDB error ([c2fc0b4](https://github.com/oceanopen/ChatGPT-Next-Web/commit/c2fc0b49797ef8b016949d9051bfad140326bdef)) +* try to add auto updater ([91b871e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/91b871ef3bde497a8641b7104485225cc25af45e)) +* tts 
([3ae8ec1](https://github.com/oceanopen/ChatGPT-Next-Web/commit/3ae8ec1af6011cec2ff57f62e66531c48576a9bf)) +* **tw.ts:** added new translations ([#4142](https://github.com/oceanopen/ChatGPT-Next-Web/issues/4142)) ([f22e36e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/f22e36e52f35a1d447b50d3b8afb1b70bb160961)) +* update app release workflow ([e785849](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e7858495e60266f84b2c397a2eadd049d1d6b8a6)) +* update apple-touch-icon.png ([17e57bb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/17e57bb28e67d13048c9123b76b4c642020a3c14)) +* update button hover style and scrollbar style ([e5c441d](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e5c441d53062b52b52324cde988ff5dd91e4f606)) +* update dependencies ([b9995e7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b9995e7f701b4a78ede62da686049c12ab8be092)) +* update dev config ([b17a6a7](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b17a6a7f619c55ff080348998983ce0dd7082d20)) +* update i18n ([86b8bfc](https://github.com/oceanopen/ChatGPT-Next-Web/commit/86b8bfcb1f478214fbcfe76e9de09680b68f1034)) +* update new logo & cover image ([#3716](https://github.com/oceanopen/ChatGPT-Next-Web/issues/3716)) ([9122547](https://github.com/oceanopen/ChatGPT-Next-Web/commit/912254751a773c5425d3c36e847e96985cbcbeb6)) +* update payload config ([1161ada](https://github.com/oceanopen/ChatGPT-Next-Web/commit/1161adaa9f07c8a0842ae423f4aa7a0eb5500968)) +* update real 'currentSession' ([e49466f](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e49466fa054c702898780967812abe2dabd4ba6b)) +* update style and timeout handler ([5c70456](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5c70456e180c086cde69a3a74fe3a79caf9b5840)) +* update vercel deploy env ([9fd7505](https://github.com/oceanopen/ChatGPT-Next-Web/commit/9fd750511c86ef7d45b9a8d304fc98495a2ec252)) +* use commit time as version id 
([fce3b3c](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fce3b3ce7bfa817ae683bfd2bea7c326a3b81f8b)) +* use tag as version number ([7783545](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7783545bffad789e3504e010960d1c69062f8d17)) +* use toast instead of alert ([4af8c26](https://github.com/oceanopen/ChatGPT-Next-Web/commit/4af8c26d02e3dd74373d5c0fa835a79f3542d032)) +* user prompts ([789a779](https://github.com/oceanopen/ChatGPT-Next-Web/commit/789a77977525eb06be52c329a7a65ad47e6babfc)) +* using fetch to get buildin masks ([88c74ae](https://github.com/oceanopen/ChatGPT-Next-Web/commit/88c74ae18d74b79caded849f9a022b6d5a8a101d)) +* voice print ([d33e772](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d33e772fa592c24e4adc03f127c887c9e4727913)) +* voice print ([89136fb](https://github.com/oceanopen/ChatGPT-Next-Web/commit/89136fba32dbf731e4aaed03508684cfeb54614b)) +* voice print ([8b4ca13](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8b4ca133fda68ed7034ee5bbae8d622d66bf81f9)) +* white url list for openai security ([0d46110](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0d4611052e75cbe9b2dc9309b60435178dcab663)) +* white webdav server domain ([8b191bd](https://github.com/oceanopen/ChatGPT-Next-Web/commit/8b191bd2f733d8677c851d90a5003617bd1da937)) +* wider app body ([09fd743](https://github.com/oceanopen/ChatGPT-Next-Web/commit/09fd743e2e56352bb165c26e9a3a856eecfb51ac)) +* wont fetch prompts in every building ([9304459](https://github.com/oceanopen/ChatGPT-Next-Web/commit/93044590ccd3bf7fcef384d17a935de3e35d70dd)) +* wont send max_tokens ([fd2f441](https://github.com/oceanopen/ChatGPT-Next-Web/commit/fd2f441e02b1eecfd2139942fcb911b32ee3c1e4)) + + +### Performance Improvements + +* models接口返回数据的容错处理 ([6653a31](https://github.com/oceanopen/ChatGPT-Next-Web/commit/6653a31eb7e97d88affe88e3b58844632052e678)) +* avoid read localStorage on every render 
([2322851](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2322851ac48e60fe67aab1ac31ee2c4133e2d231)) +* build in stages to reduce container size ([2645540](https://github.com/oceanopen/ChatGPT-Next-Web/commit/2645540721a457b8772730e65ff16c86da45108f)) +* close [#909](https://github.com/oceanopen/ChatGPT-Next-Web/issues/909) reduce message items render time ([a69cec8](https://github.com/oceanopen/ChatGPT-Next-Web/commit/a69cec89fb3b4264abaaa9537c5351bbe7860882)) +* improve prompt list performance ([e509749](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e509749421dc7d81180bc3f4255dae27712defc6)) +* memorize markdown rendering ([962f434](https://github.com/oceanopen/ChatGPT-Next-Web/commit/962f434e17be9ec802626db897b1682edef264c6)), closes [#302](https://github.com/oceanopen/ChatGPT-Next-Web/issues/302) + + +### Reverts + +* Revert "Fix [TypesScript] [LLM Api] Chaining Model" ([0c11625](https://github.com/oceanopen/ChatGPT-Next-Web/commit/0c116251b1c51d16e3e9b3d025c4feed8d7c069e)) +* Revert "Fix & Refactor UI/UX Page [Auth]" ([7df868e](https://github.com/oceanopen/ChatGPT-Next-Web/commit/7df868e22a66db618688878bbe4753c4dd3c495c)) +* Revert "Fix UI/UX Page Local Language [Exporter Message]" ([5ba3fc9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/5ba3fc9321a126dce367c57d14649ec8a590dc82)) +* Revert "Add Jailbreak Mask" ([b68d6e9](https://github.com/oceanopen/ChatGPT-Next-Web/commit/b68d6e9d1a7d3bee9d1dfb3593ef2a9bee720185)) +* fix mobile scroll ([e2c1475](https://github.com/oceanopen/ChatGPT-Next-Web/commit/e2c1475857843c65d803b67292a8f14adffe49d8)) +* remove unused lines in gitignore file ([d5a4527](https://github.com/oceanopen/ChatGPT-Next-Web/commit/d5a4527e9d075373cc430d833907173b9026d83a)) + + + diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 7712d974276..00000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, 
contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. 
- -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -flynn.zhang@foxmail.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. 
This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. diff --git a/README.md b/README.md index 0c06b73f05b..9a142840c89 100644 --- a/README.md +++ b/README.md @@ -1,119 +1,17 @@
- - icon - -

NextChat (ChatGPT Next Web)

+
+ English / [简体中文](./README_CN.md) One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. 一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 -[![Saas][Saas-image]][saas-url] -[![Web][Web-image]][web-url] -[![Windows][Windows-image]][download-url] -[![MacOS][MacOS-image]][download-url] -[![Linux][Linux-image]][download-url] - -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) - -[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) - -[saas-url]: https://nextchat.dev/chat?utm_source=readme -[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge -[web-url]: https://app.nextchat.dev/ -[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases -[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge -[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows -[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple -[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu - -[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [BT Deply Install](https://www.bt.cn/new/download.html) [Deploy to Alibaba Cloud](https://computenest.aliyun.com/market/service-f1c9b75e59814dc49d52) - -[](https://monica.im/?utm=nxcrp) - - - -## Enterprise 
Edition - -Meeting Your Company's Privatization and Customization Deployment Requirements: -- **Brand Customization**: Tailored VI/UI to seamlessly align with your corporate brand image. -- **Resource Integration**: Unified configuration and management of dozens of AI resources by company administrators, ready for use by team members. -- **Permission Control**: Clearly defined member permissions, resource permissions, and knowledge base permissions, all controlled via a corporate-grade Admin Panel. -- **Knowledge Integration**: Combining your internal knowledge base with AI capabilities, making it more relevant to your company's specific business needs compared to general AI. -- **Security Auditing**: Automatically intercept sensitive inquiries and trace all historical conversation records, ensuring AI adherence to corporate information security standards. -- **Private Deployment**: Enterprise-level private deployment supporting various mainstream private cloud solutions, ensuring data security and privacy protection. -- **Continuous Updates**: Ongoing updates and upgrades in cutting-edge capabilities like multimodal AI, ensuring consistent innovation and advancement. 
- -For enterprise inquiries, please contact: **business@nextchat.dev** - -## 企业版 - -满足企业用户私有化部署和个性化定制需求: -- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合 -- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用 -- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制 -- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求 -- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范 -- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护 -- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进 - -企业版咨询: **business@nextchat.dev** - - - -## Features - -- **Deploy for free with one-click** on Vercel in under 1 minute -- Compact client (~5MB) on Linux/Windows/MacOS, [download it now](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) -- Fully compatible with self-deployed LLMs, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI) -- Privacy first, all data is stored locally in the browser -- Markdown support: LaTex, mermaid, code highlight, etc. -- Responsive design, dark mode and PWA -- Fast first screen loading speed (~100kb), support streaming response -- New in v2: create, share and debug your chat tools with prompt templates (mask) -- Awesome prompts powered by [awesome-chatgpt-prompts-zh](https://github.com/PlexPt/awesome-chatgpt-prompts-zh) and [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts) -- Automatically compresses chat history to support long conversations while also saving your tokens -- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia - -
- -![主界面](./docs/images/cover.png) - -
- -## Roadmap - -- [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) -- [x] User Prompt: user can edit and save custom prompts to prompt list -- [x] Prompt Template: create a new chat with pre-defined in-context prompts [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993) -- [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) -- [x] Desktop App with tauri -- [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. -- [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) -- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - - [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) -- [x] Supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) -- [ ] local knowledge base - -## What's New -- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) -- 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379) -- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) -- 🚀 v2.14.0 Now supports Artifacts & SD -- 🚀 v2.10.1 support Google Gemini Pro model. -- 🚀 v2.9.11 you can use azure endpoint now. 
-- 🚀 v2.8 now we have a client that runs across all platforms! -- 🚀 v2.7 let's share conversations as image, or share to ShareGPT! -- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/). - ## 主要功能 -- 在 1 分钟内使用 Vercel **免费一键部署** -- 提供体积极小(~5MB)的跨平台客户端(Linux/Windows/MacOS), [下载地址](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) - 完整的 Markdown 支持:LaTex 公式、Mermaid 流程图、代码高亮等等 - 精心设计的 UI,响应式设计,支持深色模式,支持 PWA - 极快的首屏加载速度(~100kb),支持流式响应 @@ -122,395 +20,11 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - 海量的内置 prompt 列表,来自[中文](https://github.com/PlexPt/awesome-chatgpt-prompts-zh)和[英文](https://github.com/f/awesome-chatgpt-prompts) - 自动压缩上下文聊天记录,在节省 Token 的同时支持超长对话 - 多国语言支持:English, 简体中文, 繁体中文, 日本語, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia -- 拥有自己的域名?好上加好,绑定后即可在任何地方**无障碍**快速访问 ## 开发计划 -- [x] 为每个对话设置系统 Prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) -- [x] 允许用户自行编辑内置 Prompt 列表 -- [x] 预制角色:使用预制角色快速定制新对话 [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993) -- [x] 分享为图片,分享到 ShareGPT 链接 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) -- [x] 使用 tauri 打包桌面应用 -- [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) -- [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) -- [x] 插件机制,支持`联网搜索`、`计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - - [x] 支持联网搜索、计算器、调用其他平台 api 
[#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - - [x] 支持 Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) - - [ ] 本地知识库 - -## 最新动态 -- 🚀 v2.15.8 现在支持Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) -- 🚀 v2.15.4 客户端支持Tauri本地直接调用大模型API,更安全![#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379) -- 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) -- 🚀 v2.14.0 现在支持 Artifacts & SD 了。 -- 🚀 v2.10.1 现在支持 Gemini Pro 模型。 -- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 -- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。 -- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。 -- 🚀 v2.0 已经发布,现在你可以使用面具功能快速创建预制对话了! 了解更多: [ChatGPT 提示词高阶技能:零次、一次和少样本提示](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)。 -- 💡 想要更方便地随时随地使用本项目?可以试下这款桌面插件:https://github.com/mushan0x0/AI0x0.com - -## Get Started - -> [简体中文 > 如何开始使用](./README_CN.md#开始使用) - -1. Get [OpenAI API Key](https://platform.openai.com/account/api-keys); -2. Click - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web), remember that `CODE` is your page password; -3. Enjoy :) +- [ ] 支持登录 ## FAQ [简体中文 > 常见问题](./docs/faq-cn.md) - -[English > FAQ](./docs/faq-en.md) - -## Keep Updated - -> [简体中文 > 如何保持代码更新](./README_CN.md#保持更新) - -If you have deployed your own project with just one click following the steps above, you may encounter the issue of "Updates Available" constantly showing up. This is because Vercel will create a new project for you by default instead of forking this project, resulting in the inability to detect updates correctly. 
- -We recommend that you follow the steps below to re-deploy: - -- Delete the original repository; -- Use the fork button in the upper right corner of the page to fork this project; -- Choose and deploy in Vercel again, [please see the detailed tutorial](./docs/vercel-cn.md). - -### Enable Automatic Updates - -> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code). - -After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour: - -![Automatic Updates](./docs/images/enable-actions.jpg) - -![Enable Automatic Updates](./docs/images/enable-actions-sync.jpg) - -### Manually Updating Code - -If you want to update instantly, you can check out the [GitHub documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) to learn how to synchronize a forked project with upstream code. - -You can star or watch this project or follow author to get release notifications in time. - -## Access Password - -> [简体中文 > 如何增加访问密码](./README_CN.md#配置页面访问密码) - -This project provides limited access control. Please add an environment variable named `CODE` on the vercel environment variables page. The value should be passwords separated by comma like this: - -``` -code1,code2,code3 -``` - -After adding or modifying this environment variable, please redeploy the project for the changes to take effect. - -## Environment Variables - -> [简体中文 > 如何配置 api key、访问密码、接口代理](./README_CN.md#环境变量) - -### `CODE` (optional) - -Access password, separated by comma. - -### `OPENAI_API_KEY` (required) - -Your openai api key, join multiple api keys with comma. - -### `BASE_URL` (optional) - -> Default: `https://api.openai.com` - -> Examples: `http://your-openai-proxy.com` - -Override openai api request base url. 
- -### `OPENAI_ORG_ID` (optional) - -Specify OpenAI organization ID. - -### `AZURE_URL` (optional) - -> Example: https://{azure-resource-url}/openai - -Azure deploy url. - -### `AZURE_API_KEY` (optional) - -Azure Api Key. - -### `AZURE_API_VERSION` (optional) - -Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions). - -### `GOOGLE_API_KEY` (optional) - -Google Gemini Pro Api Key. - -### `GOOGLE_URL` (optional) - -Google Gemini Pro Api Url. - -### `ANTHROPIC_API_KEY` (optional) - -anthropic claude Api Key. - -### `ANTHROPIC_API_VERSION` (optional) - -anthropic claude Api version. - -### `ANTHROPIC_URL` (optional) - -anthropic claude Api Url. - -### `BAIDU_API_KEY` (optional) - -Baidu Api Key. - -### `BAIDU_SECRET_KEY` (optional) - -Baidu Secret Key. - -### `BAIDU_URL` (optional) - -Baidu Api Url. - -### `BYTEDANCE_API_KEY` (optional) - -ByteDance Api Key. - -### `BYTEDANCE_URL` (optional) - -ByteDance Api Url. - -### `ALIBABA_API_KEY` (optional) - -Alibaba Cloud Api Key. - -### `ALIBABA_URL` (optional) - -Alibaba Cloud Api Url. - -### `IFLYTEK_URL` (Optional) - -iflytek Api Url. - -### `IFLYTEK_API_KEY` (Optional) - -iflytek Api Key. - -### `IFLYTEK_API_SECRET` (Optional) - -iflytek Api Secret. - -### `CHATGLM_API_KEY` (optional) - -ChatGLM Api Key. - -### `CHATGLM_URL` (optional) - -ChatGLM Api Url. - -### `HIDE_USER_API_KEY` (optional) - -> Default: Empty - -If you do not want users to input their own API key, set this value to 1. - -### `DISABLE_GPT4` (optional) - -> Default: Empty - -If you do not want users to use GPT-4, set this value to 1. - -### `ENABLE_BALANCE_QUERY` (optional) - -> Default: Empty - -If you do want users to query balance, set this value to 1. - -### `DISABLE_FAST_LINK` (optional) - -> Default: Empty - -If you want to disable parse settings from url, set this to 1. 
- -### `CUSTOM_MODELS` (optional) - -> Default: Empty -> Example: `+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` means add `llama, claude-2` to model list, and remove `gpt-3.5-turbo` from list, and display `gpt-4-1106-preview` as `gpt-4-turbo`. - -To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma. - -User `-all` to disable all default models, `+all` to enable all default models. - -For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name. -> Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list. -> If you only can use Azure model, `-all,+gpt-3.5-turbo@Azure=gpt35` will `gpt35(Azure)` the only option in model list. - -For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. -> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. - -### `DEFAULT_MODEL` (optional) - -Change default model - -### `WHITE_WEBDAV_ENDPOINTS` (optional) - -You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: -- Each address must be a complete endpoint -> `https://xxxx/yyy` -- Multiple addresses are connected by ', ' - -### `DEFAULT_INPUT_TEMPLATE` (optional) - -Customize the default template used to initialize the User Input Preprocessing configuration item in Settings. - -### `STABILITY_API_KEY` (optional) - -Stability API key. - -### `STABILITY_URL` (optional) - -Customize Stability API url. 
- -## Requirements - -NodeJS >= 18, Docker >= 20 - -## Development - -> [简体中文 > 如何进行二次开发](./README_CN.md#开发) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -Before starting development, you must create a new `.env.local` file at project root, and place your api key into it: - -``` -OPENAI_API_KEY= - -# if you are not able to access openai service, use this BASE_URL -BASE_URL=https://chatgpt1.nextweb.fun/api/proxy -``` - -### Local Development - -```shell -# 1. install nodejs and yarn first -# 2. config local env vars in `.env.local` -# 3. run -yarn install -yarn dev -``` - -## Deployment - -> [简体中文 > 如何部署到私人服务器](./README_CN.md#部署) - -### BT Install -> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md) - -### Docker (Recommended) - -```shell -docker pull yidadaa/chatgpt-next-web - -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=your-password \ - yidadaa/chatgpt-next-web -``` - -You can start service behind a proxy: - -```shell -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=your-password \ - -e PROXY_URL=http://localhost:7890 \ - yidadaa/chatgpt-next-web -``` - -If your proxy needs password, use: - -```shell --e PROXY_URL="http://127.0.0.1:7890 user pass" -``` - -### Shell - -```shell -bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh) -``` - -## Synchronizing Chat Records (UpStash) - -| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md) - -## Documentation - -> Please go to the [docs][./docs] directory for more documentation instructions. 
- -- [Deploy with cloudflare (Deprecated)](./docs/cloudflare-pages-en.md) -- [Frequent Ask Questions](./docs/faq-en.md) -- [How to add a new translation](./docs/translation.md) -- [How to use Vercel (No English)](./docs/vercel-cn.md) -- [User Manual (Only Chinese, WIP)](./docs/user-manual-cn.md) - -## Screenshots - -![Settings](./docs/images/settings.png) - -![More](./docs/images/more.png) - -## Translation - -If you want to add a new translation, read this [document](./docs/translation.md). - -## Donation - -[Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa) - -## Special Thanks - -### Sponsor - -> 仅列出捐赠金额 >= 100RMB 的用户。 - -[@mushan0x0](https://github.com/mushan0x0) -[@ClarenceDan](https://github.com/ClarenceDan) -[@zhangjia](https://github.com/zhangjia) -[@hoochanlon](https://github.com/hoochanlon) -[@relativequantum](https://github.com/relativequantum) -[@desenmeng](https://github.com/desenmeng) -[@webees](https://github.com/webees) -[@chazzhou](https://github.com/chazzhou) -[@hauy](https://github.com/hauy) -[@Corwin006](https://github.com/Corwin006) -[@yankunsong](https://github.com/yankunsong) -[@ypwhs](https://github.com/ypwhs) -[@fxxxchao](https://github.com/fxxxchao) -[@hotic](https://github.com/hotic) -[@WingCH](https://github.com/WingCH) -[@jtung4](https://github.com/jtung4) -[@micozhu](https://github.com/micozhu) -[@jhansion](https://github.com/jhansion) -[@Sha1rholder](https://github.com/Sha1rholder) -[@AnsonHyq](https://github.com/AnsonHyq) -[@synwith](https://github.com/synwith) -[@piksonGit](https://github.com/piksonGit) -[@ouyangzhiping](https://github.com/ouyangzhiping) -[@wenjiavv](https://github.com/wenjiavv) -[@LeXwDeX](https://github.com/LeXwDeX) -[@Licoy](https://github.com/Licoy) -[@shangmin2009](https://github.com/shangmin2009) - -### Contributors - - - - - -## LICENSE - -[MIT](https://opensource.org/license/mit/) diff --git a/README_CN.md b/README_CN.md index d4da8b9da13..19caf99b297 100644 --- a/README_CN.md +++ b/README_CN.md @@ 
-1,57 +1,23 @@
- - icon - - -

NextChat

- -一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 - -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) - -[Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) +

NextChat (ChatGPT Next Web)

-## 企业版 - -满足您公司私有化部署和定制需求 -- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合 -- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用 -- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制 -- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求 -- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范 -- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护 -- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进 - -企业版咨询: **business@nextchat.dev** - - +一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 ## 开始使用 1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys); -2. 点击右侧按钮开始部署: - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE; -3. 部署完毕后,即可开始使用; -4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。
- + ![主界面](./docs/images/cover.png)
## 保持更新 -如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。 -推荐你按照下列步骤重新部署: - -- 删除掉原先的仓库; -- 使用页面右上角的 fork 按钮,fork 本项目; -- 在 Vercel 重新选择并部署,[请查看详细教程](./docs/vercel-cn.md#如何新建项目)。 - ### 打开自动更新 > 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)! @@ -192,7 +158,6 @@ ChatGLM Api Key. ChatGLM Api Url. - ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 @@ -212,8 +177,9 @@ ChatGLM Api Url. ### `WHITE_WEBDAV_ENDPOINTS` (可选) 如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: + - 每一个地址必须是一个完整的 endpoint -> `https://xxxx/xxx` + > `https://xxxx/xxx` - 多个地址以`,`相连 ### `CUSTOM_MODELS` (可选) @@ -224,12 +190,13 @@ ChatGLM Api Url. 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 在Azure的模式下,支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) + > 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。 > 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)` 在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) -> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 +> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 ### `DEFAULT_MODEL` (可选) @@ -247,13 +214,8 @@ Stability API密钥 自定义的Stability API请求地址 - ## 开发 -点击下方按钮,开始二次开发: - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - 在开始写代码之前,需要在项目根目录新建一个 `.env.local` 文件,里面填入环境变量: ``` @@ -271,9 +233,6 @@ BASE_URL=https://b.nextweb.fun/api/proxy ## 部署 -### 宝塔面板部署 -> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md) - ### 容器部署 (推荐) > Docker 版本需要在 20 及其以上,否则会提示找不到镜像。 @@ -289,7 +248,7 @@ docker run -d -p 3000:3000 \ yidadaa/chatgpt-next-web ``` -你也可以指定 proxy: +你也可以指定 proxy: ```shell docker run -d -p 3000:3000 \ @@ -318,16 +277,6 @@ bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/s ⚠️ 注意:如果你安装过程中遇到了问题,请使用 docker 部署。 -## 鸣谢 
- -### 捐赠者 - -> 见英文版。 - -### 贡献者 - -[见项目贡献者列表](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) - ### 相关项目 - [one-api](https://github.com/songquanpeng/one-api): 一站式大模型额度管理平台,支持市面上所有主流大语言模型 diff --git a/README_JA.md b/README_JA.md deleted file mode 100644 index 062c112629d..00000000000 --- a/README_JA.md +++ /dev/null @@ -1,310 +0,0 @@ -
-プレビュー - -

NextChat

- -ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。 - -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) - -[Zeaburでデプロイ](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Zeaburでデプロイ](https://zeabur.com/templates/ZBUEFA) [Gitpodで開く](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - - -
- -## 企業版 - -あなたの会社のプライベートデプロイとカスタマイズのニーズに応える -- **ブランドカスタマイズ**:企業向けに特別に設計された VI/UI、企業ブランドイメージとシームレスにマッチ -- **リソース統合**:企業管理者が数十種類のAIリソースを統一管理、チームメンバーはすぐに使用可能 -- **権限管理**:メンバーの権限、リソースの権限、ナレッジベースの権限を明確にし、企業レベルのAdmin Panelで統一管理 -- **知識の統合**:企業内部のナレッジベースとAI機能を結びつけ、汎用AIよりも企業自身の業務ニーズに近づける -- **セキュリティ監査**:機密質問を自動的にブロックし、すべての履歴対話を追跡可能にし、AIも企業の情報セキュリティ基準に従わせる -- **プライベートデプロイ**:企業レベルのプライベートデプロイ、主要なプライベートクラウドデプロイをサポートし、データのセキュリティとプライバシーを保護 -- **継続的な更新**:マルチモーダル、エージェントなどの最先端機能を継続的に更新し、常に最新であり続ける - -企業版のお問い合わせ: **business@nextchat.dev** - - -## 始めに - -1. [OpenAI API Key](https://platform.openai.com/account/api-keys)を準備する; -2. 右側のボタンをクリックしてデプロイを開始: - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) 、GitHubアカウントで直接ログインし、環境変数ページにAPI Keyと[ページアクセスパスワード](#設定ページアクセスパスワード) CODEを入力してください; -3. デプロイが完了したら、すぐに使用を開始できます; -4. (オプション)[カスタムドメインをバインド](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercelが割り当てたドメインDNSは一部の地域で汚染されているため、カスタムドメインをバインドすると直接接続できます。 - -
- -![メインインターフェース](./docs/images/cover.png) - -
- - -## 更新を維持する - -もし上記の手順に従ってワンクリックでプロジェクトをデプロイした場合、「更新があります」というメッセージが常に表示されることがあります。これは、Vercel がデフォルトで新しいプロジェクトを作成するためで、本プロジェクトを fork していないことが原因です。そのため、正しく更新を検出できません。 - -以下の手順に従って再デプロイすることをお勧めします: - -- 元のリポジトリを削除する -- ページ右上の fork ボタンを使って、本プロジェクトを fork する -- Vercel で再度選択してデプロイする、[詳細な手順はこちらを参照してください](./docs/vercel-ja.md)。 - - -### 自動更新を開く - -> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください! - -プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります: - -![自動更新](./docs/images/enable-actions.jpg) - -![自動更新を有効にする](./docs/images/enable-actions-sync.jpg) - - -### 手動でコードを更新する - -手動で即座に更新したい場合は、[GitHub のドキュメント](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork)を参照して、fork したプロジェクトを上流のコードと同期する方法を確認してください。 - -このプロジェクトをスターまたはウォッチしたり、作者をフォローすることで、新機能の更新通知をすぐに受け取ることができます。 - - - -## ページアクセスパスワードを設定する - -> パスワードを設定すると、ユーザーは設定ページでアクセスコードを手動で入力しない限り、通常のチャットができず、未承認の状態であることを示すメッセージが表示されます。 - -> **警告**:パスワードの桁数は十分に長く設定してください。7桁以上が望ましいです。さもないと、[ブルートフォース攻撃を受ける可能性があります](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。 - -このプロジェクトは限られた権限管理機能を提供しています。Vercel プロジェクトのコントロールパネルで、環境変数ページに `CODE` という名前の環境変数を追加し、値をカンマで区切ったカスタムパスワードに設定してください: - -``` -code1,code2,code3 -``` - -この環境変数を追加または変更した後、**プロジェクトを再デプロイ**して変更を有効にしてください。 - - -## 環境変数 - -> 本プロジェクトのほとんどの設定は環境変数で行います。チュートリアル:[Vercel の環境変数を変更する方法](./docs/vercel-ja.md)。 - -### `OPENAI_API_KEY` (必須) - -OpenAI の API キー。OpenAI アカウントページで申請したキーをカンマで区切って複数設定できます。これにより、ランダムにキーが選択されます。 - -### `CODE` (オプション) - -アクセスパスワード。カンマで区切って複数設定可能。 - -**警告**:この項目を設定しないと、誰でもデプロイしたウェブサイトを利用でき、トークンが急速に消耗する可能性があるため、設定をお勧めします。 - -### `BASE_URL` (オプション) - -> デフォルト: `https://api.openai.com` - -> 例: `http://your-openai-proxy.com` - -OpenAI API のプロキシ URL。手動で OpenAI API のプロキシを設定している場合はこのオプションを設定してください。 - -> SSL 証明書の問題がある場合は、`BASE_URL` のプロトコルを http に設定してください。 - -### `OPENAI_ORG_ID` (オプション) - -OpenAI の組織 
ID を指定します。 - -### `AZURE_URL` (オプション) - -> 形式: https://{azure-resource-url}/openai/deployments/{deploy-name} -> `CUSTOM_MODELS` で `displayName` 形式で {deploy-name} を設定した場合、`AZURE_URL` から {deploy-name} を省略できます。 - -Azure のデプロイ URL。 - -### `AZURE_API_KEY` (オプション) - -Azure の API キー。 - -### `AZURE_API_VERSION` (オプション) - -Azure API バージョン。[Azure ドキュメント](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)で確認できます。 - -### `GOOGLE_API_KEY` (オプション) - -Google Gemini Pro API キー。 - -### `GOOGLE_URL` (オプション) - -Google Gemini Pro API の URL。 - -### `ANTHROPIC_API_KEY` (オプション) - -Anthropic Claude API キー。 - -### `ANTHROPIC_API_VERSION` (オプション) - -Anthropic Claude API バージョン。 - -### `ANTHROPIC_URL` (オプション) - -Anthropic Claude API の URL。 - -### `BAIDU_API_KEY` (オプション) - -Baidu API キー。 - -### `BAIDU_SECRET_KEY` (オプション) - -Baidu シークレットキー。 - -### `BAIDU_URL` (オプション) - -Baidu API の URL。 - -### `BYTEDANCE_API_KEY` (オプション) - -ByteDance API キー。 - -### `BYTEDANCE_URL` (オプション) - -ByteDance API の URL。 - -### `ALIBABA_API_KEY` (オプション) - -アリババ(千问)API キー。 - -### `ALIBABA_URL` (オプション) - -アリババ(千问)API の URL。 - -### `HIDE_USER_API_KEY` (オプション) - -ユーザーが API キーを入力できないようにしたい場合は、この環境変数を 1 に設定します。 - -### `DISABLE_GPT4` (オプション) - -ユーザーが GPT-4 を使用できないようにしたい場合は、この環境変数を 1 に設定します。 - -### `ENABLE_BALANCE_QUERY` (オプション) - -バランスクエリ機能を有効にしたい場合は、この環境変数を 1 に設定します。 - -### `DISABLE_FAST_LINK` (オプション) - -リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。 - -### `WHITE_WEBDAV_ENDPOINTS` (オプション) - -アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件: -- 各アドレスは完全なエンドポイントでなければなりません。 -> `https://xxxx/xxx` -- 複数のアドレスは `,` で接続します。 - -### `CUSTOM_MODELS` (オプション) - -> 例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` は `qwen-7b-chat` と `glm-6b` をモデルリストに追加し、`gpt-3.5-turbo` を削除し、`gpt-4-1106-preview` のモデル名を `gpt-4-turbo` として表示します。 -> すべてのモデルを無効にし、特定のモデルを有効にしたい場合は、`-all,+gpt-3.5-turbo` を使用します。これは `gpt-3.5-turbo` のみを有効にすることを意味します。 - -モデルリストを管理します。`+` でモデルを追加し、`-` 
でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。 - -Azure モードでは、`modelName@Azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 -> 例:`+gpt-3.5-turbo@Azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。 - -ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 -> 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。 - -### `DEFAULT_MODEL` (オプション) - -デフォルトのモデルを変更します。 - -### `DEFAULT_INPUT_TEMPLATE` (オプション) - -『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。 - - -## 開発 - -下のボタンをクリックして二次開発を開始してください: - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -コードを書く前に、プロジェクトのルートディレクトリに `.env.local` ファイルを新規作成し、環境変数を記入します: - -``` -OPENAI_API_KEY= -``` - - -### ローカル開発 - -1. Node.js 18 と Yarn をインストールします。具体的な方法は ChatGPT にお尋ねください。 -2. `yarn install && yarn dev` を実行します。⚠️ 注意:このコマンドはローカル開発用であり、デプロイには使用しないでください。 -3. ローカルでデプロイしたい場合は、`yarn install && yarn build && yarn start` コマンドを使用してください。プロセスを守るために pm2 を使用することもできます。詳細は ChatGPT にお尋ねください。 - - -## デプロイ - -### コンテナデプロイ(推奨) - -> Docker バージョンは 20 以上が必要です。それ以下だとイメージが見つからないというエラーが出ます。 - -> ⚠️ 注意:Docker バージョンは最新バージョンより 1~2 日遅れることが多いため、デプロイ後に「更新があります」の通知が出続けることがありますが、正常です。 - -```shell -docker pull yidadaa/chatgpt-next-web - -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=ページアクセスパスワード \ - yidadaa/chatgpt-next-web -``` - -プロキシを指定することもできます: - -```shell -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=ページアクセスパスワード \ - --net=host \ - -e PROXY_URL=http://127.0.0.1:7890 \ - yidadaa/chatgpt-next-web -``` - -ローカルプロキシがアカウントとパスワードを必要とする場合は、以下を使用できます: - -```shell --e PROXY_URL="http://127.0.0.1:7890 user password" -``` - -他の環境変数を指定する必要がある場合は、上記のコマンドに `-e 環境変数=環境変数値` を追加して指定してください。 - - -### ローカルデプロイ - -コンソールで以下のコマンドを実行します: - -```shell -bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh) -``` - -⚠️ 
注意:インストール中に問題が発生した場合は、Docker を使用してデプロイしてください。 - - -## 謝辞 - -### 寄付者 - -> 英語版をご覧ください。 - -### 貢献者 - -[プロジェクトの貢献者リストはこちら](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) - -### 関連プロジェクト - -- [one-api](https://github.com/songquanpeng/one-api): 一つのプラットフォームで大規模モデルのクォータ管理を提供し、市場に出回っているすべての主要な大規模言語モデルをサポートします。 - - -## オープンソースライセンス - -[MIT](https://opensource.org/license/mit/) diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 3017fd37180..5ed0dab9414 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -1,18 +1,18 @@ -import { ApiPath } from "@/app/constant"; -import { NextRequest } from "next/server"; -import { handle as openaiHandler } from "../../openai"; -import { handle as azureHandler } from "../../azure"; -import { handle as googleHandler } from "../../google"; -import { handle as anthropicHandler } from "../../anthropic"; -import { handle as baiduHandler } from "../../baidu"; -import { handle as bytedanceHandler } from "../../bytedance"; -import { handle as alibabaHandler } from "../../alibaba"; -import { handle as moonshotHandler } from "../../moonshot"; -import { handle as stabilityHandler } from "../../stability"; -import { handle as iflytekHandler } from "../../iflytek"; -import { handle as xaiHandler } from "../../xai"; -import { handle as chatglmHandler } from "../../glm"; -import { handle as proxyHandler } from "../../proxy"; +import type { NextRequest } from 'next/server'; +import { ApiPath } from '@/app/constant'; +import { handle as alibabaHandler } from '../../alibaba'; +import { handle as anthropicHandler } from '../../anthropic'; +import { handle as azureHandler } from '../../azure'; +import { handle as baiduHandler } from '../../baidu'; +import { handle as bytedanceHandler } from '../../bytedance'; +import { handle as chatglmHandler } from '../../glm'; +import { handle as googleHandler } from '../../google'; +import { handle as iflytekHandler } 
from '../../iflytek'; +import { handle as moonshotHandler } from '../../moonshot'; +import { handle as openaiHandler } from '../../openai'; +import { handle as proxyHandler } from '../../proxy'; +import { handle as stabilityHandler } from '../../stability'; +import { handle as xaiHandler } from '../../xai'; async function handle( req: NextRequest, @@ -54,23 +54,23 @@ async function handle( export const GET = handle; export const POST = handle; -export const runtime = "edge"; +export const runtime = 'edge'; export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", + 'arn1', + 'bom1', + 'cdg1', + 'cle1', + 'cpt1', + 'dub1', + 'fra1', + 'gru1', + 'hnd1', + 'iad1', + 'icn1', + 'kix1', + 'lhr1', + 'pdx1', + 'sfo1', + 'sin1', + 'syd1', ]; diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts index 894b1ae4c04..a21a8fe872f 100644 --- a/app/api/alibaba.ts +++ b/app/api/alibaba.ts @@ -1,14 +1,15 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { ALIBABA_BASE_URL, ApiPath, ModelProvider, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +} from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -16,10 +17,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Alibaba Route] params ", params); + console.log('[Alibaba Route] params ', 
params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.Qwen); @@ -33,7 +34,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[Alibaba] ", e); + console.error('[Alibaba] ', e); return NextResponse.json(prettyObject(e)); } } @@ -42,20 +43,20 @@ async function request(req: NextRequest) { const controller = new AbortController(); // alibaba use base url or just remove the path - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, ''); let baseUrl = serverConfig.alibabaUrl || ALIBABA_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -67,15 +68,15 @@ async function request(req: NextRequest) { const fetchUrl = `${baseUrl}${path}`; const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - Authorization: req.headers.get("Authorization") ?? "", - "X-DashScope-SSE": req.headers.get("X-DashScope-SSE") ?? "disable", + 'Content-Type': 'application/json', + 'Authorization': req.headers.get('Authorization') ?? '', + 'X-DashScope-SSE': req.headers.get('X-DashScope-SSE') ?? 
'disable', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -114,9 +115,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts index 7a44443710f..868152ab038 100644 --- a/app/api/anthropic.ts +++ b/app/api/anthropic.ts @@ -1,16 +1,17 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { getServerSideConfig } from '@/app/config/server'; import { - ANTHROPIC_BASE_URL, Anthropic, + ANTHROPIC_BASE_URL, ApiPath, - ServiceProvider, ModelProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "./auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; -import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; + ServiceProvider, +} from '@/app/constant'; +import { cloudflareAIGatewayUrl } from '@/app/utils/cloudflare'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; +import { auth } from './auth'; const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); @@ -18,20 +19,20 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Anthropic Route] params ", params); + console.log('[Anthropic Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { 
status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } - const subpath = params.path.join("/"); + const subpath = params.path.join('/'); if (!ALLOWD_PATH.has(subpath)) { - console.log("[Anthropic Route] forbidden path ", subpath); + console.log('[Anthropic Route] forbidden path ', subpath); return NextResponse.json( { error: true, - msg: "you are not allowed to request " + subpath, + msg: `you are not allowed to request ${subpath}`, }, { status: 403, @@ -50,7 +51,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[Anthropic] ", e); + console.error('[Anthropic] ', e); return NextResponse.json(prettyObject(e)); } } @@ -60,28 +61,28 @@ const serverConfig = getServerSideConfig(); async function request(req: NextRequest) { const controller = new AbortController(); - let authHeaderName = "x-api-key"; - let authValue = - req.headers.get(authHeaderName) || - req.headers.get("Authorization")?.replaceAll("Bearer ", "").trim() || - serverConfig.anthropicApiKey || - ""; + const authHeaderName = 'x-api-key'; + const authValue + = req.headers.get(authHeaderName) + || req.headers.get('Authorization')?.replaceAll('Bearer ', '').trim() + || serverConfig.anthropicApiKey + || ''; - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Anthropic, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Anthropic, ''); - let baseUrl = - serverConfig.anthropicUrl || serverConfig.baseUrl || ANTHROPIC_BASE_URL; + let baseUrl + = serverConfig.anthropicUrl || serverConfig.baseUrl || ANTHROPIC_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const 
timeoutId = setTimeout( () => { @@ -95,20 +96,20 @@ async function request(req: NextRequest) { const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - "Cache-Control": "no-store", - "anthropic-dangerous-direct-browser-access": "true", + 'Content-Type': 'application/json', + 'Cache-Control': 'no-store', + 'anthropic-dangerous-direct-browser-access': 'true', [authHeaderName]: authValue, - "anthropic-version": - req.headers.get("anthropic-version") || - serverConfig.anthropicApiVersion || - Anthropic.Vision, + 'anthropic-version': + req.headers.get('anthropic-version') + || serverConfig.anthropicApiVersion + || Anthropic.Vision, }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -155,9 +156,9 @@ async function request(req: NextRequest) { // ); // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/artifacts/route.ts b/app/api/artifacts/route.ts index 4707e795f0a..798ab802d0f 100644 --- a/app/api/artifacts/route.ts +++ b/app/api/artifacts/route.ts @@ -1,6 +1,7 @@ -import md5 from "spark-md5"; -import { NextRequest, NextResponse } from "next/server"; -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { getServerSideConfig } from '@/app/config/server'; +import { NextResponse } from 'next/server'; +import md5 from 'spark-md5'; async function handle(req: NextRequest, res: NextResponse) { const serverConfig = getServerSideConfig(); @@ -9,7 +10,7 @@ async function handle(req: NextRequest, res: NextResponse) { const storeHeaders = () => ({ Authorization: `Bearer 
${serverConfig.cloudflareKVApiKey}`, }); - if (req.method === "POST") { + if (req.method === 'POST') { const clonedBody = await req.text(); const hashedCode = md5.hash(clonedBody).trim(); const body: { @@ -21,9 +22,9 @@ async function handle(req: NextRequest, res: NextResponse) { value: clonedBody, }; try { - const ttl = parseInt(serverConfig.cloudflareKVTTL as string); + const ttl = Number.parseInt(serverConfig.cloudflareKVTTL as string); if (ttl > 60) { - body["expiration_ttl"] = ttl; + body.expiration_ttl = ttl; } } catch (e) { console.error(e); @@ -31,13 +32,13 @@ async function handle(req: NextRequest, res: NextResponse) { const res = await fetch(`${storeUrl()}/bulk`, { headers: { ...storeHeaders(), - "Content-Type": "application/json", + 'Content-Type': 'application/json', }, - method: "PUT", + method: 'PUT', body: JSON.stringify([body]), }); const result = await res.json(); - console.log("save data", result); + console.log('save data', result); if (result?.success) { return NextResponse.json( { code: 0, id: hashedCode, result }, @@ -45,15 +46,15 @@ async function handle(req: NextRequest, res: NextResponse) { ); } return NextResponse.json( - { error: true, msg: "Save data error" }, + { error: true, msg: 'Save data error' }, { status: 400 }, ); } - if (req.method === "GET") { - const id = req?.nextUrl?.searchParams?.get("id"); + if (req.method === 'GET') { + const id = req?.nextUrl?.searchParams?.get('id'); const res = await fetch(`${storeUrl()}/values/${id}`, { headers: storeHeaders(), - method: "GET", + method: 'GET', }); return new Response(res.body, { status: res.status, @@ -62,7 +63,7 @@ async function handle(req: NextRequest, res: NextResponse) { }); } return NextResponse.json( - { error: true, msg: "Invalid request" }, + { error: true, msg: 'Invalid request' }, { status: 400 }, ); } @@ -70,4 +71,4 @@ async function handle(req: NextRequest, res: NextResponse) { export const POST = handle; export const GET = handle; -export const runtime = "edge"; +export 
const runtime = 'edge'; diff --git a/app/api/auth.ts b/app/api/auth.ts index 6703b64bd15..0b3a61c8212 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -1,55 +1,55 @@ -import { NextRequest } from "next/server"; -import { getServerSideConfig } from "../config/server"; -import md5 from "spark-md5"; -import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant"; +import type { NextRequest } from 'next/server'; +import md5 from 'spark-md5'; +import { getServerSideConfig } from '../config/server'; +import { ACCESS_CODE_PREFIX, ModelProvider } from '../constant'; function getIP(req: NextRequest) { - let ip = req.ip ?? req.headers.get("x-real-ip"); - const forwardedFor = req.headers.get("x-forwarded-for"); + let ip = req.ip ?? req.headers.get('x-real-ip'); + const forwardedFor = req.headers.get('x-forwarded-for'); if (!ip && forwardedFor) { - ip = forwardedFor.split(",").at(0) ?? ""; + ip = forwardedFor.split(',').at(0) ?? ''; } return ip; } function parseApiKey(bearToken: string) { - const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + const token = bearToken.trim().replaceAll('Bearer ', '').trim(); const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX); return { - accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length), - apiKey: isApiKey ? token : "", + accessCode: isApiKey ? '' : token.slice(ACCESS_CODE_PREFIX.length), + apiKey: isApiKey ? token : '', }; } export function auth(req: NextRequest, modelProvider: ModelProvider) { - const authToken = req.headers.get("Authorization") ?? ""; + const authToken = req.headers.get('Authorization') ?? ''; // check if it is openai api key or user token const { accessCode, apiKey } = parseApiKey(authToken); - const hashedCode = md5.hash(accessCode ?? "").trim(); + const hashedCode = md5.hash(accessCode ?? 
'').trim(); const serverConfig = getServerSideConfig(); - console.log("[Auth] allowed hashed codes: ", [...serverConfig.codes]); - console.log("[Auth] got access code:", accessCode); - console.log("[Auth] hashed access code:", hashedCode); - console.log("[User IP] ", getIP(req)); - console.log("[Time] ", new Date().toLocaleString()); + console.log('[Auth] allowed hashed codes: ', [...serverConfig.codes]); + console.log('[Auth] got access code:', accessCode); + console.log('[Auth] hashed access code:', hashedCode); + console.log('[User IP] ', getIP(req)); + console.log('[Time] ', new Date().toLocaleString()); if (serverConfig.needCode && !serverConfig.codes.has(hashedCode) && !apiKey) { return { error: true, - msg: !accessCode ? "empty access code" : "wrong access code", + msg: !accessCode ? 'empty access code' : 'wrong access code', }; } if (serverConfig.hideUserApiKey && !!apiKey) { return { error: true, - msg: "you are not allowed to access with your own api key", + msg: 'you are not allowed to access with your own api key', }; } @@ -89,8 +89,8 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { systemApiKey = serverConfig.moonshotApiKey; break; case ModelProvider.Iflytek: - systemApiKey = - serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret; + systemApiKey + = `${serverConfig.iflytekApiKey}:${serverConfig.iflytekApiSecret}`; break; case ModelProvider.XAI: systemApiKey = serverConfig.xaiApiKey; @@ -100,7 +100,7 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { break; case ModelProvider.GPT: default: - if (req.nextUrl.pathname.includes("azure/deployments")) { + if (req.nextUrl.pathname.includes('azure/deployments')) { systemApiKey = serverConfig.azureApiKey; } else { systemApiKey = serverConfig.apiKey; @@ -108,13 +108,13 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { } if (systemApiKey) { - console.log("[Auth] use system api key"); - req.headers.set("Authorization", `Bearer 
${systemApiKey}`); + console.log('[Auth] use system api key'); + req.headers.set('Authorization', `Bearer ${systemApiKey}`); } else { - console.log("[Auth] admin did not provide an api key"); + console.log('[Auth] admin did not provide an api key'); } } else { - console.log("[Auth] use user api key"); + console.log('[Auth] use user api key'); } return { diff --git a/app/api/azure.ts b/app/api/azure.ts index 39d872e8cf8..0d822541009 100644 --- a/app/api/azure.ts +++ b/app/api/azure.ts @@ -1,20 +1,21 @@ -import { ModelProvider } from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "./auth"; -import { requestOpenai } from "./common"; +import type { NextRequest } from 'next/server'; +import { ModelProvider } from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { NextResponse } from 'next/server'; +import { auth } from './auth'; +import { requestOpenai } from './common'; export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Azure Route] params ", params); + console.log('[Azure Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } - const subpath = params.path.join("/"); + const subpath = params.path.join('/'); const authResult = auth(req, ModelProvider.GPT); if (authResult.error) { @@ -26,7 +27,7 @@ export async function handle( try { return await requestOpenai(req); } catch (e) { - console.error("[Azure] ", e); + console.error('[Azure] ', e); return NextResponse.json(prettyObject(e)); } } diff --git a/app/api/baidu.ts b/app/api/baidu.ts index 0408b43c5bc..84d2a018a4e 100644 --- a/app/api/baidu.ts +++ b/app/api/baidu.ts @@ -1,15 +1,16 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest 
} from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { - BAIDU_BASE_URL, ApiPath, + BAIDU_BASE_URL, ModelProvider, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; -import { getAccessToken } from "@/app/utils/baidu"; +} from '@/app/constant'; +import { getAccessToken } from '@/app/utils/baidu'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -17,10 +18,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Baidu Route] params ", params); + console.log('[Baidu Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.Ernie); @@ -46,7 +47,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[Baidu] ", e); + console.error('[Baidu] ', e); return NextResponse.json(prettyObject(e)); } } @@ -54,20 +55,20 @@ export async function handle( async function request(req: NextRequest) { const controller = new AbortController(); - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, ''); let baseUrl = serverConfig.baiduUrl || BAIDU_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); 
} - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -84,13 +85,13 @@ async function request(req: NextRequest) { const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", + 'Content-Type': 'application/json', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -129,9 +130,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/bytedance.ts b/app/api/bytedance.ts index cb65b106109..572dc477401 100644 --- a/app/api/bytedance.ts +++ b/app/api/bytedance.ts @@ -1,14 +1,15 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { - BYTEDANCE_BASE_URL, ApiPath, + BYTEDANCE_BASE_URL, ModelProvider, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +} from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -16,10 +17,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - 
console.log("[ByteDance Route] params ", params); + console.log('[ByteDance Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.Doubao); @@ -33,7 +34,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[ByteDance] ", e); + console.error('[ByteDance] ', e); return NextResponse.json(prettyObject(e)); } } @@ -41,20 +42,20 @@ export async function handle( async function request(req: NextRequest) { const controller = new AbortController(); - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, ''); let baseUrl = serverConfig.bytedanceUrl || BYTEDANCE_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -67,14 +68,14 @@ async function request(req: NextRequest) { const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - Authorization: req.headers.get("Authorization") ?? "", + 'Content-Type': 'application/json', + 'Authorization': req.headers.get('Authorization') ?? 
'', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -114,9 +115,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/common.ts b/app/api/common.ts index 495a12ccdbb..683fb2f20ea 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,47 +1,48 @@ -import { NextRequest, NextResponse } from "next/server"; -import { getServerSideConfig } from "../config/server"; -import { OPENAI_BASE_URL, ServiceProvider } from "../constant"; -import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; -import { getModelProvider, isModelAvailableInServer } from "../utils/model"; +import type { NextRequest } from 'next/server'; +import { NextResponse } from 'next/server'; +import { getServerSideConfig } from '../config/server'; +import { OPENAI_BASE_URL, ServiceProvider } from '../constant'; +import { cloudflareAIGatewayUrl } from '../utils/cloudflare'; +import { getModelProvider, isModelAvailableInServer } from '../utils/model'; const serverConfig = getServerSideConfig(); export async function requestOpenai(req: NextRequest) { const controller = new AbortController(); - const isAzure = req.nextUrl.pathname.includes("azure/deployments"); + const isAzure = req.nextUrl.pathname.includes('azure/deployments'); - var authValue, - authHeaderName = ""; + let authValue; + let authHeaderName = ''; if (isAzure) { - authValue = - req.headers - .get("Authorization") + authValue + = req.headers + .get('Authorization') ?.trim() - .replaceAll("Bearer ", "") - .trim() ?? ""; + .replaceAll('Bearer ', '') + .trim() ?? 
''; - authHeaderName = "api-key"; + authHeaderName = 'api-key'; } else { - authValue = req.headers.get("Authorization") ?? ""; - authHeaderName = "Authorization"; + authValue = req.headers.get('Authorization') ?? ''; + authHeaderName = 'Authorization'; } - let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", ""); + let path = `${req.nextUrl.pathname}`.replaceAll('/api/openai/', ''); - let baseUrl = - (isAzure ? serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; + let baseUrl + = (isAzure ? serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -51,30 +52,30 @@ export async function requestOpenai(req: NextRequest) { ); if (isAzure) { - const azureApiVersion = - req?.nextUrl?.searchParams?.get("api-version") || - serverConfig.azureApiVersion; - baseUrl = baseUrl.split("/deployments").shift() as string; + const azureApiVersion + = req?.nextUrl?.searchParams?.get('api-version') + || serverConfig.azureApiVersion; + baseUrl = baseUrl.split('/deployments').shift() as string; path = `${req.nextUrl.pathname.replaceAll( - "/api/azure/", - "", + '/api/azure/', + '', )}?api-version=${azureApiVersion}`; // Forward compatibility: // if display_name(deployment_name) not set, and '{deploy-id}' in AZURE_URL // then using default '{deploy-id}' if (serverConfig.customModels && serverConfig.azureUrl) { - const modelName = path.split("/")[1]; - let realDeployName = ""; + const modelName = path.split('/')[1]; + let realDeployName = ''; serverConfig.customModels - .split(",") - .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName)) + .split(',') + .filter(v => 
!!v && !v.startsWith('-') && v.includes(modelName)) .forEach((m) => { - const [fullName, displayName] = m.split("="); + const [fullName, displayName] = m.split('='); const [_, providerName] = getModelProvider(fullName); - if (providerName === "azure" && !displayName) { - const [_, deployId] = (serverConfig?.azureUrl ?? "").split( - "deployments/", + if (providerName === 'azure' && !displayName) { + const [_, deployId] = (serverConfig?.azureUrl ?? '').split( + 'deployments/', ); if (deployId) { realDeployName = deployId; @@ -82,29 +83,29 @@ export async function requestOpenai(req: NextRequest) { } }); if (realDeployName) { - console.log("[Replace with DeployId", realDeployName); + console.log('[Replace with DeployId', realDeployName); path = path.replaceAll(modelName, realDeployName); } } } const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`); - console.log("fetchUrl", fetchUrl); + console.log('fetchUrl', fetchUrl); const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - "Cache-Control": "no-store", + 'Content-Type': 'application/json', + 'Cache-Control': 'no-store', [authHeaderName]: authValue, ...(serverConfig.openaiOrgId && { - "OpenAI-Organization": serverConfig.openaiOrgId, + 'OpenAI-Organization': serverConfig.openaiOrgId, }), }, method: req.method, body: req.body, // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -122,8 +123,8 @@ export async function requestOpenai(req: NextRequest) { serverConfig.customModels, jsonBody?.model as string, ServiceProvider.OpenAI as string, - ) || - isModelAvailableInServer( + ) + || isModelAvailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.Azure as string, @@ -140,7 +141,7 @@ export async function requestOpenai(req: NextRequest) { ); } } catch (e) { - 
console.error("[OpenAI] gpt4 filter", e); + console.error('[OpenAI] gpt4 filter', e); } } @@ -148,33 +149,33 @@ export async function requestOpenai(req: NextRequest) { const res = await fetch(fetchUrl, fetchOptions); // Extract the OpenAI-Organization header from the response - const openaiOrganizationHeader = res.headers.get("OpenAI-Organization"); + const openaiOrganizationHeader = res.headers.get('OpenAI-Organization'); // Check if serverConfig.openaiOrgId is defined and not an empty string - if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") { + if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== '') { // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present - console.log("[Org ID]", openaiOrganizationHeader); + console.log('[Org ID]', openaiOrganizationHeader); } else { - console.log("[Org ID] is not set up."); + console.log('[Org ID] is not set up.'); } // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV) // Also, this is to prevent the header from being sent to the client - if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") { - newHeaders.delete("OpenAI-Organization"); + if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === '') { + newHeaders.delete('OpenAI-Organization'); } // The latest version of the OpenAI API forced the content-encoding to be "br" in json response // So if the streaming is disabled, we need to remove the content-encoding header // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header // The browser will try to decode the 
response with brotli and fail - newHeaders.delete("content-encoding"); + newHeaders.delete('content-encoding'); return new Response(res.body, { status: res.status, diff --git a/app/api/config/route.ts b/app/api/config/route.ts index b0d9da03103..a06e1f09744 100644 --- a/app/api/config/route.ts +++ b/app/api/config/route.ts @@ -1,6 +1,6 @@ -import { NextResponse } from "next/server"; +import { NextResponse } from 'next/server'; -import { getServerSideConfig } from "../../config/server"; +import { getServerSideConfig } from '../../config/server'; const serverConfig = getServerSideConfig(); @@ -27,4 +27,4 @@ async function handle() { export const GET = handle; export const POST = handle; -export const runtime = "edge"; +export const runtime = 'edge'; diff --git a/app/api/glm.ts b/app/api/glm.ts index 3625b9f7bf9..51f4a178ab1 100644 --- a/app/api/glm.ts +++ b/app/api/glm.ts @@ -1,14 +1,15 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { - CHATGLM_BASE_URL, ApiPath, + CHATGLM_BASE_URL, ModelProvider, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +} from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -16,10 +17,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[GLM Route] params ", params); + console.log('[GLM Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 
'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.ChatGLM); @@ -33,7 +34,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[GLM] ", e); + console.error('[GLM] ', e); return NextResponse.json(prettyObject(e)); } } @@ -42,20 +43,20 @@ async function request(req: NextRequest) { const controller = new AbortController(); // alibaba use base url or just remove the path - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, ''); let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -65,17 +66,17 @@ async function request(req: NextRequest) { ); const fetchUrl = `${baseUrl}${path}`; - console.log("[Fetch Url] ", fetchUrl); + console.log('[Fetch Url] ', fetchUrl); const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - Authorization: req.headers.get("Authorization") ?? "", + 'Content-Type': 'application/json', + 'Authorization': req.headers.get('Authorization') ?? 
'', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -114,9 +115,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/google.ts b/app/api/google.ts index 707892c33d0..6d757e7c726 100644 --- a/app/api/google.ts +++ b/app/api/google.ts @@ -1,8 +1,9 @@ -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "./auth"; -import { getServerSideConfig } from "@/app/config/server"; -import { ApiPath, GEMINI_BASE_URL, ModelProvider } from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; +import type { NextRequest } from 'next/server'; +import { getServerSideConfig } from '@/app/config/server'; +import { ApiPath, GEMINI_BASE_URL, ModelProvider } from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { NextResponse } from 'next/server'; +import { auth } from './auth'; const serverConfig = getServerSideConfig(); @@ -10,10 +11,10 @@ export async function handle( req: NextRequest, { params }: { params: { provider: string; path: string[] } }, ) { - console.log("[Google Route] params ", params); + console.log('[Google Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.GeminiPro); @@ -23,11 +24,11 @@ export async function handle( }); } - const bearToken = - req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || ""; - 
const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + const bearToken + = req.headers.get('x-goog-api-key') || req.headers.get('Authorization') || ''; + const token = bearToken.trim().replaceAll('Bearer ', '').trim(); - const apiKey = token ? token : serverConfig.googleApiKey; + const apiKey = token || serverConfig.googleApiKey; if (!apiKey) { return NextResponse.json( @@ -44,7 +45,7 @@ export async function handle( const response = await request(req, apiKey); return response; } catch (e) { - console.error("[Google] ", e); + console.error('[Google] ', e); return NextResponse.json(prettyObject(e)); } } @@ -52,20 +53,20 @@ export async function handle( export const GET = handle; export const POST = handle; -export const runtime = "edge"; +export const runtime = 'edge'; export const preferredRegion = [ - "bom1", - "cle1", - "cpt1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "pdx1", - "sfo1", - "sin1", - "syd1", + 'bom1', + 'cle1', + 'cpt1', + 'gru1', + 'hnd1', + 'iad1', + 'icn1', + 'kix1', + 'pdx1', + 'sfo1', + 'sin1', + 'syd1', ]; async function request(req: NextRequest, apiKey: string) { @@ -73,18 +74,18 @@ async function request(req: NextRequest, apiKey: string) { let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Google, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Google, ''); - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -93,24 +94,24 @@ async function request(req: NextRequest, apiKey: string) { 10 * 60 * 1000, ); const fetchUrl = `${baseUrl}${path}${ - req?.nextUrl?.searchParams?.get("alt") === "sse" ? 
"?alt=sse" : "" + req?.nextUrl?.searchParams?.get('alt') === 'sse' ? '?alt=sse' : '' }`; - console.log("[Fetch Url] ", fetchUrl); + console.log('[Fetch Url] ', fetchUrl); const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - "Cache-Control": "no-store", - "x-goog-api-key": - req.headers.get("x-goog-api-key") || - (req.headers.get("Authorization") ?? "").replace("Bearer ", ""), + 'Content-Type': 'application/json', + 'Cache-Control': 'no-store', + 'x-goog-api-key': + req.headers.get('x-goog-api-key') + || (req.headers.get('Authorization') ?? '').replace('Bearer ', ''), }, method: req.method, body: req.body, // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -118,9 +119,9 @@ async function request(req: NextRequest, apiKey: string) { const res = await fetch(fetchUrl, fetchOptions); // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts index 8b8227dce1f..16313b2f2eb 100644 --- a/app/api/iflytek.ts +++ b/app/api/iflytek.ts @@ -1,14 +1,15 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { - IFLYTEK_BASE_URL, ApiPath, + IFLYTEK_BASE_URL, ModelProvider, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { 
isModelAvailableInServer } from "@/app/utils/model"; +} from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; // iflytek const serverConfig = getServerSideConfig(); @@ -17,10 +18,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Iflytek Route] params ", params); + console.log('[Iflytek Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.Iflytek); @@ -34,7 +35,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[Iflytek] ", e); + console.error('[Iflytek] ', e); return NextResponse.json(prettyObject(e)); } } @@ -43,20 +44,20 @@ async function request(req: NextRequest) { const controller = new AbortController(); // iflytek use base url or just remove the path - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, ''); let baseUrl = serverConfig.iflytekUrl || IFLYTEK_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -68,14 +69,14 @@ async function request(req: NextRequest) { const fetchUrl = `${baseUrl}${path}`; const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - Authorization: req.headers.get("Authorization") ?? 
"", + 'Content-Type': 'application/json', + 'Authorization': req.headers.get('Authorization') ?? '', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -114,9 +115,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts index 5bf4807e3e6..60c1ddd1034 100644 --- a/app/api/moonshot.ts +++ b/app/api/moonshot.ts @@ -1,14 +1,15 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { - MOONSHOT_BASE_URL, ApiPath, ModelProvider, + MOONSHOT_BASE_URL, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +} from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -16,10 +17,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Moonshot Route] params ", params); + console.log('[Moonshot Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { 
status: 200 }); } const authResult = auth(req, ModelProvider.Moonshot); @@ -33,7 +34,7 @@ export async function handle( const response = await request(req); return response; } catch (e) { - console.error("[Moonshot] ", e); + console.error('[Moonshot] ', e); return NextResponse.json(prettyObject(e)); } } @@ -42,20 +43,20 @@ async function request(req: NextRequest) { const controller = new AbortController(); // alibaba use base url or just remove the path - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Moonshot, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Moonshot, ''); let baseUrl = serverConfig.moonshotUrl || MOONSHOT_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -67,14 +68,14 @@ async function request(req: NextRequest) { const fetchUrl = `${baseUrl}${path}`; const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - Authorization: req.headers.get("Authorization") ?? "", + 'Content-Type': 'application/json', + 'Authorization': req.headers.get('Authorization') ?? 
'', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -113,9 +114,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/openai.ts b/app/api/openai.ts index 2b5deca8be3..98e5c967b9e 100644 --- a/app/api/openai.ts +++ b/app/api/openai.ts @@ -1,10 +1,11 @@ -import { type OpenAIListModelResponse } from "@/app/client/platforms/openai"; -import { getServerSideConfig } from "@/app/config/server"; -import { ModelProvider, OpenaiPath } from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "./auth"; -import { requestOpenai } from "./common"; +import type { OpenAIListModelResponse } from '@/app/client/platforms/openai'; +import type { NextRequest } from 'next/server'; +import { getServerSideConfig } from '@/app/config/server'; +import { ModelProvider, OpenaiPath } from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { NextResponse } from 'next/server'; +import { auth } from './auth'; +import { requestOpenai } from './common'; const ALLOWED_PATH = new Set(Object.values(OpenaiPath)); @@ -13,9 +14,9 @@ function getModels(remoteModelRes: OpenAIListModelResponse) { if (config.disableGPT4) { remoteModelRes.data = remoteModelRes.data.filter( - (m) => - !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1")) || - m.id.startsWith("gpt-4o-mini"), + m => + !(m.id.startsWith('gpt-4') || m.id.startsWith('chatgpt-4o') || m.id.startsWith('o1')) + || 
m.id.startsWith('gpt-4o-mini'), ); } @@ -26,20 +27,20 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[OpenAI Route] params ", params); + console.log('[OpenAI Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } - const subpath = params.path.join("/"); + const subpath = params.path.join('/'); if (!ALLOWED_PATH.has(subpath)) { - console.log("[OpenAI Route] forbidden path ", subpath); + console.log('[OpenAI Route] forbidden path ', subpath); return NextResponse.json( { error: true, - msg: "you are not allowed to request " + subpath, + msg: `you are not allowed to request ${subpath}`, }, { status: 403, @@ -68,7 +69,7 @@ export async function handle( return response; } catch (e) { - console.error("[OpenAI] ", e); + console.error('[OpenAI] ', e); return NextResponse.json(prettyObject(e)); } } diff --git a/app/api/proxy.ts b/app/api/proxy.ts index b3e5e7b7b93..6143605dd28 100644 --- a/app/api/proxy.ts +++ b/app/api/proxy.ts @@ -1,32 +1,33 @@ -import { NextRequest, NextResponse } from "next/server"; -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { getServerSideConfig } from '@/app/config/server'; +import { NextResponse } from 'next/server'; export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Proxy Route] params ", params); + console.log('[Proxy Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const serverConfig = getServerSideConfig(); // remove path params from searchParams - req.nextUrl.searchParams.delete("path"); - 
req.nextUrl.searchParams.delete("provider"); + req.nextUrl.searchParams.delete('path'); + req.nextUrl.searchParams.delete('provider'); - const subpath = params.path.join("/"); + const subpath = params.path.join('/'); const fetchUrl = `${req.headers.get( - "x-base-url", + 'x-base-url', )}/${subpath}?${req.nextUrl.searchParams.toString()}`; - const skipHeaders = ["connection", "host", "origin", "referer", "cookie"]; + const skipHeaders = ['connection', 'host', 'origin', 'referer', 'cookie']; const headers = new Headers( Array.from(req.headers.entries()).filter((item) => { if ( - item[0].indexOf("x-") > -1 || - item[0].indexOf("sec-") > -1 || - skipHeaders.includes(item[0]) + item[0].includes('x-') + || item[0].includes('sec-') + || skipHeaders.includes(item[0]) ) { return false; } @@ -34,16 +35,16 @@ export async function handle( }), ); // if dalle3 use openai api key - const baseUrl = req.headers.get("x-base-url"); - if (baseUrl?.includes("api.openai.com")) { - if (!serverConfig.apiKey) { - return NextResponse.json( - { error: "OpenAI API key not configured" }, - { status: 500 }, - ); - } - headers.set("Authorization", `Bearer ${serverConfig.apiKey}`); + const baseUrl = req.headers.get('x-base-url'); + if (baseUrl?.includes('api.openai.com')) { + if (!serverConfig.apiKey) { + return NextResponse.json( + { error: 'OpenAI API key not configured' }, + { status: 500 }, + ); } + headers.set('Authorization', `Bearer ${serverConfig.apiKey}`); + } const controller = new AbortController(); const fetchOptions: RequestInit = { @@ -51,9 +52,9 @@ export async function handle( method: req.method, body: req.body, // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -68,15 +69,15 @@ export async function handle( const res = await fetch(fetchUrl, fetchOptions); // to prevent browser prompt for 
credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); // The latest version of the OpenAI API forced the content-encoding to be "br" in json response // So if the streaming is disabled, we need to remove the content-encoding header // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header // The browser will try to decode the response with brotli and fail - newHeaders.delete("content-encoding"); + newHeaders.delete('content-encoding'); return new Response(res.body, { status: res.status, diff --git a/app/api/stability.ts b/app/api/stability.ts index 2646ace858e..e4a004e32b8 100644 --- a/app/api/stability.ts +++ b/app/api/stability.ts @@ -1,16 +1,17 @@ -import { NextRequest, NextResponse } from "next/server"; -import { getServerSideConfig } from "@/app/config/server"; -import { ModelProvider, STABILITY_BASE_URL } from "@/app/constant"; -import { auth } from "@/app/api/auth"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; +import { ModelProvider, STABILITY_BASE_URL } from '@/app/constant'; +import { NextResponse } from 'next/server'; export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Stability] params ", params); + console.log('[Stability] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const controller = new AbortController(); @@ -19,18 +20,18 @@ export async function handle( let baseUrl = serverConfig.stabilityUrl || STABILITY_BASE_URL; - if (!baseUrl.startsWith("http")) { + if 
(!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - let path = `${req.nextUrl.pathname}`.replaceAll("/api/stability/", ""); + const path = `${req.nextUrl.pathname}`.replaceAll('/api/stability/', ''); - console.log("[Stability Proxy] ", path); - console.log("[Stability Base Url]", baseUrl); + console.log('[Stability Proxy] ', path); + console.log('[Stability Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -47,10 +48,10 @@ export async function handle( }); } - const bearToken = req.headers.get("Authorization") ?? ""; - const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + const bearToken = req.headers.get('Authorization') ?? ''; + const token = bearToken.trim().replaceAll('Bearer ', '').trim(); - const key = token ? token : serverConfig.stabilityApiKey; + const key = token || serverConfig.stabilityApiKey; if (!key) { return NextResponse.json( @@ -65,19 +66,19 @@ export async function handle( } const fetchUrl = `${baseUrl}/${path}`; - console.log("[Stability Url] ", fetchUrl); + console.log('[Stability Url] ', fetchUrl); const fetchOptions: RequestInit = { headers: { - "Content-Type": req.headers.get("Content-Type") || "multipart/form-data", - Accept: req.headers.get("Accept") || "application/json", - Authorization: `Bearer ${key}`, + 'Content-Type': req.headers.get('Content-Type') || 'multipart/form-data', + 'Accept': req.headers.get('Accept') || 'application/json', + 'Authorization': `Bearer ${key}`, }, method: req.method, body: req.body, // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -85,9 +86,9 @@ export async function handle( const res = await fetch(fetchUrl, fetchOptions); // to prevent browser prompt for credentials const newHeaders = new 
Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, statusText: res.statusText, diff --git a/app/api/tencent/route.ts b/app/api/tencent/route.ts index fc4f8c79edf..4c31e8007e1 100644 --- a/app/api/tencent/route.ts +++ b/app/api/tencent/route.ts @@ -1,9 +1,10 @@ -import { getServerSideConfig } from "@/app/config/server"; -import { TENCENT_BASE_URL, ModelProvider } from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { getHeader } from "@/app/utils/tencent"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; +import { ModelProvider, TENCENT_BASE_URL } from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { getHeader } from '@/app/utils/tencent'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -11,10 +12,10 @@ async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[Tencent Route] params ", params); + console.log('[Tencent Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.Hunyuan); @@ -28,7 +29,7 @@ async function handle( const response = await request(req); return response; } catch (e) { - console.error("[Tencent] ", e); + console.error('[Tencent] ', e); return NextResponse.json(prettyObject(e)); } } @@ -36,25 +37,25 @@ async function handle( export const GET = handle; export const POST = handle; 
-export const runtime = "edge"; +export const runtime = 'edge'; export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", + 'arn1', + 'bom1', + 'cdg1', + 'cle1', + 'cpt1', + 'dub1', + 'fra1', + 'gru1', + 'hnd1', + 'iad1', + 'icn1', + 'kix1', + 'lhr1', + 'pdx1', + 'sfo1', + 'sin1', + 'syd1', ]; async function request(req: NextRequest) { @@ -62,15 +63,15 @@ async function request(req: NextRequest) { let baseUrl = serverConfig.tencentUrl || TENCENT_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Base Url]", baseUrl); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -91,9 +92,9 @@ async function request(req: NextRequest) { headers, method: req.method, body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -102,9 +103,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/api/upstash/[action]/[...key]/route.ts b/app/api/upstash/[action]/[...key]/route.ts index fcfef471862..46d10610ee7 100644 --- a/app/api/upstash/[action]/[...key]/route.ts +++ b/app/api/upstash/[action]/[...key]/route.ts @@ -1,22 +1,23 @@ -import { NextRequest, NextResponse } from "next/server"; +import type { NextRequest } from 'next/server'; +import { NextResponse } from 'next/server'; async function handle( req: NextRequest, { 
params }: { params: { action: string; key: string[] } }, ) { const requestUrl = new URL(req.url); - const endpoint = requestUrl.searchParams.get("endpoint"); + const endpoint = requestUrl.searchParams.get('endpoint'); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const [...key] = params.key; // only allow to request to *.upstash.io - if (!endpoint || !new URL(endpoint).hostname.endsWith(".upstash.io")) { + if (!endpoint || !new URL(endpoint).hostname.endsWith('.upstash.io')) { return NextResponse.json( { error: true, - msg: "you are not allowed to request " + params.key.join("/"), + msg: `you are not allowed to request ${params.key.join('/')}`, }, { status: 403, @@ -25,12 +26,12 @@ async function handle( } // only allow upstash get and set method - if (params.action !== "get" && params.action !== "set") { - console.log("[Upstash Route] forbidden action ", params.action); + if (params.action !== 'get' && params.action !== 'set') { + console.log('[Upstash Route] forbidden action ', params.action); return NextResponse.json( { error: true, - msg: "you are not allowed to request " + params.action, + msg: `you are not allowed to request ${params.action}`, }, { status: 403, @@ -38,27 +39,27 @@ async function handle( ); } - const targetUrl = `${endpoint}/${params.action}/${params.key.join("/")}`; + const targetUrl = `${endpoint}/${params.action}/${params.key.join('/')}`; const method = req.method; - const shouldNotHaveBody = ["get", "head"].includes( - method?.toLowerCase() ?? "", + const shouldNotHaveBody = ['get', 'head'].includes( + method?.toLowerCase() ?? '', ); const fetchOptions: RequestInit = { headers: { - authorization: req.headers.get("authorization") ?? "", + authorization: req.headers.get('authorization') ?? '', }, body: shouldNotHaveBody ? 
null : req.body, method, // @ts-ignore - duplex: "half", + duplex: 'half', }; - console.log("[Upstash Proxy]", targetUrl, fetchOptions); + console.log('[Upstash Proxy]', targetUrl, fetchOptions); const fetchResult = await fetch(targetUrl, fetchOptions); - console.log("[Any Proxy]", targetUrl, { + console.log('[Any Proxy]', targetUrl, { status: fetchResult.status, statusText: fetchResult.statusText, }); @@ -70,4 +71,4 @@ export const POST = handle; export const GET = handle; export const OPTIONS = handle; -export const runtime = "edge"; +export const runtime = 'edge'; diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index bb7743bda40..ad965f443a7 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -1,47 +1,48 @@ -import { NextRequest, NextResponse } from "next/server"; -import { STORAGE_KEY, internalAllowedWebDavEndpoints } from "../../../constant"; -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { getServerSideConfig } from '@/app/config/server'; +import { NextResponse } from 'next/server'; +import { internalAllowedWebDavEndpoints, STORAGE_KEY } from '../../../constant'; const config = getServerSideConfig(); const mergedAllowedWebDavEndpoints = [ ...internalAllowedWebDavEndpoints, ...config.allowedWebDavEndpoints, -].filter((domain) => Boolean(domain.trim())); +].filter(domain => Boolean(domain.trim())); -const normalizeUrl = (url: string) => { +function normalizeUrl(url: string) { try { return new URL(url); } catch (err) { return null; } -}; +} async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const folder = STORAGE_KEY; const fileName = `${folder}/backup.json`; const requestUrl = new URL(req.url); 
- let endpoint = requestUrl.searchParams.get("endpoint"); - let proxy_method = requestUrl.searchParams.get("proxy_method") || req.method; + let endpoint = requestUrl.searchParams.get('endpoint'); + const proxy_method = requestUrl.searchParams.get('proxy_method') || req.method; // Validate the endpoint to prevent potential SSRF attacks if ( - !endpoint || - !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => { + !endpoint + || !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => { const normalizedAllowedEndpoint = normalizeUrl(allowedEndpoint); const normalizedEndpoint = normalizeUrl(endpoint as string); return ( - normalizedEndpoint && - normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname && - normalizedEndpoint.pathname.startsWith( + normalizedEndpoint + && normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname + && normalizedEndpoint.pathname.startsWith( normalizedAllowedEndpoint.pathname, ) ); @@ -50,7 +51,7 @@ async function handle( return NextResponse.json( { error: true, - msg: "Invalid endpoint", + msg: 'Invalid endpoint', }, { status: 400, @@ -58,23 +59,23 @@ async function handle( ); } - if (!endpoint?.endsWith("/")) { - endpoint += "/"; + if (!endpoint?.endsWith('/')) { + endpoint += '/'; } - const endpointPath = params.path.join("/"); + const endpointPath = params.path.join('/'); const targetPath = `${endpoint}${endpointPath}`; // only allow MKCOL, GET, PUT if ( - proxy_method !== "MKCOL" && - proxy_method !== "GET" && - proxy_method !== "PUT" + proxy_method !== 'MKCOL' + && proxy_method !== 'GET' + && proxy_method !== 'PUT' ) { return NextResponse.json( { error: true, - msg: "you are not allowed to request " + targetPath, + msg: `you are not allowed to request ${targetPath}`, }, { status: 403, @@ -83,11 +84,11 @@ async function handle( } // for MKCOL request, only allow request ${folder} - if (proxy_method === "MKCOL" && !targetPath.endsWith(folder)) { + if (proxy_method === 'MKCOL' && !targetPath.endsWith(folder)) { 
return NextResponse.json( { error: true, - msg: "you are not allowed to request " + targetPath, + msg: `you are not allowed to request ${targetPath}`, }, { status: 403, @@ -96,11 +97,11 @@ async function handle( } // for GET request, only allow request ending with fileName - if (proxy_method === "GET" && !targetPath.endsWith(fileName)) { + if (proxy_method === 'GET' && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, - msg: "you are not allowed to request " + targetPath, + msg: `you are not allowed to request ${targetPath}`, }, { status: 403, @@ -109,11 +110,11 @@ async function handle( } // for PUT request, only allow request ending with fileName - if (proxy_method === "PUT" && !targetPath.endsWith(fileName)) { + if (proxy_method === 'PUT' && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, - msg: "you are not allowed to request " + targetPath, + msg: `you are not allowed to request ${targetPath}`, }, { status: 403, @@ -124,19 +125,19 @@ async function handle( const targetUrl = targetPath; const method = proxy_method || req.method; - const shouldNotHaveBody = ["get", "head"].includes( - method?.toLowerCase() ?? "", + const shouldNotHaveBody = ['get', 'head'].includes( + method?.toLowerCase() ?? '', ); const fetchOptions: RequestInit = { headers: { - authorization: req.headers.get("authorization") ?? "", + authorization: req.headers.get('authorization') ?? '', }, body: shouldNotHaveBody ? 
null : req.body, - redirect: "manual", + redirect: 'manual', method, // @ts-ignore - duplex: "half", + duplex: 'half', }; let fetchResult; @@ -145,10 +146,10 @@ async function handle( fetchResult = await fetch(targetUrl, fetchOptions); } finally { console.log( - "[Any Proxy]", + '[Any Proxy]', targetUrl, { - method: method, + method, }, { status: fetchResult?.status, @@ -164,4 +165,4 @@ export const PUT = handle; export const GET = handle; export const OPTIONS = handle; -export const runtime = "edge"; +export const runtime = 'edge'; diff --git a/app/api/xai.ts b/app/api/xai.ts index a4ee8b39731..f59f1329cd8 100644 --- a/app/api/xai.ts +++ b/app/api/xai.ts @@ -1,14 +1,15 @@ -import { getServerSideConfig } from "@/app/config/server"; +import type { NextRequest } from 'next/server'; +import { auth } from '@/app/api/auth'; +import { getServerSideConfig } from '@/app/config/server'; import { - XAI_BASE_URL, ApiPath, ModelProvider, ServiceProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; + XAI_BASE_URL, +} from '@/app/constant'; +import { prettyObject } from '@/app/utils/format'; +import { isModelAvailableInServer } from '@/app/utils/model'; +import { NextResponse } from 'next/server'; const serverConfig = getServerSideConfig(); @@ -16,10 +17,10 @@ export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { - console.log("[XAI Route] params ", params); + console.log('[XAI Route] params ', params); - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); + if (req.method === 'OPTIONS') { + return NextResponse.json({ body: 'OK' }, { status: 200 }); } const authResult = auth(req, ModelProvider.XAI); @@ -33,7 +34,7 @@ export async function handle( const response = await request(req); return response; } 
catch (e) { - console.error("[XAI] ", e); + console.error('[XAI] ', e); return NextResponse.json(prettyObject(e)); } } @@ -42,20 +43,20 @@ async function request(req: NextRequest) { const controller = new AbortController(); // alibaba use base url or just remove the path - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, ""); + const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, ''); let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL; - if (!baseUrl.startsWith("http")) { + if (!baseUrl.startsWith('http')) { baseUrl = `https://${baseUrl}`; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, -1); } - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); + console.log('[Proxy] ', path); + console.log('[Base Url]', baseUrl); const timeoutId = setTimeout( () => { @@ -67,14 +68,14 @@ async function request(req: NextRequest) { const fetchUrl = `${baseUrl}${path}`; const fetchOptions: RequestInit = { headers: { - "Content-Type": "application/json", - Authorization: req.headers.get("Authorization") ?? "", + 'Content-Type': 'application/json', + 'Authorization': req.headers.get('Authorization') ?? 
'', }, method: req.method, body: req.body, - redirect: "manual", + redirect: 'manual', // @ts-ignore - duplex: "half", + duplex: 'half', signal: controller.signal, }; @@ -113,9 +114,9 @@ async function request(req: NextRequest) { // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); + newHeaders.delete('www-authenticate'); // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); + newHeaders.set('X-Accel-Buffering', 'no'); return new Response(res.body, { status: res.status, diff --git a/app/client/api.ts b/app/client/api.ts index 1da81e96448..c6cce515b04 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -1,37 +1,40 @@ -import { getClientConfig } from "../config/client"; +import type { + ChatMessage, + ChatMessageTool, + ModelType, +} from '../store'; +import type { DalleRequestPayload } from './platforms/openai'; +import { getClientConfig } from '../config/client'; import { ACCESS_CODE_PREFIX, ModelProvider, ServiceProvider, -} from "../constant"; +} from '../constant'; import { - ChatMessageTool, - ChatMessage, - ModelType, useAccessStore, useChatStore, -} from "../store"; -import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai"; -import { GeminiProApi } from "./platforms/google"; -import { ClaudeApi } from "./platforms/anthropic"; -import { ErnieApi } from "./platforms/baidu"; -import { DoubaoApi } from "./platforms/bytedance"; -import { QwenApi } from "./platforms/alibaba"; -import { HunyuanApi } from "./platforms/tencent"; -import { MoonshotApi } from "./platforms/moonshot"; -import { SparkApi } from "./platforms/iflytek"; -import { XAIApi } from "./platforms/xai"; -import { ChatGLMApi } from "./platforms/glm"; - -export const ROLES = ["system", "user", "assistant"] as const; +} from '../store'; +import { QwenApi } from './platforms/alibaba'; +import { ClaudeApi } from './platforms/anthropic'; +import { ErnieApi } from './platforms/baidu'; +import 
{ DoubaoApi } from './platforms/bytedance'; +import { ChatGLMApi } from './platforms/glm'; +import { GeminiProApi } from './platforms/google'; +import { SparkApi } from './platforms/iflytek'; +import { MoonshotApi } from './platforms/moonshot'; +import { ChatGPTApi } from './platforms/openai'; +import { HunyuanApi } from './platforms/tencent'; +import { XAIApi } from './platforms/xai'; + +export const ROLES = ['system', 'user', 'assistant'] as const; export type MessageRole = (typeof ROLES)[number]; -export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; -export const TTSModels = ["tts-1", "tts-1-hd"] as const; +export const Models = ['gpt-3.5-turbo', 'gpt-4'] as const; +export const TTSModels = ['tts-1', 'tts-1-hd'] as const; export type ChatModel = ModelType; export interface MultimodalContent { - type: "text" | "image_url"; + type: 'text' | 'image_url'; text?: string; image_url?: { url: string; @@ -51,9 +54,9 @@ export interface LLMConfig { stream?: boolean; presence_penalty?: number; frequency_penalty?: number; - size?: DalleRequestPayload["size"]; - quality?: DalleRequestPayload["quality"]; - style?: DalleRequestPayload["style"]; + size?: DalleRequestPayload['size']; + quality?: DalleRequestPayload['quality']; + style?: DalleRequestPayload['style']; } export interface SpeechOptions { @@ -104,7 +107,7 @@ export abstract class LLMApi { abstract models(): Promise; } -type ProviderName = "openai" | "azure" | "claude" | "palm"; +type ProviderName = 'openai' | 'azure' | 'claude' | 'palm'; interface Model { name: string; @@ -173,24 +176,24 @@ export class ClientApi { async share(messages: ChatMessage[], avatarUrl: string | null = null) { const msgs = messages - .map((m) => ({ - from: m.role === "user" ? "human" : "gpt", + .map(m => ({ + from: m.role === 'user' ? 
'human' : 'gpt', value: m.content, })) .concat([ { - from: "human", + from: 'human', value: - "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web", + 'Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web', }, ]); // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用 // Please do not modify this message - console.log("[Share]", messages, msgs); + console.log('[Share]', messages, msgs); const clientConfig = getClientConfig(); - const proxyUrl = "/sharegpt"; - const rawUrl = "https://sharegpt.com/api/conversations"; + const proxyUrl = '/sharegpt'; + const rawUrl = 'https://sharegpt.com/api/conversations'; const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl; const res = await fetch(shareUrl, { body: JSON.stringify({ @@ -198,13 +201,13 @@ export class ClientApi { items: msgs, }), headers: { - "Content-Type": "application/json", + 'Content-Type': 'application/json', }, - method: "POST", + method: 'POST', }); const resJson = await res.json(); - console.log("[Share]", resJson); + console.log('[Share]', resJson); if (resJson.id) { return `https://shareg.pt/${resJson.id}`; } @@ -216,8 +219,8 @@ export function getBearerToken( noBearer: boolean = false, ): string { return validString(apiKey) - ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}` - : ""; + ? `${noBearer ? '' : 'Bearer '}${apiKey.trim()}` + : ''; } export function validString(x: string): boolean { @@ -230,8 +233,8 @@ export function getHeaders(ignoreHeaders: boolean = false) { let headers: Record = {}; if (!ignoreHeaders) { headers = { - "Content-Type": "application/json", - Accept: "application/json", + 'Content-Type': 'application/json', + 'Accept': 'application/json', }; } @@ -253,24 +256,24 @@ export function getHeaders(ignoreHeaders: boolean = false) { const apiKey = isGoogle ? accessStore.googleApiKey : isAzure - ? accessStore.azureApiKey - : isAnthropic - ? accessStore.anthropicApiKey - : isByteDance - ? accessStore.bytedanceApiKey - : isAlibaba - ? 
accessStore.alibabaApiKey - : isMoonshot - ? accessStore.moonshotApiKey - : isXAI - ? accessStore.xaiApiKey - : isChatGLM - ? accessStore.chatglmApiKey - : isIflytek - ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret - ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret - : "" - : accessStore.openaiApiKey; + ? accessStore.azureApiKey + : isAnthropic + ? accessStore.anthropicApiKey + : isByteDance + ? accessStore.bytedanceApiKey + : isAlibaba + ? accessStore.alibabaApiKey + : isMoonshot + ? accessStore.moonshotApiKey + : isXAI + ? accessStore.xaiApiKey + : isChatGLM + ? accessStore.chatglmApiKey + : isIflytek + ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret + ? `${accessStore.iflytekApiKey}:${accessStore.iflytekApiSecret}` + : '' + : accessStore.openaiApiKey; return { isGoogle, isAzure, @@ -289,12 +292,12 @@ export function getHeaders(ignoreHeaders: boolean = false) { function getAuthHeader(): string { return isAzure - ? "api-key" + ? 'api-key' : isAnthropic - ? "x-api-key" - : isGoogle - ? "x-goog-api-key" - : "Authorization"; + ? 'x-api-key' + : isGoogle + ? 
'x-goog-api-key' + : 'Authorization'; } const { @@ -306,7 +309,8 @@ export function getHeaders(ignoreHeaders: boolean = false) { isEnabledAccessControl, } = getConfig(); // when using baidu api in app, not set auth header - if (isBaidu && clientConfig?.isApp) return headers; + if (isBaidu && clientConfig?.isApp) + { return headers; } const authHeader = getAuthHeader(); @@ -318,7 +322,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { if (bearerToken) { headers[authHeader] = bearerToken; } else if (isEnabledAccessControl && validString(accessStore.accessCode)) { - headers["Authorization"] = getBearerToken( + headers.Authorization = getBearerToken( ACCESS_CODE_PREFIX + accessStore.accessCode, ); } diff --git a/app/client/controller.ts b/app/client/controller.ts index a2e00173dd0..5990f476adf 100644 --- a/app/client/controller.ts +++ b/app/client/controller.ts @@ -19,7 +19,7 @@ export const ChatControllerPool = { }, stopAll() { - Object.values(this.controllers).forEach((v) => v.abort()); + Object.values(this.controllers).forEach(v => v.abort()); }, hasPending() { diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 6fe69e87ae2..731176c4c2f 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -1,29 +1,31 @@ -"use client"; -import { - ApiPath, - Alibaba, - ALIBABA_BASE_URL, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - -import { +'use client'; +import type { ChatOptions, - getHeaders, LLMApi, LLMModel, - SpeechOptions, MultimodalContent, -} from "../api"; -import Locale from "../../locales"; + SpeechOptions, +} from '../api'; +import { getClientConfig } from '@/app/config/client'; + +import { + Alibaba, + ALIBABA_BASE_URL, + ApiPath, + REQUEST_TIMEOUT_MS, +} from '@/app/constant'; +import { useAccessStore, useAppConfig, useChatStore } from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; 
+import { prettyObject } from '@/app/utils/format'; +import { fetch } from '@/app/utils/stream'; import { EventStreamContentType, fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { fetch } from "@/app/utils/stream"; +} from '@fortaine/fetch-event-source'; +import Locale from '../../locales'; +import { + getHeaders, +} from '../api'; export interface OpenAIListModelResponse { object: string; @@ -36,7 +38,7 @@ export interface OpenAIListModelResponse { interface RequestInput { messages: { - role: "system" | "user" | "assistant"; + role: 'system' | 'user' | 'assistant'; content: string | MultimodalContent[]; }[]; } @@ -58,7 +60,7 @@ export class QwenApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.alibabaUrl; @@ -69,28 +71,28 @@ export class QwenApi implements LLMApi { baseUrl = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Alibaba)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } extractMessage(res: any) { - return res?.output?.choices?.at(0)?.message?.content ?? ""; + return res?.output?.choices?.at(0)?.message?.content ?? 
''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages = options.messages.map((v) => ({ + const messages = options.messages.map(v => ({ role: v.role, content: getMessageTextContent(v), })); @@ -110,7 +112,7 @@ export class QwenApi implements LLMApi { messages, }, parameters: { - result_format: "message", + result_format: 'message', incremental_output: shouldStream, temperature: modelConfig.temperature, // max_tokens: modelConfig.max_tokens, @@ -124,12 +126,12 @@ export class QwenApi implements LLMApi { try { const chatPath = this.path(Alibaba.ChatPath); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: { ...getHeaders(), - "X-DashScope-SSE": shouldStream ? "enable" : "disable", + 'X-DashScope-SSE': shouldStream ? 'enable' : 'disable', }, }; @@ -140,8 +142,8 @@ export class QwenApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; + let responseText = ''; + let remainText = ''; let finished = false; let responseRes: Response; @@ -149,9 +151,9 @@ export class QwenApi implements LLMApi { function animateResponseText() { if (finished || controller.signal.aborted) { responseText += remainText; - console.log("[Response Animation] finished"); + console.log('[Response Animation] finished'); if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + options.onError?.(new Error('empty response from server')); } return; } @@ -184,24 +186,24 @@ export class QwenApi implements LLMApi { ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); + const contentType = res.headers.get('content-type'); console.log( - "[Alibaba] request response content type: ", + '[Alibaba] request response content type: ', contentType, ); 
responseRes = res; - if (contentType?.startsWith("text/plain")) { + if (contentType?.startsWith('text/plain')) { responseText = await res.clone().text(); return finish(); } if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + !res.ok + || !res.headers + .get('content-type') + ?.startsWith(EventStreamContentType) + || res.status !== 200 ) { const responseTexts = [responseText]; let extraInfo = await res.clone().text(); @@ -218,13 +220,13 @@ export class QwenApi implements LLMApi { responseTexts.push(extraInfo); } - responseText = responseTexts.join("\n\n"); + responseText = responseTexts.join('\n\n'); return finish(); } }, onmessage(msg) { - if (msg.data === "[DONE]" || finished) { + if (msg.data === '[DONE]' || finished) { return finish(); } const text = msg.data; @@ -238,7 +240,7 @@ export class QwenApi implements LLMApi { remainText += delta; } } catch (e) { - console.error("[Request] parse error", text, msg); + console.error('[Request] parse error', text, msg); } }, onclose() { @@ -259,10 +261,11 @@ export class QwenApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 6747221a861..46db3ce834d 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,34 +1,36 @@ -import { Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api"; +import type { + ChatMessageTool, +} from '@/app/store'; +import type { ChatOptions, LLMApi, SpeechOptions } from '../api'; +import type { RequestPayload } from './openai'; +import { getClientConfig } from '@/app/config/client'; +import { Anthropic, ANTHROPIC_BASE_URL, ApiPath } from 
'@/app/constant'; import { useAccessStore, useAppConfig, useChatStore, usePluginStore, - ChatMessageTool, -} from "@/app/store"; -import { getClientConfig } from "@/app/config/client"; -import { ANTHROPIC_BASE_URL } from "@/app/constant"; -import { getMessageTextContent, isVisionModel } from "@/app/utils"; -import { preProcessImageContent, stream } from "@/app/utils/chat"; -import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; -import { RequestPayload } from "./openai"; -import { fetch } from "@/app/utils/stream"; - -export type MultiBlockContent = { - type: "image" | "text"; +} from '@/app/store'; +import { getMessageTextContent, isVisionModel } from '@/app/utils'; +import { preProcessImageContent, stream } from '@/app/utils/chat'; +import { cloudflareAIGatewayUrl } from '@/app/utils/cloudflare'; +import { fetch } from '@/app/utils/stream'; +import { getHeaders } from '../api'; + +export interface MultiBlockContent { + type: 'image' | 'text'; source?: { type: string; media_type: string; data: string; }; text?: string; -}; +} -export type AnthropicMessage = { +export interface AnthropicMessage { role: (typeof ClaudeMapper)[keyof typeof ClaudeMapper]; content: string | MultiBlockContent[]; -}; +} export interface AnthropicChatRequest { model: string; // The model that will complete your prompt. 
@@ -56,7 +58,7 @@ export interface ChatRequest { export interface ChatResponse { completion: string; - stop_reason: "stop_sequence" | "max_tokens"; + stop_reason: 'stop_sequence' | 'max_tokens'; model: string; } @@ -66,23 +68,24 @@ export type ChatStreamResponse = ChatResponse & { }; const ClaudeMapper = { - assistant: "assistant", - user: "user", - system: "user", + assistant: 'assistant', + user: 'user', + system: 'user', } as const; -const keys = ["claude-2, claude-instant-1"]; +const keys = ['claude-2, claude-instant-1']; export class ClaudeApi implements LLMApi { speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } extractMessage(res: any) { - console.log("[Response] claude response: ", res); + console.log('[Response] claude response: ', res); return res?.content?.[0]?.text; } + async chat(options: ChatOptions): Promise { const visionModel = isVisionModel(options.config.model); @@ -99,13 +102,13 @@ export class ClaudeApi implements LLMApi { }; // try get base64image from local cache image_url - const messages: ChatOptions["messages"] = []; + const messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = await preProcessImageContent(v.content); messages.push({ role: v.role, content }); } - const keys = ["system", "user"]; + const keys = ['system', 'user']; // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages for (let i = 0; i < messages.length - 1; i++) { @@ -116,8 +119,8 @@ export class ClaudeApi implements LLMApi { messages[i] = [ message, { - role: "assistant", - content: ";", + role: 'assistant', + content: ';', }, ] as any; } @@ -126,15 +129,17 @@ export class ClaudeApi implements LLMApi { const prompt = messages .flat() .filter((v) => { - if (!v.content) return false; - if (typeof v.content === "string" && !v.content.trim()) return false; + if (!v.content) + { return 
false; } + if (typeof v.content === 'string' && !v.content.trim()) + { return false; } return true; }) .map((v) => { const { role, content } = v; - const insideRole = ClaudeMapper[role] ?? "user"; + const insideRole = ClaudeMapper[role] ?? 'user'; - if (!visionModel || typeof content === "string") { + if (!visionModel || typeof content === 'string') { return { role: insideRole, content: getMessageTextContent(v), @@ -143,25 +148,25 @@ export class ClaudeApi implements LLMApi { return { role: insideRole, content: content - .filter((v) => v.image_url || v.text) + .filter(v => v.image_url || v.text) .map(({ type, text, image_url }) => { - if (type === "text") { + if (type === 'text') { return { type, text: text!, }; } - const { url = "" } = image_url || {}; - const colonIndex = url.indexOf(":"); - const semicolonIndex = url.indexOf(";"); - const comma = url.indexOf(","); + const { url = '' } = image_url || {}; + const colonIndex = url.indexOf(':'); + const semicolonIndex = url.indexOf(';'); + const comma = url.indexOf(','); const mimeType = url.slice(colonIndex + 1, semicolonIndex); const encodeType = url.slice(semicolonIndex + 1, comma); const data = url.slice(comma + 1); return { - type: "image" as const, + type: 'image' as const, source: { type: encodeType, media_type: mimeType, @@ -172,10 +177,10 @@ export class ClaudeApi implements LLMApi { }; }); - if (prompt[0]?.role === "assistant") { + if (prompt[0]?.role === 'assistant') { prompt.unshift({ - role: "user", - content: ";", + role: 'user', + content: ';', }); } @@ -208,10 +213,10 @@ export class ClaudeApi implements LLMApi { requestBody, { ...getHeaders(), - "anthropic-version": accessStore.anthropicApiVersion, + 'anthropic-version': accessStore.anthropicApiVersion, }, // @ts-ignore - tools.map((tool) => ({ + tools.map(tool => ({ name: tool?.function?.name, description: tool?.function?.description, input_schema: tool?.function?.parameters, @@ -224,41 +229,41 @@ export class ClaudeApi implements LLMApi { let 
chunkJson: | undefined | { - type: "content_block_delta" | "content_block_stop"; - content_block?: { - type: "tool_use"; - id: string; - name: string; - }; - delta?: { - type: "text_delta" | "input_json_delta"; - text?: string; - partial_json?: string; - }; - index: number; + type: 'content_block_delta' | 'content_block_stop'; + content_block?: { + type: 'tool_use'; + id: string; + name: string; + }; + delta?: { + type: 'text_delta' | 'input_json_delta'; + text?: string; + partial_json?: string; }; + index: number; + }; chunkJson = JSON.parse(text); - if (chunkJson?.content_block?.type == "tool_use") { + if (chunkJson?.content_block?.type == 'tool_use') { index += 1; const id = chunkJson?.content_block.id; const name = chunkJson?.content_block.name; runTools.push({ id, - type: "function", + type: 'function', function: { name, - arguments: "", + arguments: '', }, }); } if ( - chunkJson?.delta?.type == "input_json_delta" && - chunkJson?.delta?.partial_json + chunkJson?.delta?.type == 'input_json_delta' + && chunkJson?.delta?.partial_json ) { // @ts-ignore - runTools[index]["function"]["arguments"] += - chunkJson?.delta?.partial_json; + runTools[index].function.arguments + += chunkJson?.delta?.partial_json; } return chunkJson?.delta?.text; }, @@ -276,10 +281,10 @@ export class ClaudeApi implements LLMApi { requestPayload?.messages?.length, 0, { - role: "assistant", + role: 'assistant', content: toolCallMessage.tool_calls.map( (tool: ChatMessageTool) => ({ - type: "tool_use", + type: 'tool_use', id: tool.id, name: tool?.function?.name, input: tool?.function?.arguments @@ -289,11 +294,11 @@ export class ClaudeApi implements LLMApi { ), }, // @ts-ignore - ...toolCallResult.map((result) => ({ - role: "user", + ...toolCallResult.map(result => ({ + role: 'user', content: [ { - type: "tool_result", + type: 'tool_result', tool_use_id: result.tool_call_id, content: result.content, }, @@ -305,12 +310,12 @@ export class ClaudeApi implements LLMApi { ); } else { const payload = { 
- method: "POST", + method: 'POST', body: JSON.stringify(requestBody), signal: controller.signal, headers: { ...getHeaders(), // get common headers - "anthropic-version": accessStore.anthropicApiVersion, + 'anthropic-version': accessStore.anthropicApiVersion, // do not send `anthropicApiKey` in browser!!! // Authorization: getAuthKey(accessStore.anthropicApiKey), }, @@ -318,7 +323,7 @@ export class ClaudeApi implements LLMApi { try { controller.signal.onabort = () => - options.onFinish("", new Response(null, { status: 400 })); + options.onFinish('', new Response(null, { status: 400 })); const res = await fetch(path, payload); const resJson = await res.json(); @@ -326,17 +331,19 @@ export class ClaudeApi implements LLMApi { const message = this.extractMessage(resJson); options.onFinish(message, res); } catch (e) { - console.error("failed to chat", e); + console.error('failed to chat', e); options.onError?.(e as Error); } } } + async usage() { return { used: 0, total: 0, }; } + async models() { // const provider = { // id: "anthropic", @@ -377,10 +384,11 @@ export class ClaudeApi implements LLMApi { // }, ]; } + path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl: string = ""; + let baseUrl: string = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.anthropicUrl; @@ -393,19 +401,20 @@ export class ClaudeApi implements LLMApi { baseUrl = isApp ? 
ANTHROPIC_BASE_URL : ApiPath.Anthropic; } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith('/api')) { + baseUrl = `https://${baseUrl}`; } - baseUrl = trimEnd(baseUrl, "/"); + baseUrl = trimEnd(baseUrl, '/'); // try rebuild url, when using cloudflare ai gateway in client return cloudflareAIGatewayUrl(`${baseUrl}/${path}`); } } -function trimEnd(s: string, end = " ") { - if (end.length === 0) return s; +function trimEnd(s: string, end = ' ') { + if (end.length === 0) + { return s; } while (s.endsWith(end)) { s = s.slice(0, -end.length); diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 9e8c2f139b6..54f2976d6c3 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -1,30 +1,32 @@ -"use client"; +'use client'; +import type { + ChatOptions, + LLMApi, + LLMModel, + MultimodalContent, + SpeechOptions, +} from '../api'; +import { getClientConfig } from '@/app/config/client'; import { ApiPath, Baidu, BAIDU_BASE_URL, REQUEST_TIMEOUT_MS, -} from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { getAccessToken } from "@/app/utils/baidu"; +} from '@/app/constant'; -import { - ChatOptions, - getHeaders, - LLMApi, - LLMModel, - MultimodalContent, - SpeechOptions, -} from "../api"; -import Locale from "../../locales"; +import { useAccessStore, useAppConfig, useChatStore } from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; +import { getAccessToken } from '@/app/utils/baidu'; +import { prettyObject } from '@/app/utils/format'; +import { fetch } from '@/app/utils/stream'; import { EventStreamContentType, fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { fetch } from 
"@/app/utils/stream"; +} from '@fortaine/fetch-event-source'; +import Locale from '../../locales'; +import { + getHeaders, +} from '../api'; export interface OpenAIListModelResponse { object: string; @@ -37,7 +39,7 @@ export interface OpenAIListModelResponse { interface RequestPayload { messages: { - role: "system" | "user" | "assistant"; + role: 'system' | 'user' | 'assistant'; content: string | MultimodalContent[]; }[]; stream?: boolean; @@ -53,7 +55,7 @@ export class ErnieApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.baiduUrl; @@ -65,40 +67,40 @@ export class ErnieApi implements LLMApi { baseUrl = isApp ? BAIDU_BASE_URL : ApiPath.Baidu; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Baidu)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages = options.messages.map((v) => ({ + const messages = options.messages.map(v => ({ // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function", - role: v.role === "system" ? "user" : v.role, + role: v.role === 'system' ? 
'user' : v.role, content: getMessageTextContent(v), })); // "error_code": 336006, "error_msg": "the length of messages must be an odd number", if (messages.length % 2 === 0) { - if (messages.at(0)?.role === "user") { + if (messages.at(0)?.role === 'user') { messages.splice(1, 0, { - role: "assistant", - content: " ", + role: 'assistant', + content: ' ', }); } else { messages.unshift({ - role: "user", - content: " ", + role: 'user', + content: ' ', }); } } @@ -122,7 +124,7 @@ export class ErnieApi implements LLMApi { top_p: modelConfig.top_p, }; - console.log("[Request] Baidu payload: ", requestPayload); + console.log('[Request] Baidu payload: ', requestPayload); const controller = new AbortController(); options.onController?.(controller); @@ -131,7 +133,7 @@ export class ErnieApi implements LLMApi { let chatPath = this.path(Baidu.ChatPath(modelConfig.model)); // getAccessToken can not run in browser, because cors error - if (!!getClientConfig()?.isApp) { + if (getClientConfig()?.isApp) { const accessStore = useAccessStore.getState(); if (accessStore.useCustomConfig) { if (accessStore.isValidBaidu()) { @@ -140,13 +142,13 @@ export class ErnieApi implements LLMApi { accessStore.baiduSecretKey, ); chatPath = `${chatPath}${ - chatPath.includes("?") ? "&" : "?" + chatPath.includes('?') ? '&' : '?' 
}access_token=${access_token}`; } } } const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -159,8 +161,8 @@ export class ErnieApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; + let responseText = ''; + let remainText = ''; let finished = false; let responseRes: Response; @@ -168,9 +170,9 @@ export class ErnieApi implements LLMApi { function animateResponseText() { if (finished || controller.signal.aborted) { responseText += remainText; - console.log("[Response Animation] finished"); + console.log('[Response Animation] finished'); if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + options.onError?.(new Error('empty response from server')); } return; } @@ -203,20 +205,20 @@ export class ErnieApi implements LLMApi { ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log("[Baidu] request response content type: ", contentType); + const contentType = res.headers.get('content-type'); + console.log('[Baidu] request response content type: ', contentType); responseRes = res; - if (contentType?.startsWith("text/plain")) { + if (contentType?.startsWith('text/plain')) { responseText = await res.clone().text(); return finish(); } if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + !res.ok + || !res.headers + .get('content-type') + ?.startsWith(EventStreamContentType) + || res.status !== 200 ) { const responseTexts = [responseText]; let extraInfo = await res.clone().text(); @@ -233,13 +235,13 @@ export class ErnieApi implements LLMApi { responseTexts.push(extraInfo); } - responseText = responseTexts.join("\n\n"); + responseText = responseTexts.join('\n\n'); return finish(); } }, onmessage(msg) { - if (msg.data === "[DONE]" || finished) { + if 
(msg.data === '[DONE]' || finished) { return finish(); } const text = msg.data; @@ -250,7 +252,7 @@ export class ErnieApi implements LLMApi { remainText += delta; } } catch (e) { - console.error("[Request] parse error", text, msg); + console.error('[Request] parse error', text, msg); } }, onclose() { @@ -271,10 +273,11 @@ export class ErnieApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index a2f0660d828..126ade377f3 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -1,29 +1,31 @@ -"use client"; -import { - ApiPath, - ByteDance, - BYTEDANCE_BASE_URL, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - -import { +'use client'; +import type { ChatOptions, - getHeaders, LLMApi, LLMModel, MultimodalContent, SpeechOptions, -} from "../api"; -import Locale from "../../locales"; +} from '../api'; +import { getClientConfig } from '@/app/config/client'; + +import { + ApiPath, + ByteDance, + BYTEDANCE_BASE_URL, + REQUEST_TIMEOUT_MS, +} from '@/app/constant'; +import { useAccessStore, useAppConfig, useChatStore } from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; +import { prettyObject } from '@/app/utils/format'; +import { fetch } from '@/app/utils/stream'; import { EventStreamContentType, fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { fetch } from "@/app/utils/stream"; +} from '@fortaine/fetch-event-source'; +import Locale from '../../locales'; +import { + 
getHeaders, +} from '../api'; export interface OpenAIListModelResponse { object: string; @@ -36,7 +38,7 @@ export interface OpenAIListModelResponse { interface RequestPayload { messages: { - role: "system" | "user" | "assistant"; + role: 'system' | 'user' | 'assistant'; content: string | MultimodalContent[]; }[]; stream?: boolean; @@ -52,7 +54,7 @@ export class DoubaoApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.bytedanceUrl; @@ -63,28 +65,28 @@ export class DoubaoApi implements LLMApi { baseUrl = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ByteDance)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.ByteDance)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + return res.choices?.at(0)?.message?.content ?? 
''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages = options.messages.map((v) => ({ + const messages = options.messages.map(v => ({ role: v.role, content: getMessageTextContent(v), })); @@ -114,7 +116,7 @@ export class DoubaoApi implements LLMApi { try { const chatPath = this.path(ByteDance.ChatPath); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -127,8 +129,8 @@ export class DoubaoApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; + let responseText = ''; + let remainText = ''; let finished = false; let responseRes: Response; @@ -136,9 +138,9 @@ export class DoubaoApi implements LLMApi { function animateResponseText() { if (finished || controller.signal.aborted) { responseText += remainText; - console.log("[Response Animation] finished"); + console.log('[Response Animation] finished'); if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + options.onError?.(new Error('empty response from server')); } return; } @@ -171,23 +173,23 @@ export class DoubaoApi implements LLMApi { ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); + const contentType = res.headers.get('content-type'); console.log( - "[ByteDance] request response content type: ", + '[ByteDance] request response content type: ', contentType, ); responseRes = res; - if (contentType?.startsWith("text/plain")) { + if (contentType?.startsWith('text/plain')) { responseText = await res.clone().text(); return finish(); } if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + !res.ok + || !res.headers + .get('content-type') + 
?.startsWith(EventStreamContentType) + || res.status !== 200 ) { const responseTexts = [responseText]; let extraInfo = await res.clone().text(); @@ -204,13 +206,13 @@ export class DoubaoApi implements LLMApi { responseTexts.push(extraInfo); } - responseText = responseTexts.join("\n\n"); + responseText = responseTexts.join('\n\n'); return finish(); } }, onmessage(msg) { - if (msg.data === "[DONE]" || finished) { + if (msg.data === '[DONE]' || finished) { return finish(); } const text = msg.data; @@ -224,7 +226,7 @@ export class DoubaoApi implements LLMApi { remainText += delta; } } catch (e) { - console.error("[Request] parse error", text, msg); + console.error('[Request] parse error', text, msg); } }, onclose() { @@ -245,10 +247,11 @@ export class DoubaoApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/client/platforms/glm.ts b/app/client/platforms/glm.ts index a7965947fab..c7c201264a5 100644 --- a/app/client/platforms/glm.ts +++ b/app/client/platforms/glm.ts @@ -1,29 +1,33 @@ -"use client"; +'use client'; +import type { + ChatMessageTool, +} from '@/app/store'; +import type { + ChatOptions, + LLMApi, + LLMModel, + SpeechOptions, +} from '../api'; +import type { RequestPayload } from './openai'; +import { getClientConfig } from '@/app/config/client'; import { ApiPath, - CHATGLM_BASE_URL, ChatGLM, + CHATGLM_BASE_URL, REQUEST_TIMEOUT_MS, -} from "@/app/constant"; +} from '@/app/constant'; import { useAccessStore, useAppConfig, useChatStore, - ChatMessageTool, usePluginStore, -} from "@/app/store"; -import { stream } from "@/app/utils/chat"; +} from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; +import { stream } from '@/app/utils/chat'; +import { fetch } from '@/app/utils/stream'; import { - ChatOptions, getHeaders, 
- LLMApi, - LLMModel, - SpeechOptions, -} from "../api"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { RequestPayload } from "./openai"; -import { fetch } from "@/app/utils/stream"; +} from '../api'; export class ChatGLMApi implements LLMApi { private disableListModels = true; @@ -31,7 +35,7 @@ export class ChatGLMApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.chatglmUrl; @@ -43,28 +47,28 @@ export class ChatGLMApi implements LLMApi { baseUrl = isApp ? CHATGLM_BASE_URL : apiPath; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.ChatGLM)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + return res.choices?.at(0)?.message?.content ?? 
''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages: ChatOptions["messages"] = []; + const messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = getMessageTextContent(v); messages.push({ role: v.role, content }); @@ -89,7 +93,7 @@ export class ChatGLMApi implements LLMApi { top_p: modelConfig.top_p, }; - console.log("[Request] glm payload: ", requestPayload); + console.log('[Request] glm payload: ', requestPayload); const shouldStream = !!options.config.stream; const controller = new AbortController(); @@ -98,7 +102,7 @@ export class ChatGLMApi implements LLMApi { try { const chatPath = this.path(ChatGLM.ChatPath); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -149,7 +153,7 @@ export class ChatGLMApi implements LLMApi { }); } else { // @ts-ignore - runTools[index]["function"]["arguments"] += args; + runTools[index].function.arguments += args; } } return choices[0]?.delta?.content; @@ -180,10 +184,11 @@ export class ChatGLMApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index a7bce4fc2d0..91c09288d71 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,38 +1,40 @@ -import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; -import { +import type { + ChatMessageTool, +} from '@/app/store'; +import type { ChatOptions, - getHeaders, LLMApi, LLMModel, LLMUsage, SpeechOptions, -} from "../api"; +} from '../api'; +import type { RequestPayload } from './openai'; 
+import { getClientConfig } from '@/app/config/client'; +import { ApiPath, GEMINI_BASE_URL, Google, REQUEST_TIMEOUT_MS } from '@/app/constant'; import { useAccessStore, useAppConfig, useChatStore, usePluginStore, - ChatMessageTool, -} from "@/app/store"; -import { stream } from "@/app/utils/chat"; -import { getClientConfig } from "@/app/config/client"; -import { GEMINI_BASE_URL } from "@/app/constant"; +} from '@/app/store'; import { - getMessageTextContent, getMessageImages, + getMessageTextContent, isVisionModel, -} from "@/app/utils"; -import { preProcessImageContent } from "@/app/utils/chat"; -import { nanoid } from "nanoid"; -import { RequestPayload } from "./openai"; -import { fetch } from "@/app/utils/stream"; +} from '@/app/utils'; +import { preProcessImageContent, stream } from '@/app/utils/chat'; +import { fetch } from '@/app/utils/stream'; +import { nanoid } from 'nanoid'; +import { + getHeaders, +} from '../api'; export class GeminiProApi implements LLMApi { path(path: string, shouldStream = false): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.googleUrl; } @@ -41,34 +43,36 @@ export class GeminiProApi implements LLMApi { if (baseUrl.length === 0) { baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Google)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - let chatPath = [baseUrl, path].join("/"); + let chatPath = [baseUrl, path].join('/'); if (shouldStream) { - chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse"; + chatPath += chatPath.includes('?') ? 
'&alt=sse' : '?alt=sse'; } return chatPath; } + extractMessage(res: any) { - console.log("[Response] gemini-pro response: ", res); + console.log('[Response] gemini-pro response: ', res); return ( - res?.candidates?.at(0)?.content?.parts.at(0)?.text || - res?.at(0)?.candidates?.at(0)?.content?.parts.at(0)?.text || - res?.error?.message || - "" + res?.candidates?.at(0)?.content?.parts.at(0)?.text + || res?.at(0)?.candidates?.at(0)?.content?.parts.at(0)?.text + || res?.error?.message + || '' ); } + speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions): Promise { @@ -76,7 +80,7 @@ export class GeminiProApi implements LLMApi { let multimodal = false; // try get base64image from local cache image_url - const _messages: ChatOptions["messages"] = []; + const _messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = await preProcessImageContent(v.content); _messages.push({ role: v.role, content }); @@ -89,8 +93,8 @@ export class GeminiProApi implements LLMApi { multimodal = true; parts = parts.concat( images.map((image) => { - const imageType = image.split(";")[0].split(":")[1]; - const imageData = image.split(",")[1]; + const imageType = image.split(';')[0].split(':')[1]; + const imageData = image.split(',')[1]; return { inline_data: { mime_type: imageType, @@ -102,13 +106,13 @@ export class GeminiProApi implements LLMApi { } } return { - role: v.role.replace("assistant", "model").replace("system", "user"), - parts: parts, + role: v.role.replace('assistant', 'model').replace('system', 'user'), + parts, }; }); // google requires that role in neighboring messages must not be the same - for (let i = 0; i < messages.length - 1; ) { + for (let i = 0; i < messages.length - 1;) { // Check if current and next item both have the role "model" if (messages[i].role === messages[i + 1].role) { // Concatenate the 'parts' of the current and 
next item @@ -146,25 +150,25 @@ export class GeminiProApi implements LLMApi { }, safetySettings: [ { - category: "HARM_CATEGORY_HARASSMENT", + category: 'HARM_CATEGORY_HARASSMENT', threshold: accessStore.googleSafetySettings, }, { - category: "HARM_CATEGORY_HATE_SPEECH", + category: 'HARM_CATEGORY_HATE_SPEECH', threshold: accessStore.googleSafetySettings, }, { - category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: accessStore.googleSafetySettings, }, { - category: "HARM_CATEGORY_DANGEROUS_CONTENT", + category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: accessStore.googleSafetySettings, }, ], }; - let shouldStream = !!options.config.stream; + const shouldStream = !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); try { @@ -175,7 +179,7 @@ export class GeminiProApi implements LLMApi { ); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -200,7 +204,7 @@ export class GeminiProApi implements LLMApi { // @ts-ignore tools.length > 0 ? 
// @ts-ignore - [{ functionDeclarations: tools.map((tool) => tool.function) }] + [{ functionDeclarations: tools.map(tool => tool.function) }] : [], funcs, controller, @@ -211,12 +215,15 @@ export class GeminiProApi implements LLMApi { const functionCall = chunkJson?.candidates ?.at(0) - ?.content.parts.at(0)?.functionCall; + ?.content + .parts + .at(0) + ?.functionCall; if (functionCall) { const { name, args } = functionCall; runTools.push({ id: nanoid(), - type: "function", + type: 'function', function: { name, arguments: JSON.stringify(args), // utils.chat call function, using JSON.parse @@ -237,7 +244,7 @@ export class GeminiProApi implements LLMApi { requestPayload?.contents?.length, 0, { - role: "model", + role: 'model', parts: toolCallMessage.tool_calls.map( (tool: ChatMessageTool) => ({ functionCall: { @@ -248,8 +255,8 @@ export class GeminiProApi implements LLMApi { ), }, // @ts-ignore - ...toolCallResult.map((result) => ({ - role: "function", + ...toolCallResult.map(result => ({ + role: 'function', parts: [ { functionResponse: { @@ -274,8 +281,8 @@ export class GeminiProApi implements LLMApi { // being blocked options.onError?.( new Error( - "Message is being blocked for reason: " + - resJson.promptFeedback.blockReason, + `Message is being blocked for reason: ${ + resJson.promptFeedback.blockReason}`, ), ); } @@ -283,13 +290,15 @@ export class GeminiProApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + usage(): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } + async models(): Promise { return []; } diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts index cfc37b3b256..1bb8444ba19 100644 --- a/app/client/platforms/iflytek.ts +++ b/app/client/platforms/iflytek.ts @@ -1,30 +1,32 @@ -"use client"; 
-import { - ApiPath, - IFLYTEK_BASE_URL, - Iflytek, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - -import { +'use client'; +import type { ChatOptions, - getHeaders, LLMApi, LLMModel, SpeechOptions, -} from "../api"; -import Locale from "../../locales"; +} from '../api'; +import type { RequestPayload } from './openai'; + +import { getClientConfig } from '@/app/config/client'; +import { + ApiPath, + Iflytek, + IFLYTEK_BASE_URL, + REQUEST_TIMEOUT_MS, +} from '@/app/constant'; +import { useAccessStore, useAppConfig, useChatStore } from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; +import { prettyObject } from '@/app/utils/format'; +import { fetch } from '@/app/utils/stream'; import { EventStreamContentType, fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { fetch } from "@/app/utils/stream"; +} from '@fortaine/fetch-event-source'; +import Locale from '../../locales'; -import { RequestPayload } from "./openai"; +import { + getHeaders, +} from '../api'; export class SparkApi implements LLMApi { private disableListModels = true; @@ -32,7 +34,7 @@ export class SparkApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.iflytekUrl; @@ -44,28 +46,28 @@ export class SparkApi implements LLMApi { baseUrl = isApp ? 
IFLYTEK_BASE_URL : apiPath; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Iflytek)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + return res.choices?.at(0)?.message?.content ?? ''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages: ChatOptions["messages"] = []; + const messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = getMessageTextContent(v); messages.push({ role: v.role, content }); @@ -92,7 +94,7 @@ export class SparkApi implements LLMApi { // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
}; - console.log("[Request] Spark payload: ", requestPayload); + console.log('[Request] Spark payload: ', requestPayload); const shouldStream = !!options.config.stream; const controller = new AbortController(); @@ -101,7 +103,7 @@ export class SparkApi implements LLMApi { try { const chatPath = this.path(Iflytek.ChatPath); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -114,8 +116,8 @@ export class SparkApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; + let responseText = ''; + let remainText = ''; let finished = false; let responseRes: Response; @@ -123,7 +125,7 @@ export class SparkApi implements LLMApi { function animateResponseText() { if (finished || controller.signal.aborted) { responseText += remainText; - console.log("[Response Animation] finished"); + console.log('[Response Animation] finished'); return; } @@ -155,21 +157,21 @@ export class SparkApi implements LLMApi { ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log("[Spark] request response content type: ", contentType); + const contentType = res.headers.get('content-type'); + console.log('[Spark] request response content type: ', contentType); responseRes = res; - if (contentType?.startsWith("text/plain")) { + if (contentType?.startsWith('text/plain')) { responseText = await res.clone().text(); return finish(); } // Handle different error scenarios if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + !res.ok + || !res.headers + .get('content-type') + ?.startsWith(EventStreamContentType) + || res.status !== 200 ) { let extraInfo = await res.clone().text(); try { @@ -190,7 +192,7 @@ export class SparkApi implements LLMApi { } }, onmessage(msg) { - if (msg.data === "[DONE]" || finished) { + if (msg.data === 
'[DONE]' || finished) { return finish(); } const text = msg.data; @@ -205,7 +207,7 @@ export class SparkApi implements LLMApi { remainText += delta; } } catch (e) { - console.error("[Request] parse error", text); + console.error('[Request] parse error', text); options.onError?.(new Error(`Failed to parse response: ${text}`)); } }, @@ -235,7 +237,7 @@ export class SparkApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index b6812c0d766..c3f99d7a275 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -1,30 +1,34 @@ -"use client"; +'use client'; +import type { + ChatMessageTool, +} from '@/app/store'; +import type { + ChatOptions, + LLMApi, + LLMModel, + SpeechOptions, +} from '../api'; +import type { RequestPayload } from './openai'; +import { getClientConfig } from '@/app/config/client'; // azure and openai, using same models. so using same LLMApi. 
import { ApiPath, - MOONSHOT_BASE_URL, Moonshot, + MOONSHOT_BASE_URL, REQUEST_TIMEOUT_MS, -} from "@/app/constant"; +} from '@/app/constant'; import { useAccessStore, useAppConfig, useChatStore, - ChatMessageTool, usePluginStore, -} from "@/app/store"; -import { stream } from "@/app/utils/chat"; +} from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; +import { stream } from '@/app/utils/chat'; +import { fetch } from '@/app/utils/stream'; import { - ChatOptions, getHeaders, - LLMApi, - LLMModel, - SpeechOptions, -} from "../api"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { RequestPayload } from "./openai"; -import { fetch } from "@/app/utils/stream"; +} from '../api'; export class MoonshotApi implements LLMApi { private disableListModels = true; @@ -32,7 +36,7 @@ export class MoonshotApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.moonshotUrl; @@ -44,28 +48,28 @@ export class MoonshotApi implements LLMApi { baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Moonshot)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + return res.choices?.at(0)?.message?.content ?? 
''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages: ChatOptions["messages"] = []; + const messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = getMessageTextContent(v); messages.push({ role: v.role, content }); @@ -92,7 +96,7 @@ export class MoonshotApi implements LLMApi { // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. }; - console.log("[Request] openai payload: ", requestPayload); + console.log('[Request] openai payload: ', requestPayload); const shouldStream = !!options.config.stream; const controller = new AbortController(); @@ -101,7 +105,7 @@ export class MoonshotApi implements LLMApi { try { const chatPath = this.path(Moonshot.ChatPath); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -152,7 +156,7 @@ export class MoonshotApi implements LLMApi { }); } else { // @ts-ignore - runTools[index]["function"]["arguments"] += args; + runTools[index].function.arguments += args; } } return choices[0]?.delta?.content; @@ -183,10 +187,11 @@ export class MoonshotApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 15cfb7ca602..44bb52c1de8 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -1,48 +1,52 @@ -"use client"; +'use client'; +import type { + ChatMessageTool, +} from '@/app/store'; +import type { DalleQuality, DalleSize, DalleStyle } from '@/app/typing'; +import type { + ChatOptions, + LLMApi, + 
LLMModel, + LLMUsage, + MultimodalContent, + SpeechOptions, +} from '../api'; +import { getClientConfig } from '@/app/config/client'; // azure and openai, using same models. so using same LLMApi. import { ApiPath, - OPENAI_BASE_URL, + Azure, DEFAULT_MODELS, + OPENAI_BASE_URL, OpenaiPath, - Azure, REQUEST_TIMEOUT_MS, ServiceProvider, -} from "@/app/constant"; +} from '@/app/constant'; import { - ChatMessageTool, useAccessStore, useAppConfig, useChatStore, usePluginStore, -} from "@/app/store"; -import { collectModelsWithDefaultModel } from "@/app/utils/model"; +} from '@/app/store'; +import { + isDalle3 as _isDalle3, + getMessageTextContent, + isVisionModel, +} from '@/app/utils'; + import { - preProcessImageContent, - uploadImage, base64Image2Blob, + preProcessImageContent, stream, -} from "@/app/utils/chat"; -import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; -import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing"; - + uploadImage, +} from '@/app/utils/chat'; +import { cloudflareAIGatewayUrl } from '@/app/utils/cloudflare'; +import { collectModelsWithDefaultModel } from '@/app/utils/model'; +import { fetch } from '@/app/utils/stream'; +import Locale from '../../locales'; import { - ChatOptions, getHeaders, - LLMApi, - LLMModel, - LLMUsage, - MultimodalContent, - SpeechOptions, -} from "../api"; -import Locale from "../../locales"; -import { getClientConfig } from "@/app/config/client"; -import { - getMessageTextContent, - isVisionModel, - isDalle3 as _isDalle3, -} from "@/app/utils"; -import { fetch } from "@/app/utils/stream"; +} from '../api'; export interface OpenAIListModelResponse { object: string; @@ -55,7 +59,7 @@ export interface OpenAIListModelResponse { export interface RequestPayload { messages: { - role: "system" | "user" | "assistant"; + role: 'system' | 'user' | 'assistant'; content: string | MultimodalContent[]; }[]; stream?: boolean; @@ -71,7 +75,7 @@ export interface RequestPayload { export interface DalleRequestPayload { 
model: string; prompt: string; - response_format: "url" | "b64_json"; + response_format: 'url' | 'b64_json'; n: number; size: DalleSize; quality: DalleQuality; @@ -84,13 +88,13 @@ export class ChatGPTApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; - const isAzure = path.includes("deployments"); + const isAzure = path.includes('deployments'); if (accessStore.useCustomConfig) { if (isAzure && !accessStore.isValidAzure()) { - throw Error( - "incomplete azure config, please check it in your settings page", + throw new Error( + 'incomplete azure config, please check it in your settings page', ); } @@ -103,38 +107,38 @@ export class ChatGPTApi implements LLMApi { baseUrl = isApp ? OPENAI_BASE_URL : apiPath; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } if ( - !baseUrl.startsWith("http") && - !isAzure && - !baseUrl.startsWith(ApiPath.OpenAI) + !baseUrl.startsWith('http') + && !isAzure + && !baseUrl.startsWith(ApiPath.OpenAI) ) { - baseUrl = "https://" + baseUrl; + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); // try rebuild url, when using cloudflare ai gateway in client - return cloudflareAIGatewayUrl([baseUrl, path].join("/")); + return cloudflareAIGatewayUrl([baseUrl, path].join('/')); } async extractMessage(res: any) { if (res.error) { - return "```\n" + JSON.stringify(res, null, 4) + "\n```"; + return `\`\`\`\n${JSON.stringify(res, null, 4)}\n\`\`\``; } // dalle3 model return url, using url create image message if (res.data) { - let url = res.data?.at(0)?.url ?? ""; - const b64_json = res.data?.at(0)?.b64_json ?? ""; + let url = res.data?.at(0)?.url ?? ''; + const b64_json = res.data?.at(0)?.b64_json ?? 
''; if (!url && b64_json) { // uploadImage - url = await uploadImage(base64Image2Blob(b64_json, "image/png")); + url = await uploadImage(base64Image2Blob(b64_json, 'image/png')); } return [ { - type: "image_url", + type: 'image_url', image_url: { url, }, @@ -153,7 +157,7 @@ export class ChatGPTApi implements LLMApi { speed: options.speed, }; - console.log("[Request] openai speech payload: ", requestPayload); + console.log('[Request] openai speech payload: ', requestPayload); const controller = new AbortController(); options.onController?.(controller); @@ -161,7 +165,7 @@ export class ChatGPTApi implements LLMApi { try { const speechPath = this.path(OpenaiPath.SpeechPath); const speechPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -177,7 +181,7 @@ export class ChatGPTApi implements LLMApi { clearTimeout(requestTimeoutId); return await res.arrayBuffer(); } catch (e) { - console.log("[Request] failed to make a speech request", e); + console.log('[Request] failed to make a speech request', e); throw e; } } @@ -195,7 +199,7 @@ export class ChatGPTApi implements LLMApi { let requestPayload: RequestPayload | DalleRequestPayload; const isDalle3 = _isDalle3(options.config.model); - const isO1 = options.config.model.startsWith("o1"); + const isO1 = options.config.model.startsWith('o1'); if (isDalle3) { const prompt = getMessageTextContent( options.messages.slice(-1)?.pop() as any, @@ -204,21 +208,21 @@ export class ChatGPTApi implements LLMApi { model: options.config.model, prompt, // URLs are only valid for 60 minutes after the image has been generated. - response_format: "b64_json", // using b64_json, and save image in CacheStorage + response_format: 'b64_json', // using b64_json, and save image in CacheStorage n: 1, - size: options.config?.size ?? "1024x1024", - quality: options.config?.quality ?? "standard", - style: options.config?.style ?? 
"vivid", + size: options.config?.size ?? '1024x1024', + quality: options.config?.quality ?? 'standard', + style: options.config?.style ?? 'vivid', }; } else { const visionModel = isVisionModel(options.config.model); - const messages: ChatOptions["messages"] = []; + const messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = visionModel ? await preProcessImageContent(v.content) : getMessageTextContent(v); - if (!(isO1 && v.role === "system")) - messages.push({ role: v.role, content }); + if (!(isO1 && v.role === 'system')) + { messages.push({ role: v.role, content }); } } // O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet. @@ -236,27 +240,27 @@ export class ChatGPTApi implements LLMApi { // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs) if (isO1) { - requestPayload["max_completion_tokens"] = modelConfig.max_tokens; + requestPayload.max_completion_tokens = modelConfig.max_tokens; } // add max_tokens to vision model if (visionModel) { - requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + requestPayload.max_tokens = Math.max(modelConfig.max_tokens, 4000); } } - console.log("[Request] openai payload: ", requestPayload); + console.log('[Request] openai payload: ', requestPayload); const shouldStream = !isDalle3 && !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); try { - let chatPath = ""; + let chatPath = ''; if (modelConfig.providerName === ServiceProvider.Azure) { // find model, and get displayName as deployName - const { models: configModels, customModels: configCustomModels } = - useAppConfig.getState(); + const { models: configModels, customModels: configCustomModels } + = useAppConfig.getState(); const { defaultModel, customModels: accessCustomModels, @@ -264,18 +268,18 @@ export class ChatGPTApi 
implements LLMApi { } = useAccessStore.getState(); const models = collectModelsWithDefaultModel( configModels, - [configCustomModels, accessCustomModels].join(","), + [configCustomModels, accessCustomModels].join(','), defaultModel, ); const model = models.find( - (model) => - model.name === modelConfig.model && - model?.provider?.providerName === ServiceProvider.Azure, + model => + model.name === modelConfig.model + && model?.provider?.providerName === ServiceProvider.Azure, ); chatPath = this.path( (isDalle3 ? Azure.ImagePath : Azure.ChatPath)( (model?.displayName ?? model?.name) as string, - useCustomConfig ? useAccessStore.getState().azureApiVersion : "", + useCustomConfig ? useAccessStore.getState().azureApiVersion : '', ), ); } else { @@ -324,7 +328,7 @@ export class ChatGPTApi implements LLMApi { }); } else { // @ts-ignore - runTools[index]["function"]["arguments"] += args; + runTools[index].function.arguments += args; } } return choices[0]?.delta?.content; @@ -350,7 +354,7 @@ export class ChatGPTApi implements LLMApi { ); } else { const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -370,16 +374,17 @@ export class ChatGPTApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { const formatDate = (d: Date) => - `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d + `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, '0')}-${d .getDate() .toString() - .padStart(2, "0")}`; + .padStart(2, '0')}`; const ONE_DAY = 1 * 24 * 60 * 60 * 1000; const now = new Date(); const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); @@ -392,12 +397,12 @@ export class ChatGPTApi implements LLMApi { 
`${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`, ), { - method: "GET", + method: 'GET', headers: getHeaders(), }, ), fetch(this.path(OpenaiPath.SubsPath), { - method: "GET", + method: 'GET', headers: getHeaders(), }), ]); @@ -407,7 +412,7 @@ export class ChatGPTApi implements LLMApi { } if (!used.ok || !subs.ok) { - throw new Error("Failed to query usage from openai"); + throw new Error('Failed to query usage from openai'); } const response = (await used.json()) as { @@ -423,7 +428,7 @@ export class ChatGPTApi implements LLMApi { }; if (response.error && response.error.type) { - throw Error(response.error.message); + throw new Error(response.error.message); } if (response.total_usage) { @@ -446,7 +451,7 @@ export class ChatGPTApi implements LLMApi { } const res = await fetch(this.path(OpenaiPath.ListModelPath), { - method: "GET", + method: 'GET', headers: { ...getHeaders(), }, @@ -454,24 +459,24 @@ export class ChatGPTApi implements LLMApi { const resJson = (await res.json()) as OpenAIListModelResponse; const chatModels = resJson.data?.filter( - (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"), + m => m.id.startsWith('gpt-') || m.id.startsWith('chatgpt-'), ); - console.log("[Models]", chatModels); + console.log('[Models]', chatModels); if (!chatModels) { return []; } - //由于目前 OpenAI 的 disableListModels 默认为 true,所以当前实际不会运行到这场 - let seq = 1000; //同 Constant.ts 中的排序保持一致 - return chatModels.map((m) => ({ + // 由于目前 OpenAI 的 disableListModels 默认为 true,所以当前实际不会运行到这场 + let seq = 1000; // 同 Constant.ts 中的排序保持一致 + return chatModels.map(m => ({ name: m.id, available: true, sorted: seq++, provider: { - id: "openai", - providerName: "OpenAI", - providerType: "openai", + id: 'openai', + providerName: 'OpenAI', + providerType: 'openai', sorted: 1, }, })); diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 580844a5b31..faa91ab3ab1 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts 
@@ -1,28 +1,30 @@ -"use client"; -import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - -import { +'use client'; +import type { ChatOptions, - getHeaders, LLMApi, LLMModel, MultimodalContent, SpeechOptions, -} from "../api"; -import Locale from "../../locales"; +} from '../api'; +import { getClientConfig } from '@/app/config/client'; + +import { ApiPath, REQUEST_TIMEOUT_MS, TENCENT_BASE_URL } from '@/app/constant'; +import { useAccessStore, useAppConfig, useChatStore } from '@/app/store'; +import { getMessageTextContent, isVisionModel } from '@/app/utils'; +import { prettyObject } from '@/app/utils/format'; +import { fetch } from '@/app/utils/stream'; import { EventStreamContentType, fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent, isVisionModel } from "@/app/utils"; -import mapKeys from "lodash-es/mapKeys"; -import mapValues from "lodash-es/mapValues"; -import isArray from "lodash-es/isArray"; -import isObject from "lodash-es/isObject"; -import { fetch } from "@/app/utils/stream"; +} from '@fortaine/fetch-event-source'; +import isArray from 'lodash-es/isArray'; +import isObject from 'lodash-es/isObject'; +import mapKeys from 'lodash-es/mapKeys'; +import mapValues from 'lodash-es/mapValues'; +import Locale from '../../locales'; +import { + getHeaders, +} from '../api'; export interface OpenAIListModelResponse { object: string; @@ -35,7 +37,7 @@ export interface OpenAIListModelResponse { interface RequestPayload { Messages: { - Role: "system" | "user" | "assistant"; + Role: 'system' | 'user' | 'assistant'; Content: string | MultimodalContent[]; }[]; Stream?: boolean; @@ -50,8 +52,7 @@ function capitalizeKeys(obj: any): any { } else if (isObject(obj)) { return mapValues( mapKeys(obj, (value: any, key: string) => - 
key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase()), - ), + key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase())), capitalizeKeys, ); } else { @@ -63,7 +64,7 @@ export class HunyuanApi implements LLMApi { path(): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.tencentUrl; @@ -74,30 +75,30 @@ export class HunyuanApi implements LLMApi { baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Tencent)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl); + console.log('[Proxy Endpoint] ', baseUrl); return baseUrl; } extractMessage(res: any) { - return res.Choices?.at(0)?.Message?.Content ?? ""; + return res.Choices?.at(0)?.Message?.Content ?? ''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { const visionModel = isVisionModel(options.config.model); const messages = options.messages.map((v, index) => ({ // "Messages 中 system 角色必须位于列表的最开始" - role: index !== 0 && v.role === "system" ? "user" : v.role, + role: index !== 0 && v.role === 'system' ? 'user' : v.role, content: visionModel ? 
v.content : getMessageTextContent(v), })); @@ -117,7 +118,7 @@ export class HunyuanApi implements LLMApi { stream: options.config.stream, }); - console.log("[Request] Tencent payload: ", requestPayload); + console.log('[Request] Tencent payload: ', requestPayload); const shouldStream = !!options.config.stream; const controller = new AbortController(); @@ -126,7 +127,7 @@ export class HunyuanApi implements LLMApi { try { const chatPath = this.path(); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -139,8 +140,8 @@ export class HunyuanApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; + let responseText = ''; + let remainText = ''; let finished = false; let responseRes: Response; @@ -148,9 +149,9 @@ export class HunyuanApi implements LLMApi { function animateResponseText() { if (finished || controller.signal.aborted) { responseText += remainText; - console.log("[Response Animation] finished"); + console.log('[Response Animation] finished'); if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + options.onError?.(new Error('empty response from server')); } return; } @@ -183,23 +184,23 @@ export class HunyuanApi implements LLMApi { ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); + const contentType = res.headers.get('content-type'); console.log( - "[Tencent] request response content type: ", + '[Tencent] request response content type: ', contentType, ); responseRes = res; - if (contentType?.startsWith("text/plain")) { + if (contentType?.startsWith('text/plain')) { responseText = await res.clone().text(); return finish(); } if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + !res.ok + || !res.headers + .get('content-type') + 
?.startsWith(EventStreamContentType) + || res.status !== 200 ) { const responseTexts = [responseText]; let extraInfo = await res.clone().text(); @@ -216,13 +217,13 @@ export class HunyuanApi implements LLMApi { responseTexts.push(extraInfo); } - responseText = responseTexts.join("\n\n"); + responseText = responseTexts.join('\n\n'); return finish(); } }, onmessage(msg) { - if (msg.data === "[DONE]" || finished) { + if (msg.data === '[DONE]' || finished) { return finish(); } const text = msg.data; @@ -236,7 +237,7 @@ export class HunyuanApi implements LLMApi { remainText += delta; } } catch (e) { - console.error("[Request] parse error", text, msg); + console.error('[Request] parse error', text, msg); } }, onclose() { @@ -257,10 +258,11 @@ export class HunyuanApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts index 06dbaaa29ff..20836a70a11 100644 --- a/app/client/platforms/xai.ts +++ b/app/client/platforms/xai.ts @@ -1,25 +1,29 @@ -"use client"; +'use client'; +import type { + ChatMessageTool, +} from '@/app/store'; +import type { + ChatOptions, + LLMApi, + LLMModel, + SpeechOptions, +} from '../api'; +import type { RequestPayload } from './openai'; +import { getClientConfig } from '@/app/config/client'; // azure and openai, using same models. so using same LLMApi. 
-import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ApiPath, REQUEST_TIMEOUT_MS, XAI, XAI_BASE_URL } from '@/app/constant'; import { useAccessStore, useAppConfig, useChatStore, - ChatMessageTool, usePluginStore, -} from "@/app/store"; -import { stream } from "@/app/utils/chat"; +} from '@/app/store'; +import { getMessageTextContent } from '@/app/utils'; +import { stream } from '@/app/utils/chat'; +import { fetch } from '@/app/utils/stream'; import { - ChatOptions, getHeaders, - LLMApi, - LLMModel, - SpeechOptions, -} from "../api"; -import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; -import { RequestPayload } from "./openai"; -import { fetch } from "@/app/utils/stream"; +} from '../api'; export class XAIApi implements LLMApi { private disableListModels = true; @@ -27,7 +31,7 @@ export class XAIApi implements LLMApi { path(path: string): string { const accessStore = useAccessStore.getState(); - let baseUrl = ""; + let baseUrl = ''; if (accessStore.useCustomConfig) { baseUrl = accessStore.xaiUrl; @@ -39,28 +43,28 @@ export class XAIApi implements LLMApi { baseUrl = isApp ? XAI_BASE_URL : apiPath; } - if (baseUrl.endsWith("/")) { + if (baseUrl.endsWith('/')) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) { - baseUrl = "https://" + baseUrl; + if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.XAI)) { + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy Endpoint] ", baseUrl, path); + console.log('[Proxy Endpoint] ', baseUrl, path); - return [baseUrl, path].join("/"); + return [baseUrl, path].join('/'); } extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + return res.choices?.at(0)?.message?.content ?? 
''; } speech(options: SpeechOptions): Promise { - throw new Error("Method not implemented."); + throw new Error('Method not implemented.'); } async chat(options: ChatOptions) { - const messages: ChatOptions["messages"] = []; + const messages: ChatOptions['messages'] = []; for (const v of options.messages) { const content = getMessageTextContent(v); messages.push({ role: v.role, content }); @@ -85,7 +89,7 @@ export class XAIApi implements LLMApi { top_p: modelConfig.top_p, }; - console.log("[Request] xai payload: ", requestPayload); + console.log('[Request] xai payload: ', requestPayload); const shouldStream = !!options.config.stream; const controller = new AbortController(); @@ -94,7 +98,7 @@ export class XAIApi implements LLMApi { try { const chatPath = this.path(XAI.ChatPath); const chatPayload = { - method: "POST", + method: 'POST', body: JSON.stringify(requestPayload), signal: controller.signal, headers: getHeaders(), @@ -145,7 +149,7 @@ export class XAIApi implements LLMApi { }); } else { // @ts-ignore - runTools[index]["function"]["arguments"] += args; + runTools[index].function.arguments += args; } } return choices[0]?.delta?.content; @@ -176,10 +180,11 @@ export class XAIApi implements LLMApi { options.onFinish(message, res); } } catch (e) { - console.log("[Request] failed to make a chat request", e); + console.log('[Request] failed to make a chat request', e); options.onError?.(e as Error); } } + async usage() { return { used: 0, diff --git a/app/command.ts b/app/command.ts index aec73ef53d6..663278a7b7d 100644 --- a/app/command.ts +++ b/app/command.ts @@ -1,6 +1,6 @@ -import { useEffect } from "react"; -import { useSearchParams } from "react-router-dom"; -import Locale from "./locales"; +import { useEffect } from 'react'; +import { useSearchParams } from 'react-router-dom'; +import Locale from './locales'; type Command = (param: string) => void; interface Commands { @@ -18,7 +18,7 @@ export function useCommand(commands: Commands = {}) { let shouldUpdate = 
false; searchParams.forEach((param, name) => { const commandName = name as keyof Commands; - if (typeof commands[commandName] === "function") { + if (typeof commands[commandName] === 'function') { commands[commandName]!(param); searchParams.delete(name); shouldUpdate = true; @@ -58,16 +58,16 @@ export function useChatCommand(commands: ChatCommands = {}) { const input = extract(userInput); const desc = Locale.Chat.Commands; return Object.keys(commands) - .filter((c) => c.startsWith(input)) - .map((c) => ({ + .filter(c => c.startsWith(input)) + .map(c => ({ title: desc[c as keyof ChatCommands], - content: ":" + c, + content: `:${c}`, })); } function match(userInput: string) { const command = extract(userInput); - const matched = typeof commands[command] === "function"; + const matched = typeof commands[command] === 'function'; return { matched, diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx index ce187fbcb2c..87cdacc50ad 100644 --- a/app/components/artifacts.tsx +++ b/app/components/artifacts.tsx @@ -1,44 +1,44 @@ +import { ApiPath, Path, REPO_URL } from '@/app/constant'; +import { nanoid } from 'nanoid'; import { - useEffect, - useState, - useRef, - useMemo, forwardRef, + useEffect, useImperativeHandle, -} from "react"; -import { useParams } from "react-router"; -import { IconButton } from "./button"; -import { nanoid } from "nanoid"; -import ExportIcon from "../icons/share.svg"; -import CopyIcon from "../icons/copy.svg"; -import DownloadIcon from "../icons/download.svg"; -import GithubIcon from "../icons/github.svg"; -import LoadingButtonIcon from "../icons/loading.svg"; -import ReloadButtonIcon from "../icons/reload.svg"; -import Locale from "../locales"; -import { Modal, showToast } from "./ui-lib"; -import { copyToClipboard, downloadAs } from "../utils"; -import { Path, ApiPath, REPO_URL } from "@/app/constant"; -import { Loading } from "./home"; -import styles from "./artifacts.module.scss"; + useMemo, + useRef, + useState, +} from 
'react'; +import { useParams } from 'react-router'; +import CopyIcon from '../icons/copy.svg'; +import DownloadIcon from '../icons/download.svg'; +import GithubIcon from '../icons/github.svg'; +import LoadingButtonIcon from '../icons/loading.svg'; +import ReloadButtonIcon from '../icons/reload.svg'; +import ExportIcon from '../icons/share.svg'; +import Locale from '../locales'; +import { copyToClipboard, downloadAs } from '../utils'; +import styles from './artifacts.module.scss'; +import { IconButton } from './button'; +import { Loading } from './home'; +import { Modal, showToast } from './ui-lib'; -type HTMLPreviewProps = { +interface HTMLPreviewProps { code: string; autoHeight?: boolean; height?: number | string; onLoad?: (title?: string) => void; -}; +} -export type HTMLPreviewHander = { +export interface HTMLPreviewHander { reload: () => void; -}; +} export const HTMLPreview = forwardRef( - function HTMLPreview(props, ref) { + (props, ref) => { const iframeRef = useRef(null); const [frameId, setFrameId] = useState(nanoid()); const [iframeHeight, setIframeHeight] = useState(600); - const [title, setTitle] = useState(""); + const [title, setTitle] = useState(''); /* * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an * 1. 
using srcdoc @@ -55,9 +55,9 @@ export const HTMLPreview = forwardRef( setIframeHeight(height); } }; - window.addEventListener("message", handleMessage); + window.addEventListener('message', handleMessage); return () => { - window.removeEventListener("message", handleMessage); + window.removeEventListener('message', handleMessage); }; }, [frameId]); @@ -68,8 +68,9 @@ export const HTMLPreview = forwardRef( })); const height = useMemo(() => { - if (!props.autoHeight) return props.height || 600; - if (typeof props.height === "string") { + if (!props.autoHeight) + { return props.height || 600; } + if (typeof props.height === 'string') { return props.height; } const parentHeight = props.height || 600; @@ -80,8 +81,8 @@ export const HTMLPreview = forwardRef( const srcDoc = useMemo(() => { const script = ``; - if (props.code.includes("")) { - props.code.replace("", "" + script); + if (props.code.includes('')) { + props.code.replace('', `${script}`); } return script + props.code; }, [props.code, frameId]); @@ -94,7 +95,7 @@ export const HTMLPreview = forwardRef( return (