diff --git a/package.json b/package.json index d1ccb03e6..1587bf40b 100644 --- a/package.json +++ b/package.json @@ -56,6 +56,7 @@ "eslint-plugin-react": "^7.34.3", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-unused-imports": "^4.0.0", + "gpt-tokens": "^1.3.6", "i18next": "^23.11.5", "localforage": "^1.10.0", "lodash": "^4.17.21", diff --git a/src/renderer/src/assets/styles/index.scss b/src/renderer/src/assets/styles/index.scss index 6d4cc4666..4e7dddded 100644 --- a/src/renderer/src/assets/styles/index.scss +++ b/src/renderer/src/assets/styles/index.scss @@ -38,7 +38,7 @@ --topic-list-width: 250px; --settings-width: var(--assistants-width); --status-bar-height: 40px; - --input-bar-height: 120px; + --input-bar-height: 125px; } *, diff --git a/src/renderer/src/components/app/Sidebar.tsx b/src/renderer/src/components/app/Sidebar.tsx index ef30a43a8..6f2f0e382 100644 --- a/src/renderer/src/components/app/Sidebar.tsx +++ b/src/renderer/src/components/app/Sidebar.tsx @@ -44,14 +44,13 @@ const Container = styled.div` display: flex; flex-direction: column; align-items: center; - padding: 12px 0; + padding: 8px 0; min-width: var(--sidebar-width); min-height: 100%; -webkit-app-region: drag !important; background-color: #1f1f1f; border-right: 0.5px solid var(--color-border); - margin-top: var(--navbar-height); - padding-bottom: calc(var(--navbar-height) + 6px); + padding-top: var(--navbar-height); ` const AvatarImg = styled.img` @@ -60,6 +59,7 @@ const AvatarImg = styled.img` height: 28px; background-color: var(--color-background-soft); margin: 5px 0; + margin-top: 12px; ` const MainMenus = styled.div` display: flex; diff --git a/src/renderer/src/config/constant.ts b/src/renderer/src/config/constant.ts index e69de29bb..a1d31d054 100644 --- a/src/renderer/src/config/constant.ts +++ b/src/renderer/src/config/constant.ts @@ -0,0 +1,3 @@ +export const DEFAULT_TEMPERATURE = 0.7 +export const DEFAULT_MAXTOKENS = 800 +export const DEFAULT_CONEXTCOUNT = 5 diff --git 
a/src/renderer/src/hooks/useProvider.ts b/src/renderer/src/hooks/useProvider.ts index d4292148c..62c6760fc 100644 --- a/src/renderer/src/hooks/useProvider.ts +++ b/src/renderer/src/hooks/useProvider.ts @@ -9,9 +9,15 @@ import { } from '@renderer/store/llm' import { Assistant, Model, Provider } from '@renderer/types' import { useDefaultModel } from './useAssistant' +import { createSelector } from '@reduxjs/toolkit' + +const selectEnabledProviders = createSelector( + (state) => state.llm.providers, + (providers) => providers.filter((p) => p.enabled) +) export function useProviders() { - const providers = useAppSelector((state) => state.llm.providers.filter((p) => p.enabled)) + const providers = useAppSelector(selectEnabledProviders) const dispatch = useAppDispatch() return { diff --git a/src/renderer/src/i18n/index.ts b/src/renderer/src/i18n/index.ts index 046d21a87..e7cfe431a 100644 --- a/src/renderer/src/i18n/index.ts +++ b/src/renderer/src/i18n/index.ts @@ -64,7 +64,18 @@ const resources = { 'input.clear.content': 'Are you sure to clear all messages?', 'input.placeholder': 'Type your message here...', 'input.send': 'Send', - 'input.pause': 'Pause' + 'input.pause': 'Pause', + 'input.settings': 'Settings', + 'settings.temperature': 'Temperature', + 'settings.temperature.tip': + 'Lower values make the model more deterministic and precise, while higher values make it more creative and unpredictable.', + 'settings.max_tokens': 'Max Tokens', + 'settings.max_tokens.tip': 'The maximum number of tokens to generate in the completion.', + 'settings.conext_count': 'Context', + 'settings.conext_count.tip': 'The number of previous messages to keep in the context.', + 'settings.reset': 'Reset', + 'settings.set_as_default': 'Apply to default assistant', + 'settings.max': 'Max' }, apps: { title: 'Agents' }, @@ -112,6 +123,7 @@ const resources = { 'models.add.group_name.placeholder': 'Optional e.g. 
ChatGPT', 'models.empty': 'No models found', 'assistant.title': 'Default Assistant', + 'assistant.model_params': 'Model Parameters', 'about.description': 'A powerful AI assistant for producer', 'about.updateNotAvailable': 'You are using the latest version', 'about.checkingUpdate': 'Checking for updates...', @@ -186,7 +198,20 @@ const resources = { 'input.clear.content': '确定要清除所有消息吗?', 'input.placeholder': '在这里输入消息...', 'input.send': '发送', - 'input.pause': '暂停' + 'input.pause': '暂停', + 'input.settings': '设置', + 'settings.temperature': '模型温度', + 'settings.temperature.tip': + '模型生成文本的随机程度。值越大,回复内容越富有多样性、创造性、随机性;设为 0 根据事实回答。日常聊天建议设置为 0.7', + 'settings.max_tokens': '最大回复', + 'settings.max_tokens.tip': + '最大回复内容多少,数值越大,生成的文本越长。普通聊天建议 500-800;短文生成建议 800-2000;代码生成建议 2000-3600;长文生成建议切换模型到 4000 左右', + 'settings.conext_count': '上下文数', + 'settings.conext_count.tip': + '要保留在上下文中的消息数量,数值越大,上下文越长,消耗的 token 越多。普通聊天建议 5-10,代码生成建议 5-10', + 'settings.reset': '重置', + 'settings.set_as_default': '应用到默认助手', + 'settings.max': '不限' }, apps: { title: '智能体' }, @@ -234,6 +259,7 @@ const resources = { 'models.add.group_name.placeholder': '例如 ChatGPT', 'models.empty': '没有模型', 'assistant.title': '默认助手', + 'assistant.model_params': '模型参数', 'about.description': '一个为创造者而生的 AI 助手', 'about.updateNotAvailable': '你的软件已是最新版本', 'about.checkingUpdate': '正在检查更新...', diff --git a/src/renderer/src/pages/home/components/AssistantSettings.tsx b/src/renderer/src/pages/home/components/AssistantSettings.tsx new file mode 100644 index 000000000..55012eb93 --- /dev/null +++ b/src/renderer/src/pages/home/components/AssistantSettings.tsx @@ -0,0 +1,219 @@ +import { QuestionCircleOutlined } from '@ant-design/icons' +import { DEFAULT_CONEXTCOUNT, DEFAULT_MAXTOKENS, DEFAULT_TEMPERATURE } from '@renderer/config/constant' +import { useAssistants } from '@renderer/hooks/useAssistant' +import { Assistant } from '@renderer/types' +import { Button, Col, InputNumber, Popover, Row, Slider, Tooltip } from 'antd' +import { FC, 
PropsWithChildren, useState } from 'react' +import { useTranslation } from 'react-i18next' +import styled from 'styled-components' + +interface Props { + assistant: Assistant +} + +const PopoverContent: FC = ({ assistant }) => { + const { updateAssistant } = useAssistants() + const [temperature, setTemperature] = useState(assistant.settings?.temperature ?? DEFAULT_TEMPERATURE) + const [maxTokens, setMaxTokens] = useState(assistant.settings?.maxTokens ?? DEFAULT_MAXTOKENS) + const [contextCount, setConextCount] = useState(assistant.settings?.contextCount ?? DEFAULT_CONEXTCOUNT) + const { t } = useTranslation() + + const onUpdateAssistantSettings = ({ + _temperature, + _maxTokens, + _contextCount + }: { + _temperature?: number + _maxTokens?: number + _contextCount?: number + }) => { + updateAssistant({ + ...assistant, + settings: { + ...assistant.settings, + temperature: _temperature ?? temperature, + maxTokens: _maxTokens ?? maxTokens, + contextCount: _contextCount ?? contextCount + } + }) + } + + const onTemperatureChange = (value) => { + if (!isNaN(value as number)) { + setTemperature(value) + onUpdateAssistantSettings({ _temperature: value }) + } + } + + const onMaxTokensChange = (value) => { + if (!isNaN(value as number)) { + setMaxTokens(value) + onUpdateAssistantSettings({ _maxTokens: value }) + } + } + + const onConextCountChange = (value) => { + if (!isNaN(value as number)) { + setConextCount(value) + onUpdateAssistantSettings({ _contextCount: value }) + } + } + + const onReset = () => { + setTemperature(DEFAULT_TEMPERATURE) + setMaxTokens(DEFAULT_MAXTOKENS) + setConextCount(DEFAULT_CONEXTCOUNT) + updateAssistant({ + ...assistant, + settings: { + ...assistant.settings, + temperature: DEFAULT_TEMPERATURE, + maxTokens: DEFAULT_MAXTOKENS, + contextCount: DEFAULT_CONEXTCOUNT + } + }) + } + + return ( + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ) +} + +const AssistantSettings: FC = ({ children, 
assistant }) => { + const [open, setOpen] = useState(false) + const { t } = useTranslation() + + return ( + } trigger="click" onOpenChange={setOpen}> + {open ? ( + children + ) : ( + + {children} + + )} + + ) +} + +const Container = styled.div` + display: flex; + flex-direction: column; + margin-bottom: 8px; + width: 500px; + padding: 5px; +` + +const Label = styled.p` + margin: 0; + font-size: 14px; + font-weight: bold; + margin-right: 5px; +` + +const QuestionIcon = styled(QuestionCircleOutlined)` + font-size: 14px; + cursor: pointer; + color: var(--color-text-3); +` + +export default AssistantSettings diff --git a/src/renderer/src/pages/home/components/Chat.tsx b/src/renderer/src/pages/home/components/Chat.tsx index 2e503cdda..6fbf70031 100644 --- a/src/renderer/src/pages/home/components/Chat.tsx +++ b/src/renderer/src/pages/home/components/Chat.tsx @@ -1,5 +1,5 @@ -import { Assistant } from '@renderer/types' -import { FC } from 'react' +import { Assistant, Message } from '@renderer/types' +import { FC, useRef } from 'react' import styled from 'styled-components' import Inputbar from './Inputbar' import Messages from './Messages' @@ -15,6 +15,7 @@ interface Props { const Chat: FC = (props) => { const { assistant } = useAssistant(props.assistant.id) const { activeTopic, setActiveTopic } = useActiveTopic(assistant) + const messagesRef = useRef([]) if (!assistant) { return null @@ -23,8 +24,8 @@ const Chat: FC = (props) => { return ( - - + + diff --git a/src/renderer/src/pages/home/components/Inputbar.tsx b/src/renderer/src/pages/home/components/Inputbar.tsx index 1922bdef2..b111d4bbd 100644 --- a/src/renderer/src/pages/home/components/Inputbar.tsx +++ b/src/renderer/src/pages/home/components/Inputbar.tsx @@ -1,7 +1,7 @@ import { EVENT_NAMES, EventEmitter } from '@renderer/services/event' import { Assistant, Message, Topic } from '@renderer/types' -import { uuid } from '@renderer/utils' -import { FC, useCallback, useEffect, useRef, useState } from 'react' +import { 
estimateTokenCount, uuid } from '@renderer/utils' +import { FC, MutableRefObject, useCallback, useEffect, useRef, useState } from 'react' import styled from 'styled-components' import { MoreOutlined } from '@ant-design/icons' import { Button, Popconfirm, Tooltip } from 'antd' @@ -13,7 +13,8 @@ import { FullscreenOutlined, HistoryOutlined, PauseCircleOutlined, - PlusCircleOutlined + PlusCircleOutlined, + SettingOutlined } from '@ant-design/icons' import TextArea, { TextAreaRef } from 'antd/es/input/TextArea' import { isEmpty } from 'lodash' @@ -24,13 +25,15 @@ import store, { useAppSelector } from '@renderer/store' import { getDefaultTopic } from '@renderer/services/assistant' import { useTranslation } from 'react-i18next' import { setGenerating } from '@renderer/store/runtime' +import AssistantSettings from './AssistantSettings' interface Props { assistant: Assistant setActiveTopic: (topic: Topic) => void + messagesRef: MutableRefObject } -const Inputbar: FC = ({ assistant, setActiveTopic }) => { +const Inputbar: FC = ({ assistant, setActiveTopic, messagesRef }) => { const [text, setText] = useState('') const { setShowRightSidebar } = useShowRightSidebar() const { addTopic } = useAssistant(assistant.id) @@ -93,6 +96,8 @@ const Inputbar: FC = ({ assistant, setActiveTopic }) => { store.dispatch(setGenerating(false)) } + const textCount = text.length === 0 ? '' : (text: string) => estimateTokenCount(text, assistant, messagesRef.current) + // Command or Ctrl + N create new topic useEffect(() => { const onKeydown = (e) => { @@ -148,6 +153,11 @@ const Inputbar: FC = ({ assistant, setActiveTopic }) => { + + + + + setExpend(!expended)}> {expended ? 
: } @@ -177,8 +187,19 @@ const Inputbar: FC = ({ assistant, setActiveTopic }) => { autoFocus contextMenu="true" variant="borderless" - styles={{ textarea: { paddingLeft: 0 } }} + showCount + count={{ strategy: textCount }} ref={inputRef} + styles={{ + textarea: { paddingLeft: 0 }, + count: { + position: 'absolute', + right: 5, + bottom: 5, + fontSize: 11, + display: text.length === 0 ? 'none' : 'block' + } + }} /> ) diff --git a/src/renderer/src/pages/home/components/Messages.tsx b/src/renderer/src/pages/home/components/Messages.tsx index 70de87a3f..0461dd5f7 100644 --- a/src/renderer/src/pages/home/components/Messages.tsx +++ b/src/renderer/src/pages/home/components/Messages.tsx @@ -1,7 +1,7 @@ import { EVENT_NAMES, EventEmitter } from '@renderer/services/event' import { Assistant, Message, Topic } from '@renderer/types' import localforage from 'localforage' -import { FC, useCallback, useEffect, useRef, useState } from 'react' +import { FC, MutableRefObject, useCallback, useEffect, useRef, useState } from 'react' import styled from 'styled-components' import MessageItem from './Message' import { reverse } from 'lodash' @@ -15,14 +15,15 @@ import { t } from 'i18next' interface Props { assistant: Assistant topic: Topic + messagesRef: MutableRefObject } -const Messages: FC = ({ assistant, topic }) => { +const Messages: FC = ({ assistant, topic, messagesRef }) => { const [messages, setMessages] = useState([]) const [lastMessage, setLastMessage] = useState(null) const { updateTopic } = useAssistant(assistant.id) const provider = useProviderByAssistant(assistant) - const messagesRef = useRef(null) + const containerRef = useRef(null) const assistantDefaultMessage: Message = { id: 'assistant', @@ -95,11 +96,12 @@ const Messages: FC = ({ assistant, topic }) => { }, [topic.id]) useEffect(() => { - messagesRef.current?.scrollTo({ top: 100000, behavior: 'auto' }) - }, [messages]) + containerRef.current?.scrollTo({ top: 100000, behavior: 'auto' }) + messagesRef.current = 
messages + }, [messages, messagesRef]) return ( - + {lastMessage && } {reverse([...messages]).map((message, index) => ( diff --git a/src/renderer/src/pages/settings/AssistantSettings.tsx b/src/renderer/src/pages/settings/AssistantSettings.tsx index 45c557f5a..01b93f273 100644 --- a/src/renderer/src/pages/settings/AssistantSettings.tsx +++ b/src/renderer/src/pages/settings/AssistantSettings.tsx @@ -1,15 +1,77 @@ -import { FC } from 'react' -import { SettingContainer, SettingDivider, SettingSubtitle, SettingTitle } from './components' -import { Input } from 'antd' -import TextArea from 'antd/es/input/TextArea' +import { QuestionCircleOutlined } from '@ant-design/icons' +import { DEFAULT_CONEXTCOUNT, DEFAULT_MAXTOKENS, DEFAULT_TEMPERATURE } from '@renderer/config/constant' import { useDefaultAssistant } from '@renderer/hooks/useAssistant' +import { Button, Col, Input, InputNumber, Row, Slider, Tooltip } from 'antd' +import TextArea from 'antd/es/input/TextArea' +import { FC, useState } from 'react' import { useTranslation } from 'react-i18next' +import styled from 'styled-components' +import { SettingContainer, SettingDivider, SettingSubtitle, SettingTitle } from './components' const AssistantSettings: FC = () => { const { defaultAssistant, updateDefaultAssistant } = useDefaultAssistant() + const [temperature, setTemperature] = useState(defaultAssistant.settings?.temperature || DEFAULT_TEMPERATURE) + const [maxTokens, setMaxTokens] = useState(defaultAssistant.settings?.maxTokens || DEFAULT_MAXTOKENS) + const [contextCount, setConextCount] = useState(defaultAssistant.settings?.contextCount || DEFAULT_CONEXTCOUNT) const { t } = useTranslation() + const onUpdateAssistantSettings = ({ + _temperature, + _maxTokens, + _contextCount + }: { + _temperature?: number + _maxTokens?: number + _contextCount?: number + }) => { + updateDefaultAssistant({ + ...defaultAssistant, + settings: { + ...defaultAssistant.settings, + temperature: _temperature || temperature, + maxTokens: 
_maxTokens || maxTokens, + contextCount: _contextCount || contextCount + } + }) + } + + const onTemperatureChange = (value) => { + if (!isNaN(value as number)) { + setTemperature(value) + onUpdateAssistantSettings({ _temperature: value }) + } + } + + const onMaxTokensChange = (value) => { + if (!isNaN(value as number)) { + setMaxTokens(value) + onUpdateAssistantSettings({ _maxTokens: value }) + } + } + + const onConextCountChange = (value) => { + if (!isNaN(value as number)) { + setConextCount(value) + onUpdateAssistantSettings({ _contextCount: value }) + } + } + + const onReset = () => { + setTemperature(DEFAULT_TEMPERATURE) + setMaxTokens(DEFAULT_MAXTOKENS) + setConextCount(DEFAULT_CONEXTCOUNT) + updateDefaultAssistant({ + ...defaultAssistant, + settings: { + ...defaultAssistant.settings, + temperature: DEFAULT_TEMPERATURE, + maxTokens: DEFAULT_MAXTOKENS, + contextCount: DEFAULT_CONEXTCOUNT + } + }) + } + return ( {t('settings.assistant.title')} @@ -27,8 +89,110 @@ const AssistantSettings: FC = () => { value={defaultAssistant.prompt} onChange={(e) => updateDefaultAssistant({ ...defaultAssistant, prompt: e.target.value })} /> + + {t('settings.assistant.model_params')} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ) } +const Label = styled.p` + margin: 0; + font-size: 14px; + font-weight: bold; + margin-right: 5px; +` + +const QuestionIcon = styled(QuestionCircleOutlined)` + font-size: 14px; + cursor: pointer; + color: var(--color-text-3); +` + export default AssistantSettings diff --git a/src/renderer/src/services/ProviderSDK.ts b/src/renderer/src/services/ProviderSDK.ts index a1247dac5..a770366a8 100644 --- a/src/renderer/src/services/ProviderSDK.ts +++ b/src/renderer/src/services/ProviderSDK.ts @@ -6,7 +6,7 @@ import { ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam } fr import { sum, takeRight } from 'lodash' import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources' 
import { EVENT_NAMES } from './event' -import { removeQuotes } from '@renderer/utils' +import { getAssistantSettings, removeQuotes } from '@renderer/utils' export default class ProviderSDK { provider: Provider @@ -32,10 +32,11 @@ export default class ProviderSDK { ) { const defaultModel = getDefaultModel() const model = assistant.model || defaultModel + const { contextCount, maxTokens } = getAssistantSettings(assistant) const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined - const userMessages = takeRight(messages, 5).map((message) => ({ + const userMessages = takeRight(messages, contextCount).map((message) => ({ role: message.role, content: message.content })) @@ -43,9 +44,10 @@ export default class ProviderSDK { if (this.isAnthropic) { await this.anthropicSdk.messages .stream({ - max_tokens: 2048, + model: model.id, messages: [systemMessage, ...userMessages].filter(Boolean) as MessageParam[], - model: model.id + max_tokens: assistant.settings?.maxTokens || maxTokens, + temperature: assistant.settings?.temperature }) .on('text', (text) => onChunk({ text: text || '' })) .on('finalMessage', (message) => @@ -61,7 +63,9 @@ export default class ProviderSDK { const stream = await this.openaiSdk.chat.completions.create({ model: model.id, messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[], - stream: true + stream: true, + max_tokens: assistant.settings?.maxTokens, + temperature: assistant.settings?.temperature }) for await (const chunk of stream) { if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts index 48fcc123f..cf5a7cd0d 100644 --- a/src/renderer/src/types/index.ts +++ b/src/renderer/src/types/index.ts @@ -7,6 +7,13 @@ export type Assistant = { prompt: string topics: Topic[] model?: Model + settings?: AssistantSettings +} + +export type AssistantSettings = { + contextCount: number + temperature: 
number + maxTokens: number } export type Message = { diff --git a/src/renderer/src/utils/index.ts b/src/renderer/src/utils/index.ts index 1633a9472..a48014fc7 100644 --- a/src/renderer/src/utils/index.ts +++ b/src/renderer/src/utils/index.ts @@ -1,6 +1,9 @@ import { v4 as uuidv4 } from 'uuid' import imageCompression from 'browser-image-compression' -import { Model } from '@renderer/types' +import { Assistant, AssistantSettings, Message, Model } from '@renderer/types' +import { GPTTokens } from 'gpt-tokens' +import { DEFAULT_CONEXTCOUNT, DEFAULT_MAXTOKENS, DEFAULT_TEMPERATURE } from '@renderer/config/constant' +import { take } from 'lodash' export const runAsyncFunction = async (fn: () => void) => { await fn() @@ -164,3 +167,33 @@ export function getFirstCharacter(str) { return char } } + +export const getAssistantSettings = (assistant: Assistant): AssistantSettings => { + return { + contextCount: assistant.settings?.contextCount ?? DEFAULT_CONEXTCOUNT, + temperature: assistant.settings?.temperature ?? DEFAULT_TEMPERATURE, + maxTokens: assistant.settings?.maxTokens ?? 
DEFAULT_MAXTOKENS + } +} + +export function estimateTokenCount(text: string, assistant: Assistant, msgs: Message[]) { + const { contextCount } = getAssistantSettings(assistant) + + console.debug('contextCount', contextCount) + + const input = new GPTTokens({ + model: 'gpt-4o', + messages: [{ role: 'user', content: text }] + }) + + const all = new GPTTokens({ + model: 'gpt-4o', + messages: [ + { role: 'system', content: assistant.prompt }, + { role: 'user', content: text }, + ...take(msgs, contextCount).map((message) => ({ role: message.role, content: message.content })) + ] + }) + + return `Token ${input.usedTokens - 7} / ${all.usedTokens}` as unknown as number +} diff --git a/yarn.lock b/yarn.lock index 45c6df60f..8b52e3057 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3440,6 +3440,7 @@ __metadata: eslint-plugin-react: "npm:^7.34.3" eslint-plugin-react-hooks: "npm:^4.6.2" eslint-plugin-unused-imports: "npm:^4.0.0" + gpt-tokens: "npm:^1.3.6" i18next: "npm:^23.11.5" localforage: "npm:^1.10.0" lodash: "npm:^4.17.21" @@ -3797,6 +3798,13 @@ __metadata: languageName: node linkType: hard +"decimal.js@npm:^10.4.3": + version: 10.4.3 + resolution: "decimal.js@npm:10.4.3" + checksum: 10c0/6d60206689ff0911f0ce968d40f163304a6c1bc739927758e6efc7921cfa630130388966f16bf6ef6b838cb33679fbe8e7a78a2f3c478afce841fd55ac8fb8ee + languageName: node + linkType: hard + "decode-named-character-reference@npm:^1.0.0": version: 1.0.2 resolution: "decode-named-character-reference@npm:1.0.2" @@ -5241,6 +5249,17 @@ __metadata: languageName: node linkType: hard +"gpt-tokens@npm:^1.3.6": + version: 1.3.6 + resolution: "gpt-tokens@npm:1.3.6" + dependencies: + decimal.js: "npm:^10.4.3" + js-tiktoken: "npm:^1.0.10" + openai-chat-tokens: "npm:^0.2.8" + checksum: 10c0/0efc1da655a16a306df4f17646832693d7cbec569fe44d4fcc9d4a605f8614f1eb974e04b24a4e8c71095fe0fab6de7251a34c6e2d6805a5e1b5811eea37437b + languageName: node + linkType: hard + "graceful-fs@npm:^4.1.6, graceful-fs@npm:^4.2.0, 
graceful-fs@npm:^4.2.6": version: 4.2.11 resolution: "graceful-fs@npm:4.2.11" @@ -6067,6 +6086,15 @@ __metadata: languageName: node linkType: hard +"js-tiktoken@npm:^1.0.10, js-tiktoken@npm:^1.0.7": + version: 1.0.12 + resolution: "js-tiktoken@npm:1.0.12" + dependencies: + base64-js: "npm:^1.5.1" + checksum: 10c0/7afb4826e21342386a1884754fbc1c1828f948c4dd0ab093bf778d1323e65343bd5343d15f7cda46af396f1fe4a0297739936149b7c40a0601eefe3fcaef8727 + languageName: node + linkType: hard + "js-tokens@npm:^3.0.0 || ^4.0.0, js-tokens@npm:^4.0.0": version: 4.0.0 resolution: "js-tokens@npm:4.0.0" @@ -7194,6 +7222,15 @@ __metadata: languageName: node linkType: hard +"openai-chat-tokens@npm:^0.2.8": + version: 0.2.8 + resolution: "openai-chat-tokens@npm:0.2.8" + dependencies: + js-tiktoken: "npm:^1.0.7" + checksum: 10c0/b415fda706b408f29b4584998990f29ad7f80f2ac1e84179a0976742ba8a80859fedeae5745a9bfe73443d95960b77328610074952ad198a18bc0e5c0ceb5b7b + languageName: node + linkType: hard + "openai@npm:^4.52.1": version: 4.52.1 resolution: "openai@npm:4.52.1"