diff --git a/.env.template b/.env.template
index d53c1be6caa..b82a6dacb18 100644
--- a/.env.template
+++ b/.env.template
@@ -47,3 +47,4 @@ ENABLE_BALANCE_QUERY=
 
 # If you want to disable parse settings from url, set this value to 1.
 DISABLE_FAST_LINK=
+CUSTOM_MODELS=+glm-3-turbo,+glm-4,+ERNIE-Bot=文心一言3.5,+ERNIE-Bot-turbo=文心一言3.5turbo,+ERNIE-Bot-4,+BLOOMZ-7B,+Qianfan-Chinese-Llama-2-7B,+Qianfan-Chinese-Llama-2-13B,+ChatGLM2-6B-32K,+AquilaChat-7B,+SQLCoder-7B,+CodeLlama-7B-Instruct,+XuanYuan-70B-Chat-4bit,+ChatLaw,+Yi-34B-Chat,+Mixtral-8x7B-Instruct
diff --git a/app/constant.ts b/app/constant.ts
index 9041706874f..502d5249ae8 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -10,6 +10,7 @@ export const RUNTIME_CONFIG_DOM = "danger-runtime-config";
 
 export const DEFAULT_API_HOST = "https://api.nextchat.dev";
 export const OPENAI_BASE_URL = "https://api.openai.com";
+export const ZHIPU_BASE_URL = "";
 
 export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
 
@@ -118,6 +119,24 @@ export const KnowledgeCutOffDate: Record<string, string> = {
 };
 
 export const DEFAULT_MODELS = [
+  {
+    name: "glm-4",
+    available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
+  },
+  {
+    name: "glm-3-turbo",
+    available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
+  },
   {
     name: "gpt-4",
     available: true,
diff --git a/app/locales/cn.ts b/app/locales/cn.ts
index 5d0c284283e..b992b45d27d 100644
--- a/app/locales/cn.ts
+++ b/app/locales/cn.ts
@@ -360,7 +360,8 @@ const cn = {
   },
   Store: {
     DefaultTopic: "新的聊天",
-    BotHello: "有什么可以帮你的吗",
+    BotHello:
+      "有什么可以帮你的吗\nTips:所有内容都将发送到对应外部服务器,注意保密。",
     Error: "出错了,稍后重试吧",
     Prompt: {
       History: (content: string) => "这是历史聊天总结作为前情提要:" + content,
diff --git a/app/store/config.ts b/app/store/config.ts
index 6f2f558a042..deecd67dcb7 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -46,15 +46,15 @@ export const DEFAULT_CONFIG = {
   models: DEFAULT_MODELS as any as LLMModel[],
 
   modelConfig: {
-    model: "gpt-3.5-turbo" as ModelType,
+    model: "glm-4" as ModelType,
     temperature: 0.5,
-    top_p: 1,
-    max_tokens: 4000,
+    top_p: 0.7,
+    max_tokens: 32768,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
-    historyMessageCount: 4,
-    compressMessageLengthThreshold: 1000,
+    historyMessageCount: 8,
+    compressMessageLengthThreshold: 16384,
     enableInjectSystemPrompts: true,
     template: DEFAULT_INPUT_TEMPLATE,
   },
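
The `CUSTOM_MODELS` value added to `.env.template` follows NextChat's documented convention: `+name` adds a model, `-name` hides one, `name=displayName` assigns a UI alias, and entries are comma-separated. A minimal sketch of that convention (the `parseCustomModels` function and `Entry` shape are illustrative names, not NextChat's actual parser):

```ts
// Sketch of the CUSTOM_MODELS convention used in .env.template above.
// NOTE: parseCustomModels and Entry are hypothetical names for illustration.
interface Entry {
  name: string;        // model identifier sent to the API
  displayName: string; // label shown in the UI
  enabled: boolean;    // "+" adds/shows the model, "-" hides it
}

function parseCustomModels(raw: string): Entry[] {
  return raw
    .split(",")
    .map((item) => item.trim())
    .filter((item) => item.length > 0)
    .map((item) => {
      const enabled = !item.startsWith("-");
      const stripped = item.replace(/^[+-]/, "");
      // "ERNIE-Bot=文心一言3.5" → name "ERNIE-Bot", alias "文心一言3.5"
      const [name, displayName] = stripped.split("=");
      return { name, displayName: displayName ?? name, enabled };
    });
}

// parseCustomModels("+glm-4,+ERNIE-Bot=文心一言3.5")
// → [{ name: "glm-4", ... }, { name: "ERNIE-Bot", displayName: "文心一言3.5", ... }]
```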
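In `app/constant.ts`, `glm-4` and `glm-3-turbo` are registered under the existing `openai` provider, so requests for them are expected to travel through the OpenAI-compatible client, with `ZHIPU_BASE_URL` (left empty in this diff) supplying the GLM endpoint per deployment. A hedged sketch of such a call; the base URL and the `ZHIPU_API_KEY` variable are assumptions, not values from this change:

```ts
// Sketch: calling glm-4 through an OpenAI-compatible chat endpoint.
// ASSUMPTIONS: the ZhipuAI endpoint below and the env var ZHIPU_API_KEY are
// illustrative; this diff deliberately leaves ZHIPU_BASE_URL empty.
const ZHIPU_BASE_URL = "https://open.bigmodel.cn/api/paas/v4"; // assumed

async function chatWithGlm4(userMessage: string): Promise<string> {
  const res = await fetch(`${ZHIPU_BASE_URL}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.ZHIPU_API_KEY}`,
    },
    body: JSON.stringify({
      model: "glm-4",
      temperature: 0.5, // matches the new DEFAULT_CONFIG defaults above
      top_p: 0.7,
      messages: [{ role: "user", content: userMessage }],
    }),
  });
  if (!res.ok) throw new Error(`GLM request failed: ${res.status}`);
  const data = await res.json();
  return data.choices[0].message.content;
}
```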
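The new `app/store/config.ts` defaults trade earlier summarization for more raw context: `historyMessageCount` doubles from 4 to 8, and `compressMessageLengthThreshold` rises from 1000 to 16384, so history compression kicks in much later. A rough sketch of how such a threshold can gate summarization, using illustrative types rather than NextChat's actual chat store:

```ts
// Sketch of threshold-gated history compression.
// ASSUMPTION: shouldCompress and Message are illustrative; the real
// summarization logic lives in NextChat's chat store, but the gating
// idea (summarize only past a length threshold) is the same.
interface Message {
  role: "system" | "user" | "assistant";
  content: string;
}

function shouldCompress(
  history: Message[],
  compressMessageLengthThreshold: number, // 16384 with the new defaults
): boolean {
  const totalLength = history.reduce((sum, m) => sum + m.content.length, 0);
  return totalLength > compressMessageLengthThreshold;
}

// With the old default (1000) nearly every multi-turn chat was summarized;
// at 16384, summarization is deferred so glm-4 sees more raw history
// within its larger context window.
```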