From 3499332b743f55daef8a17ea0ebaf86dbd167a7b Mon Sep 17 00:00:00 2001 From: CaptainB Date: Sat, 14 Sep 2024 15:46:35 +0800 Subject: [PATCH 01/20] =?UTF-8?q?fix:=20=E5=8F=8A=E6=97=B6=E6=9B=B4?= =?UTF-8?q?=E6=96=B0=E6=B5=81=E7=A8=8B=E5=9B=BE=E9=87=8C=E7=9A=84=E5=8F=98?= =?UTF-8?q?=E9=87=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ui/src/workflow/nodes/base-node/index.vue | 2 ++ ui/src/workflow/nodes/start-node/index.vue | 10 +++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ui/src/workflow/nodes/base-node/index.vue b/ui/src/workflow/nodes/base-node/index.vue index f90550a151..7db1fb168e 100644 --- a/ui/src/workflow/nodes/base-node/index.vue +++ b/ui/src/workflow/nodes/base-node/index.vue @@ -412,6 +412,7 @@ function openAddDialog(data?: any, index?: any) { function deleteField(index: any) { inputFieldList.value.splice(index, 1) + props.nodeModel.graphModel.eventCenter.emit('refreshFieldList', inputFieldList.value) } function refreshFieldList(data: any) { @@ -428,6 +429,7 @@ function refreshFieldList(data: any) { } currentIndex.value = null FieldFormDialogRef.value.close() + props.nodeModel.graphModel.eventCenter.emit('refreshFieldList', inputFieldList.value) } onMounted(() => { diff --git a/ui/src/workflow/nodes/start-node/index.vue b/ui/src/workflow/nodes/start-node/index.vue index 5dd8835dbb..a91e98cbba 100644 --- a/ui/src/workflow/nodes/start-node/index.vue +++ b/ui/src/workflow/nodes/start-node/index.vue @@ -41,7 +41,7 @@ const showicon = ref(false) const inputFieldList = ref([]) -onMounted(() => { +function handleRefreshFieldList(data: any[]) { props.nodeModel.graphModel.nodes .filter((v: any) => v.id === 'base-node') .map((v: any) => { @@ -56,6 +56,14 @@ onMounted(() => { ] inputFieldList.value = v.properties.input_field_list }) +} + +props.nodeModel.graphModel.eventCenter.on('refreshFieldList', (data: any) => { + handleRefreshFieldList(data) +}) + +onMounted(() => { + handleRefreshFieldList([]) }) From 080ccd11c4d198dfa72bdd7e4fe71e2d29ff6d8c Mon Sep 17 00:00:00 2001 From: CaptainB Date: Sat, 14 Sep 2024 18:31:34 +0800 Subject: [PATCH 02/20] =?UTF-8?q?refactor:=20=E8=B0=83=E6=95=B4=E9=87=87?= =?UTF-8?q?=E6=A0=B7=E7=8E=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ui/src/components/ai-chat/index.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/components/ai-chat/index.vue b/ui/src/components/ai-chat/index.vue index 9e62a4b4c6..3784368d4a 100644 --- a/ui/src/components/ai-chat/index.vue +++ b/ui/src/components/ai-chat/index.vue @@ -812,7 +812,7 @@ const startRecording = async () => { mediaRecorder.value = new Recorder({ type: 'mp3', bitRate: 128, - sampleRate: 44100 + sampleRate: 16000 }) mediaRecorder.value.open( From a873719a4936a2479ce35b308ab8a3f84e9a0d23 Mon Sep 17 00:00:00 2001 From: wxg0103 <727495428@qq.com> Date: Sat, 14 Sep 2024 18:36:37 +0800 Subject: [PATCH 03/20] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8Dtokens=E6=8F=8F?= =?UTF-8?q?=E8=BF=B0=E9=94=99=E8=AF=AF=E7=9A=84=E7=BC=BA=E9=99=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --bug=1046202 --user=王孝刚 【github#1144】【应用】参数设置,输出最大tokens描述错误 https://www.tapd.cn/57709429/s/1579168 --- apps/dataset/urls.py | 2 +- apps/dataset/views/document.py | 2 +- .../credential/llm.py | 2 +- .../azure_model_provider/credential/llm.py | 2 +- .../deepseek_model_provider/credential/llm.py | 2 +- .../gemini_model_provider/credential/llm.py | 2 +- 
.../kimi_model_provider/credential/llm.py | 2 +- .../ollama_model_provider/credential/llm.py | 2 +- .../openai_model_provider/credential/llm.py | 2 +- .../qwen_model_provider/credential/llm.py | 2 +- .../vllm_model_provider/credential/llm.py | 2 +- .../credential/llm.py | 2 +- .../wenxin_model_provider/credential/llm.py | 2 +- .../impl/xf_model_provider/credential/llm.py | 4 +- .../credential/llm.py | 2 +- .../zhipu_model_provider/credential/llm.py | 2 +- ui/src/api/document.ts | 89 +++++++++++-------- .../component/AccessSettingDrawer.vue | 13 ++- ui/src/views/document/index.vue | 16 ++++ 19 files changed, 97 insertions(+), 55 deletions(-) diff --git a/apps/dataset/urls.py b/apps/dataset/urls.py index 2068922ee2..405101796f 100644 --- a/apps/dataset/urls.py +++ b/apps/dataset/urls.py @@ -23,6 +23,7 @@ path('dataset//document/_bach', views.Document.Batch.as_view()), path('dataset//document/batch_hit_handling', views.Document.BatchEditHitHandling.as_view()), path('dataset//document//', views.Document.Page.as_view()), + path('dataset//document/batch_refresh', views.Document.BatchRefresh.as_view()), path('dataset//document/', views.Document.Operate.as_view(), name="document_operate"), path('dataset/document/split', views.Document.Split.as_view(), @@ -34,7 +35,6 @@ name="document_export"), path('dataset//document//sync', views.Document.SyncWeb.as_view()), path('dataset//document//refresh', views.Document.Refresh.as_view()), - path('dataset//document/batch_refresh', views.Document.BatchRefresh.as_view()), path('dataset//document//paragraph', views.Paragraph.as_view()), path( 'dataset//document//paragraph/migrate/dataset//document/', diff --git a/apps/dataset/views/document.py b/apps/dataset/views/document.py index c2ef152a0b..d41535b0bf 100644 --- a/apps/dataset/views/document.py +++ b/apps/dataset/views/document.py @@ -239,7 +239,7 @@ def put(self, request: Request, dataset_id: str, document_id: str): class BatchRefresh(APIView): authentication_classes = [TokenAuth] - @action(methods=['POST'], detail=False) + @action(methods=['PUT'], detail=False) @swagger_auto_schema(operation_summary="批量刷新文档向量库", operation_id="批量刷新文档向量库", request_body= diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py index 2392d3d046..e64d8b282c 100644 --- a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py @@ -19,7 +19,7 @@ class BedrockLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=1024, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py index 1906d20c58..b9e730aa0a 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class AzureLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=800, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py 
b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py index 72f101c421..ee2279bbc9 100644 --- a/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class DeepSeekLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=800, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py index 77e01df391..2612205d49 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class GeminiLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=800, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py index ef2aeb1222..1ee2fcee1e 100644 --- a/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class KimiLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=1024, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py index 14634b478c..5558bcab4c 100644 --- a/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py @@ -23,7 +23,7 @@ class OllamaLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=1024, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py index f7d244a534..58dfc13080 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class OpenAILLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=800, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py index 7ad0684545..a8177c5458 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class QwenModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', 
'较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=800, _min=1, _max=2048, diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py index 7ea3cabe44..97c6217c36 100644 --- a/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py @@ -19,7 +19,7 @@ class VLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=800, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py index f918b437dd..15fffec2c5 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class VolcanicEngineLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=1024, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py index be294e81a9..a77a6303fb 100644 --- a/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class WenxinLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=1024, _min=2, _max=2048, diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py index 770aff27d8..0a6d9a0ac7 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class XunFeiLLMModelGeneralParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=4096, _min=1, _max=4096, @@ -42,7 +42,7 @@ class XunFeiLLMModelProParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=4096, _min=1, _max=8192, diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py index bb17e5c223..6317ff6633 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py @@ -19,7 +19,7 @@ class XinferenceLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), 
required=True, default_value=800, _min=1, _max=4096, diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py index aee7441f14..dc1d1f191b 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py @@ -25,7 +25,7 @@ class ZhiPuLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), required=True, default_value=1024, _min=1, _max=4096, diff --git a/ui/src/api/document.ts b/ui/src/api/document.ts index 0653f2d406..d4d9b505b0 100644 --- a/ui/src/api/document.ts +++ b/ui/src/api/document.ts @@ -3,6 +3,7 @@ import { get, post, del, put, exportExcel } from '@/request/index' import type { Ref } from 'vue' import type { KeyValue } from '@/api/type/common' import type { pageRequest } from '@/api/type/common' + const prefix = '/dataset' /** @@ -26,14 +27,14 @@ const listSplitPattern: ( /** * 文档分页列表 - * @param 参数 dataset_id, + * @param 参数 dataset_id, * page { - "current_page": "string", - "page_size": "string", - } -* param { - "name": "string", - } + "current_page": "string", + "page_size": "string", + } + * param { + "name": "string", + } */ const getDocument: ( @@ -58,22 +59,22 @@ const getAllDocument: (dataset_id: string, loading?: Ref) => Promise Promise> = (dataset_id, data, loading) => { return del(`${prefix}/${dataset_id}/document/_bach`, undefined, { id_list: data }, loading) } + +const batchRefresh: ( + dataset_id: string, + data: any, + loading?: Ref +) => Promise> = (dataset_id, data, loading) => { + return put( + `${prefix}/${dataset_id}/document/batch_refresh`, + { id_list: data }, + undefined, + loading + ) +} /** * 文档详情 * @param 参数 dataset_id @@ -180,14 +194,14 @@ const delMulSyncDocument: ( /** * 创建Web站点文档 - * @param 参数 + * @param 参数 * { - "source_url_list": [ - "string" - ], - "selector": "string" + "source_url_list": [ + "string" + ], + "selector": "string" + } } -} */ const postWebDocument: ( dataset_id: string, @@ -199,9 +213,9 @@ const postWebDocument: ( /** * 导入QA文档 - * @param 参数 + * @param 参数 * file -} + } */ const postQADocument: ( dataset_id: string, @@ -323,5 +337,6 @@ export default { exportTableTemplate, postQADocument, postTableDocument, - exportDocument + exportDocument, + batchRefresh } diff --git a/ui/src/views/application/component/AccessSettingDrawer.vue b/ui/src/views/application/component/AccessSettingDrawer.vue index 8dd7a2e8d4..2e9b126fe2 100644 --- a/ui/src/views/application/component/AccessSettingDrawer.vue +++ b/ui/src/views/application/component/AccessSettingDrawer.vue @@ -28,6 +28,10 @@ +
+ 是否是订阅号 + +

回调地址

@@ -102,7 +106,14 @@ const { } = route as any const form = reactive({ - wechat: { app_id: '', app_secret: '', token: '', encoding_aes_key: '', callback_url: '' }, + wechat: { + app_id: '', + app_secret: '', + token: '', + encoding_aes_key: '', + is_personal: false, + callback_url: '' + }, dingtalk: { client_id: '', client_secret: '', callback_url: '' }, wecom: { app_id: '', diff --git a/ui/src/views/document/index.vue b/ui/src/views/document/index.vue index f23c0cc5ca..3471290961 100644 --- a/ui/src/views/document/index.vue +++ b/ui/src/views/document/index.vue @@ -23,6 +23,9 @@ 迁移 + + 重新向量化 + 设置 @@ -538,6 +541,19 @@ function deleteMulDocument() { }) } +function batchRefresh() { + const arr: string[] = [] + multipleSelection.value.map((v) => { + if (v) { + arr.push(v.id) + } + }) + documentApi.batchRefresh(id, arr, loading).then(() => { + MsgSuccess('批量重新向量化成功') + multipleTableRef.value?.clearSelection() + }) +} + function deleteDocument(row: any) { MsgConfirm( `是否删除文档:${row.name} ?`, From 0b64e7a56c6f6bb54e9782e256100238c30290fe Mon Sep 17 00:00:00 2001 From: wxg0103 <727495428@qq.com> Date: Sat, 14 Sep 2024 18:54:11 +0800 Subject: [PATCH 04/20] =?UTF-8?q?refactor:=20=E4=BC=98=E5=8C=96=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E5=90=8D=E6=A0=A1=E9=AA=8C=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --story=1016296 --user=王孝刚 【系统管理】用户创建限制太过于严格,与某些用户/企业的习惯或要求不符合 https://www.tapd.cn/57709429/s/1579177 --- .../impl/openai_model_provider/model/llm.py | 2 -- apps/users/serializers/user_serializers.py | 8 ++++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/llm.py b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py index ff80b0e50b..a78ecc0c00 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/model/llm.py +++ b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py @@ -40,8 +40,6 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], ** openai_api_base=model_credential.get('api_base'), openai_api_key=model_credential.get('api_key'), **optional_params, - streaming=True, - stream_usage=True, custom_get_token_ids=custom_get_token_ids ) return azure_chat_open_ai diff --git a/apps/users/serializers/user_serializers.py b/apps/users/serializers/user_serializers.py index bc18f97af4..4eb83bc227 100644 --- a/apps/users/serializers/user_serializers.py +++ b/apps/users/serializers/user_serializers.py @@ -132,8 +132,8 @@ class RegisterSerializer(ApiMixin, serializers.Serializer): max_length=20, min_length=6, validators=[ - validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"), - message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等") + validators.RegexValidator(regex=re.compile("^.{6,20}$"), + message="用户名字符数为 6-20 个字符") ]) password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), validators=[validators.RegexValidator(regex=re.compile( @@ -590,8 +590,8 @@ class UserInstance(ApiMixin, serializers.Serializer): max_length=20, min_length=6, validators=[ - validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"), - message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等") + validators.RegexValidator(regex=re.compile("^.{6,20}$"), + message="用户名字符数为 6-20 个字符") ]) password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), validators=[validators.RegexValidator(regex=re.compile( From b0f443f3d2495ac828c6af598c7461fc18fe9388 Mon Sep 
17 00:00:00 2001 From: shaohuzhang1 <80892890+shaohuzhang1@users.noreply.github.com> Date: Sat, 14 Sep 2024 21:48:45 +0800 Subject: [PATCH 05/20] =?UTF-8?q?feat:=20=E7=AE=80=E6=98=93=E5=BA=94?= =?UTF-8?q?=E7=94=A8=E9=A1=B5=E9=9D=A2=E5=8F=82=E6=95=B0=E4=BC=98=E5=8C=96?= =?UTF-8?q?=20(#1182)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../i_generate_human_message_step.py | 4 + .../impl/base_generate_human_message_step.py | 7 + .../i_reset_problem_step.py | 3 + .../impl/base_reset_problem_step.py | 7 +- ...application_problem_optimization_prompt.py | 18 +++ apps/application/models/application.py | 11 +- .../serializers/application_serializers.py | 18 ++- .../serializers/chat_message_serializers.py | 24 ++- .../swagger_api/application_api.py | 56 +++++-- ui/src/api/type/application.ts | 3 +- .../locales/lang/zh_CN/views/application.ts | 6 +- .../views/application/ApplicationSetting.vue | 137 +++++++++--------- .../component/CreateApplicationDialog.vue | 27 +++- .../component/ParamSettingDialog.vue | 126 ++++++++++------ ui/src/workflow/nodes/reranker-node/index.vue | 5 +- .../nodes/search-dataset-node/index.vue | 4 +- 16 files changed, 306 insertions(+), 150 deletions(-) create mode 100644 apps/application/migrations/0014_application_problem_optimization_prompt.py diff --git a/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py b/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py index ca2d00e0b5..fe6be7d2eb 100644 --- a/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py +++ b/apps/application/chat_pipeline/step/generate_human_message_step/i_generate_human_message_step.py @@ -37,6 +37,8 @@ class InstanceSerializer(serializers.Serializer): "最大携带知识库段落长度")) # 模板 prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词")) + system = serializers.CharField(required=False, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char("系统提示词(角色)")) # 补齐问题 padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("补齐问题")) # 未查询到引用分段 @@ -59,6 +61,7 @@ def execute(self, prompt: str, padding_problem_text: str = None, no_references_setting=None, + system=None, **kwargs) -> List[BaseMessage]: """ @@ -71,6 +74,7 @@ def execute(self, :param padding_problem_text 用户修改文本 :param kwargs: 其他参数 :param no_references_setting: 无引用分段设置 + :param system 系统提示称 :return: """ pass diff --git a/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py b/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py index 8b769c7700..68cfbbcb95 100644 --- a/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py +++ b/apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py @@ -9,6 +9,7 @@ from typing import List, Dict from langchain.schema import BaseMessage, HumanMessage +from langchain_core.messages import SystemMessage from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel from application.chat_pipeline.step.generate_human_message_step.i_generate_human_message_step import \ @@ -27,6 +28,7 @@ def execute(self, problem_text: str, prompt: str, padding_problem_text: str = None, no_references_setting=None, + system=None, **kwargs) -> List[BaseMessage]: prompt = prompt if 
(paragraph_list is not None and len(paragraph_list) > 0) else no_references_setting.get( 'value') @@ -35,6 +37,11 @@ def execute(self, problem_text: str, history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()] for index in range(start_index if start_index > 0 else 0, len(history_chat_record))] + if system is not None and len(system) > 0: + return [SystemMessage(system), *flat_map(history_message), + self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list, + no_references_setting)] + return [*flat_map(history_message), self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list, no_references_setting)] diff --git a/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py b/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py index ce30d96af3..fe80681190 100644 --- a/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py +++ b/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py @@ -29,6 +29,8 @@ class InstanceSerializer(serializers.Serializer): error_messages=ErrMessage.list("历史对答")) # 大语言模型 chat_model = ModelField(required=False, allow_null=True, error_messages=ErrMessage.base("大语言模型")) + problem_optimization_prompt = serializers.CharField(required=False, max_length=102400, + error_messages=ErrMessage.char("问题补全提示词")) def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]: return self.InstanceSerializer @@ -47,5 +49,6 @@ def _run(self, manage: PipelineManage): @abstractmethod def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None, + problem_optimization_prompt=None, **kwargs): pass diff --git a/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py b/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py index c0595d590f..3a32bbf021 100644 --- a/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py +++ b/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py @@ -21,6 +21,7 @@ class BaseResetProblemStep(IResetProblemStep): def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None, + problem_optimization_prompt=None, **kwargs) -> str: if chat_model is None: self.context['message_tokens'] = 0 @@ -30,8 +31,9 @@ def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = Non history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()] for index in range(start_index if start_index > 0 else 0, len(history_chat_record))] + reset_prompt = problem_optimization_prompt if problem_optimization_prompt else prompt message_list = [*flat_map(history_message), - HumanMessage(content=prompt.format(**{'question': problem_text}))] + HumanMessage(content=reset_prompt.replace('{question}', problem_text))] response = chat_model.invoke(message_list) padding_problem = problem_text if response.content.__contains__("") and response.content.__contains__(''): @@ -39,6 +41,9 @@ def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = Non response.content.index('') + 6:response.content.index('')] if padding_problem_data is not None and len(padding_problem_data.strip()) > 0: padding_problem = padding_problem_data + elif len(response.content) > 
0: + padding_problem = response.content + try: request_token = chat_model.get_num_tokens_from_messages(message_list) response_token = chat_model.get_num_tokens(padding_problem) diff --git a/apps/application/migrations/0014_application_problem_optimization_prompt.py b/apps/application/migrations/0014_application_problem_optimization_prompt.py new file mode 100644 index 0000000000..e2efc1097c --- /dev/null +++ b/apps/application/migrations/0014_application_problem_optimization_prompt.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-09-13 18:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0013_application_tts_type'), + ] + + operations = [ + migrations.AddField( + model_name='application', + name='problem_optimization_prompt', + field=models.CharField(blank=True, default='()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中', max_length=102400, null=True, verbose_name='问题优化提示词'), + ), + ] diff --git a/apps/application/models/application.py b/apps/application/models/application.py index c03bec1eb7..13bf3b6aa9 100644 --- a/apps/application/models/application.py +++ b/apps/application/models/application.py @@ -35,7 +35,7 @@ def get_dataset_setting_dict(): def get_model_setting_dict(): - return {'prompt': Application.get_default_model_prompt()} + return {'prompt': Application.get_default_model_prompt(), 'no_references_prompt': '{question}'} class Application(AppModelMixin): @@ -54,8 +54,13 @@ class Application(AppModelMixin): work_flow = models.JSONField(verbose_name="工作流数据", default=dict) type = models.CharField(verbose_name="应用类型", choices=ApplicationTypeChoices.choices, default=ApplicationTypeChoices.SIMPLE, max_length=256) - tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True) - stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True) + problem_optimization_prompt = models.CharField(verbose_name="问题优化提示词", max_length=102400, blank=True, + null=True, + default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中") + tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False, + blank=True, null=True) + stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False, + blank=True, null=True) tts_model_enable = models.BooleanField(verbose_name="语音合成模型是否启用", default=False) stt_model_enable = models.BooleanField(verbose_name="语音识别模型是否启用", default=False) tts_type = models.CharField(verbose_name="语音播放类型", max_length=20, default="BROWSER") diff --git a/apps/application/serializers/application_serializers.py b/apps/application/serializers/application_serializers.py index a04af8999e..884b14e706 100644 --- a/apps/application/serializers/application_serializers.py +++ b/apps/application/serializers/application_serializers.py @@ -120,7 +120,12 @@ class DatasetSettingSerializer(serializers.Serializer): class ModelSettingSerializer(serializers.Serializer): - prompt = serializers.CharField(required=True, max_length=2048, error_messages=ErrMessage.char("提示词")) + prompt = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char("提示词")) + system = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400, + error_messages=ErrMessage.char("角色提示词")) + 
no_references_prompt = serializers.CharField(required=True, max_length=102400, allow_null=True, allow_blank=True, + error_messages=ErrMessage.char("无引用分段提示词")) class ApplicationWorkflowSerializer(serializers.Serializer): @@ -174,7 +179,7 @@ class ApplicationSerializer(serializers.Serializer): error_messages=ErrMessage.char("应用描述")) model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, error_messages=ErrMessage.char("模型")) - multiple_rounds_dialogue = serializers.BooleanField(required=True, error_messages=ErrMessage.char("多轮对话")) + dialogue_number = serializers.BooleanField(required=True, error_messages=ErrMessage.char("会话次数")) prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096, error_messages=ErrMessage.char("开场白")) dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True), @@ -185,6 +190,8 @@ class ApplicationSerializer(serializers.Serializer): model_setting = ModelSettingSerializer(required=True) # 问题补全 problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全")) + problem_optimization_prompt = serializers.CharField(required=False, max_length=102400, + error_messages=ErrMessage.char("问题补全提示词")) # 应用类型 type = serializers.CharField(required=True, error_messages=ErrMessage.char("应用类型"), validators=[ @@ -364,8 +371,8 @@ class Edit(serializers.Serializer): error_messages=ErrMessage.char("应用描述")) model_id = serializers.CharField(required=False, allow_blank=True, allow_null=True, error_messages=ErrMessage.char("模型")) - multiple_rounds_dialogue = serializers.BooleanField(required=False, - error_messages=ErrMessage.boolean("多轮会话")) + dialogue_number = serializers.IntegerField(required=False, + error_messages=ErrMessage.boolean("多轮会话")) prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096, error_messages=ErrMessage.char("开场白")) dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True), @@ -430,13 +437,14 @@ def insert_simple(self, application: Dict): def to_application_model(user_id: str, application: Dict): return Application(id=uuid.uuid1(), name=application.get('name'), desc=application.get('desc'), prologue=application.get('prologue'), - dialogue_number=3 if application.get('multiple_rounds_dialogue') else 0, + dialogue_number=application.get('dialogue_number', 0), user_id=user_id, model_id=application.get('model_id'), dataset_setting=application.get('dataset_setting'), model_setting=application.get('model_setting'), problem_optimization=application.get('problem_optimization'), type=ApplicationTypeChoices.SIMPLE, model_params_setting=application.get('model_params_setting', {}), + problem_optimization_prompt=application.get('problem_optimization_prompt', None), work_flow={} ) diff --git a/apps/application/serializers/chat_message_serializers.py b/apps/application/serializers/chat_message_serializers.py index 8fbf0dbbc6..44f31759c9 100644 --- a/apps/application/serializers/chat_message_serializers.py +++ b/apps/application/serializers/chat_message_serializers.py @@ -60,6 +60,17 @@ def __init__(self, self.chat_record_list: List[ChatRecord] = [] self.work_flow_version = work_flow_version + @staticmethod + def get_no_references_setting(dataset_setting, model_setting): + no_references_setting = dataset_setting.get( + 'no_references_setting', { + 'status': 'ai_questioning', + 'value': '{question}'}) + if no_references_setting.get('status') == 
'ai_questioning': + no_references_prompt = model_setting.get('no_references_prompt', '{question}') + no_references_setting['value'] = no_references_prompt if len(no_references_prompt) > 0 else "{question}" + return no_references_setting + def to_base_pipeline_manage_params(self): dataset_setting = self.application.dataset_setting model_setting = self.application.model_setting @@ -80,8 +91,13 @@ def to_base_pipeline_manage_params(self): 'history_chat_record': self.chat_record_list, 'chat_id': self.chat_id, 'dialogue_number': self.application.dialogue_number, + 'problem_optimization_prompt': self.application.problem_optimization_prompt if self.application.problem_optimization_prompt is not None and len( + self.application.problem_optimization_prompt) > 0 else '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中', 'prompt': model_setting.get( - 'prompt') if 'prompt' in model_setting else Application.get_default_model_prompt(), + 'prompt') if 'prompt' in model_setting and len(model_setting.get( + 'prompt')) > 0 else Application.get_default_model_prompt(), + 'system': model_setting.get( + 'system', None), 'model_id': model_id, 'problem_optimization': self.application.problem_optimization, 'stream': True, @@ -89,11 +105,7 @@ def to_base_pipeline_manage_params(self): self.application.model_params_setting.keys()) == 0 else self.application.model_params_setting, 'search_mode': self.application.dataset_setting.get( 'search_mode') if 'search_mode' in self.application.dataset_setting else 'embedding', - 'no_references_setting': self.application.dataset_setting.get( - 'no_references_setting') if 'no_references_setting' in self.application.dataset_setting else { - 'status': 'ai_questioning', - 'value': '{question}', - }, + 'no_references_setting': self.get_no_references_setting(self.application.dataset_setting, model_setting), 'user_id': self.application.user_id } diff --git a/apps/application/swagger_api/application_api.py b/apps/application/swagger_api/application_api.py index e153f6279a..d05fbb0478 100644 --- a/apps/application/swagger_api/application_api.py +++ b/apps/application/swagger_api/application_api.py @@ -40,15 +40,15 @@ def get_request_body_api(): def get_response_body_api(): return openapi.Schema( type=openapi.TYPE_OBJECT, - required=['id', 'name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'user_id', 'status', 'create_time', + required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'user_id', 'status', 'create_time', 'update_time'], properties={ 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"), 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数", + description="多轮对话次数"), 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), 'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), title="示例列表", description="示例列表"), @@ -164,8 +164,8 @@ def get_request_body_api(): 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), 'model_id': openapi.Schema(type=openapi.TYPE_STRING, 
title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数", + description="多轮对话次数"), 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), 'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), @@ -176,7 +176,22 @@ def get_request_body_api(): description="是否开启问题优化", default=True), 'icon': openapi.Schema(type=openapi.TYPE_STRING, title="icon", description="icon", default="/ui/favicon.ico"), - 'work_flow': ApplicationApi.WorkFlow.get_request_body_api() + 'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型", + description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"), + 'work_flow': ApplicationApi.WorkFlow.get_request_body_api(), + 'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词', + description="问题优化提示词", + default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中"), + 'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID", + description="文字转语音模型ID"), + 'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id", + description="语音转文字模型id"), + 'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启", + description="语音转文字是否开启"), + 'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启", + description="语音转文字是否开启"), + 'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型", + description="文字转语音类型") } ) @@ -248,6 +263,11 @@ def get_request_body_api(): '\n问题:' '\n{question}')), + 'system': openapi.Schema(type=openapi.TYPE_STRING, title="系统提示词(角色)", + description="系统提示词(角色)"), + 'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING, title="无引用分段提示词", + default="{question}", description="无引用分段提示词") + } ) @@ -267,14 +287,14 @@ class Create(ApiMixin): def get_request_body_api(): return openapi.Schema( type=openapi.TYPE_OBJECT, - required=['name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'dataset_setting', 'model_setting', - 'problem_optimization'], + required=['name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting', + 'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type'], properties={ 'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"), 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"), 'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"), - "multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话", - description="是否开启多轮对话"), + "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数", + description="多轮对话次数"), 'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"), 'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING), @@ -284,8 +304,20 @@ def get_request_body_api(): 'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化", description="是否开启问题优化", default=True), 'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型", - description="应用类型 简易:SIMPLE|工作流:WORK_FLOW") - + description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"), + 'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词', + description="问题优化提示词", + default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中"), + 
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID", + description="文字转语音模型ID"), + 'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id", + description="语音转文字模型id"), + 'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启", + description="语音转文字是否开启"), + 'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启", + description="语音转文字是否开启"), + 'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型", + description="文字转语音类型") } ) diff --git a/ui/src/api/type/application.ts b/ui/src/api/type/application.ts index 24215a5fbb..97be3b2579 100644 --- a/ui/src/api/type/application.ts +++ b/ui/src/api/type/application.ts @@ -4,12 +4,13 @@ interface ApplicationFormType { name?: string desc?: string model_id?: string - multiple_rounds_dialogue?: boolean + dialogue_number?: number prologue?: string dataset_id_list?: string[] dataset_setting?: any model_setting?: any problem_optimization?: boolean + problem_optimization_prompt?: string icon?: string | undefined type?: string work_flow?: any diff --git a/ui/src/locales/lang/zh_CN/views/application.ts b/ui/src/locales/lang/zh_CN/views/application.ts index 8d0888a157..5554943278 100644 --- a/ui/src/locales/lang/zh_CN/views/application.ts +++ b/ui/src/locales/lang/zh_CN/views/application.ts @@ -104,8 +104,10 @@ export default { } }, prompt: { - defaultPrompt: - '已知信息:\n{data}\n回答要求:\n- 请使用简洁且专业的语言来回答用户的问题。\n- 如果你不知道答案,请回答“没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作”。\n- 避免提及你是从已知信息中获得的知识。\n- 请保证答案与已知信息中描述的一致。\n- 请使用 Markdown 语法优化答案的格式。\n- 已知信息中的图片、链接地址和脚本语言请直接返回。\n- 请使用与问题相同的语言来回答。\n问题:\n{question}', + defaultPrompt: `已知信息:{data} +用户问题:{question} +回答要求: + - 请使用中文回答用户问题`, defaultPrologue: '您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?' 
} diff --git a/ui/src/views/application/ApplicationSetting.vue b/ui/src/views/application/ApplicationSetting.vue index 9ef5cef2cb..9a49185442 100644 --- a/ui/src/views/application/ApplicationSetting.vue +++ b/ui/src/views/application/ApplicationSetting.vue @@ -61,10 +61,7 @@ /> - + + + + - - + + + + + + - - - - + - 浏览器播放(免费) - TTS模型 + 浏览器播放(免费) + TTS模型 ({ name: '', desc: '', model_id: '', - multiple_rounds_dialogue: false, + dialogue_number: 1, prologue: t('views.application.prompt.defaultPrologue'), dataset_id_list: [], dataset_setting: { @@ -555,10 +566,14 @@ const applicationForm = ref({ } }, model_setting: { - prompt: defaultPrompt + prompt: defaultPrompt, + system: '你是 xxx 小助手', + no_references_prompt: '{question}' }, model_params_setting: {}, problem_optimization: false, + problem_optimization_prompt: + '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中', stt_model_id: '', tts_model_id: '', stt_model_enable: false, @@ -574,20 +589,6 @@ const rules = reactive>({ message: t('views.application.applicationForm.form.appName.placeholder'), trigger: 'blur' } - ], - model_id: [ - { - required: false, - message: t('views.application.applicationForm.form.aiModel.placeholder'), - trigger: 'change' - } - ], - 'model_setting.prompt': [ - { - required: true, - message: t('views.application.applicationForm.form.prompt.placeholder'), - trigger: 'blur' - } ] }) const modelOptions = ref(null) @@ -623,11 +624,11 @@ const openAIParamSettingDialog = () => { } const openParamSettingDialog = () => { - ParamSettingDialogRef.value?.open(applicationForm.value.dataset_setting) + ParamSettingDialogRef.value?.open(applicationForm.value) } function refreshParam(data: any) { - applicationForm.value.dataset_setting = data + applicationForm.value = { ...applicationForm.value, ...data } } function refreshForm(data: any) { @@ -666,6 +667,8 @@ function getDetail() { applicationForm.value.stt_model_id = res.data.stt_model applicationForm.value.tts_model_id = res.data.tts_model applicationForm.value.tts_type = res.data.tts_type + applicationForm.value.model_setting.no_references_prompt = + res.data.model_setting.no_references_prompt || '' }) } diff --git a/ui/src/views/application/component/CreateApplicationDialog.vue b/ui/src/views/application/component/CreateApplicationDialog.vue index 0e04346a56..8e78b9a325 100644 --- a/ui/src/views/application/component/CreateApplicationDialog.vue +++ b/ui/src/views/application/component/CreateApplicationDialog.vue @@ -104,7 +104,7 @@ const applicationForm = ref({ name: '', desc: '', model_id: '', - multiple_rounds_dialogue: false, + dialogue_number: 1, prologue: t('views.application.prompt.defaultPrologue'), dataset_id_list: [], dataset_setting: { @@ -118,9 +118,19 @@ const applicationForm = ref({ } }, model_setting: { - prompt: defaultPrompt + prompt: defaultPrompt, + system: '你是 xxx 小助手', + no_references_prompt: '{question}' }, + model_params_setting: {}, problem_optimization: false, + problem_optimization_prompt: + '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中', + stt_model_id: '', + tts_model_id: '', + stt_model_enable: false, + tts_model_enable: false, + tts_type: 'BROWSER', type: 'SIMPLE' }) @@ -147,7 +157,7 @@ watch(dialogVisible, (bool) => { name: '', desc: '', model_id: '', - multiple_rounds_dialogue: false, + dialogue_number: 1, prologue: t('views.application.prompt.defaultPrologue'), dataset_id_list: [], dataset_setting: { @@ -161,9 +171,18 @@ watch(dialogVisible, (bool) => { } }, model_setting: { - prompt: defaultPrompt + prompt: defaultPrompt, + 
system: '你是 xxx 小助手', + no_references_prompt: '{question}' }, + model_params_setting: {}, problem_optimization: false, + problem_optimization_prompt: '', + stt_model_id: '', + tts_model_id: '', + stt_model_enable: false, + tts_model_enable: false, + tts_type: 'BROWSER', type: 'SIMPLE' } applicationFormRef.value?.clearValidate() diff --git a/ui/src/views/application/component/ParamSettingDialog.vue b/ui/src/views/application/component/ParamSettingDialog.vue index 9b81a9858f..7b953ceaee 100644 --- a/ui/src/views/application/component/ParamSettingDialog.vue +++ b/ui/src/views/application/component/ParamSettingDialog.vue @@ -14,7 +14,11 @@ - +

@@ -43,7 +47,10 @@ }} - +

{{ $t('views.application.applicationForm.dialogues.hybridSearch') }} @@ -69,7 +76,7 @@

{{ $t('views.application.applicationForm.dialogues.continueQuestioning') }}

- - -
-
+

{{ $t('views.application.applicationForm.dialogues.provideAnswer') }}

+ + + + + + + +
@@ -195,15 +214,21 @@ const defaultValue = { designated_answer: t('views.application.applicationForm.dialogues.designated_answer') } +const defaultPrompt = `()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中` + const form = ref({ - search_mode: 'embedding', - top_n: 3, - similarity: 0.6, - max_paragraph_char_number: 5000, - no_references_setting: { - status: 'ai_questioning', - value: '{question}' - } + dataset_setting: { + search_mode: 'embedding', + top_n: 3, + similarity: 0.6, + max_paragraph_char_number: 5000, + no_references_setting: { + status: 'ai_questioning', + value: '{question}' + } + }, + problem_optimization: false, + problem_optimization_prompt: defaultPrompt }) const noReferencesform = ref({ @@ -236,14 +261,18 @@ const isWorkflowType = ref(false) watch(dialogVisible, (bool) => { if (!bool) { form.value = { - search_mode: 'embedding', - top_n: 3, - similarity: 0.6, - max_paragraph_char_number: 5000, - no_references_setting: { - status: 'ai_questioning', - value: '' - } + dataset_setting: { + search_mode: 'embedding', + top_n: 3, + similarity: 0.6, + max_paragraph_char_number: 5000, + no_references_setting: { + status: 'ai_questioning', + value: '{question}' + } + }, + problem_optimization: false, + problem_optimization_prompt: '' } noReferencesform.value = { ai_questioning: defaultValue['ai_questioning'], @@ -255,9 +284,16 @@ watch(dialogVisible, (bool) => { const open = (data: any, type?: string) => { isWorkflowType.value = isWorkFlow(type) - form.value = { ...form.value, ...cloneDeep(data) } - noReferencesform.value[form.value.no_references_setting.status] = - form.value.no_references_setting.value + form.value = { + dataset_setting: { ...data.dataset_setting }, + problem_optimization: data.problem_optimization, + problem_optimization_prompt: data.problem_optimization_prompt + } + if (!isWorkflowType.value) { + noReferencesform.value[form.value.dataset_setting.no_references_setting.status] = + form.value.dataset_setting.no_references_setting.value + } + dialogVisible.value = true } @@ -270,8 +306,8 @@ const submit = async (formEl: FormInstance | undefined) => { if (!formEl) return await formEl.validate((valid, fields) => { if (valid) { - form.value.no_references_setting.value = - noReferencesform.value[form.value.no_references_setting.status] + form.value.dataset_setting.no_references_setting.value = + noReferencesform.value[form.value.dataset_setting.no_references_setting.status] emit('refresh', form.value) dialogVisible.value = false } @@ -281,9 +317,9 @@ const submit = async (formEl: FormInstance | undefined) => { function changeHandle(val: string) { if (val === 'keywords') { - form.value.similarity = 0 + form.value.dataset_setting.similarity = 0 } else { - form.value.similarity = 0.6 + form.value.dataset_setting.similarity = 0.6 } } diff --git a/ui/src/workflow/nodes/reranker-node/index.vue b/ui/src/workflow/nodes/reranker-node/index.vue index 08d4be5f58..4aaaef2707 100644 --- a/ui/src/workflow/nodes/reranker-node/index.vue +++ b/ui/src/workflow/nodes/reranker-node/index.vue @@ -22,6 +22,7 @@ :gutter="8" style="margin-bottom: 8px" v-for="(reranker_reference, index) in form_data.reranker_reference_list" + :key="index" > >([]) const modelOptions = ref(null) const openParamSettingDialog = () => { - ParamSettingDialogRef.value?.open(form_data.value.dataset_setting, 'WORK_FLOW') + ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW') } const deleteCondition = (index: number) => { const list = 
cloneDeep(props.nodeModel.properties.node_data.reranker_reference_list) @@ -242,7 +243,7 @@ const form_data = computed({ } }) function refreshParam(data: any) { - set(props.nodeModel.properties.node_data, 'reranker_setting', data) + set(props.nodeModel.properties.node_data, 'reranker_setting', data.dataset_setting) } function getModel() { if (id) { diff --git a/ui/src/workflow/nodes/search-dataset-node/index.vue b/ui/src/workflow/nodes/search-dataset-node/index.vue index c065668a4a..78d1a4f0f2 100644 --- a/ui/src/workflow/nodes/search-dataset-node/index.vue +++ b/ui/src/workflow/nodes/search-dataset-node/index.vue @@ -159,11 +159,11 @@ const datasetList = ref([]) const datasetLoading = ref(false) function refreshParam(data: any) { - set(props.nodeModel.properties.node_data, 'dataset_setting', data) + set(props.nodeModel.properties.node_data, 'dataset_setting', data.dataset_setting) } const openParamSettingDialog = () => { - ParamSettingDialogRef.value?.open(form_data.value.dataset_setting, 'WORK_FLOW') + ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW') } function removeDataset(id: any) { From 467b86489596a5ea4380c73ed14bb37f2bdc6392 Mon Sep 17 00:00:00 2001 From: CaptainB Date: Sat, 14 Sep 2024 22:43:30 +0800 Subject: [PATCH 06/20] =?UTF-8?q?refactor:=20=E8=AE=AF=E9=A3=9E=E6=8E=A5?= =?UTF-8?q?=E5=8F=A3=E6=A0=A1=E9=AA=8C=E5=8F=91=E9=80=81demo=E9=AA=8C?= =?UTF-8?q?=E8=AF=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../xf_model_provider/model/iat_mp3_16k.mp3 | Bin 0 -> 17876 bytes .../impl/xf_model_provider/model/stt.py | 14 +++++++------- .../impl/xf_model_provider/model/tts.py | 15 +++++++-------- 3 files changed, 14 insertions(+), 15 deletions(-) create mode 100644 apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/xf_model_provider/model/iat_mp3_16k.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..75e744c8ff5188208a3797561efb2e096d4fa015 GIT binary patch literal 17876 zcmeI3Wl$X3zV-(h+}$052PY6h&|z>15M&^@yORWWcTEVc!Civ8TY@LJ69}51{ciTD z+PP<+eNNSTKkR$!R@SF~ukNYo-}5{@OS%R)wdeu;A8f!sxDcTCYzpFv(!8A9TwEIe ztJ;6(w(hz3K%jqc{jZ|=c{9JberoZ{ub*1{-PNxg{#5)|4u5L#cUQl1_*3y;IsB=` z-(CI6;ZMbXJNQ=Jg)RbA5ua^Abf(4~73Vgki^0ddBDrz2kc{QhHAD>e4xjd}{1KKb5BH<< z>$HI|kzu36Z$cjj^Sx5Zb<}O}26?F5h^MG%H#&}K&EWA&6M8J!AAf|3-;KCX?$8k# z&@0t#irqN!_mN))`wxF3CmsL9{|v?Tt*5A79UF zwdklw8cQXv;XD~|NTu32zvlwvZwS8#yrRmZ6GC~`=ZSm!PQt-wTN3N+cm;-Mrx5jZ zjQs#T-~Wp{4eG+KO5e(tF>l(u53lj!W)Nv2T$$6tuUtro%QK)Xp;_w8r1&wUuu1cQ zw-}PUc5b@4QR^5asPfesXPPJ`mHg(~BHaN20c5n-4CtRXnao7h&`xjlM=~{1c=NdZ z^Ds=7rkkmvDEoHKuyvg!-tkFXn1tcTL!GKoxp0VPyPfYioqv1(0Hon#J%#_c8l=G3Fi4vF=qcRP}-_ zmdLTCguqxskJ`1d3Qsnk?ATMKOhQ~nShZd|U?aGjcCLQZ&&f>p8MWXfVBfZRc{v6c z{d_n#pTB5L$}Sc-G|nOFMMBQ}_N|3BmxFLraQHo!2n}Ae(F;i~P_*%jOUwaQ;m=6i zT&JPzS4T3cpqlScSI?yc&P@5iWetG@O`RuI$i@OPy`VLOLT1il!Ow|vIInE=oL+<6fwP6@spuQCw&v{20BIwcO)Bl!aR z=w*WlLYP(|UsOv?tYgF?Lj}=9qel^Z3VqX_AUNJPxXW^w$!kXNdeLkQmAGsvt zX8_tdP`0(^)wH}Dj_djQ4jurO9!(I{<#klYi-B5#qvw-=bqfQPImtOaq~&rq1Bk+Q z0$#lFFb)J+1lre!N?L9iw6tr!*vP-`rg>^3`G$ap(Cw$pdLLGwdyq&aV9 zu@yUy!A+;Hj6%Rsf0+&cB>sz*O0Z5uGXQLSL`@-^=<e=X@iTgz1Ec znN`45uew-Oetc<`^<`>S(NL?GyqGK`=JLm-|t2;ffS1(}yuo1T@^3@-)(SQ}{FI#nwiqV(2>d@`1$^x+p~Q&n zhTv+lQ|&mc-*cr?sjUqObU`!^I^UKu=zkpIx$w~5zPnEKA0vBCUKZ|V&IC|Uj8@O) zeZ64&y8oo1(8_gcDlpm9@bZd9QWMAtGHf=Uyj?&{q>5wbhxHZd%)X5_QpK%tU!CQ9 
[... remaining GIT binary patch data for iat_mp3_16k.mp3 omitted ...]

Date: Sat, 14 Sep 2024 22:53:40 +0800
Subject: [PATCH 07/20] =?UTF-8?q?refactor:=20=E8=B1=86=E5=8C=85=E6=8E=A5?=
 =?UTF-8?q?=E5=8F=A3=E6=A0=A1=E9=AA=8C=E5=8F=91=E9=80=81demo=E9=AA=8C?=
 =?UTF-8?q?=E8=AF=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../model/iat_mp3_16k.mp3                        | Bin 0 -> 17876 bytes
 .../volcanic_engine_model_provider/model/stt.py  |  13 +++++--------
 .../volcanic_engine_model_provider/model/tts.py  |  11 ++---------
 3 files changed, 7 insertions(+), 17 deletions(-)
 create mode 100644 apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3

diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3 b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/iat_mp3_16k.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..75e744c8ff5188208a3797561efb2e096d4fa015
GIT binary patch
literal 17876
[... GIT binary patch data for iat_mp3_16k.mp3 omitted; section ends here ...]
znN`45uew-Oetc<`^<`>S(NL?GyqGK`=JLm-|t2;ffS1(}yuo1T@^3@-)(SQ}{FI#nwiqV(2>d@`1$^x+p~Q&n zhTv+lQ|&mc-*cr?sjUqObU`!^I^UKu=zkpIx$w~5zPnEKA0vBCUKZ|V&IC|Uj8@O) zeZ64&y8oo1(8_gcDlpm9@bZd9QWMAtGHf=Uyj?&{q>5wbhxHZd%)X5_QpK%tU!CQ9 zC851pui9;5DLW7vRC8kJi-Y8Aq7L$}!bO9}<(d-*%h1Gda)RYULRK2KIhcmZInul)IyTtZTb*lE(kr_qsybFFaPvZ2GUUeAuN zUxQ^>dVp}0Faq2?*S;VhQB%Dnck)W87%79@+mDSI5VZ2uXwPhT60T5nsbZxT zSz_G5X7LK(MzXR}{i%o)<*Z(>Q*-*^n;xIndJ>8?3+VHVLh#mScRpTCm_v9*qCu9o zPm9YImiH|wlt|4K%`cnB`>KXrH9FIrK}I`G5uNeMv?57vlQXcVVGd3x(VIzy5)v&B z2s3!RqprkBKOSr;jG2rpDI6}=Qz{(58kU>8$oYR@5)SSO%Qdx}H28ujmaqKbWk;n; z_Yajs&N{lFzN+F`Fbc(b&45ubfB29a0lvQfvE>=xCWH$ zpYAhn>6BH3JDJ|RQDjlBy&g6%PMF@$f=(_>HM;cq%EXv65m(dM&ruB9iukcA5+AV-x@?CcuL3@Ecp7nqiM7s zZCEEJi9;Zq-vA+k?;}JI6Vj@ZN%g&7-yY`D6QdF#@_D~gK#wZ*l8L8lF3IPwr@Vx0-w6&{I)T z8SJFtzN@vrj7Xx4O5r((I6ufK8f*uy>I>qn{|V|I>}QMoQUH?A)9rqRmzI-jwUx`d zmeyfnth|4C&vnGv&02kNBCjTm0pI;;ANnvtzvViZ4v*8zQwY`dCS)(<#pJoq@b=7) zc>ac4*B7#8NxHa|Ezyk51iiNoA$E}mVOtMkUFmkt3|Goj39cR)XQrQv;yK z?ysd=4Rr-BOJQwwZw#eVxUx0L=TtVBwYX+*&3LLfOV0r*^P^q)rSEsSkw$v)&pt1< zc=_b$B#zbTs>Usw;OYDauAhXzS6@>&>iiRym$zK+xi;7~h|;2mhpAAM;#vGtE2^LP zOZDf}+ON}kx`uSps{Y7jXEjrxB=BcvNF7LiBg+F@gsMkyFi2^|+W|k?2626YS=Th| z!r7?e8%nvmrle7%Luhy4f#8e=Lyhxjr$IQ{}gH zsnq}Jb75gei#PGgJ_r}Rj_P?78xAtZG{#3)Ae6om1i$)gQ<${D5!7QdABv3m~DuWEp{+T*@w)vlQj@+8_V11nHKsE0A|domkFsYv2(Br&r{cemo;z_ zEl$R0x9o&|o3hp=0Tu_)@3&XvkPHRz=%d!$QP6@lZo@9YFbrgx@0))G9gixwvLY% zgIQJpVlLlOYYjaA@aw2RU3LOG9cM&H+HaJBx_up%XiXh% z+VPJJXOapNPB9<-j4&qb;^&J(NV91nF*!E$S*8qqyK|f}Y9*Mw*5+5eeL%o_`-O>2 zy`(BE4M>-(#M;%~l7zCNZRg9ioM-u-i~&;Ae5`Ofh&gIG3`BTYJL=zD!px9;WcKJH ztKmT;+;Tr=A?MF>5xY&2kWe7Vpgyc*0~|yGyfg+TL`HM%=B;4tctMJ8&`Ku_$`t9@ z;62wKhn1#-JIn+0t!yp!GnyY=5ehd=<7$77%F{1Rj|>bZD!B+MKP}a!vT+9* z>&kvp(Ss{MMhXGaCe>HkT!Z42NZB#2ZGF- zF#E-dp+5~#Gh0wsS@<2&aPn9E7nTd z*&5ErW^;N|m}34QYM8}5KS`*G{V6=x53grgHyE|=TCn5oX__~SV{5yWb@F9I9+W|3 z+IkzWdz|l9CTUIgqc~A>saX5W~EWF zBnA)nC*LzGa}7%-*vdy}C36IZWwb~`D`E{_Qd8z2!U?cK|IO(RaO>Zmf z9xf*>Cn~H4F4=iM7k}!cN|G$*B%6-B+7VOZcDxqX9Bqtmtv2U4kaLyrIQQ!l@?FrF z1fzSQadlyI#*PoY(?;XHUl~G1Tg^}B2H?Hk)n}HNOFVu2u$Uy>1kG9dz=t}udk{V0 z!&qJ$wcXjnfX6Nz-D;LLqZFtDn^VU27}w>%(WOTO^BiS(sGm7C5wbLJ>%*MS1p?oU zD>qYM;E;TYi|9L)B1+Z583+-B3`4NQ<0b-lsYDScvQQl4wzz0=5t)Uc-2pTj&sQZ! zc*jzDJDtC1A$BoOpo|mDNKq)MoXTJ+4AZnor>h!(z#05Ygo{otA zn}KZN+sK33&>2HCso3Yy;!g!pgifkMJEKi_-&vir2&(h2*EG@-O4l}Syo;3#F~=m# zu4E)cS4}YfkdlqHs1p+;5w-Txt*9*SBt_dwJvVl)MB^Ndj+{wvm{|kU=Wc?_7vd}0 zH-9{^tgFiPZEXUC@xJWN%~aV^f{Q=>%>Ek}T)YX19MVQ@^A?_AwXli+A-=`dYnBpG z1;(L#!+|dS2W{@NT~5Q<#RQ9Ycf5srcJXwp%J*C{++786kZH_tx$OP&#n>zsy!=ug zuo_gjQ%_U#h;uF>39=g_IrN_P(N%JSSP%yCz*s6TKYJJ*23BCd^tZ%UeCV?I{#EG- zjpOfhjg#kKLF5EU5>hr{1!QWFbS%~`Pq7t+cv)K0nfL?uoOPv>MW(7Cun)imujRUK zJ$OzsZivrb`21$O){3q6jZQ z1qC?s4g5PH?f7$$DZ;wo5}j7gwARI{R4a})b+?RzOSOA0d3IJiPskUoMu9OCtg}TO z=+sE$E??ULjV|`DzP@X0%Zq|_ zLsOF~2r>{TqpcCG!)ZY-98Dpc_=vI4R=6RS(+_KE295WvJf+&}r{xM}q<6!+kb(B8 zXq>82R^MSr{)`z4j@bGSd2}omUX(s_-Dh+Zq)1%lYE38)oZ5lquzN03+MGQ<(Z#;k z%Iy{sc~bg5OmA;$6~}K{@19FtvHN|KctC>yFZunq(K6rHEO~O&cF{OlxCNun=EQO+ zuJ={+^Xu=`$$)c9(o@GeW5a$K>0&sVI1FM|DtIJU9S#x{dtqZZ-%=%dOo8IahDnC* z{uheJ=b|}P#nJv>B$j3fkGAtI873pLAt8|=rO2?L57UCUpgA!Nc$`40VlQYnr8Z`? 
zrWrEP8cxVK`^%27dh`~5)3OYDN+rQzxY#UV^z_L%R20R|;H|f5r};uz306?#d#+-+ z4Wf*;?o%DQRvZ%vkw^*6OlB;SXI<@6WquWTQ5cwUsoB2La#E#S$~d4Gu3h0Kv^dE- zAPjy6WY%aG3zhX~#J6-@BV03Ns7(qz^kZmb%m^Cd-@WoAwEf{oZVACUCznkwM&v0o zRWIE4+fNh6w^>BL+Q~0N^F22F(T$w8IZ}G4hjh?%)m-7rG0lM%H-by@oma!zN6`>) zFd}1Sb~q%Tj4V@BDVR421AocFnuV3X1KjIE8E7C7)MuzdM}joK7s%*9Clx#LVBfSF zzUiI|`a~VgMZudItm6%dwy^a47$Pnu^bs+(Jr9*BS0X`KU!dRQU^3Mx^DwifeFkQi zBm8ViySp>Rk@T&;$nlFsqO$husJ-LSZuQ~t0(N(oy8Y8uGsK3_UJ(kT6A4kHbghS* z5|xYI!DPLt%n!D4i|De9h-sJx2P$yzZlcVYXbf@Z=ifC5{lIXfgpLHJtb2{-qe7c_ zpVX3QEzfX-jKA0Ox!b!IF0TV>j?g(8f@CvVgOUle3pCQDij#)sIz{Z5B3EMZqo&E) zPvTCd^JmF_+;dS<7_DU|8}!=MC9BV$1#@%CbLO1#L+K|NZUajc-_6QNmr*d&n4CQl zm+J?rCyH1idBX&Bt4clral@L66^k;wGmKf$e51folA`3}q?)JYh~NmpfseNUaN)_Q zT0IY!NX#D4z_7DpkXKOi#of#=stOEm*`bm^tw?5tr9c2Lt+8gfULc30gM!r6P|Ae@ zvd}+66{C;|Xuv{+hkx-SHc*_wUKbUm++DE}1j4yfZm8cLy*XY6zTaHTHh2xzYKtL0 z6CGy~>$N?2(&G(?>fdv@F{{m_3oK$Bw(h$hdhPD+`XQcO+q8Qge{cPK5xvi`zPS0E zZyVRj?9BPScg#7|B1x58!_gN#R*&!`-#wvY#8{!I5`q_6my1bb!OfOiF?0>`3-X)K zTk`8rl75*rc1FB=?nF+cc_BdPQvQ%eut6|;Kuw6oJ9$If^|7gZE=ibJBcn2}@1^;p zPopR>BIi-LOOg-Arb=_w5#9wJ{iIKYcQy6!WsHXM2j=8R3wq6if+ooIulWwzhF$@u z-&Z4TK2*7@u`fz9q8yLqN$V)x-E(EKsd?vLEKXfn#&>re5+S4_s2wDEhc2AX)$KFyaZ$@Na=bG}f7pi}vVS0h_` zVMe0$y@N#U_(#pK-=mi9(p3W8(%z24bB# zwbHD^>6h865b=E!QaeqS;^DS*whs5*EQug{mjG-$lL;s` z9bzn!|7j(oNQ&O(Z<|-FKACriQhs|WX58M_r=6%qbOr*SUroF=ctiR;j$6s9 z-nZ9HI_^bPW6uP^qPT)+BTlzwW7Xf%nNug(&TX!=yL#CmKViU%k;!2HuvRp{V* zpPS~Ion43Zo>nZ#^3Mqh`itA%HR|!DNW}*C%=K*r?M-ifjrnntjh4Ne%hNqoq|6TxLa;aQ%EJD>FSWACwW6M znv6sthyQwaWw9N&v{KRj%T<|%hJ|_PqHgH#``Go2r)Xn$Q~gxm71OmVLF`7k6Q-&< z2EJ%$U_~PcEVAZke@$h(Tr-tHG33Wg20B{|pV77Q_pDC1T$!zH zl9uHAA@EU8Pj)dZ%`hnNuyX~;IH|P(0N5UsL*VqkN_eQ%H+pnbFZtutF3ybg+W5uo zb^rILi?;6bnoA;*6)H4}I4T}|A8iVU3F(1+nRWp9;red~{_869U+R0mrXD!S4P6{Y z#%pLqCNLyRz;WK4YPVg1%qhQ4Lc$b%b`26K%f<0~H-0j(e^_Msm4&{VtWmf4yw(IsZbnT#9+d0~p>Ivt(PbSsyesx*k)u5SNOpspfXr zLsO^Zi~jUX#lwXbnZ4uqcv0f#`( z={Tp9c<7fYK|WW5?0xj2@M)hP@~a`HJQlPk6xbx59J5J^sIQ3Hug>sAHp(~ua)0Ar z4En9Z4~eP8Dvt3p!nEA)`)p0LXnVzN-AFIh$vh-%g>DlICRC;)8R-%1nNovhok}Q* z$X!7R+FoLk#a=eA6GMcb+-7AWNzD#zEX%A9b@W?3!*b-kH9xy;X28C)Z$HL*^4Q6% zgz@8mDhx(||3r&#`TZvx1Vow~8ee0VEaSOA3h9=My$wuN#LgKsq=%(FQ<^D5e8P9* z4k@2*K?F%+II7}?Xu#Se9umKiYM;tq`sIsg+-~8A?0wn)E<+_0D_AhfSZ>Ns2%Co? z&ZlGq<=(GZ=4MEV{_rSKr!Txn!!Dy3UMHAAXiz+`YdnhU!8S7^KQy!A2VN-wkYZS_ zsnaa zKJfwOH7gPfJkA*wT60qFtbcprfX$Zng_mZQzP@EytXOWn>mAP92P%Iyx#^M?kJh+K~Zx%)~)pfy}1Sod6e907*~ z`jc_ih7OX+2I54oO$$Fh#^lBIlR;hdDza?sK+;-#zNHJDSW`Cli44PA8eQjSQyG4J zD>|MqzUTt@i+4^=DL*+P79y&PM|RkYsc*z z-bE+Y@TynL<>Mt`AaQwet9&!2&5H^p)}TAw315RPALW?3R^NsH#zoF#6#0k%%#ETl zqkXyuRfLLzk%J}8I3AK8F?ecmNB6I+!4sDg(ivn~)v}w(t^dAnQ%bp}mfzMb)m95C zHUT9o@xP~*3iF6MGjR1J#Hc#5;umr?bz3GvayvdUFeW;Ue8;ND*Oix%0j_+DExs!3 zD2|VTl^$W3U{zM~MA3qa#x7n&*+KY%lue8`u zInROi7%z>&2+sFPC41s+PS-oaw}NOd?G-*5J5S>d#fsArB3qo1Lynyl3FYqny5ZhO zgTk+m+`&hosU-Z^H;dreJ-zG2tb@7dsNtt}OF1xiCGMhwp3(+IK9_^{!R65(ajo8? 
z|IvZqTOS$MBCG-v^%vXdm|!*0GiF={GnsN_wn6)%f~VS;OF9vJ*wLeLYoa##}1FQ&%p}J*Z?|EN^sBtn_F=>TuJcA<(=c zd)oZaRwAJfxl8s0NoKNHpTvv{a!#QgE{J)AiExNn4lG-ioN=}z`SD^DoBg&zpBuS> zdoDo12Fa4jTc*|;(=Hx+T*ENjlF!O-De-CLEB^}mD!c9!w&4nQ;pE5TRqn9g-rNFV ziSecZMCSym=xTl>Z)9Z}CJY4YDACvK#C(6S$Et^1H%7(r;@0T4vRI()sJ=iwQIkeL}(_}B?aF(yj?i)f)Qh6xrP@s)?V=zYc893JOB9mNf;1bM1O>B zOm9*)sbqZ5l`3L==SAg>JqiN*e$YhH?@47?3u!@JK|Y>4#BW>@X2rcMn?}(WWe3SS7^Sfm7x2{Ti?l|+X*Yr&T*+qGPX;K#al zy&}=*Fe2x6C4Uwfw6K^L+xkwspYr^7%A#_Hl*h&o$92rSEi)QYX1Exv&cehSTTcUq zs04f>W^lw5M1zQhdGEO#*~m`{^cT^V=Z{2Z4<`$;BOhT29h^qQswRrt={81=nOK#b z)9Toi8lBWHpUJ#jBbV`R8r`y zQOV9TyMagR?d`W86ZMF~KT4@+`g*Wd_n=Ky<-W(EjjOiTrLJ3dZPCpX{P+QF+J#Xq zmhSUVv+%1DxBaW6vh$tNR`w6K_gpb7-FpT4Q}WdBoD?L(f^xauXJ9M?S7mP}xd+6v zRzIRo5N$SRsb9AOst!&oqX>gu>d}tQjc4264Z?h~_eL&^bRG<66}2&`G^=^iBJ_>{P3VQ{pg(5_oJ^wFBlgt>8E*8K|*FiH=S_ zuz0NZeez|AoZLid@1$IWY&Zv3^P?Fl065x83(7$3&c_3PRY2`iA(a^ffdXL|XejUr zn8x%YG0x}#0mz*JumS{Q34ClnP@|pE@9!OI=&UcQ+Pcw>LMae=Rbv^talm?cmO*fr zfd-G%!CYGM+zvDtAR^&JV#@wBdv(XuY&C+H*3YIL^1T-t!=OSRn%lO^Rx~FVlF8@r zJjjRHXN*dVtc%Q9D~GHfD%QGy}u50iv$B2W9$9_t;G(j%Q=*p>$c_{)QWd8-TyVa3d` z%tnkIyOw-)v4i^oot5aQi3!SS6tGAUe|Uod9wPQD3a+MXQ(7sB4RsF*ixRz3dti3I z)ULYPz{Md%VaUC`=kpCPRQUB;5+>98{tW`t;Sr$RlI~4^*;ZKen<>rbEuCwI5H!*Yy6zq!5nY0XH5pz0?;U^a5FeVqbJ%>Xv;aVnaCq>dAu&ay10d& z1PVu$v8VPq1>AsFHo4#0vFUZNc?E~jSr`r)Zwx1zR49}gk&$YVH!$8u-I29~F-B-Ej!q2? zj!uez=nudC`T75kNB{841qpQ4rQrD=ul%1o`QPRG Date: Sun, 15 Sep 2024 15:43:12 +0800 Subject: [PATCH 08/20] =?UTF-8?q?feat:=20=E7=AE=80=E6=98=93=E5=BA=94?= =?UTF-8?q?=E7=94=A8=E6=94=AF=E6=8C=81=E8=AE=BE=E7=BD=AE=E5=BA=94=E7=94=A8?= =?UTF-8?q?Logo?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ui/src/styles/app.scss | 10 +++++++ ui/src/views/application-overview/index.vue | 9 ------ .../views/application/ApplicationSetting.vue | 28 +++++++++++++++++-- 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/ui/src/styles/app.scss b/ui/src/styles/app.scss index 2a6af0b4a1..1ba986e5b6 100644 --- a/ui/src/styles/app.scss +++ b/ui/src/styles/app.scss @@ -733,3 +733,13 @@ h5 { display: none !important; } } + + +.edit-avatar { + position: relative; + .edit-mask { + position: absolute; + left: 0; + background: rgba(0, 0, 0, 0.4); + } +} \ No newline at end of file diff --git a/ui/src/views/application-overview/index.vue b/ui/src/views/application-overview/index.vue index ae4724efb2..b08d3cd1aa 100644 --- a/ui/src/views/application-overview/index.vue +++ b/ui/src/views/application-overview/index.vue @@ -332,14 +332,5 @@ onMounted(() => { right: 16px; top: 21px; } - - .edit-avatar { - position: relative; - .edit-mask { - position: absolute; - left: 0; - background: rgba(0, 0, 0, 0.4); - } - } } diff --git a/ui/src/views/application/ApplicationSetting.vue b/ui/src/views/application/ApplicationSetting.vue index 9a49185442..4ed7062b35 100644 --- a/ui/src/views/application/ApplicationSetting.vue +++ b/ui/src/views/application/ApplicationSetting.vue @@ -457,7 +457,11 @@

{{ applicationForm?.name || $t('views.application.applicationForm.form.appName.label') @@ -505,6 +517,7 @@ @change="openCreateModel($event)" > + - + From 45bf3477d1f6b9c0710e5a0e03622f54d091eed8 Mon Sep 17 00:00:00 2001 From: wangdan-fit2cloud Date: Wed, 18 Sep 2024 11:03:04 +0800 Subject: [PATCH 18/20] =?UTF-8?q?perf:=20=E4=BC=98=E5=8C=96=E4=B8=93?= =?UTF-8?q?=E4=B8=9A=E7=89=88=E9=99=90=E5=88=B6=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../component/CreateApplicationDialog.vue | 20 ++----------- ui/src/views/application/index.vue | 28 +++++++++++++++++-- .../dataset/component/CreateDatasetDialog.vue | 15 +--------- ui/src/views/dataset/index.vue | 28 +++++++++++++++++-- ui/src/views/user-manage/index.vue | 22 ++++++++++----- 5 files changed, 70 insertions(+), 43 deletions(-) diff --git a/ui/src/views/application/component/CreateApplicationDialog.vue b/ui/src/views/application/component/CreateApplicationDialog.vue index 8e78b9a325..8d3ab5985a 100644 --- a/ui/src/views/application/component/CreateApplicationDialog.vue +++ b/ui/src/views/application/component/CreateApplicationDialog.vue @@ -67,7 +67,7 @@ {{ $t('views.application.applicationForm.buttons.cancel') }} - + {{ $t('views.application.applicationForm.buttons.create') }} @@ -177,7 +177,8 @@ watch(dialogVisible, (bool) => { }, model_params_setting: {}, problem_optimization: false, - problem_optimization_prompt: '', + problem_optimization_prompt: + '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在标签中', stt_model_id: '', tts_model_id: '', stt_model_enable: false, @@ -193,21 +194,6 @@ const open = () => { dialogVisible.value = true } -const submitValid = (formEl: FormInstance | undefined) => { - if (user.isEnterprise()) { - submitHandle(formEl) - } else { - common - .asyncGetValid(ValidType.Application, ValidCount.Application, loading) - .then(async (res: any) => { - if (res?.data) { - submitHandle(formEl) - } else { - MsgAlert('提示', '社区版最多支持 5 个应用,如需拥有更多应用,请升级为专业版。') - } - }) - } -} const submitHandle = async (formEl: FormInstance | undefined) => { if (!formEl) return await formEl.validate((valid) => { diff --git a/ui/src/views/application/index.vue b/ui/src/views/application/index.vue index 839f9b81be..03f92a6de4 100644 --- a/ui/src/views/application/index.vue +++ b/ui/src/views/application/index.vue @@ -131,9 +131,11 @@ import { MsgSuccess, MsgConfirm } from '@/utils/message' import { isAppIcon } from '@/utils/application' import { useRouter } from 'vue-router' import { isWorkFlow } from '@/utils/application' -import useStore from '@/stores' +import { ValidType, ValidCount } from '@/enums/common' import { t } from '@/locales' -const { application, user } = useStore() +import useStore from '@/stores' + +const { application, user, common } = useStore() const router = useRouter() const CopyApplicationDialogRef = ref() @@ -168,7 +170,27 @@ function settingApplication(row: any) { } function openCreateDialog() { - CreateApplicationDialogRef.value.open() + if (user.isEnterprise()) { + CreateApplicationDialogRef.value.open() + } else { + MsgConfirm(`提示`, '社区版最多支持 5 个应用,如需拥有更多应用,请升级为专业版。', { + cancelButtonText: '确定', + confirmButtonText: '购买专业版', + confirmButtonClass: 'primary' + }) + .then(() => { + window.open('https://maxkb.cn/pricing.html', '_blank') + }) + .catch(() => { + common + .asyncGetValid(ValidType.Application, ValidCount.Application, loading) + .then(async (res: any) => { + if (res?.data) { + CreateApplicationDialogRef.value.open() + } + }) + }) + } } 
function searchHandle() { diff --git a/ui/src/views/dataset/component/CreateDatasetDialog.vue b/ui/src/views/dataset/component/CreateDatasetDialog.vue index 1316a0373f..5238dc64a8 100644 --- a/ui/src/views/dataset/component/CreateDatasetDialog.vue +++ b/ui/src/views/dataset/component/CreateDatasetDialog.vue @@ -73,7 +73,7 @@ {{ $t('views.application.applicationForm.buttons.cancel') }} - + {{ $t('views.application.applicationForm.buttons.create') }} @@ -124,19 +124,6 @@ const open = () => { dialogVisible.value = true } -const submitValid = () => { - if (user.isEnterprise()) { - submitHandle() - } else { - common.asyncGetValid(ValidType.Dataset, ValidCount.Dataset, loading).then(async (res: any) => { - if (res?.data) { - submitHandle() - } else { - MsgAlert('提示', '社区版最多支持 50 个知识库,如需拥有更多知识库,请升级为专业版。') - } - }) - } -} const submitHandle = async () => { if (await BaseFormRef.value?.validate()) { await DatasetFormRef.value.validate((valid: any) => { diff --git a/ui/src/views/dataset/index.vue b/ui/src/views/dataset/index.vue index 001a1eca44..bc2a7d7bfb 100644 --- a/ui/src/views/dataset/index.vue +++ b/ui/src/views/dataset/index.vue @@ -107,7 +107,7 @@
