From f4300cf6f910de6a04f24c7e113d1c297299789d Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sun, 17 Mar 2024 23:36:32 -0400 Subject: [PATCH 1/2] =?UTF-8?q?deps:=20=E6=B7=BB=E5=8A=A0zhipuai=E4=BE=9D?= =?UTF-8?q?=E8=B5=96=E5=BA=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/core/bootutils/deps.py | 1 + requirements.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/core/bootutils/deps.py b/pkg/core/bootutils/deps.py index 41097f27..73624ff4 100644 --- a/pkg/core/bootutils/deps.py +++ b/pkg/core/bootutils/deps.py @@ -4,6 +4,7 @@ "requests": "requests", "openai": "openai", "anthropic": "anthropic", + "zhipuai": "zhipuai", "colorlog": "colorlog", "mirai": "yiri-mirai-rc", "aiocqhttp": "aiocqhttp", diff --git a/requirements.txt b/requirements.txt index 6a3e718c..47d5aab1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,7 @@ requests openai>1.0.0 anthropic +zhipuai colorlog~=6.6.0 yiri-mirai-rc aiocqhttp From 066def7c4e6b59b6701c96435edf26bad84fa790 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Mon, 18 Mar 2024 02:38:42 -0400 Subject: [PATCH 2/2] =?UTF-8?q?feat:=20=E5=9F=BA=E6=9C=AC=E9=80=BB?= =?UTF-8?q?=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/core/bootutils/deps.py | 2 +- pkg/provider/modelmgr/apis/anthropicmsgs.py | 2 - pkg/provider/modelmgr/apis/zhipuchatcmpl.py | 147 ++++++++++++++++++++ pkg/provider/modelmgr/modelmgr.py | 2 +- templates/provider.json | 8 ++ 5 files changed, 157 insertions(+), 4 deletions(-) create mode 100644 pkg/provider/modelmgr/apis/zhipuchatcmpl.py diff --git a/pkg/core/bootutils/deps.py b/pkg/core/bootutils/deps.py index 73624ff4..6da03459 100644 --- a/pkg/core/bootutils/deps.py +++ b/pkg/core/bootutils/deps.py @@ -4,7 +4,7 @@ "requests": "requests", "openai": "openai", "anthropic": "anthropic", - "zhipuai": "zhipuai", + # "zhipuai": "zhipuai", "colorlog": 
"colorlog", "mirai": "yiri-mirai-rc", "aiocqhttp": "aiocqhttp", diff --git a/pkg/provider/modelmgr/apis/anthropicmsgs.py b/pkg/provider/modelmgr/apis/anthropicmsgs.py index 71423e0a..a6a72655 100644 --- a/pkg/provider/modelmgr/apis/anthropicmsgs.py +++ b/pkg/provider/modelmgr/apis/anthropicmsgs.py @@ -5,8 +5,6 @@ import anthropic -from .. import api, entities, errors - from .. import api, entities, errors from ....core import entities as core_entities from ... import entities as llm_entities diff --git a/pkg/provider/modelmgr/apis/zhipuchatcmpl.py b/pkg/provider/modelmgr/apis/zhipuchatcmpl.py new file mode 100644 index 00000000..55ad4bfc --- /dev/null +++ b/pkg/provider/modelmgr/apis/zhipuchatcmpl.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import asyncio +import typing +import json +from typing import AsyncGenerator + +import openai +import openai.types.chat.chat_completion as chat_completion +import httpx + +from .. import api, entities, errors +from ....core import entities as core_entities +from ... 
from ... import entities as llm_entities
from ...tools import entities as tools_entities


@api.requester_class("zhipuai-chat-completions")
class ZhipuAIChatCompletions(api.LLMAPIRequester):
    """ZhipuAI ChatCompletion API requester.

    Talks to ZhipuAI's OpenAI-compatible chat-completions endpoint through the
    openai SDK. Base URL, timeout and extra request args are read from the
    provider config under requester['zhipuai-chat-completions'].
    """

    client: openai.AsyncClient

    async def initialize(self):
        """Build the async client.

        The api_key is left empty here on purpose: a rotating token is
        injected per request in _closure().
        """
        cfg = self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']
        self.client = openai.AsyncClient(
            api_key="",  # placeholder; real token set per request in _closure()
            base_url=cfg['base-url'],
            timeout=cfg['timeout'],
            http_client=httpx.AsyncClient(
                proxies=self.ap.proxy_mgr.get_forward_proxies()
            )
        )

    async def _req(
        self,
        args: dict,
    ) -> chat_completion.ChatCompletion:
        """Send one chat-completions request with the fully prepared argument dict."""
        self.ap.logger.debug(f"req chat_completion with args {args}")
        return await self.client.chat.completions.create(**args)

    async def _make_msg(
        self,
        chat_completion: chat_completion.ChatCompletion,
    ) -> llm_entities.Message:
        """Convert the first choice of an API response into an internal Message."""
        chatcmpl_message = chat_completion.choices[0].message.dict()
        return llm_entities.Message(**chatcmpl_message)

    async def _closure(
        self,
        req_messages: list[dict],
        use_model: entities.LLMModelInfo,
        use_funcs: list[tools_entities.LLMFunction] = None,
    ) -> llm_entities.Message:
        """Perform a single round-trip: rotate token, assemble args, call the API.

        Args:
            req_messages: messages in OpenAI wire format (list of dicts).
            use_model: model descriptor providing the token manager and model name.
            use_funcs: tool functions to expose, if the model supports tool calls.
        """
        # Rotate to the next configured api key for this model.
        # SECURITY: never log or print the key — a debug print that leaked it
        # to stdout was removed here.
        self.client.api_key = use_model.token_mgr.get_token()

        args = self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['args'].copy()
        # Prefer the explicit upstream model_name when configured; fall back to
        # the logical model name.
        args["model"] = use_model.name if use_model.model_name is None else use_model.model_name

        if use_model.tool_call_supported:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
            if tools:
                args["tools"] = tools

        # Messages for this specific request.
        args["messages"] = req_messages

        resp = await self._req(args)
        return await self._make_msg(resp)

    async def _request(
        self, query: core_entities.Query
    ) -> typing.AsyncGenerator[llm_entities.Message, None]:
        """Yield assistant/tool messages, resolving tool calls until none remain.

        req_messages is used only inside this method; external synchronization
        happens through query.messages, maintained by the caller.
        """
        req_messages = [
            m.dict(exclude_none=True) for m in query.prompt.messages
        ] + [m.dict(exclude_none=True) for m in query.messages]

        msg = await self._closure(req_messages, query.use_model, query.use_funcs)
        yield msg

        pending_tool_calls = msg.tool_calls
        req_messages.append(msg.dict(exclude_none=True))

        while pending_tool_calls:
            # Execute every tool call the model asked for and feed results back
            # as role="tool" messages.
            for tool_call in pending_tool_calls:
                func = tool_call.function
                parameters = json.loads(func.arguments)

                func_ret = await self.ap.tool_mgr.execute_func_call(
                    query, func.name, parameters
                )

                msg = llm_entities.Message(
                    role="tool",
                    content=json.dumps(func_ret, ensure_ascii=False),
                    tool_call_id=tool_call.id,
                )
                yield msg
                req_messages.append(msg.dict(exclude_none=True))

            # All calls handled — ask the model to continue the conversation.
            msg = await self._closure(req_messages, query.use_model, query.use_funcs)
            yield msg

            pending_tool_calls = msg.tool_calls
            req_messages.append(msg.dict(exclude_none=True))

    async def request(self, query: core_entities.Query) -> AsyncGenerator[llm_entities.Message, None]:
        """Public entry point: stream messages, translating SDK errors to RequesterError.

        Raises:
            errors.RequesterError: on timeout or any openai SDK error; the
                message distinguishes context overflow, bad args, bad key,
                bad path, rate limiting and generic API failures.
        """
        try:
            async for msg in self._request(query):
                yield msg
        except asyncio.TimeoutError:
            raise errors.RequesterError('请求超时')
        except openai.BadRequestError as e:
            # Subclasses of APIError must be caught before the generic handler.
            if 'context_length_exceeded' in e.message:
                raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
            else:
                raise errors.RequesterError(f'请求参数错误: {e.message}')
        except openai.AuthenticationError as e:
            raise errors.RequesterError(f'无效的 api-key: {e.message}')
        except openai.NotFoundError as e:
            raise errors.RequesterError(f'请求路径错误: {e.message}')
        except openai.RateLimitError as e:
            raise errors.RequesterError(f'请求过于频繁: {e.message}')
        except openai.APIError as e:
            raise errors.RequesterError(f'请求错误: {e.message}')
21a1c757..925ea737 100644 --- a/pkg/provider/modelmgr/modelmgr.py +++ b/pkg/provider/modelmgr/modelmgr.py @@ -6,7 +6,7 @@ from ...core import app from . import token, api -from .apis import chatcmpl, anthropicmsgs +from .apis import chatcmpl, anthropicmsgs, zhipuchatcmpl FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list" diff --git a/templates/provider.json b/templates/provider.json index 1c26bd4c..2487c9ab 100644 --- a/templates/provider.json +++ b/templates/provider.json @@ -6,6 +6,9 @@ ], "anthropic": [ "sk-1234567890" + ], + "zhipuai": [ + "xxxxxxxxxxx" ] }, "requester": { @@ -20,6 +23,11 @@ "max_tokens": 1024 }, "timeout": 120 + }, + "zhipuai-chat-completions": { + "base-url": "https://open.bigmodel.cn/api/paas/v4", + "args": {}, + "timeout": 120 } }, "model": "gpt-3.5-turbo",