diff --git a/pkg/core/bootutils/deps.py b/pkg/core/bootutils/deps.py index 41097f27..6da03459 100644 --- a/pkg/core/bootutils/deps.py +++ b/pkg/core/bootutils/deps.py @@ -4,6 +4,7 @@ "requests": "requests", "openai": "openai", "anthropic": "anthropic", + # "zhipuai": "zhipuai", "colorlog": "colorlog", "mirai": "yiri-mirai-rc", "aiocqhttp": "aiocqhttp", diff --git a/pkg/provider/modelmgr/apis/anthropicmsgs.py b/pkg/provider/modelmgr/apis/anthropicmsgs.py index 71423e0a..a6a72655 100644 --- a/pkg/provider/modelmgr/apis/anthropicmsgs.py +++ b/pkg/provider/modelmgr/apis/anthropicmsgs.py @@ -5,8 +5,6 @@ import anthropic -from .. import api, entities, errors - from .. import api, entities, errors from ....core import entities as core_entities from ... import entities as llm_entities diff --git a/pkg/provider/modelmgr/apis/zhipuchatcmpl.py b/pkg/provider/modelmgr/apis/zhipuchatcmpl.py new file mode 100644 index 00000000..55ad4bfc --- /dev/null +++ b/pkg/provider/modelmgr/apis/zhipuchatcmpl.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import asyncio +import typing +import json +from typing import AsyncGenerator + +import openai +import openai.types.chat.chat_completion as chat_completion +import httpx + +from .. import api, entities, errors +from ....core import entities as core_entities +from ... 
import entities as llm_entities +from ...tools import entities as tools_entities + + +@api.requester_class("zhipuai-chat-completions") +class ZhipuAIChatCompletions(api.LLMAPIRequester): +    """ZhipuAI ChatCompletion API 请求器""" + +    client: openai.AsyncClient + +    async def initialize(self): +        self.client = openai.AsyncClient( +            api_key="", +            base_url=self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['base-url'], +            timeout=self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['timeout'], +            http_client=httpx.AsyncClient( +                proxies=self.ap.proxy_mgr.get_forward_proxies() +            ) +        ) + +    async def _req( +        self, +        args: dict, +    ) -> chat_completion.ChatCompletion: +        self.ap.logger.debug(f"req chat_completion with args {args}") +        return await self.client.chat.completions.create(**args) + +    async def _make_msg( +        self, +        chat_completion: chat_completion.ChatCompletion, +    ) -> llm_entities.Message: +        chatcmpl_message = chat_completion.choices[0].message.dict() + +        message = llm_entities.Message(**chatcmpl_message) + +        return message + +    async def _closure( +        self, +        req_messages: list[dict], +        use_model: entities.LLMModelInfo, +        use_funcs: list[tools_entities.LLMFunction] | None = None, +    ) -> llm_entities.Message: +        self.client.api_key = use_model.token_mgr.get_token() +        # NOTE(review): removed debug print of self.client.api_key — never log credentials + +        args = self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['args'].copy() +        args["model"] = use_model.name if use_model.model_name is None else use_model.model_name + +        if use_model.tool_call_supported: +            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs) + +            if tools: +                args["tools"] = tools + +        # 设置此次请求中的messages +        messages = req_messages +        args["messages"] = messages + +        # 发送请求 +        resp = await self._req(args) + +        # 处理请求结果 +        message = await self._make_msg(resp) + +        return message + +    async def _request( +        self, query: core_entities.Query +    ) -> typing.AsyncGenerator[llm_entities.Message, None]: +        """请求""" + +        
pending_tool_calls = [] + + req_messages = [ # req_messages 仅用于类内,外部同步由 query.messages 进行 + m.dict(exclude_none=True) for m in query.prompt.messages + ] + [m.dict(exclude_none=True) for m in query.messages] + + # req_messages.append({"role": "user", "content": str(query.message_chain)}) + + msg = await self._closure(req_messages, query.use_model, query.use_funcs) + + yield msg + + pending_tool_calls = msg.tool_calls + + req_messages.append(msg.dict(exclude_none=True)) + + while pending_tool_calls: + for tool_call in pending_tool_calls: + func = tool_call.function + + parameters = json.loads(func.arguments) + + func_ret = await self.ap.tool_mgr.execute_func_call( + query, func.name, parameters + ) + + msg = llm_entities.Message( + role="tool", content=json.dumps(func_ret, ensure_ascii=False), tool_call_id=tool_call.id + ) + + yield msg + + req_messages.append(msg.dict(exclude_none=True)) + + # 处理完所有调用,继续请求 + msg = await self._closure(req_messages, query.use_model, query.use_funcs) + + yield msg + + pending_tool_calls = msg.tool_calls + + req_messages.append(msg.dict(exclude_none=True)) + + async def request(self, query: core_entities.Query) -> AsyncGenerator[llm_entities.Message, None]: + try: + async for msg in self._request(query): + yield msg + except asyncio.TimeoutError: + raise errors.RequesterError('请求超时') + except openai.BadRequestError as e: + if 'context_length_exceeded' in e.message: + raise errors.RequesterError(f'上文过长,请重置会话: {e.message}') + else: + raise errors.RequesterError(f'请求参数错误: {e.message}') + except openai.AuthenticationError as e: + raise errors.RequesterError(f'无效的 api-key: {e.message}') + except openai.NotFoundError as e: + raise errors.RequesterError(f'请求路径错误: {e.message}') + except openai.RateLimitError as e: + raise errors.RequesterError(f'请求过于频繁: {e.message}') + except openai.APIError as e: + raise errors.RequesterError(f'请求错误: {e.message}') diff --git a/pkg/provider/modelmgr/modelmgr.py b/pkg/provider/modelmgr/modelmgr.py index 
21a1c757..925ea737 100644 --- a/pkg/provider/modelmgr/modelmgr.py +++ b/pkg/provider/modelmgr/modelmgr.py @@ -6,7 +6,7 @@ from ...core import app from . import token, api -from .apis import chatcmpl, anthropicmsgs +from .apis import chatcmpl, anthropicmsgs, zhipuchatcmpl FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list" diff --git a/requirements.txt b/requirements.txt index 6a3e718c..47d5aab1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,7 @@ requests openai>1.0.0 anthropic +zhipuai colorlog~=6.6.0 yiri-mirai-rc aiocqhttp diff --git a/templates/provider.json b/templates/provider.json index 1c26bd4c..2487c9ab 100644 --- a/templates/provider.json +++ b/templates/provider.json @@ -6,6 +6,9 @@ ], "anthropic": [ "sk-1234567890" + ], + "zhipuai": [ + "xxxxxxxxxxx" ] }, "requester": { @@ -20,6 +23,11 @@ "max_tokens": 1024 }, "timeout": 120 + }, + "zhipuai-chat-completions": { + "base-url": "https://open.bigmodel.cn/api/paas/v4", + "args": {}, + "timeout": 120 } }, "model": "gpt-3.5-turbo",