Feat: Support the ZhipuAI (智谱AI) API #733

Closed
wants to merge 2 commits
1 change: 1 addition & 0 deletions pkg/core/bootutils/deps.py
@@ -4,6 +4,7 @@
    "requests": "requests",
    "openai": "openai",
    "anthropic": "anthropic",
    # "zhipuai": "zhipuai",
    "colorlog": "colorlog",
    "mirai": "yiri-mirai-rc",
    "aiocqhttp": "aiocqhttp",
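For context, this table in deps.py pairs import names with the pip package names used to install them on demand; `zhipuai` stays commented out because the new requester below reuses the `openai` SDK rather than the official client. A minimal sketch of how such a mapping is typically consumed, assuming deps.py follows the usual try-import-then-install pattern (the helper name `ensure_deps` is illustrative, not the project's actual function):

# Assumed consumer of an import-name -> pip-package mapping like the one in deps.py.
import importlib
import subprocess
import sys

REQUIRED = {
    "openai": "openai",
    # "zhipuai": "zhipuai",  # not needed: the ZhipuAI requester reuses the openai SDK
}

def ensure_deps(mapping: dict[str, str]) -> None:
    """Try to import each module and pip-install the matching package if it is missing."""
    for module_name, package_name in mapping.items():
        try:
            importlib.import_module(module_name)
        except ImportError:
            subprocess.check_call([sys.executable, "-m", "pip", "install", package_name])

ensure_deps(REQUIRED)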
2 changes: 0 additions & 2 deletions pkg/provider/modelmgr/apis/anthropicmsgs.py
@@ -5,8 +5,6 @@

import anthropic

from .. import api, entities, errors

from .. import api, entities, errors
from ....core import entities as core_entities
from ... import entities as llm_entities
147 changes: 147 additions & 0 deletions pkg/provider/modelmgr/apis/zhipuchatcmpl.py
@@ -0,0 +1,147 @@
from __future__ import annotations

import asyncio
import typing
import json
from typing import AsyncGenerator

import openai
import openai.types.chat.chat_completion as chat_completion
import httpx

from .. import api, entities, errors
from ....core import entities as core_entities
from ... import entities as llm_entities
from ...tools import entities as tools_entities


@api.requester_class("zhipuai-chat-completions")
class ZhipuAIChatCompletions(api.LLMAPIRequester):
    """ZhipuAI ChatCompletion API requester"""

    client: openai.AsyncClient

    async def initialize(self):
        self.client = openai.AsyncClient(
            api_key="",
            base_url=self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['base-url'],
            timeout=self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['timeout'],
            http_client=httpx.AsyncClient(
                proxies=self.ap.proxy_mgr.get_forward_proxies()
            )
        )

    async def _req(
        self,
        args: dict,
    ) -> chat_completion.ChatCompletion:
        self.ap.logger.debug(f"req chat_completion with args {args}")
        return await self.client.chat.completions.create(**args)

    async def _make_msg(
        self,
        chat_completion: chat_completion.ChatCompletion,
    ) -> llm_entities.Message:
        chatcmpl_message = chat_completion.choices[0].message.dict()

        message = llm_entities.Message(**chatcmpl_message)

        return message

    async def _closure(
        self,
        req_messages: list[dict],
        use_model: entities.LLMModelInfo,
        use_funcs: list[tools_entities.LLMFunction] = None,
    ) -> llm_entities.Message:
        self.client.api_key = use_model.token_mgr.get_token()
        self.ap.logger.debug("zhipuai api key set from token manager")

        args = self.ap.provider_cfg.data['requester']['zhipuai-chat-completions']['args'].copy()
        args["model"] = use_model.name if use_model.model_name is None else use_model.model_name

        if use_model.tool_call_supported:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)

            if tools:
                args["tools"] = tools

        # set the messages for this request
        messages = req_messages
        args["messages"] = messages

        # send the request
        resp = await self._req(args)

        # process the response
        message = await self._make_msg(resp)

        return message

    async def _request(
        self, query: core_entities.Query
    ) -> typing.AsyncGenerator[llm_entities.Message, None]:
        """Issue the request"""

        pending_tool_calls = []

        req_messages = [  # req_messages is only used within this class; external state is synced via query.messages
            m.dict(exclude_none=True) for m in query.prompt.messages
        ] + [m.dict(exclude_none=True) for m in query.messages]

        # req_messages.append({"role": "user", "content": str(query.message_chain)})

        msg = await self._closure(req_messages, query.use_model, query.use_funcs)

        yield msg

        pending_tool_calls = msg.tool_calls

        req_messages.append(msg.dict(exclude_none=True))

        while pending_tool_calls:
            for tool_call in pending_tool_calls:
                func = tool_call.function

                parameters = json.loads(func.arguments)

                func_ret = await self.ap.tool_mgr.execute_func_call(
                    query, func.name, parameters
                )

                msg = llm_entities.Message(
                    role="tool", content=json.dumps(func_ret, ensure_ascii=False), tool_call_id=tool_call.id
                )

                yield msg

                req_messages.append(msg.dict(exclude_none=True))

            # all tool calls handled, continue the conversation
            msg = await self._closure(req_messages, query.use_model, query.use_funcs)

            yield msg

            pending_tool_calls = msg.tool_calls

            req_messages.append(msg.dict(exclude_none=True))

    async def request(self, query: core_entities.Query) -> AsyncGenerator[llm_entities.Message, None]:
        try:
            async for msg in self._request(query):
                yield msg
        except asyncio.TimeoutError:
            raise errors.RequesterError('Request timed out')
        except openai.BadRequestError as e:
            if 'context_length_exceeded' in e.message:
                raise errors.RequesterError(f'Context too long, please reset the session: {e.message}')
            else:
                raise errors.RequesterError(f'Invalid request parameters: {e.message}')
        except openai.AuthenticationError as e:
            raise errors.RequesterError(f'Invalid api-key: {e.message}')
        except openai.NotFoundError as e:
            raise errors.RequesterError(f'Invalid request path: {e.message}')
        except openai.RateLimitError as e:
            raise errors.RequesterError(f'Too many requests: {e.message}')
        except openai.APIError as e:
            raise errors.RequesterError(f'Request error: {e.message}')
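Because this requester simply points the OpenAI SDK at ZhipuAI's OpenAI-compatible endpoint, the core call path can be reproduced standalone. A minimal sketch under stated assumptions: the model name `glm-4` and the placeholder key are illustrative, while the base URL is the one configured in this PR.

# Standalone sketch of the call path used by ZhipuAIChatCompletions:
# the OpenAI SDK pointed at ZhipuAI's OpenAI-compatible endpoint.
import asyncio

import openai


async def main():
    client = openai.AsyncClient(
        api_key="your-zhipuai-api-key",  # placeholder, not a real key
        base_url="https://open.bigmodel.cn/api/paas/v4",
        timeout=120,
    )
    resp = await client.chat.completions.create(
        model="glm-4",  # assumed model name for illustration
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(resp.choices[0].message.content)


asyncio.run(main())

Tool calls come back in the same OpenAI-style schema, which is why the `_request` loop above can feed `role: "tool"` messages straight back into the next request.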
2 changes: 1 addition & 1 deletion pkg/provider/modelmgr/modelmgr.py
@@ -6,7 +6,7 @@
from ...core import app

from . import token, api
from .apis import chatcmpl, anthropicmsgs
from .apis import chatcmpl, anthropicmsgs, zhipuchatcmpl

FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"

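Importing `zhipuchatcmpl` here is what makes the new requester selectable: the `@api.requester_class("zhipuai-chat-completions")` decorator presumably registers the class under that key at import time. A rough sketch of that registration pattern, assuming `api.requester_class` behaves like a typical name-keyed registry (the dict and names below are illustrative, not the project's internals):

# Illustrative decorator registry; an assumption about how api.requester_class behaves.
from typing import Callable, Dict, Type

_REQUESTERS: Dict[str, Type] = {}


def requester_class(name: str) -> Callable[[Type], Type]:
    """Register a requester class under the string key used in provider.json."""
    def decorator(cls: Type) -> Type:
        _REQUESTERS[name] = cls
        return cls
    return decorator


@requester_class("zhipuai-chat-completions")
class ZhipuAIChatCompletions:
    """Stand-in for the class defined in zhipuchatcmpl.py."""


# The model manager can then look the class up by its configured key:
assert _REQUESTERS["zhipuai-chat-completions"] is ZhipuAIChatCompletions

Under that assumption, adding the module to this import line is enough; no other wiring in modelmgr.py is needed.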
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,6 +1,7 @@
requests
openai>1.0.0
anthropic
zhipuai
colorlog~=6.6.0
yiri-mirai-rc
aiocqhttp
8 changes: 8 additions & 0 deletions templates/provider.json
@@ -6,6 +6,9 @@
        ],
        "anthropic": [
            "sk-1234567890"
        ],
        "zhipuai": [
            "xxxxxxxxxxx"
        ]
    },
    "requester": {
@@ -20,6 +23,11 @@
                "max_tokens": 1024
            },
            "timeout": 120
        },
        "zhipuai-chat-completions": {
            "base-url": "https://open.bigmodel.cn/api/paas/v4",
            "args": {},
            "timeout": 120
        }
    },
    "model": "gpt-3.5-turbo",
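The `args` object above is copied by `_closure()` and splatted directly into `chat.completions.create`, so any extra sampling parameters set here would be forwarded verbatim. A small sketch of that merge, where the `temperature` value and model name are illustrative assumptions rather than defaults shipped in this PR:

# How the configured "args" would reach the API call (mirrors _closure above).
requester_cfg = {
    "base-url": "https://open.bigmodel.cn/api/paas/v4",
    "args": {"temperature": 0.7},  # assumed example of an extra parameter
    "timeout": 120,
}

args = requester_cfg["args"].copy()
args["model"] = "glm-4"  # assumed model name
args["messages"] = [{"role": "user", "content": "ping"}]

# client.chat.completions.create(**args) then receives the merged settings.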