fix: improve usage of llm config
sunshinesmilelk authored and BroKun committed Oct 31, 2024
1 parent 80f5860 commit 34beabf
Showing 11 changed files with 98 additions and 68 deletions.
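
The thread running through all eleven files: API keys and the default model now come from the `llm` section of the libro config and are passed down explicitly, instead of each executor re-reading the config on its own. As a minimal sketch, the code below shows the dict shape the new code expects from `libro_config.get_config().get("llm")`; key names are taken from the diff, the values are placeholders:

```python
# Sketch of the "llm" config section consumed by this commit (values are
# placeholders, not real keys).
llm_config = {
    "OPENAI_API_KEY": "sk-...",      # when present, the OpenAI provider is registered
    "DASHSCOPE_API_KEY": "sk-...",   # when present, the Tongyi provider is registered
    "default_model": "gpt-4o",       # fallback model when a request carries no chat_key
}
```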
22 changes: 16 additions & 6 deletions libro-ai/src/libro_ai/chat/__init__.py
@@ -1,19 +1,29 @@
-from .tongyi import TongyiChatObjectProvider
+from .tongyi_chat_provider import TongyiChatObjectProvider
 from .debug_provider import DebugChatObjectProvider
 from .executor import LLMChat, ChatExecutor
-from .openai import OpenAIChatObjectProvider
+from .openai_chat_provider import OpenAIChatObjectProvider
 from .object_manager import ChatObjectManager
 from .source import CHAT_SOURCE
 from .object import ChatObject, ChatObjectProvider
 from .langchain_variable import LangChainVariableChatObjectProvider
 from .chat_record import ChatMessage, ChatRecord, ChatRecordProvider
 from .utils import get_message_str
+from libro_core.config import libro_config

 chat_object_manager = ChatObjectManager()
-chat_object_manager.register_provider(OpenAIChatObjectProvider())
-chat_object_manager.register_provider(TongyiChatObjectProvider())
+libro_ai_config = libro_config.get_config().get("llm")
+model_type = []
+api_key:str = None
+if libro_ai_config is not None:
+    if tongyi_api_key := libro_ai_config.get("DASHSCOPE_API_KEY"):
+        api_key = tongyi_api_key
+        chat_object_manager.register_provider(TongyiChatObjectProvider(api_key = api_key))
+        model_type.append('tongyi')
+    if openai_api_key := libro_ai_config.get("OPENAI_API_KEY"):
+        api_key = openai_api_key
+        chat_object_manager.register_provider(OpenAIChatObjectProvider(api_key = api_key))
+        model_type.append('openai')
 chat_object_manager.register_provider(LangChainVariableChatObjectProvider())
-chat_object_manager.register_provider(DebugChatObjectProvider())
+chat_object_manager.register_provider(DebugChatObjectProvider(model_type = model_type,api_key = api_key))

 chat_record_provider = ChatRecordProvider()
29 changes: 21 additions & 8 deletions libro-ai/src/libro_ai/chat/debug_executor.py
@@ -1,4 +1,6 @@
 from typing import List, Union
+from langchain_openai import ChatOpenAI
+from langchain_community.chat_models.tongyi import ChatTongyi
 from langchain.callbacks import get_openai_callback
 from .executor import LLMChat
 from ..utils import is_langchain_installed
@@ -9,21 +11,32 @@
 from libro_core.config import libro_config
 from pydantic import Field

+OPENAI = ['text-davinci-003',"gpt-4","gpt-4o","gpt-3.5-turbo"]
+TONGYI = ["qwen-max","qwen-plus","qwen-turbo"]
+
 class DebugChat(LLMChat):
     name: str = "debug"
     model: str = Field(default="gpt-4o")
-    system_message:SystemMessage = SystemMessage(content="你是一个代码调试小助手,在 notebook 执行时,输出了一些报错信息,请尝试解释报错并给出解决方案,每次对话都会给出代码以及报错信息")
-    chat: ChatOpenAI = None
+    system_message: SystemMessage = SystemMessage(content="你是一个代码调试小助手,在 notebook 执行时,输出了一些报错信息,请尝试解释报错并给出解决方案,每次对话都会给出代码以及报错信息")
+    chat: Union[ChatOpenAI, ChatTongyi] = None
+    api_key: str = None
+    model_type: List[str] = ['openai']

     def load(self):
         if is_langchain_installed():
             extra_params = {}
-            libro_ai_config = libro_config.get_config().get("llm")
-            if libro_ai_config is not None:
-                if api_key := libro_ai_config.get("OPENAI_API_KEY"):
-                    extra_params["api_key"] = api_key
-
-            self.chat = ChatOpenAI(model_name=self.model,**extra_params)
+            config = libro_config.get_config().get('llm')
+            if config is not None:
+                default_model = config.get("default_model")
+                self.model = default_model
+                if default_model in OPENAI:
+                    if api_key := config.get("OPENAI_API_KEY"):
+                        extra_params["api_key"] = api_key
+                    self.chat = ChatOpenAI(model_name=self.model,**extra_params)
+                elif default_model in TONGYI:
+                    if api_key := config.get("DASHSCOPE_API_KEY"):
+                        extra_params["api_key"] = api_key
+                    self.chat = ChatTongyi(model_name=self.model,**extra_params)
             return True
         return False

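With this change the debug executor follows the configured `default_model` instead of always building a `ChatOpenAI` backend. A usage sketch, assuming langchain is installed and the config values shown in the comments (note that `load()` overwrites the `model` field with `default_model`):

```python
# Sketch: DebugChat.load() routes on default_model from the llm config.
# With {"default_model": "qwen-max", "DASHSCOPE_API_KEY": "..."} it builds
# a ChatTongyi backend; with {"default_model": "gpt-4o", "OPENAI_API_KEY": "..."}
# it builds ChatOpenAI. These config values are assumptions for illustration.
from libro_ai.chat.debug_executor import DebugChat

debug = DebugChat()
if debug.load():        # False when langchain is not installed
    print(debug.model)  # reflects the configured default_model
```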
8 changes: 5 additions & 3 deletions libro-ai/src/libro_ai/chat/debug_provider.py
@@ -13,13 +13,15 @@ class DebugChatObjectProvider(ChatObjectProvider):
     name: str = "debug"
     cache: Dict[str, ChatExecutor] = {}
     LLMs: List[str] = ["gpt-3.5-turbo", "gpt-4"]

+    model_type: List[str] = ['openai']
+    is_system_provider: bool = True

     def get_or_create_executor(self, name: str) -> ChatExecutor:
         if name in self.cache:
             return self.cache[name]
         from .debug_executor import DebugChat

-        executor = DebugChat(model=name, name=name)
+        executor = DebugChat(model=name, name=name, model_type = self.model_type, api_key=self.api_key)
         if executor.load():
             self.cache[name] = executor
         return executor
@@ -31,7 +33,7 @@ def list(self) -> List[ChatObject]:
             *list(
                 map(
                     lambda n: ChatObject(
-                        name=MODEL_NAME_ALIASES.get(n, n),
+                        name='debug',
                         to_executor=lambda: self.get_or_create_executor(n),
                         type=CHAT_SOURCE["LLM"],
                     ),
1 change: 1 addition & 0 deletions libro-ai/src/libro_ai/chat/object.py
@@ -26,6 +26,7 @@ def model_dump(self):
 class ChatObjectProvider(BaseModel, ABC):
     name: str = "custom"
     is_system_provider: bool = False
+    api_key: str = None

     @abstractmethod
     def list(self) -> List[ChatObject]:
11 changes: 10 additions & 1 deletion libro-ai/src/libro_ai/chat/object_manager.py
@@ -4,7 +4,7 @@
 from pydantic import BaseModel
 from .object import ChatObject, ChatObjectProvider
 from .executor import ChatExecutor
-
+from .utils import MODEL_NAME_ALIASES

 class ChatObjectManager(BaseModel):
     providers: List[ChatObjectProvider] = []
@@ -41,6 +41,15 @@ def get_executor(self, key: str) -> ChatExecutor:
             return executor
         raise Exception(f"Executor {key} not found")

+    def get_key(self, model_name: str):
+        dict = self.get_object_dict()
+        model_alias_name = MODEL_NAME_ALIASES.get(model_name)
+        if model_alias_name is not None:
+            for value in dict.values():
+                if value.name == model_alias_name:
+                    return value.key
+        return None
+
     def get_object_dict(self) -> Dict[str, ChatObject]:
         chat_objects: Dict[str, ChatObject] = {}
         for provider in self.providers:
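The new `get_key` helper resolves a raw model name to the key of a registered `ChatObject` by way of the shared alias table; it returns `None` when the model's provider was never registered, for example because its API key is missing from the config. A usage sketch (the exact key format comes from `ChatObject.key`, which is not shown in this diff):

```python
from libro_ai.chat import chat_object_manager

# "gpt-3.5-turbo" aliases to "chatgpt"; get_key scans the registered
# objects for that name and returns the matching ChatObject.key.
key = chat_object_manager.get_key("gpt-3.5-turbo")
if key is not None:
    executor = chat_object_manager.get_executor(key)
```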
13 changes: 8 additions & 5 deletions libro-ai/src/libro_ai/chat/openai_chat_executor.py
@@ -15,14 +15,13 @@ class OpenAIChat(LLMChat):
     name: str = "chatgpt"
     model: str = Field(default="gpt-3.5-turbo")
     chat: ChatOpenAI = None
+    api_key: str = None

     def load(self):
         if is_langchain_installed():
             extra_params = {}
-            libro_ai_config = libro_config.get_config().get("llm")
-            if libro_ai_config is not None:
-                if api_key := libro_ai_config.get("OPENAI_API_KEY"):
-                    extra_params["api_key"] = api_key
+            if self.api_key:
+                extra_params["api_key"] = self.api_key
             self.chat = ChatOpenAI(model_name=self.model, **extra_params)
             return True
         return False
@@ -79,10 +78,14 @@ class DalleChat(LLMChat):
     name: str = "dalle-3"
     model: str = Field(default="dall-e-3")
     dalle: DallEAPIWrapper = None
+    api_key:str = None

     def load(self):
+        extra_params = {}
         if is_langchain_installed():
-            self.dalle = DallEAPIWrapper()
+            if self.api_key:
+                extra_params["api_key"] = self.api_key
+            self.dalle = DallEAPIWrapper(**extra_params)
             self.dalle.model_name = self.model
             return True
         return False
libro-ai/src/libro_ai/chat/openai.py → libro-ai/src/libro_ai/chat/openai_chat_provider.py (renamed)
@@ -5,22 +5,7 @@
 from .executor import ChatExecutor
 from .object import ChatObject, ChatObjectProvider
 from ..utils import is_langchain_installed
-
-MODEL_NAME_ALIASES = {
-    "text-davinci-003": "gpt3",
-    "gpt-3.5-turbo": "chatgpt",
-    "gpt-4": "gpt4",
-    "dall-e-3": "dalle-3",
-    "dall-e-2": "dalle-2",
-}
-
-ALIASE_NAME_MODEL = {
-    "gpt3": "text-davinci-003",
-    "chatgpt": "gpt-3.5-turbo",
-    "gpt4": "gpt-4",
-    "dalle-3": "dall-e-3",
-    "dalle-2": "dall-e-2",
-}
+from .utils import ALIASE_NAME_MODEL, MODEL_NAME_ALIASES


 class OpenAIChatObjectProvider(ChatObjectProvider):
@@ -35,7 +20,7 @@ def get_or_create_executor(self, name: str) -> ChatExecutor:
             return self.cache[model]
         from .openai_chat_executor import OpenAIChat

-        executor = OpenAIChat(model=model, name=name)
+        executor = OpenAIChat(model=model, name=name, api_key=self.api_key)
         if executor.load():
             self.cache[model] = executor
         return executor
@@ -46,7 +31,7 @@ def get_or_create_lmm_executor(self, name: str) -> ChatExecutor:
             return self.cache[model]
         from .openai_chat_executor import DalleChat

-        executor = DalleChat(model=model, name=name)
+        executor = DalleChat(model=model, name=name,api_key=self.api_key)
         if executor.load():
             self.cache[model] = executor
         return executor
libro-ai/src/libro_ai/chat/tongyi_executor.py → libro-ai/src/libro_ai/chat/tongyi_chat_executor.py (renamed)
@@ -13,14 +13,13 @@ class TongyiChat(LLMChat):
     name: str = "tongyi"
     model: str = Field(default="qwen-max")
     chat: ChatTongyi = None
+    api_key: str = None

     def load(self):
         if is_langchain_installed():
             extra_params = {}
-            libro_ai_config = libro_config.get_config().get("llm")
-            if libro_ai_config is not None:
-                if api_key := libro_ai_config.get("DASHSCOPE_API_KEY"):
-                    extra_params["api_key"] = api_key
+            if self.api_key:
+                extra_params["api_key"] = self.api_key
             self.chat = ChatTongyi(model_name=self.model, **extra_params)
             return True
         return False
@@ -47,6 +46,7 @@ def run(self, value:StringPromptValue,stream=False, sync=True, system_prompt = None, **kwargs):
                 result = chat.astream(input, streaming = True, **kwargs)
                 return result
             except Exception as e:
+
                 return ""
         else:
             try:
libro-ai/src/libro_ai/chat/tongyi.py → libro-ai/src/libro_ai/chat/tongyi_chat_provider.py (renamed)
@@ -5,19 +5,7 @@
 from .executor import ChatExecutor
 from .object import ChatObject, ChatObjectProvider
 from ..utils import is_langchain_installed
-
-MODEL_NAME_ALIASES = {
-    "qwen-max": "qwen-max",
-    "qwen-plus": "qwen-plus",
-    "qwen-turbo": "qwen-turbo",
-}
-
-ALIASE_NAME_MODEL = {
-    "qwen-max": "qwen-max",
-    "qwen-plus": "qwen-plus",
-    "qwen-turbo": "qwen-turbo",
-}
-
+from .utils import ALIASE_NAME_MODEL, MODEL_NAME_ALIASES

 class TongyiChatObjectProvider(ChatObjectProvider):
     name: str = "tongyi"
@@ -29,9 +17,9 @@ def get_or_create_executor(self, name: str) -> ChatExecutor:
         model = ALIASE_NAME_MODEL.get(name, name)
         if model in self.cache:
             return self.cache[model]
-        from .tongyi_executor import TongyiChat
+        from .tongyi_chat_executor import TongyiChat

-        executor = TongyiChat(model=model, name=name)
+        executor = TongyiChat(model=model, name=name,api_key=self.api_key)
         if executor.load():
             self.cache[model] = executor
         return executor
21 changes: 21 additions & 0 deletions libro-ai/src/libro_ai/chat/utils.py
@@ -2,6 +2,27 @@
 from langchain.schema.messages import BaseMessage
 from langchain_core.prompt_values import StringPromptValue

+MODEL_NAME_ALIASES = {
+    "text-davinci-003": "gpt3",
+    "gpt-3.5-turbo": "chatgpt",
+    "gpt-4": "gpt4",
+    "dall-e-3": "dalle-3",
+    "dall-e-2": "dalle-2",
+    "qwen-max": "qwen-max",
+    "qwen-plus": "qwen-plus",
+    "qwen-turbo": "qwen-turbo",
+}
+
+ALIASE_NAME_MODEL = {
+    "gpt3": "text-davinci-003",
+    "chatgpt": "gpt-3.5-turbo",
+    "gpt4": "gpt-4",
+    "dalle-3": "dall-e-3",
+    "dalle-2": "dall-e-2",
+    "qwen-max": "qwen-max",
+    "qwen-plus": "qwen-plus",
+    "qwen-turbo": "qwen-turbo",
+}
+
 def get_message_str(message: Union[StringPromptValue, BaseMessage, List[BaseMessage]]):
     if isinstance(message, list):
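Centralizing both tables in `utils.py` keeps the OpenAI and Tongyi providers consistent: the two dicts are inverses for the OpenAI-family names and identity maps for the qwen models. A quick round-trip sketch:

```python
from libro_ai.chat.utils import MODEL_NAME_ALIASES, ALIASE_NAME_MODEL

alias = MODEL_NAME_ALIASES["gpt-3.5-turbo"]          # -> "chatgpt"
assert ALIASE_NAME_MODEL[alias] == "gpt-3.5-turbo"   # round-trips
assert MODEL_NAME_ALIASES["qwen-max"] == "qwen-max"  # identity for Tongyi
```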
14 changes: 6 additions & 8 deletions libro-server/src/libro_server/libro_ai_handler.py
@@ -32,10 +32,8 @@ async def post(self):
         config = libro_config.get_config().get('llm')
         if chat_key is None or chat_key == "":
             if config is not None:
-                model = config.get("model")
-                model_type = config.get("model_type")
-                if model is not None and model_type is not None:
-                    chat_key = model_type + ":"+ model
+                model = config.get("default_model")
+                chat_key = chat_object_manager.get_key(model)
         prompt: str = data.get("prompt")
         system_prompt: str = data.get("system_prompt")
         if (
@@ -87,10 +85,10 @@ def post(self):
         config = libro_config.get_config().get('llm')
         if chat_key is None or chat_key == "":
             if config is not None:
-                model = config.get("model")
-                model_type = config.get("model_type")
-                if model is not None and model_type is not None:
-                    chat_key = model_type + ":"+ model
+                model = config.get("default_model")
+                chat_key = chat_object_manager.get_key(model)
+        if chat_key is None:
+            raise Exception("Invalid api key and default model!")
         prompt: str = data.get("prompt")
         system_prompt: str = data.get("system_prompt")
         # 流式输出响应
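Both handlers now share the same fallback: an empty `chat_key` resolves through `default_model` and `get_key`, and the synchronous handler fails fast when that resolution yields nothing. A sketch of the distilled flow, with the request plumbing elided (the bare `chat_key` value is an assumption for illustration):

```python
# Hypothetical resolution flow distilled from the handler above.
from libro_ai.chat import chat_object_manager
from libro_core.config import libro_config

chat_key = ""  # the request carried no chat_key
config = libro_config.get_config().get("llm")
if not chat_key and config is not None:
    chat_key = chat_object_manager.get_key(config.get("default_model"))
if chat_key is None:
    # e.g. default_model is unset or misspelled, or its provider had no API key
    raise Exception("Invalid api key and default model!")
```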
