Ai abstraction refactor #141

Open · wants to merge 14 commits into base: main
214 changes: 214 additions & 0 deletions nextpy/ai/agent/assistant_agent.py
@@ -0,0 +1,214 @@
from typing import Any, Callable, Dict, List, Tuple
from nextpy.ai.agent.base_agent import BaseAgent
import logging
from pathlib import Path
from nextpy.ai import engine
import inspect
import asyncio


def _call_functions(functions):
    """Call each (function, args, kwargs) triple, running coroutines on a fresh event loop."""
    for function, arguments, keyword_args in functions:
        if inspect.iscoroutinefunction(function):
            # If an event loop is already running (e.g. inside Jupyter),
            # patch it with nest_asyncio so nested run_until_complete works.
            try:
                other_loop = asyncio.get_event_loop()
                import nest_asyncio

                nest_asyncio.apply(other_loop)
            except RuntimeError:
                pass
            loop = asyncio.new_event_loop()
            try:
                loop.run_until_complete(function(*arguments, **keyword_args))
            finally:
                loop.close()
        else:
            function(*arguments, **keyword_args)


async def _a_call_functions(functions):
for function, arguments, keyword_args in functions:
if inspect.iscoroutinefunction(function):
await function(*arguments, **keyword_args)
else:
function(*arguments, **keyword_args)


class AssistantAgent(BaseAgent):
"""
AssistantAgent class represents an assistant agent that interacts with users in a conversational manner.

:param name: The name of the assistant agent.
:type name: str
:param llm: The language model used by the assistant agent.
:type llm: LanguageModel
:param memory: The memory used by the assistant agent.
:type memory: Memory
    :param async_mode: Whether the assistant agent runs in asynchronous mode. Default is False.
    :type async_mode: bool, optional
    :param system_message: The system message included in the prompt. Default is None.
    :type system_message: str, optional
    :param functions_before_call: (function, args, kwargs) triples called before the main function call. Default is None.
    :type functions_before_call: List[Tuple[Callable, Tuple[Any], Dict[str, Any]]], optional
    :param functions_after_call: (function, args, kwargs) triples called after the main function call. Default is None.
    :type functions_after_call: List[Tuple[Callable, Tuple[Any], Dict[str, Any]]], optional

The assistant agent is built on top of the existing BaseAgent and serves as a simple interface for creating an AI assistant agent.
It provides a convenient way to define an AI assistant agent that can interact with users in a conversational manner.
The assistant agent can be customized with a name, language model, memory, and other parameters.
It also supports asynchronous mode, allowing it to handle multiple conversations simultaneously.

MultiagentManager can be used to manage multiple assistant agents and coordinate their interactions with users.


Example:


tailwind_agent = AssistantAgent(name='Tailwind Class Generator', llm=llm, memory=None, async_mode=False,
system_message='''automates the creation of Tailwind CSS classes, streamlining the process of building stylish and responsive user interfaces. By leveraging advanced algorithms and design principles, the Tailwind Class Generator analyzes your design elements and dynamically generates the optimal set of Tailwind utility classes.
This tool is designed to enhance efficiency in web development, allowing developers to focus more on high-level design decisions and less on manually crafting individual CSS rules. With the Tailwind Class Generator, achieving a visually appealing and consistent design becomes a seamless experience.
'''
)
"""

DEFAULT_PROMPT = '''
    {{#system~}} {{name}}, you are working in the following team: {{agents}}
{{~/system}}

    {{#user~}}
    Read the following CONVERSATION:
    {{messages}}
    Respond as {{name}}. Do not thank any team member or show appreciation.
    {{~/user}}

{{#assistant~}}
{{gen 'answer' temperature=0 max_tokens=500}}
{{~/assistant}}
'''
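    # Template note: the {{#system}}/{{#user}}/{{#assistant}} blocks mark chat
    # roles, {{name}}, {{agents}} and {{messages}} are filled from the kwargs
    # passed to run()/arun(), and {{gen 'answer' ...}} generates the 'answer'
    # variable that receive() returns through self.output_key.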

    def __init__(self,
                 name,
                 llm=None,
                 memory=None,
                 async_mode: bool = False,
                 system_message: str | None = None,
                 custom_engine=None,
                 functions_before_call: List[Tuple[Callable, Tuple[Any], Dict[str, Any]]] | None = None,
                 functions_after_call: List[Tuple[Callable, Tuple[Any], Dict[str, Any]]] | None = None,
                 description: str = "Helpful AI Assistant Agent",
                 **kwargs):
"""
Initializes an instance of the AssistantAgent class.

:param name: The name of the assistant agent.
:type name: str
:param llm: The language model used by the assistant agent.
:type llm: LanguageModel
:param memory: The memory used by the assistant agent.
:type memory: Memory
        :param async_mode: Whether the assistant agent runs in asynchronous mode. Default is False.
        :type async_mode: bool, optional
        :param system_message: Text (or a path to a text file) appended to the system section of the prompt. Default is None.
        :type system_message: str, optional
        :param custom_engine: A pre-built engine used instead of constructing one from llm. Either llm or custom_engine must be provided.
        :type custom_engine: Engine, optional
        :param functions_before_call: (function, args, kwargs) triples called before the main function call. Default is None.
        :type functions_before_call: List[Tuple[Callable, Tuple[Any], Dict[str, Any]]], optional
        :param functions_after_call: (function, args, kwargs) triples called after the main function call. Default is None.
        :type functions_after_call: List[Tuple[Callable, Tuple[Any], Dict[str, Any]]], optional
:param kwargs: Additional keyword arguments.
"""
super().__init__(llm=llm, **kwargs)
self.name = name
self.prompt = self.DEFAULT_PROMPT
self.system_message = system_message
# This is used by multiagent manager to determine whether to use receive or a_receive
self.async_mode = async_mode

        if system_message is not None:
            # system_message may also be a path to a text file; fall back to
            # the raw string if it cannot be read as a file.
            try:
                system_message = Path(system_message).read_text()
            except Exception:
                pass
            self.prompt = self.prompt[:self.prompt.find('{{~/system}}')] \
                + system_message \
                + self.prompt[self.prompt.find('{{~/system}}'):]

        # Either llm or custom_engine must be provided.
        if llm is None and custom_engine is None:
            logging.warning("Either llm or custom_engine must be provided.")

self.engine = custom_engine if custom_engine is not None else engine(
template=self.prompt, llm=llm, memory=memory, async_mode=async_mode, **kwargs)
self.output_key = 'answer'
self.functions_before_call = functions_before_call
self.functions_after_call = functions_after_call
self.description = description

@staticmethod
def function_call_decorator(func):
"""
Decorator function that wraps the main function call with additional functions to be called before and after.

:param func: The main function to be called.
:type func: Callable
:return: The wrapped function.
:rtype: Callable
"""
if inspect.iscoroutinefunction(func):
async def a_inner(self, *args, **kwargs):
if self.functions_before_call is not None:
await _a_call_functions(self.functions_before_call)

result = await func(self, *args, **kwargs)

if self.functions_after_call is not None:
await _a_call_functions(self.functions_after_call)

return result
return a_inner
else:
def inner(self, *args, **kwargs):
if self.functions_before_call is not None:
_call_functions(self.functions_before_call)

result = func(self, *args, **kwargs)

if self.functions_after_call is not None:
_call_functions(self.functions_after_call)

return result
return inner
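
    # For illustration (hook name hypothetical): with
    #     functions_before_call=[(log_start, ("session-1",), {})]
    # each receive()/a_receive() call first runs log_start("session-1"),
    # then the wrapped method, then every triple in functions_after_call.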

@function_call_decorator
def receive(self, agents, messages, termination_message):
"""
Receives messages from other agents and generates a response.

:param agents: The list of agents involved in the conversation.
:type agents: List[str]
:param messages: The list of messages in the conversation.
:type messages: List[str]
:param termination_message: The termination message for the conversation.
:type termination_message: str
:return: The generated response.
:rtype: str
"""
output = self.run(agents=agents, messages=messages, name=self.name)
return output

@function_call_decorator
async def a_receive(self, agents, messages, termination_message):
"""
Asynchronously receives messages from other agents and generates a response.

:param agents: The list of agents involved in the conversation.
:type agents: List[str]
:param messages: The list of messages in the conversation.
:type messages: List[str]
:param termination_message: The termination message for the conversation.
:type termination_message: str
:return: The generated response.
:rtype: str
"""
output = await self.arun(agents=agents, messages=messages, name=self.name)
return output
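
For context, a minimal usage sketch of the new class; the model name and the log_call hook are illustrative assumptions, not part of this diff:

from nextpy.ai import engine
from nextpy.ai.agent.assistant_agent import AssistantAgent


def log_call(label):
    # Hypothetical hook, passed below as a (function, args, kwargs) triple.
    print(f"[{label}] receive() invoked")


llm = engine.llms.OpenAI(model="gpt-3.5-turbo")  # any engine-supported LLM

reviewer = AssistantAgent(
    name="Code Reviewer",
    llm=llm,
    memory=None,
    async_mode=False,
    system_message="You review Python diffs for correctness.",
    functions_before_call=[(log_call, ("before",), {})],
    functions_after_call=[(log_call, ("after",), {})],
)

answer = reviewer.receive(
    agents=["Code Reviewer", "Author"],
    messages=["Author: please review assistant_agent.py"],
    termination_message="TERMINATE",
)
print(answer)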
53 changes: 31 additions & 22 deletions nextpy/ai/agent/base_agent.py
@@ -1,4 +1,4 @@
# This file has been modified by the Nextpy Team in 2023 using AI tools and automation scripts.
# This file has been modified by the Nextpy Team in 2023 using AI skills and automation scripts.
# We have rigorously tested these modifications to ensure reliability and performance. Based on successful test results, we are confident in the quality and stability of these changes.

import argparse
@@ -7,6 +7,7 @@
import json
import logging
from enum import Enum
from nextpy.ai.skills.base import BaseSkill
from typing import Any, Dict, List, Optional, Union

import nest_asyncio
@@ -16,7 +17,6 @@
from nextpy.ai.engine._program import extract_text
from nextpy.ai.memory.base import BaseMemory
from nextpy.ai.rag.doc_loader import document_loader
from nextpy.ai.tools.basetool import BaseTool

log = logging.getLogger(__name__)

@@ -32,16 +32,16 @@ class AgentState(Enum):

IDLE = 0
BUSY = 1
USED_AS_TOOL = 2
USED_AS_SKILL = 2
ERROR = 3


class BaseAgent:
def __init__(
self,
rag: Optional[Any] = None,
tools: Optional[List[BaseTool]] = None,
llm: Optional[Any] = None,
skills: Optional[List[BaseSkill]] = None,
prompt_template: str = None,
input_variables: Dict[str, Any] = {},
agent_id: str = "default",
@@ -52,8 +52,8 @@ def __init__(
):
self.agent_id = agent_id
self.rag = rag
self.tools = tools
self.llm = llm
self.skills = skills
self.prompt_template = prompt_template
self.input_variables = input_variables
self.memory = memory
@@ -98,14 +98,14 @@ def get_knowledge_variable(self):
def default_llm_model(self):
pass

def add_tool(self, tool: BaseTool) -> None:
"""Add a tool to the agent's tool list."""
self.tools.append(tool)
def add_skill(self, skill: BaseSkill) -> None:
"""Add a skill to the agent's skill list."""
self.skills.append(skill)

def remove_tool(self, tool: BaseTool) -> None:
"""Remove a tool from the agent's tool list."""
if tool in self.tools:
self.tools.remove(tool)
def remove_skill(self, skill: BaseSkill) -> None:
"""Remove a skill from the agent's skill list."""
if skill in self.skills:
self.skills.remove(skill)

def llm_instance(self) -> engine.llms.OpenAI:
"""Create an instance of the language model."""
@@ -146,7 +146,8 @@ def run(self, **kwargs) -> Union[str, Dict[str, Any]]:
RETRIEVED_KNOWLEDGE=retrieved_knowledge, **kwargs, silent=True
)
else:
raise ValueError("knowledge_variable not found in input kwargs")
raise ValueError(
"knowledge_variable not found in input kwargs")
else:
output = self.engine(**kwargs, silent=True, from_agent=True)

@@ -166,7 +167,8 @@ def run(self, **kwargs) -> Union[str, Dict[str, Any]]:
if output.variables().get(_output_key):
return output[_output_key]
else:
logging.warning("Output key not found in output, so full output returned")
logging.warning(
"Output key not found in output, so full output returned")
return output

async def arun(self, **kwargs) -> Union[str, Dict[str, Any]]:
@@ -188,7 +190,8 @@ async def arun(self, **kwargs) -> Union[str, Dict[str, Any]]:
RETRIEVED_KNOWLEDGE=retrieved_knowledge, **kwargs, silent=True
)
else:
raise ValueError("knowledge_variable not found in input kwargs")
raise ValueError(
"knowledge_variable not found in input kwargs")
else:
output = await self.engine(**kwargs, silent=True, from_agent=True)
# Handle memory here
@@ -207,7 +210,8 @@ async def arun(self, **kwargs) -> Union[str, Dict[str, Any]]:
if output.variables().get(_output_key):
return output[_output_key]
else:
logging.warning("Output key not found in output, so full output returned")
logging.warning(
"Output key not found in output, so full output returned")
return output

def _handle_memory(self, new_program):
@@ -260,7 +264,8 @@ def cli(self):
)

for var in _vars:
parser.add_argument(f"--{var}", help=f"Pass {var} as an input variable")
parser.add_argument(
f"--{var}", help=f"Pass {var} as an input variable")

args = parser.parse_args()

@@ -329,7 +334,7 @@ def export_agent_config(self, config_path, export_json=False):
"prompt_template": self.prompt_template,
"input_variables": self.input_variables,
"output_key": self.output_key,
# 'tools': None if self.tools is None else self.tools
# 'skills': None if self.skills is None else self.skills
}
with open(config_path, "w") as f:
yaml.dump(config, f)
@@ -352,7 +357,8 @@ def load_from_config(cls, config_file):

rag = None
if config["rag"] is not None:
rag_module_name, rag_class_name = config["rag"]["type"].rsplit(".", 1)
rag_module_name, rag_class_name = config["rag"]["type"].rsplit(
".", 1)
rag_module = importlib.import_module(rag_module_name)
rag_class = getattr(rag_module, rag_class_name)

@@ -391,9 +397,12 @@ def load_from_config(cls, config_file):
vector_store_module_name, vector_store_class_name = config["rag"][
"vector_store"
]["type"].rsplit(".", 1)
vector_store_module = importlib.import_module(vector_store_module_name)
vector_store_class = getattr(vector_store_module, vector_store_class_name)
vector_store = vector_store_class(embedding_function=embedding_function)
vector_store_module = importlib.import_module(
vector_store_module_name)
vector_store_class = getattr(
vector_store_module, vector_store_class_name)
vector_store = vector_store_class(
embedding_function=embedding_function)

rag = rag_class(
raw_data=raw_data,
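
And a sketch of the tools-to-skills rename from the caller's side; the skill construction is an assumption, since BaseSkill's interface is outside this diff:

from nextpy.ai.agent.base_agent import BaseAgent
from nextpy.ai.skills.base import BaseSkill

agent = BaseAgent(llm=None, skills=[])  # assumed-minimal construction

# Any concrete BaseSkill instance works here; its constructor is not part
# of this diff, so a bare BaseSkill() stands in (may require subclassing).
skill = BaseSkill()

agent.add_skill(skill)       # previously: agent.add_tool(tool)
assert skill in agent.skills
agent.remove_skill(skill)    # previously: agent.remove_tool(tool)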