Skip to content

Commit

Permalink
feat: Add basic support for integration tools to ToolStep (#519)
Browse files Browse the repository at this point in the history
Signed-off-by: Diwank Singh Tomer <[email protected]>

<!-- ELLIPSIS_HIDDEN -->


----

> [!IMPORTANT]
> This PR updates the handling of integrations and systems by adding new
models, updating workflows, and modifying session options, along with
dependency updates and a migration script.
> 
>   - **Behavior**:
> - Adds `execute_integration` function in `execute_integration.py` to
handle integration tool calls.
> - Updates `prompt_step.py` to handle unwrapping of prompt responses
and tool call results.
> - Modifies `tool_call_step.py` to handle tool calls using `Tool`
model.
>   - **Models**:
>     - Adds `IntegrationDef` and `SystemDef` models in `Tools.py`.
> - Updates `CreateToolRequest`, `PatchToolRequest`,
`UpdateToolRequest`, and `Tool` to use `IntegrationDef` and `SystemDef`.
> - Adds `forward_tool_results` option to session models in
`Sessions.py`.
>   - **Workflow**:
> - Updates `TaskExecutionWorkflow` in `task_execution/__init__.py` to
handle integration tool calls.
>   - **Dependencies**:
> - Updates `@typespec/*` dependencies in `package.json` to version
`0.60.x`.
>   - **Migration**:
> - Adds migration script
`migrate_1727235852_add_forward_tool_calls_option.py` to add
`forward_tool_calls` option to sessions.
> 
> <sup>This description was created by </sup>[<img alt="Ellipsis"
src="https://img.shields.io/badge/Ellipsis-blue?color=175173">](https://www.ellipsis.dev?ref=julep-ai%2Fjulep&utm_source=github&utm_medium=referral)<sup>
for a49aa12. It will automatically
update as commits are pushed.</sup>


<!-- ELLIPSIS_HIDDEN -->

---------

Signed-off-by: Diwank Singh Tomer <[email protected]>
  • Loading branch information
creatorrr committed Sep 30, 2024
1 parent f19dd75 commit c216301
Show file tree
Hide file tree
Showing 27 changed files with 1,156 additions and 195 deletions.
55 changes: 55 additions & 0 deletions agents-api/agents_api/activities/execute_integration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
from typing import Any

from beartype import beartype
from temporalio import activity

from ..autogen.openapi_model import IntegrationDef
from ..common.protocol.tasks import StepContext
from ..env import testing
from ..models.tools import get_tool_args_from_metadata


@beartype
async def execute_integration(
    context: StepContext,
    tool_name: str,
    integration: IntegrationDef,
    arguments: dict[str, Any],
) -> Any:
    """Execute the integration tool `tool_name` for the current task step.

    Merges any setup arguments stored in agent/task metadata with the
    per-call `arguments` (the per-call values win), then dispatches on
    `integration.provider`. Only the "dummy" provider is implemented so
    far; it simply echoes the merged arguments back.

    Raises:
        NotImplementedError: for any provider other than "dummy".
    """
    exec_input = context.execution_input

    # Tool arguments pre-configured at the agent/task level (metadata).
    setup_args = get_tool_args_from_metadata(
        developer_id=exec_input.developer_id,
        agent_id=exec_input.agent.id,
        task_id=exec_input.task.id,
    )

    # Per-call arguments override metadata-level defaults.
    call_args = {**setup_args.get(tool_name, {}), **arguments}

    try:
        if integration.provider != "dummy":
            raise NotImplementedError(
                f"Unknown integration provider: {integration.provider}"
            )
        return call_args
    except BaseException as e:
        # Log inside a Temporal activity context, then always re-raise so
        # Temporal sees the failure.
        if activity.in_activity():
            activity.logger.error(f"Error in execute_integration: {e}")

        raise


async def mock_execute_integration(
    context: StepContext,
    tool_name: str,
    integration: IntegrationDef,
    arguments: dict[str, Any],
) -> Any:
    """Testing stand-in for `execute_integration`: echoes `arguments` back
    unchanged without touching metadata or any provider."""
    return arguments


# Register the function as a Temporal activity under a stable name.
# In the test environment (`testing`), swap in the mock implementation
# so no real integration work happens during tests.
execute_integration = activity.defn(name="execute_integration")(
    execute_integration if not testing else mock_execute_integration
)
16 changes: 14 additions & 2 deletions agents-api/agents_api/activities/task_steps/prompt_step.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from beartype import beartype
from temporalio import activity
from temporalio.exceptions import ApplicationError

from ...clients import (
litellm, # We don't directly import `acompletion` so we can mock it
Expand Down Expand Up @@ -63,18 +64,29 @@ async def prompt_step(context: StepContext) -> StepOutcome:
else:
passed_settings: dict = {}

# Wrap the prompt in a list if it is not already
if isinstance(prompt, str):
prompt = [{"role": "user", "content": prompt}]

completion_data: dict = {
"model": agent_model,
"tools": formatted_agent_tools or None,
("messages" if isinstance(prompt, list) else "prompt"): prompt,
"messages": prompt,
**agent_default_settings,
**passed_settings,
}

response = await litellm.acompletion(
**completion_data,
)

if context.current_step.unwrap:
if response.choices[0].finish_reason == "tool_calls":
raise ApplicationError("Tool calls cannot be unwrapped")

response = response.choices[0].message.content

return StepOutcome(
output=response.model_dump(),
output=response.model_dump() if hasattr(response, "model_dump") else response,
next=None,
)
22 changes: 13 additions & 9 deletions agents-api/agents_api/activities/task_steps/tool_call_step.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,10 @@

from beartype import beartype
from temporalio import activity
from temporalio.exceptions import ApplicationError

from ...activities.task_steps import base_evaluate
from ...autogen.openapi_model import ToolCallStep
from ...activities.task_steps.base_evaluate import base_evaluate
from ...autogen.openapi_model import Tool, ToolCallStep
from ...common.protocol.tasks import (
StepContext,
StepOutcome,
Expand All @@ -26,24 +27,27 @@ def generate_call_id():
async def tool_call_step(context: StepContext) -> StepOutcome:
assert isinstance(context.current_step, ToolCallStep)

tool_type, tool_name = context.current_step.tool.split(".")
tools: list[Tool] = context.tools
tool_name = context.current_step.tool

tool = next((t for t in tools if t.name == tool_name), None)

if tool is None:
raise ApplicationError(f"Tool {tool_name} not found in the toolset")

arguments = await base_evaluate(
context.current_step.arguments, context.model_dump()
)

tools = context.execution_input.tools

assert tool_name in [tool.name for tool in tools], f"Tool {tool_name} not found"

call_id = generate_call_id()

tool_call = {
tool_type: {
tool.type: {
"arguments": arguments,
"name": tool_name,
},
"id": call_id,
"type": tool_type,
"type": tool.type,
}

return StepOutcome(output=tool_call)
10 changes: 10 additions & 0 deletions agents-api/agents_api/autogen/Common.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,16 @@
from pydantic import AwareDatetime, BaseModel, ConfigDict, Field, RootModel


# Auto-generated Pydantic root model: a bare string payload representing a
# Jinja template. No class docstring on purpose — pydantic v2 would surface
# it as the JSON-schema `description` and change generated output.
class JinjaTemplate(RootModel[str]):
    model_config = ConfigDict(
        # Accept population by field name in addition to any alias.
        populate_by_name=True,
    )
    root: str
    """
    A valid jinja template.
    """


class Limit(RootModel[int]):
model_config = ConfigDict(
populate_by_name=True,
Expand Down
50 changes: 50 additions & 0 deletions agents-api/agents_api/autogen/Sessions.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,16 @@ class CreateSessionRequest(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)
If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


Expand Down Expand Up @@ -70,6 +80,16 @@ class PatchSessionRequest(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)
If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


Expand Down Expand Up @@ -97,6 +117,16 @@ class Session(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)
If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})]
metadata: dict[str, Any] | None = None
created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})]
Expand Down Expand Up @@ -160,6 +190,16 @@ class UpdateSessionRequest(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)
If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


Expand Down Expand Up @@ -194,6 +234,16 @@ class CreateOrUpdateSessionRequest(CreateSessionRequest):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)
If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


Expand Down
Loading

0 comments on commit c216301

Please sign in to comment.