chore(agents-api): linters checks and upgraded package versions #1203

Merged on Mar 3, 2025 (14 commits). Changes shown from 11 commits.

2 changes: 1 addition & 1 deletion agents-api/agents_api/activities/demo.py
@@ -14,5 +14,5 @@ async def mock_demo_activity(a: int, b: int) -> int:


demo_activity = activity.defn(name="demo_activity")(
demo_activity if not testing else mock_demo_activity
demo_activity if not testing else mock_demo_activity,
)
2 changes: 1 addition & 1 deletion agents-api/agents_api/activities/execute_api_call.py
@@ -64,5 +64,5 @@ async def execute_api_call(
mock_execute_api_call = execute_api_call

execute_api_call = activity.defn(name="execute_api_call")(
execute_api_call if not testing else mock_execute_api_call
execute_api_call if not testing else mock_execute_api_call,
)
2 changes: 1 addition & 1 deletion agents-api/agents_api/activities/execute_integration.py
@@ -85,5 +85,5 @@ async def execute_integration(
mock_execute_integration = execute_integration

execute_integration = activity.defn(name="execute_integration")(
execute_integration if not testing else mock_execute_integration
execute_integration if not testing else mock_execute_integration,
)
2 changes: 1 addition & 1 deletion agents-api/agents_api/activities/execute_system.py
@@ -182,5 +182,5 @@ def _create_search_request(arguments: dict) -> Any:
mock_execute_system = execute_system

execute_system = activity.defn(name="execute_system")(
execute_system if not testing else mock_execute_system
execute_system if not testing else mock_execute_system,
)
9 changes: 6 additions & 3 deletions agents-api/agents_api/activities/humanization_utils.py
@@ -46,7 +46,7 @@
}

SAPLING_URL = "https://api.sapling.ai/api/v1/aidetect"
COPLEYAKS_URL = "https://api.copyleaks.com/v2/writer-detector/{scan_id}/check"
COPYLEAKS_URL = "https://api.copyleaks.com/v2/writer-detector/{scan_id}/check"
ZEROGPT_URL = "https://api.zerogpt.com/api/detect/detectText"
DESKLIB_URL = "http://35.243.190.233/detect"

@@ -170,7 +170,7 @@ def is_human_copyleaks(text):

# Send the POST request with JSON payload and headers
response = requests.post(
COPLEYAKS_URL.format(scan_id=scan_id), json=payload, headers=headers
COPYLEAKS_URL.format(scan_id=scan_id),
json=payload,
headers=headers,
)
response.raise_for_status()

@@ -336,7 +338,8 @@ def split_with_langchain(markdown_text: str) -> list[Document]:

# MD splits
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on, strip_headers=True
headers_to_split_on=headers_to_split_on,
strip_headers=True,
)
return markdown_splitter.split_text(markdown_text)

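For context on the split_with_langchain call reflowed above, a rough usage sketch of header-based markdown splitting, assuming the langchain_text_splitters package; the header names and sample text are made up:

from langchain_text_splitters import MarkdownHeaderTextSplitter

headers_to_split_on = [
    ("#", "h1"),
    ("##", "h2"),
]

# strip_headers=True drops the header line itself from each chunk's content.
markdown_splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=headers_to_split_on,
    strip_headers=True,
)

docs = markdown_splitter.split_text("# Title\n\nIntro.\n\n## Details\n\nBody text.")
print([d.page_content for d in docs])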
3 changes: 2 additions & 1 deletion agents-api/agents_api/activities/task_steps/prompt_step.py
@@ -93,7 +93,8 @@ async def prompt_step(context: StepContext) -> StepOutcome:

# Get passed settings
passed_settings: dict = context.current_step.model_dump(
exclude=excluded_keys, exclude_unset=True
exclude=excluded_keys,
exclude_unset=True,
)
passed_settings.update(passed_settings.pop("settings", {}) or {})

@@ -30,7 +30,8 @@ async def transition_step(
if last_error is not None:
await asyncio.sleep(temporal_activity_after_retry_timeout)
await wf_handle.signal(
TaskExecutionWorkflow.set_last_error, LastErrorInput(last_error=None)
TaskExecutionWorkflow.set_last_error,
LastErrorInput(last_error=None),
)

if not isinstance(context.execution_input, ExecutionInput):
5 changes: 3 additions & 2 deletions agents-api/agents_api/activities/utils.py
@@ -586,7 +586,8 @@ class stdlib_time:

@beartype
def get_evaluator(
names: dict[str, Any], extra_functions: dict[str, Callable] | None = None
names: dict[str, Any],
extra_functions: dict[str, Callable] | None = None,
) -> SimpleEval:
if len(names) > MAX_COLLECTION_SIZE:
msg = f"Too many variables (max {MAX_COLLECTION_SIZE})"
@@ -634,7 +635,7 @@ def filtered_handler(*args, **kwargs):

# Remove problematic parameters
filtered_handler.__signature__ = sig.replace(
parameters=[p for p in sig.parameters.values() if p.name not in parameters_to_exclude]
parameters=[p for p in sig.parameters.values() if p.name not in parameters_to_exclude],
)

return filtered_handler
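The utils.py hunk above reflows a signature-rewriting trick; a self-contained sketch of that trick (the helper name here is illustrative, not the module's actual API):

import inspect
from functools import wraps


def exclude_from_signature(handler, parameters_to_exclude: set[str]):
    sig = inspect.signature(handler)

    @wraps(handler)
    def filtered_handler(*args, **kwargs):
        return handler(*args, **kwargs)

    # Hide the excluded parameters from introspection (e.g. schema generation),
    # while the wrapper still forwards whatever it is called with.
    filtered_handler.__signature__ = sig.replace(
        parameters=[p for p in sig.parameters.values() if p.name not in parameters_to_exclude],
    )
    return filtered_handler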
3 changes: 2 additions & 1 deletion agents-api/agents_api/app.py
@@ -87,7 +87,8 @@ async def lifespan(container: FastAPI | ObjectWithState):

# Enable metrics
Instrumentator(excluded_handlers=["/metrics", "/docs", "/openapi.json"]).instrument(app).expose(
app, include_in_schema=False
app,
include_in_schema=False,
)

# Create a new router for the docs
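A minimal sketch of the metrics setup touched above, assuming the prometheus-fastapi-instrumentator package:

from fastapi import FastAPI
from prometheus_fastapi_instrumentator import Instrumentator

app = FastAPI()

# Instrument every route except the metrics and docs endpoints themselves,
# and expose /metrics without adding it to the OpenAPI schema.
Instrumentator(excluded_handlers=["/metrics", "/docs", "/openapi.json"]).instrument(app).expose(
    app,
    include_in_schema=False,
)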
28 changes: 15 additions & 13 deletions agents-api/agents_api/autogen/Sessions.py
@@ -89,7 +89,7 @@ class CreateSessionRequest(BaseModel):
"""
Whether to forward tool calls to the model
"""
recall_options: HybridDocSearch | None = None
recall_options: VectorDocSearch | TextOnlyDocSearch | HybridDocSearch | None = None
"""
Recall options for the session
"""
@@ -100,15 +100,15 @@ class HybridDocSearch(BaseDocSearch):
model_config = ConfigDict(
populate_by_name=True,
)
mode: str = "hybrid"
mode: Literal["hybrid"] = "hybrid"
"""
The mode to use for the search.
"""
confidence: Annotated[float, Field(ge=-1.0, le=1.0)] = 0
"""
The confidence cutoff level
"""
alpha: Annotated[float, Field(ge=0.0, le=1.0)] = 0.75
alpha: Annotated[float, Field(ge=0.0, le=1.0)] = 0.5
"""
The weight to apply to BM25 vs Vector search results. 0 => pure BM25; 1 => pure vector;
"""
@@ -122,15 +122,15 @@ class HybridDocSearchUpdate(BaseDocSearchUpdate):
model_config = ConfigDict(
populate_by_name=True,
)
mode: str = "hybrid"
mode: Literal["hybrid"] = "hybrid"
"""
The mode to use for the search.
"""
confidence: Annotated[float, Field(ge=-1.0, le=1.0)] = 0
"""
The confidence cutoff level
"""
alpha: Annotated[float, Field(ge=0.0, le=1.0)] = 0.75
alpha: Annotated[float, Field(ge=0.0, le=1.0)] = 0.5
"""
The weight to apply to BM25 vs Vector search results. 0 => pure BM25; 1 => pure vector;
"""
@@ -180,7 +180,7 @@ class PatchSessionRequest(BaseModel):
"""
Whether to forward tool calls to the model
"""
recall_options: HybridDocSearchUpdate | None = None
recall_options: (
VectorDocSearchUpdate | TextOnlyDocSearchUpdate | HybridDocSearchUpdate | None
) = None
"""
Recall options for the session
"""
@@ -227,7 +229,7 @@ class Session(BaseModel):
"""
Whether to forward tool calls to the model
"""
recall_options: HybridDocSearch | None = None
recall_options: VectorDocSearch | TextOnlyDocSearch | HybridDocSearch | None = None
"""
Recall options for the session
"""
@@ -274,7 +276,7 @@ class TextOnlyDocSearch(BaseDocSearch):
model_config = ConfigDict(
populate_by_name=True,
)
mode: str = "text"
mode: Literal["text"] = "text"
"""
The mode to use for the search.
"""
@@ -284,7 +286,7 @@ class TextOnlyDocSearchUpdate(BaseDocSearchUpdate):
model_config = ConfigDict(
populate_by_name=True,
)
mode: str = "text"
mode: Literal["text"] = "text"
"""
The mode to use for the search.
"""
@@ -330,7 +332,7 @@ class UpdateSessionRequest(BaseModel):
"""
Whether to forward tool calls to the model
"""
recall_options: HybridDocSearch | None = None
recall_options: VectorDocSearch | TextOnlyDocSearch | HybridDocSearch | None = None
"""
Recall options for the session
"""
@@ -341,7 +343,7 @@ class VectorDocSearch(BaseDocSearch):
model_config = ConfigDict(
populate_by_name=True,
)
mode: str = "vector"
mode: Literal["vector"] = "vector"
"""
The mode to use for the search.
"""
@@ -359,7 +361,7 @@ class VectorDocSearchUpdate(BaseDocSearchUpdate):
model_config = ConfigDict(
populate_by_name=True,
)
mode: str = "vector"
mode: Literal["vector"] = "vector"
"""
The mode to use for the search.
"""
@@ -420,7 +422,7 @@ class CreateOrUpdateSessionRequest(CreateSessionRequest):
"""
Whether to forward tool calls to the model
"""
recall_options: HybridDocSearch | None = None
recall_options: VectorDocSearch | TextOnlyDocSearch | HybridDocSearch | None = None
"""
Recall options for the session
"""
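The Sessions.py changes above widen recall_options to a union of all three search types, tighten each mode field to a Literal, and lower the hybrid alpha default from 0.75 to 0.5. A standalone sketch with simplified models (not the generated autogen classes) of why the Literal tag matters: pydantic can then resolve the union from the "mode" value in the payload.

from typing import Annotated, Literal

from pydantic import BaseModel, Field


class VectorDocSearch(BaseModel):
    mode: Literal["vector"] = "vector"
    confidence: Annotated[float, Field(ge=-1.0, le=1.0)] = 0


class TextOnlyDocSearch(BaseModel):
    mode: Literal["text"] = "text"


class HybridDocSearch(BaseModel):
    mode: Literal["hybrid"] = "hybrid"
    confidence: Annotated[float, Field(ge=-1.0, le=1.0)] = 0
    alpha: Annotated[float, Field(ge=0.0, le=1.0)] = 0.5


class Session(BaseModel):
    recall_options: VectorDocSearch | TextOnlyDocSearch | HybridDocSearch | None = None


# The Literal "mode" values let validation pick the right union member unambiguously.
session = Session.model_validate({"recall_options": {"mode": "hybrid", "alpha": 0.7}})
assert isinstance(session.recall_options, HybridDocSearch)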
9 changes: 7 additions & 2 deletions agents-api/agents_api/clients/litellm.py
@@ -42,7 +42,11 @@ def patch_litellm_response(
@wraps(_acompletion)
@beartype
async def acompletion(
*, model: str, messages: list[dict], custom_api_key: str | None = None, **kwargs
*,
model: str,
messages: list[dict],
custom_api_key: str | None = None,
**kwargs,
) -> ModelResponse | CustomStreamWrapper:
if not custom_api_key:
model = f"openai/{model}" # This is needed for litellm
@@ -130,7 +134,8 @@ async def get_model_list(*, custom_api_key: str | None = None) -> list[dict]:
async with (
aiohttp.ClientSession() as session,
session.get(
url=f"{litellm_url}/models" if not custom_api_key else "/models", headers=headers
url=f"{litellm_url}/models" if not custom_api_key else "/models",
headers=headers,
) as response,
):
response.raise_for_status()
4 changes: 3 additions & 1 deletion agents-api/agents_api/clients/pg.py
@@ -16,5 +16,7 @@ async def _init_conn(conn):

async def create_db_pool(dsn: str | None = None, **kwargs):
return await asyncpg.create_pool(
dsn if dsn is not None else pg_dsn, init=_init_conn, **kwargs
dsn if dsn is not None else pg_dsn,
init=_init_conn,
**kwargs,
)
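A hedged usage sketch of the pool factory above, assuming asyncpg; the DSN and the per-connection setup are placeholders:

import asyncio

import asyncpg


async def _init_conn(conn):
    # Per-connection setup hook, run for every new connection in the pool.
    await conn.execute("SET timezone TO 'UTC'")


async def create_db_pool(dsn: str | None = None, **kwargs):
    return await asyncpg.create_pool(
        dsn if dsn is not None else "postgresql://user:pass@localhost/agents",
        init=_init_conn,
        **kwargs,
    )


async def main():
    pool = await create_db_pool(min_size=1, max_size=4)
    async with pool.acquire() as conn:
        print(await conn.fetchval("SELECT 1"))
    await pool.close()


asyncio.run(main())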
6 changes: 4 additions & 2 deletions agents-api/agents_api/clients/temporal.py
@@ -78,7 +78,7 @@ async def get_client_with_metrics(
new_runtime = Runtime(
telemetry=TelemetryConfig(
metrics=PrometheusConfig(
bind_address=f"{temporal_metrics_bind_host}:{temporal_metrics_bind_port}"
bind_address=f"{temporal_metrics_bind_host}:{temporal_metrics_bind_port}",
),
),
)
@@ -111,7 +111,9 @@ async def run_task_execution_workflow(
raise ValueError(msg)

start: TransitionTarget = start or TransitionTarget(
workflow="main", step=0, scope_id=uuid7()
workflow="main",
step=0,
scope_id=uuid7(),
)

client = client or (await get_client())
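A sketch of wiring Prometheus metrics into a Temporal client runtime, as in the temporal.py hunk above, assuming the temporalio SDK; the server address and bind host/port are placeholders:

from temporalio.client import Client
from temporalio.runtime import PrometheusConfig, Runtime, TelemetryConfig


async def get_client_with_metrics(address: str = "localhost:7233") -> Client:
    new_runtime = Runtime(
        telemetry=TelemetryConfig(
            metrics=PrometheusConfig(
                bind_address="0.0.0.0:14000",
            ),
        ),
    )
    # The process then serves scrape-able Prometheus metrics on that bind address.
    return await Client.connect(address, runtime=new_runtime)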
9 changes: 6 additions & 3 deletions agents-api/agents_api/common/interceptors.py
@@ -175,7 +175,8 @@ async def wrapper(

@wraps(func)
def wrapper_sync(
self, input: ExecuteActivityInput | ExecuteWorkflowInput
self,
input: ExecuteActivityInput | ExecuteWorkflowInput,
) -> T | RemoteObject:
# Load all remote arguments from the blob store
args: Sequence[Any] = input.args
@@ -348,15 +349,17 @@ class CustomInterceptor(Interceptor):
"""

def intercept_activity(
self, next: ActivityInboundInterceptor
self,
next: ActivityInboundInterceptor,
) -> ActivityInboundInterceptor:
"""
Creates and returns a custom activity interceptor.
"""
return CustomActivityInterceptor(super().intercept_activity(next))

def workflow_interceptor_class(
self, input: WorkflowInterceptorClassInput
self,
input: WorkflowInterceptorClassInput,
) -> type[WorkflowInboundInterceptor] | None:
"""
Returns the custom workflow interceptor class.
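For reference on the interceptor signatures reflowed above, a stripped-down sketch of the same hook points, assuming temporalio.worker's interceptor base classes; the custom classes here are stand-ins with the real logic omitted:

from temporalio.worker import (
    ActivityInboundInterceptor,
    Interceptor,
    WorkflowInboundInterceptor,
    WorkflowInterceptorClassInput,
)


class CustomActivityInterceptor(ActivityInboundInterceptor):
    """Wraps activity execution; the project's real logic is omitted here."""


class CustomWorkflowInterceptor(WorkflowInboundInterceptor):
    """Wraps workflow execution; the project's real logic is omitted here."""


class CustomInterceptor(Interceptor):
    def intercept_activity(
        self,
        next: ActivityInboundInterceptor,
    ) -> ActivityInboundInterceptor:
        # Chain the custom activity interceptor in front of the default one.
        return CustomActivityInterceptor(super().intercept_activity(next))

    def workflow_interceptor_class(
        self,
        input: WorkflowInterceptorClassInput,
    ) -> type[WorkflowInboundInterceptor] | None:
        return CustomWorkflowInterceptor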
5 changes: 4 additions & 1 deletion agents-api/agents_api/common/nlp.py
@@ -117,7 +117,10 @@ def extract_keywords(doc: Doc, top_n: int = 25, split_chunks: bool = True) -> li

@lru_cache(maxsize=1000)
def text_to_tsvector_query(
paragraph: str, top_n: int = 25, min_keywords: int = 1, split_chunks: bool = True
paragraph: str,
top_n: int = 25,
min_keywords: int = 1,
split_chunks: bool = True,
) -> str:
"""
Extracts meaningful keywords/phrases from text and joins them with OR.
6 changes: 4 additions & 2 deletions agents-api/agents_api/common/protocol/models.py
@@ -50,10 +50,12 @@ def load_arguments(self) -> None:

@beartype
def task_to_spec(
task: Task | CreateTaskRequest | UpdateTaskRequest | PatchTaskRequest, **model_opts
task: Task | CreateTaskRequest | UpdateTaskRequest | PatchTaskRequest,
**model_opts,
) -> TaskSpecDef | PartialTaskSpecDef:
task_data = task.model_dump(
**model_opts, exclude={"version", "developer_id", "task_id", "id", "agent_id"}
**model_opts,
exclude={"version", "developer_id", "task_id", "id", "agent_id"},
)

if "tools" in task_data:
2 changes: 1 addition & 1 deletion agents-api/agents_api/common/protocol/sessions.py
@@ -127,7 +127,7 @@ def make_session(
*,
agents: list[UUID],
users: list[UUID],
**data: dict,
**data: dict[str, dict],
) -> Session:
"""
Create a new session object.
6 changes: 4 additions & 2 deletions agents-api/agents_api/common/protocol/tasks.py
@@ -171,7 +171,9 @@ def tools(self) -> list[Tool | CreateToolRequest]:
agent_tools = execution_input.agent_tools

step_tools: Literal["all"] | list[ToolRef | CreateToolRequest] = getattr(
self.current_step, "tools", "all"
self.current_step,
"tools",
"all",
)

if step_tools != "all":
@@ -186,7 +188,7 @@ def tools(self) -> list[Tool | CreateToolRequest]:
for tool in task.tools:
tool_def = tool.model_dump()
task_tools.append(
CreateToolRequest(**{tool_def["type"]: tool_def.pop("spec"), **tool_def})
CreateToolRequest(**{tool_def["type"]: tool_def.pop("spec"), **tool_def}),
)

if not task.inherit_tools:
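The tasks.py hunk above builds a CreateToolRequest by moving the generic "spec" payload under a key named after the tool's type; a rough illustration of that dict re-keying with made-up field values:

tool_def = {
    "type": "integration",
    "name": "wikipedia",
    "spec": {"provider": "wikipedia", "method": "search"},
}

# The key expression runs first, then pop() removes "spec", then the rest is unpacked:
# {"integration": {"provider": ..., "method": ...}, "type": "integration", "name": "wikipedia"}
payload = {tool_def["type"]: tool_def.pop("spec"), **tool_def}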