Merge pull request #151 from vvincent1234/feat/deepseek-r1
Feat/deepseek r1
warmshao authored Jan 25, 2025
2 parents 6e55321 + 96d02b5 commit 5bc4978
Showing 8 changed files with 289 additions and 100 deletions.
81 changes: 62 additions & 19 deletions src/agent/custom_agent.py
@@ -95,6 +95,12 @@ def __init__(
max_actions_per_step=max_actions_per_step,
tool_call_in_content=tool_call_in_content,
)
if self.llm.model_name in ["deepseek-reasoner"]:
self.use_function_calling = False
# TODO: deepseek-reasoner only supports a 64000-token context
self.max_input_tokens = 64000
else:
self.use_function_calling = True
self.add_infos = add_infos
self.agent_state = agent_state
self.message_manager = CustomMassageManager(
@@ -107,6 +113,7 @@ def __init__(
max_error_length=self.max_error_length,
max_actions_per_step=self.max_actions_per_step,
tool_call_in_content=tool_call_in_content,
use_function_calling=self.use_function_calling
)
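
For context, a minimal sketch (not from this commit) of wiring an agent up to hit the new branch above — the ChatOpenAI import, the DeepSeek endpoint URL, and the placeholder key are illustrative assumptions:

from langchain_openai import ChatOpenAI

# DeepSeek's chat API is OpenAI-compatible; "deepseek-reasoner" selects the R1 model.
# The base_url and api_key values are illustrative assumptions, not taken from this commit.
llm = ChatOpenAI(
    model="deepseek-reasoner",
    base_url="https://api.deepseek.com",
    api_key="sk-your-key-here",
)

# Mirrors the gate added in __init__ above: R1 exposes no native tool calling,
# so structured output is disabled and the input budget is capped.
use_function_calling = llm.model_name not in ["deepseek-reasoner"]
if not use_function_calling:
    max_input_tokens = 64000  # context cap noted in the TODO above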

def _setup_action_models(self) -> None:
@@ -127,7 +134,8 @@ def _log_response(self, response: CustomAgentOutput) -> None:

logger.info(f"{emoji} Eval: {response.current_state.prev_action_evaluation}")
logger.info(f"🧠 New Memory: {response.current_state.important_contents}")
logger.info(f"⏳ Task Progress: {response.current_state.completed_contents}")
logger.info(f"⏳ Task Progress: \n{response.current_state.task_progress}")
logger.info(f"📋 Future Plans: \n{response.current_state.future_plans}")
logger.info(f"🤔 Thought: {response.current_state.thought}")
logger.info(f"🎯 Summary: {response.current_state.summary}")
for i, action in enumerate(response.action):
@@ -153,28 +161,54 @@ def update_step_info(
):
step_info.memory += important_contents + "\n"

completed_contents = model_output.current_state.completed_contents
if completed_contents and "None" not in completed_contents:
step_info.task_progress = completed_contents
task_progress = model_output.current_state.task_progress
if task_progress and "None" not in task_progress:
step_info.task_progress = task_progress

future_plans = model_output.current_state.future_plans
if future_plans and "None" not in future_plans:
step_info.future_plans = future_plans

@time_execution_async("--get_next_action")
async def get_next_action(self, input_messages: list[BaseMessage]) -> AgentOutput:
"""Get next action from LLM based on current state"""
try:
structured_llm = self.llm.with_structured_output(self.AgentOutput, include_raw=True)
response: dict[str, Any] = await structured_llm.ainvoke(input_messages) # type: ignore
if self.use_function_calling:
try:
structured_llm = self.llm.with_structured_output(self.AgentOutput, include_raw=True)
response: dict[str, Any] = await structured_llm.ainvoke(input_messages) # type: ignore

parsed: AgentOutput = response['parsed']
# cut the number of actions to max_actions_per_step
parsed.action = parsed.action[: self.max_actions_per_step]
self._log_response(parsed)
self.n_steps += 1
parsed: AgentOutput = response['parsed']
# cut the number of actions to max_actions_per_step
parsed.action = parsed.action[: self.max_actions_per_step]
self._log_response(parsed)
self.n_steps += 1

return parsed
except Exception as e:
# If something goes wrong, try to invoke the LLM again without structured output,
# and manually parse the response. Temporary solution for DeepSeek
return parsed
except Exception as e:
# If something goes wrong, try to invoke the LLM again without structured output,
# and manually parse the response. Temporary solution for DeepSeek
ret = self.llm.invoke(input_messages)
if isinstance(ret.content, list):
parsed_json = json.loads(ret.content[0].replace("```json", "").replace("```", ""))
else:
parsed_json = json.loads(ret.content.replace("```json", "").replace("```", ""))
parsed: AgentOutput = self.AgentOutput(**parsed_json)
if parsed is None:
raise ValueError(f'Could not parse response.')

# cut the number of actions to max_actions_per_step
parsed.action = parsed.action[: self.max_actions_per_step]
self._log_response(parsed)
self.n_steps += 1

return parsed
else:
ret = self.llm.invoke(input_messages)
if not self.use_function_calling:
self.message_manager._add_message_with_tokens(ret)
logger.info(f"🤯 Start Deep Thinking: ")
logger.info(ret.reasoning_content)
logger.info(f"🤯 End Deep Thinking")
if isinstance(ret.content, list):
parsed_json = json.loads(ret.content[0].replace("```json", "").replace("```", ""))
else:
@@ -204,14 +238,22 @@ async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
input_messages = self.message_manager.get_messages()
model_output = await self.get_next_action(input_messages)
self.update_step_info(model_output, step_info)
logger.info(f"🧠 All Memory: {step_info.memory}")
logger.info(f"🧠 All Memory: \n{step_info.memory}")
self._save_conversation(input_messages, model_output)
self.message_manager._remove_last_state_message() # we dont want the whole state in the chat history
self.message_manager.add_model_output(model_output)
if self.use_function_calling:
self.message_manager._remove_last_state_message() # we dont want the whole state in the chat history
self.message_manager.add_model_output(model_output)

result: list[ActionResult] = await self.controller.multi_act(
model_output.action, self.browser_context
)
if len(result) != len(model_output.action):
for ri in range(len(result), len(model_output.action)):
result.append(ActionResult(extracted_content=None,
include_in_memory=True,
error=f"{model_output.action[ri].model_dump_json(exclude_unset=True)} is Failed to execute. \
Something new appeared after action {model_output.action[len(result) - 1].model_dump_json(exclude_unset=True)}",
is_done=False))
self._last_result = result

if len(result) > 0 and result[-1].is_done:
@@ -369,6 +411,7 @@ async def run(self, max_steps: int = 100) -> AgentHistoryList:
max_steps=max_steps,
memory="",
task_progress="",
future_plans=""
)

for step in range(max_steps):
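Before moving on to the next file: both the exception fallback and the use_function_calling=False branch of get_next_action above recover a JSON object from the model's raw text by stripping a ```json fence and calling json.loads. A self-contained sketch of that parsing step; the helper name and the sample payload are illustrative, not from the commit:

import json

def parse_fenced_json(raw: str) -> dict:
    # Strip an optional ```json ... ``` fence, as the fallback path above does,
    # then parse what remains as JSON.
    cleaned = raw.replace("```json", "").replace("```", "").strip()
    return json.loads(cleaned)

sample = """```json
{"current_state": {"thought": "The search box is focused."}, "action": []}
```"""
parsed = parse_fenced_json(sample)
print(parsed["current_state"]["thought"])  # -> The search box is focused.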
85 changes: 41 additions & 44 deletions src/agent/custom_massage_manager.py
@@ -39,6 +39,7 @@ def __init__(
max_error_length: int = 400,
max_actions_per_step: int = 10,
tool_call_in_content: bool = False,
use_function_calling: bool = True
):
super().__init__(
llm=llm,
@@ -53,63 +54,59 @@ def __init__(
max_actions_per_step=max_actions_per_step,
tool_call_in_content=tool_call_in_content,
)

self.use_function_calling = use_function_calling
# Custom: Move Task info to state_message
self.history = MessageHistory()
self._add_message_with_tokens(self.system_prompt)
tool_calls = [
{
'name': 'CustomAgentOutput',
'args': {
'current_state': {
'prev_action_evaluation': 'Unknown - No previous actions to evaluate.',
'important_contents': '',
'completed_contents': '',
'thought': 'Now Google is open. Need to type OpenAI to search.',
'summary': 'Type OpenAI to search.',

if self.use_function_calling:
tool_calls = [
{
'name': 'CustomAgentOutput',
'args': {
'current_state': {
'prev_action_evaluation': 'Unknown - No previous actions to evaluate.',
'important_contents': '',
'completed_contents': '',
'thought': 'Now Google is open. Need to type OpenAI to search.',
'summary': 'Type OpenAI to search.',
},
'action': [],
},
'action': [],
},
'id': '',
'type': 'tool_call',
}
]
if self.tool_call_in_content:
# openai throws error if tool_calls are not responded -> move to content
example_tool_call = AIMessage(
content=f'{tool_calls}',
tool_calls=[],
)
else:
example_tool_call = AIMessage(
content=f'',
tool_calls=tool_calls,
)
'id': '',
'type': 'tool_call',
}
]
if self.tool_call_in_content:
# openai throws error if tool_calls are not responded -> move to content
example_tool_call = AIMessage(
content=f'{tool_calls}',
tool_calls=[],
)
else:
example_tool_call = AIMessage(
content=f'',
tool_calls=tool_calls,
)

self._add_message_with_tokens(example_tool_call)
self._add_message_with_tokens(example_tool_call)

def cut_messages(self):
"""Get current message list, potentially trimmed to max tokens"""
diff = self.history.total_tokens - self.max_input_tokens
i = 1 # start from 1 to keep system message in history
while diff > 0 and i < len(self.history.messages):
self.history.remove_message(i)
diff = self.history.total_tokens - self.max_input_tokens
i += 1

def add_state_message(
self,
state: BrowserState,
result: Optional[List[ActionResult]] = None,
step_info: Optional[AgentStepInfo] = None,
) -> None:
"""Add browser state as human message"""

# if kept in memory, add directly to history and add state without result
if result:
for r in result:
if r.include_in_memory:
if r.extracted_content:
msg = HumanMessage(content=str(r.extracted_content))
self._add_message_with_tokens(msg)
if r.error:
msg = HumanMessage(
content=str(r.error)[-self.max_error_length:]
)
self._add_message_with_tokens(msg)
result = None # if result in history, we dont want to add it again

# otherwise add state message and result to next message (which will not stay in memory)
state_message = CustomAgentMessagePrompt(
state,
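As an aside, the cut_messages method shown above is what ultimately enforces the 64000-token cap set for deepseek-reasoner: it drops the oldest non-system messages until the history fits the budget. A toy illustration of that policy; the simplified history class is an assumption standing in for the real MessageHistory:

class ToyHistory:
    """Stand-in for MessageHistory that only tracks per-message token counts."""
    def __init__(self, token_counts):
        self.tokens = list(token_counts)

    @property
    def total_tokens(self):
        return sum(self.tokens)

    def remove_message(self, i):
        self.tokens.pop(i)

def cut_messages(history, max_input_tokens):
    # Same loop shape as CustomMassageManager.cut_messages above:
    # start at index 1 so the system message at index 0 is never dropped.
    diff = history.total_tokens - max_input_tokens
    i = 1
    while diff > 0 and i < len(history.tokens):
        history.remove_message(i)
        diff = history.total_tokens - max_input_tokens
        i += 1

history = ToyHistory([500, 30000, 30000, 10000])   # system prompt + three turns
cut_messages(history, max_input_tokens=64000)
print(history.tokens)                              # -> [500, 30000, 10000]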
50 changes: 29 additions & 21 deletions src/agent/custom_prompts.py
@@ -3,7 +3,7 @@
# @Author : wenshao
# @ProjectName: browser-use-webui
# @FileName: custom_prompts.py

import pdb
from typing import List, Optional

from browser_use.agent.prompts import SystemPrompt
@@ -25,8 +25,9 @@ def important_rules(self) -> str:
"current_state": {
"prev_action_evaluation": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Ignore the action result. The website is the ground truth. Also mention if something unexpected happened like new suggestions in an input field. Shortly state why/why not. Note that the result you output must be consistent with the reasoning you output afterwards. If you consider it to be 'Failed,' you should reflect on this during your thought.",
"important_contents": "Output important contents closely related to user\'s instruction or task on the current page. If there is, please output the contents. If not, please output empty string ''.",
"completed_contents": "Update the input Task Progress. Completed contents is a general summary of the current contents that have been completed. Just summarize the contents that have been actually completed based on the current page and the history operations. Please list each completed item individually, such as: 1. Input username. 2. Input Password. 3. Click confirm button",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If the output of prev_action_evaluation is 'Failed', please reflect and output your reflection here. If you think you have entered the wrong page, consider to go back to the previous page in next action.",
"task_progress": "Task Progress is a general summary of the current contents that have been completed. Just summarize the contents that have been actually completed based on the content at current step and the history operations. Please list each completed item individually, such as: 1. Input username. 2. Input Password. 3. Click confirm button. Please return string type not a list.",
"future_plans": "Based on the user's request and the current state, outline the remaining steps needed to complete the task. This should be a concise list of actions yet to be performed, such as: 1. Select a date. 2. Choose a specific time slot. 3. Confirm booking. Please return string type not a list.",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If your output of prev_action_evaluation is 'Failed', please reflect and output your reflection here.",
"summary": "Please generate a brief natural language description for the operation in next actions based on your Thought."
},
"action": [
@@ -70,6 +71,7 @@ def important_rules(self) -> str:
- Don't hallucinate actions.
- If the task requires specific information - make sure to include everything in the done function. This is what the user will see.
- If you are running out of steps (current step), think about speeding it up, and ALWAYS use the done action as the last action.
- Note that you must verify whether you've truly fulfilled the user's request by examining the actual page content, not just the actions you output, and check whether each action executed successfully. Pay particular attention when errors occur during action execution.
6. VISUAL CONTEXT:
- When an image is provided, use it to understand the page layout
@@ -100,10 +102,9 @@ def input_format(self) -> str:
1. Task: The user\'s instructions you need to complete.
2. Hints(Optional): Some hints to help you complete the user\'s instructions.
3. Memory: Important contents are recorded during historical operations for use in subsequent operations.
4. Task Progress: Up to the current page, the content you have completed can be understood as the progress of the task.
5. Current URL: The webpage you're currently on
6. Available Tabs: List of open browser tabs
7. Interactive Elements: List in the format:
4. Current URL: The webpage you're currently on
5. Available Tabs: List of open browser tabs
6. Interactive Elements: List in the format:
index[:]<element_type>element_text</element_type>
- index: Numeric identifier for interaction
- element_type: HTML element type (button, input, etc.)
@@ -162,20 +163,27 @@ def __init__(
self.step_info = step_info

def get_user_message(self) -> HumanMessage:
if self.step_info:
step_info_description = f'Current step: {self.step_info.step_number + 1}/{self.step_info.max_steps}'
else:
step_info_description = ''

elements_text = self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)
if not elements_text:
elements_text = 'empty page'
state_description = f"""
1. Task: {self.step_info.task}
2. Hints(Optional):
{self.step_info.add_infos}
3. Memory:
{self.step_info.memory}
4. Task Progress:
{self.step_info.task_progress}
5. Current url: {self.state.url}
6. Available tabs:
{self.state.tabs}
7. Interactive elements:
{self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)}
"""
{step_info_description}
1. Task: {self.step_info.task}
2. Hints(Optional):
{self.step_info.add_infos}
3. Memory:
{self.step_info.memory}
4. Current url: {self.state.url}
5. Available tabs:
{self.state.tabs}
6. Interactive elements:
{elements_text}
"""

if self.result:
for i, result in enumerate(self.result):
@@ -202,4 +210,4 @@ def get_user_message(self) -> HumanMessage:
]
)

return HumanMessage(content=state_description)
return HumanMessage(content=state_description)
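
To make the revised output schema concrete, here is a purely illustrative response in the shape the updated prompt asks for — every value, including the click_element action, is invented for the example rather than taken from the commit:

example_model_output = {
    "current_state": {
        "prev_action_evaluation": "Success - The query was typed into the search box as intended.",
        "important_contents": "",
        "task_progress": "1. Opened google.com. 2. Typed 'OpenAI' into the search box.",
        "future_plans": "1. Submit the search. 2. Open the most relevant result.",
        "thought": "The query is in place; the next single operation is to submit the search.",
        "summary": "Click the search button.",
    },
    # Action names and parameters depend on the registered controller actions;
    # this entry is only a placeholder for the example.
    "action": [{"click_element": {"index": 12}}],
}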
4 changes: 3 additions & 1 deletion src/agent/custom_views.py
@@ -20,14 +20,16 @@ class CustomAgentStepInfo:
add_infos: str
memory: str
task_progress: str
future_plans: str


class CustomAgentBrain(BaseModel):
"""Current state of the agent"""

prev_action_evaluation: str
important_contents: str
completed_contents: str
task_progress: str
future_plans: str
thought: str
summary: str

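Finally, a small sketch confirming that the renamed and added fields line up with the prompt schema; it assumes the package is importable as src.agent.custom_views, and all field values are placeholders:

from src.agent.custom_views import CustomAgentBrain

brain = CustomAgentBrain(
    prev_action_evaluation="Unknown - No previous actions to evaluate.",
    important_contents="",
    task_progress="",
    future_plans="1. Open the target page. 2. Locate the login form.",
    thought="Nothing has been done yet; start by navigating to the URL.",
    summary="Navigate to the target URL.",
)
print(brain.task_progress, brain.future_plans)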