Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

human in the loop #2732

Open
4 tasks done
nicho2 opened this issue Dec 12, 2024 · 3 comments
Open
4 tasks done

human in the loop #2732

nicho2 opened this issue Dec 12, 2024 · 3 comments

Comments

@nicho2
Copy link

nicho2 commented Dec 12, 2024

Checked other resources

  • This is a bug, not a usage question. For questions, please use GitHub Discussions.
  • I added a clear and detailed title that summarizes the issue.
  • I read what a minimal reproducible example is (https://stackoverflow.com/help/minimal-reproducible-example).
  • I included a self-contained, minimal example that demonstrates the issue INCLUDING all the relevant imports. The code runs AS IS to reproduce the issue.

Example Code

def human_review_node(state) -> Command[Literal["assistant", "sensitive_tools"]]:
    """Pause the graph so a human can review the pending sensitive tool call.

    Surfaces the last AI message's tool call through ``interrupt`` and routes
    according to the reviewer's decision, supplied on resume as
    ``Command(resume={"action": ..., "data": ...})``.

    Args:
        state: Graph state; must contain "messages" whose last entry is an
            AI message carrying at least one tool call.

    Returns:
        Command routing to "sensitive_tools" (action "continue" or "update")
        or back to "assistant" (action "feedback").

    Raises:
        ValueError: If the resume payload carries an unrecognized action.
    """
    last_message = state["messages"][-1]
    tool_call = last_message.tool_calls[-1]

    # This is the value we'll be providing via Command(resume=<human_review>).
    human_review = interrupt(
        {
            "question": "Is this correct?",
            # Surface the tool call for review.
            "tool_call": tool_call,
        }
    )

    review_action = human_review["action"]
    review_data = human_review.get("data")

    # Approved: execute the tool with its original arguments.
    if review_action == "continue":
        return Command(goto="sensitive_tools")

    # Update the AI message AND call the tool with the edited arguments.
    if review_action == "update":
        updated_message = {
            "role": "ai",
            "content": last_message.content,
            "tool_calls": [
                {
                    "id": tool_call["id"],
                    "name": tool_call["name"],
                    # These are the replacement arguments provided by the human.
                    "args": review_data,
                }
            ],
            # Important: this id must match the message being replaced,
            # otherwise it will show up as a separate message.
            "id": last_message.id,
        }
        return Command(goto="sensitive_tools", update={"messages": [updated_message]})

    # Provide natural-language feedback back to the LLM.
    if review_action == "feedback":
        # NOTE: the feedback is added as a ToolMessage to preserve the correct
        # order in the message history (AI messages with tool calls must be
        # followed by tool messages).
        tool_message = {
            "role": "tool",
            # This is our natural language feedback.
            "content": review_data,
            "name": tool_call["name"],
            "tool_call_id": tool_call["id"],
        }
        return Command(goto="assistant", update={"messages": [tool_message]})

    # Fail loudly instead of silently returning None on an unknown action,
    # which would leave the graph in an ambiguous state.
    raise ValueError(f"Unknown review action: {review_action!r}")

Error Message and Stack Trace (if applicable)

human_review_node -> {'messages': [HumanMessage(content='je veux réserver le bureau alain de 12h à 13h', additional_kwargs={}, response_metadata={}, id='94a98107-f5cb-4ed8-b84e-2f43eea7a471'),
              AIMessage(content='', additional_kwargs={}, response_metadata={'model': 'llama3.3:latest', 'created_at': '2024-12-12T13:28:50.342536747Z', 'done': True, 'done_reason': 'stop', 'total_duration': 4300589516, 'load_duration': 19115485, 'prompt_eval_count': 1339, 'prompt_eval_duration': 1453000000, 'eval_count': 50, 'eval_duration': 2820000000, 'message': Message(role='assistant', content='', images=None, tool_calls=[ToolCall(function=Function(name='create_reservations', arguments={'end_datetime_str': '2024-12-12T13:00:00', 'start_datetime_str': '2024-12-12T12:00:00'}))])}, id='run-2ebb4e90-2d72-4314-b9cf-be499c0793ab-0', tool_calls=[{'name': 'create_reservations', 'args': {'end_datetime_str': '2024-12-12T13:00:00', 'start_datetime_str': '2024-12-12T12:00:00'}, 'id': 'ae703317-b1b5-45aa-baed-9d8e942a244d', 'type': 'tool_call'}], usage_metadata={'input_tokens': 1339, 'output_tokens': 50, 'total_tokens': 1389})],
 'user_info': {'userId': 2, 'username': 'admin'}}
[chain/start] [chain:/chat > chain:human_review_node] Entering Chain run with input:
[inputs]
[chain/error] [chain:/chat > chain:human_review_node] [4ms] Chain run errored with error:
"GraphInterrupt((Interrupt(value={'question': 'Is this correct?', 'tool_call': {'name': 'create_reservations', 'args': {'end_datetime_str': '2024-12-12T13:00:00', 'start_datetime_str': '2024-12-12T12:00:00'}, 'id': 'ae703317-b1b5-45aa-baed-9d8e942a244d', 'type': 'tool_call'}}, resumable=True, ns=['human_review_node:45e45478-3673-4df5-ef4f-f2492bf5fdf5'], when='during'),))


Traceback (most recent call last):\n\n\n  File \"C:\\Users\\suchaudn\\OneDrive - Legrand France\\PYTHON\\gimelec_agent\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py\", line 445, in ainvoke\n    input = await step.ainvoke(input, config, **kwargs)\n            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n  File \"C:\\Users\\suchaudn\\OneDrive - Legrand France\\PYTHON\\gimelec_agent\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py\", line 236, in ainvoke\n    ret = await asyncio.create_task(coro, context=context)\n          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n  File \"C:\\Users\\suchaudn\\OneDrive - Legrand France\\PYTHON\\gimelec_agent\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\config.py\", line 588, in run_in_executor\n    return await asyncio.get_running_loop().run_in_executor(\n           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n  File \"C:\\Python312\\Lib\\concurrent\\futures\\thread.py\", line 58, in run\n    result = self.fn(*self.args, **self.kwargs)\n             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n  File \"C:\\Users\\suchaudn\\OneDrive - Legrand France\\PYTHON\\gimelec_agent\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\config.py\", line 579, in wrapper\n    return func(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^^^\n\n\n  File \"C:\\Users\\suchaudn\\OneDrive - Legrand France\\PYTHON\\gimelec_agent\\backend\\assistants\\assistant.py\", line 87, in human_review_node\n    human_review = interrupt(\n                   ^^^^^^^^^^\n\n\n  File \"C:\\Users\\suchaudn\\OneDrive - Legrand France\\PYTHON\\gimelec_agent\\.venv\\Lib\\site-packages\\langgraph\\types.py\", line 390, in interrupt\n    raise GraphInterrupt(\n\n\nlanggraph.errors.GraphInterrupt: (Interrupt(value={'question': 'Is this correct?', 'tool_call': {'name': 'create_reservations', 'args': {'end_datetime_str': '2024-12-12T13:00:00', 'start_datetime_str': '2024-12-12T12:00:00'}, 'id': 
'ae703317-b1b5-45aa-baed-9d8e942a244d', 'type': 'tool_call'}}, resumable=True, ns=['human_review_node:45e45478-3673-4df5-ef4f-f2492bf5fdf5'], when='during'),)"
[chain/end] [chain:/chat] [4.37s] Exiting Chain run with output:
[outputs]
2024-12-12 14:28:48,773 - langchain_core.callbacks.manager - WARNING - Error in LangChainTracer.on_chain_error callback: TracerException('No indexed run ID f0a5023c-2ff7-4c5e-99cf-447923f7236b.')
2024-12-12 14:28:48,774 - langchain_core.callbacks.manager - WARNING - Error in ConsoleCallbackHandler.on_chain_error callback: TracerException('No indexed run ID f0a5023c-2ff7-4c5e-99cf-447923f7236b.')

Description

I am trying to use human-in-the-loop with LangGraph, but I get the error listed above.

my graph is :

%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
	__start__([<p>__start__</p>]):::first
	fetch_user_info(fetch_user_info)
	assistant(assistant)
	safe_tools(safe_tools)
	human_review_node(human_review_node)
	sensitive_tools(sensitive_tools)
	__end__([<p>__end__</p>]):::last
	__start__ --> fetch_user_info;
	fetch_user_info --> assistant;
	safe_tools --> assistant;
	sensitive_tools --> assistant;
	assistant -.-> safe_tools;
	assistant -.-> human_review_node;
	assistant -.-> __end__;
	human_review_node -.-> assistant;
	human_review_node -.-> sensitive_tools;
	classDef default fill:#f2f0ff,line-height:1.2
	classDef first fill-opacity:0
	classDef last fill:#bfb6fc
Loading

System Info

System Information

OS: Windows
OS Version: 10.0.19045
Python Version: 3.12.7 (tags/v3.12.7:0b05ead, Oct 1 2024, 03:06:41) [MSC v.1941 64 bit (AMD64)]

Package Information

langchain_core: 0.3.24
langchain: 0.3.11
langchain_community: 0.3.11
langsmith: 0.2.3
langchain_ollama: 0.2.1
langchain_openai: 0.2.11
langchain_text_splitters: 0.3.2
langgraph_sdk: 0.1.43
langserve: 0.3.0

Other Dependencies

aiohttp: 3.11.10
async-timeout: Installed. No version info available.
dataclasses-json: 0.6.7
fastapi: 0.115.6
httpx: 0.27.2
httpx-sse: 0.4.0
jsonpatch: 1.33
langsmith-pyo3: Installed. No version info available.
numpy: 2.1.3
ollama: 0.4.2
openai: 1.57.0
orjson: 3.10.12
packaging: 24.2
pydantic: 2.10.3
pydantic-settings: 2.6.1
PyYAML: 6.0.2
requests: 2.32.3
requests-toolbelt: 1.0.0
SQLAlchemy: 2.0.36
sse-starlette: 2.1.3
tenacity: 9.0.0
tiktoken: 0.8.0
typing-extensions: 4.12.2

@vbarda
Copy link
Collaborator

vbarda commented Dec 12, 2024

@nicho2 can you provide a code snippet with the full graph and how you're invoking it? (ie minimal reproducible example)

@nicho2
Copy link
Author

nicho2 commented Dec 12, 2024

def create_graph() -> CompiledGraph:
    """Build and compile the assistant graph with a human-review gate
    placed in front of the sensitive tools.

    Returns:
        The compiled graph, checkpointed in memory.
    """
    # In-RAM checkpointing; a SQLite or Postgres checkpointer can be used
    # instead for persistence across restarts.
    memory = MemorySaver()

    builder = StateGraph(State, ConfigurableForGraph)

    # The fetch_user_info node runs first, meaning our assistant can see the
    # user's flight information without having to take an action.
    builder.add_node("fetch_user_info", user_info)
    builder.add_edge(START, "fetch_user_info")

    assistant_runnable, safe_tools, sensitive_tools = create_chain()
    builder.add_node("assistant", Assistant(assistant_runnable))
    builder.add_node("safe_tools", create_tool_node_with_fallback(safe_tools))
    builder.add_node(human_review_node)
    builder.add_node(
        "sensitive_tools", create_tool_node_with_fallback(sensitive_tools)
    )

    builder.add_edge("fetch_user_info", "assistant")
    builder.add_conditional_edges(
        "assistant",
        route_tools,
        ["safe_tools", "human_review_node", END],
    )
    builder.add_edge("safe_tools", "assistant")
    builder.add_edge("sensitive_tools", "assistant")

    # NOTE(review): a checkpointer is required for interrupt()/Command(resume=...)
    # to work — the graph halts inside human_review_node and resumes from the
    # saved checkpoint, so no interrupt_before is needed here.
    graph = builder.compile(
        checkpointer=memory,
        debug=True,
    )
    print(graph.get_graph().draw_mermaid())
    return graph

@vbarda
Copy link
Collaborator

vbarda commented Dec 12, 2024

@nicho2 thanks, but could you actually expand it with the actual node implementations (could be some fake code as long as it reproduces the actual issue) https://stackoverflow.com/help/minimal-reproducible-example

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants