Setup¶
First we need to install the packages required
%%capture --no-stderr
%pip install --quiet -U langgraph langchain_anthropic
Next, we need to set API keys for Anthropic (the LLM we will use)
import getpass
import os
def _set_env(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("ANTHROPIC_API_KEY")
Simple Usage¶
Let's set up a very simple graph that facilitates this. First, we make an LLM call that decides which action to take. Then we go to a human node. This node doesn't actually do anything by itself - the idea is that we interrupt before this node and then apply any updates to the state. After that, we check the state and either route back to the LLM or on to the correct tool.
Let's see how this works in practice!
from typing_extensions import TypedDict, Literal
from langgraph.graph import StateGraph, START, END, MessagesState
from langgraph.checkpoint.memory import MemorySaver
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langchain_core.messages import AIMessage
from IPython.display import Image, display
@tool
def weather_search(city: str):
"""Search for the weather"""
print("----")
print(f"Searching for: {city}")
print("----")
return "Sunny!"
model = ChatAnthropic(model_name="claude-3-5-sonnet-20240620").bind_tools(
    [weather_search]
)
class State(MessagesState):
"""Simple state."""
def call_llm(state):
return {"messages": [model.invoke(state["messages"])]}
def human_review_node(state):
    # Intentionally a no-op: we interrupt *before* this node so a human can review
    # the pending tool call and apply any state updates as this node.
    pass
def run_tool(state):
    new_messages = []
    tools = {"weather_search": weather_search}
    tool_calls = state["messages"][-1].tool_calls
    for tool_call in tool_calls:
        tool = tools[tool_call["name"]]
        result = tool.invoke(tool_call["args"])
        new_messages.append(
            {
                "role": "tool",
                "name": tool_call["name"],
                "content": result,
                "tool_call_id": tool_call["id"],
            }
        )
    return {"messages": new_messages}
def route_after_llm(state) -> Literal[END, "human_review_node"]:
    if len(state["messages"][-1].tool_calls) == 0:
        return END
    else:
        return "human_review_node"
def route_after_human(state) -> Literal["run_tool", "call_llm"]:
    if isinstance(state["messages"][-1], AIMessage):
        return "run_tool"
    else:
        return "call_llm"
builder = StateGraph(State)
builder.add_node(call_llm)
builder.add_node(run_tool)
builder.add_node(human_review_node)
builder.add_edge(START, "call_llm")
builder.add_conditional_edges("call_llm", route_after_llm)
builder.add_conditional_edges("human_review_node", route_after_human)
builder.add_edge("run_tool", "call_llm")
# Set up memory
memory = MemorySaver()
# Compile the graph with a breakpoint before human_review_node
graph = builder.compile(checkpointer=memory, interrupt_before=["human_review_node"])
# View
display(Image(graph.get_graph().draw_mermaid_png()))
Example with no review¶
Let's take a look at an example where no review is needed (because no tools are called).
# Input
initial_input = {"messages": [{"role": "user", "content": "hi!"}]}
# Thread
thread = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
    print(event)
{'messages': [HumanMessage(content='hi!', id='393fa21d-4bfb-445b-8faa-78e22b92e346')]} {'messages': [HumanMessage(content='hi!', id='393fa21d-4bfb-445b-8faa-78e22b92e346'), AIMessage(content="Hello! Welcome to our conversation. How can I assist you today? Is there anything specific you'd like to know or discuss?", response_metadata={'id': 'msg_017S671xYvZm1mi9EcsKvPzF', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 355, 'output_tokens': 29}}, id='run-8ec507a1-5caf-47d6-89eb-1a2e8f38423c-0', usage_metadata={'input_tokens': 355, 'output_tokens': 29, 'total_tokens': 384})]}
If we check the state, we can see that it has finished.
print("Pending Executions!")
print(graph.get_state(thread).next)
Pending Executions! ()
Example of approving a tool call¶
Now let's take a look at what it looks like to approve a tool call.
# Input
initial_input = {"messages": [{"role": "user", "content": "what's the weather in sf?"}]}
# Thread
thread = {"configurable": {"thread_id": "2"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
    print(event)
{'messages': [HumanMessage(content="what's the weather in sf?", id='8bda37cc-4bd3-4a14-bca5-b992934e710b')]} {'messages': [HumanMessage(content="what's the weather in sf?", id='8bda37cc-4bd3-4a14-bca5-b992934e710b'), AIMessage(content=[{'text': 'To get the weather information for San Francisco, I can use the weather_search function. Let me do that for you.', 'type': 'text'}, {'id': 'toolu_01MW3ETLpq4b8s6VaAMgDBZP', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_019FjC1prjVv8BuQX7DmF65F', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 80}}, id='run-1b580410-173c-4fe0-a149-22e8f516b259-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_01MW3ETLpq4b8s6VaAMgDBZP', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 80, 'total_tokens': 440})]}
If we check the state now, we can see that it is waiting on human review.
print("Pending Executions!")
print(graph.get_state(thread).next)
Pending Executions! ('human_review_node',)
To approve the tool call, we can just continue the thread with no edits. To do so, we simply create a new run with no input.
for event in graph.stream(None, thread, stream_mode="values"):
    print(event)
---- Searching for: San Francisco ---- {'messages': [HumanMessage(content="what's the weather in sf?", id='8bda37cc-4bd3-4a14-bca5-b992934e710b'), AIMessage(content=[{'text': 'To get the weather information for San Francisco, I can use the weather_search function. Let me do that for you.', 'type': 'text'}, {'id': 'toolu_01MW3ETLpq4b8s6VaAMgDBZP', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_019FjC1prjVv8BuQX7DmF65F', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 80}}, id='run-1b580410-173c-4fe0-a149-22e8f516b259-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_01MW3ETLpq4b8s6VaAMgDBZP', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 80, 'total_tokens': 440}), ToolMessage(content='Sunny!', name='weather_search', id='835b0fe3-8aa0-45d5-ac29-03bbe57cc767', tool_call_id='toolu_01MW3ETLpq4b8s6VaAMgDBZP')]} {'messages': [HumanMessage(content="what's the weather in sf?", id='8bda37cc-4bd3-4a14-bca5-b992934e710b'), AIMessage(content=[{'text': 'To get the weather information for San Francisco, I can use the weather_search function. Let me do that for you.', 'type': 'text'}, {'id': 'toolu_01MW3ETLpq4b8s6VaAMgDBZP', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_019FjC1prjVv8BuQX7DmF65F', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 80}}, id='run-1b580410-173c-4fe0-a149-22e8f516b259-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_01MW3ETLpq4b8s6VaAMgDBZP', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 80, 'total_tokens': 440}), ToolMessage(content='Sunny!', name='weather_search', id='835b0fe3-8aa0-45d5-ac29-03bbe57cc767', tool_call_id='toolu_01MW3ETLpq4b8s6VaAMgDBZP'), AIMessage(content="Based on the search results, the weather in San Francisco is sunny! It's a beautiful day in the city. Is there anything else you'd like to know about the weather or any other information I can help you with?", response_metadata={'id': 'msg_01UY2d6RCzvwagwMb1J5etek', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 453, 'output_tokens': 49}}, id='run-7137f52c-abe6-4dc1-b536-92dd1d9187b0-0', usage_metadata={'input_tokens': 453, 'output_tokens': 49, 'total_tokens': 502})]}
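When the graph is driven from application code rather than interactively, the pending-node tuple can double as the approval gate. A minimal sketch, assuming a helper of our own naming (not part of this example):

# Hypothetical helper: resume the thread only if it is parked at the review node.
# Resuming with `None` continues execution without edits, i.e. it approves the
# pending tool call as-is.
def approve_if_waiting(graph, thread):
    if "human_review_node" in graph.get_state(thread).next:
        for event in graph.stream(None, thread, stream_mode="values"):
            print(event)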
Edit Tool Call¶
Now let's say we want to edit the tool call. E.g. change some of the parameters (or even the tool being called!) and then execute that tool.
# Input
initial_input = {"messages": [{"role": "user", "content": "what's the weather in sf?"}]}
# Thread
thread = {"configurable": {"thread_id": "5"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
    print(event)
{'messages': [HumanMessage(content="what's the weather in sf?", id='0c488edd-7b9c-4416-ba02-8a2d7e9f2597')]} {'messages': [HumanMessage(content="what's the weather in sf?", id='0c488edd-7b9c-4416-ba02-8a2d7e9f2597'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get this information, I'll use the weather search tool. Let me fetch that for you.", 'type': 'text'}, {'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_01Mv7iqdtPgZEX2LiBBqWDuY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 88}}, id='run-52a09799-efb5-4fff-82c3-884e20119ad3-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 88, 'total_tokens': 448})]}
print("Pending Executions!")
print(graph.get_state(thread).next)
Pending Executions! ('human_review_node',)
To do this, we first need to update the state. We can do that by passing in a message with the same id as the message we want to overwrite, which has the effect of replacing the old message. Note that this only works because the reducer we are using replaces messages that share the same ID - you can read more about that here.
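As a rough illustration of that replace-by-ID behavior (MessagesState is backed by langgraph's add_messages reducer), consider the following standalone sketch:

# Illustrative sketch of the reducer behavior described above.
from langchain_core.messages import AIMessage
from langgraph.graph.message import add_messages

existing = [AIMessage(content="original", id="msg-1")]

# A message with a new id gets appended...
print(add_messages(existing, [AIMessage(content="extra", id="msg-2")]))

# ...while a message with the same id replaces the original one.
print(add_messages(existing, [AIMessage(content="edited", id="msg-1")]))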
# To get the ID of the message we want to replace, we need to fetch the current state and find it there.
state = graph.get_state(thread)
print("Current State:")
print(state.values)
print("\nCurrent Tool Call ID:")
current_content = state.values["messages"][-1].content
current_id = state.values["messages"][-1].id
tool_call_id = state.values["messages"][-1].tool_calls[0]["id"]
print(tool_call_id)
# We now need to construct a replacement tool call.
# We will change the argument to be `San Francisco, USA`
# Note that we could change any number of arguments or tool names - it just has to be a valid one
new_message = {
    "role": "assistant",
    "content": current_content,
    "tool_calls": [
        {
            "id": tool_call_id,
            "name": "weather_search",
            "args": {"city": "San Francisco, USA"},
        }
    ],
    # This is important - this needs to be the same as the message you are replacing!
    # Otherwise, it will show up as a separate message
    "id": current_id,
}
graph.update_state(
    # This is the config which represents this thread
    thread,
    # This is the updated value we want to push
    {"messages": [new_message]},
    # We push this update acting as our human_review_node
    as_node="human_review_node",
)
# Let's now continue executing from here
for event in graph.stream(None, thread, stream_mode="values"):
    print(event)
Current State: {'messages': [HumanMessage(content="what's the weather in sf?", id='0c488edd-7b9c-4416-ba02-8a2d7e9f2597'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get this information, I'll use the weather search tool. Let me fetch that for you.", 'type': 'text'}, {'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_01Mv7iqdtPgZEX2LiBBqWDuY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 88}}, id='run-52a09799-efb5-4fff-82c3-884e20119ad3-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 88, 'total_tokens': 448})]} Current Tool Call ID: toolu_01CpbVmprQnjxpQzx8MzE1g8 ---- Searching for: San Francisco, USA ---- {'messages': [HumanMessage(content="what's the weather in sf?", id='0c488edd-7b9c-4416-ba02-8a2d7e9f2597'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get this information, I'll use the weather search tool. Let me fetch that for you.", 'type': 'text'}, {'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], id='run-52a09799-efb5-4fff-82c3-884e20119ad3-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco, USA'}, 'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'type': 'tool_call'}]), ToolMessage(content='Sunny!', name='weather_search', id='ff968b9f-9b87-4893-9f32-dfb88dbe0536', tool_call_id='toolu_01CpbVmprQnjxpQzx8MzE1g8')]} {'messages': [HumanMessage(content="what's the weather in sf?", id='0c488edd-7b9c-4416-ba02-8a2d7e9f2597'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get this information, I'll use the weather search tool. Let me fetch that for you.", 'type': 'text'}, {'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], id='run-52a09799-efb5-4fff-82c3-884e20119ad3-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco, USA'}, 'id': 'toolu_01CpbVmprQnjxpQzx8MzE1g8', 'type': 'tool_call'}]), ToolMessage(content='Sunny!', name='weather_search', id='ff968b9f-9b87-4893-9f32-dfb88dbe0536', tool_call_id='toolu_01CpbVmprQnjxpQzx8MzE1g8'), AIMessage(content="Great news! The weather in San Francisco is currently sunny. It's a beautiful day in the city by the bay. Is there anything else you'd like to know about the weather or any other information I can help you with?", response_metadata={'id': 'msg_01PhwUeRWkSJB6kzHZS361XZ', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 464, 'output_tokens': 50}}, id='run-5aebcf37-626e-4675-b225-476bc99bdbb8-0', usage_metadata={'input_tokens': 464, 'output_tokens': 50, 'total_tokens': 514})]}
Give feedback to a tool call¶
Sometimes you may not want to execute a tool call, but you also may not want to ask the user to manually modify it. In that case it can be better to get natural-language feedback from the user, and then insert that feedback as a mock result of the tool call.
There are multiple ways to do this:
- You could add a new message to the state (representing the "result" of the tool call)
- You could add two new messages to the state - one representing an "error" from the tool call, and another HumanMessage representing the feedback
Both are similar in that they involve adding messages to the state. The main difference is the logic that runs after human_review_node and how it handles the different message types (a sketch of the second option is shown below).
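For reference, the second option might look roughly like the sketch below. The message contents here are illustrative assumptions, and tool_call_id stands for the ID of the pending tool call (fetched from the state exactly as in the cells that follow). Since the last message added is a HumanMessage, route_after_human would send the graph back to call_llm.

# Sketch of option 2 (illustrative only): mark the tool call as not executed,
# then attach the user's feedback as a separate HumanMessage.
error_message = {
    "role": "tool",
    "content": "Tool call was not executed.",  # assumed wording
    "name": "weather_search",
    "tool_call_id": tool_call_id,  # ID of the pending tool call
}
feedback_message = {
    "role": "user",
    "content": "Please include the country as well as the city.",  # assumed wording
}
graph.update_state(
    thread,
    {"messages": [error_message, feedback_message]},
    as_node="human_review_node",
)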
For this example we will just add a single tool message representing the feedback. Let's see how it works!
# Input
initial_input = {"messages": [{"role": "user", "content": "what's the weather in sf?"}]}
# Thread
thread = {"configurable": {"thread_id": "6"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
    print(event)
{'messages': [HumanMessage(content="what's the weather in sf?", id='601c4c75-f506-4d91-896d-5e382123de24')]} {'messages': [HumanMessage(content="what's the weather in sf?", id='601c4c75-f506-4d91-896d-5e382123de24'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get the most accurate and up-to-date information, I'll use the weather search tool. Let me fetch that for you right away.", 'type': 'text'}, {'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_013nHyPYxNXFSoXeS6q4oWua', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 98}}, id='run-0537e15e-86a4-4c6f-8dfb-6e4c160812c4-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 98, 'total_tokens': 458})]}
print("Pending Executions!")
print(graph.get_state(thread).next)
Pending Executions! ('human_review_node',)
To do this, we first need to update the state. We can do that by passing in a message with the same tool call id as the tool call we want to respond to. Note that this is a different ID from the one above.
# To get the ID of the tool call we want to respond to, we need to fetch the current state and find it there.
state = graph.get_state(thread)
print("Current State:")
print(state.values)
print("\nCurrent Tool Call ID:")
tool_call_id = state.values["messages"][-1].tool_calls[0]["id"]
print(tool_call_id)
# We now need to construct a tool message carrying our natural-language feedback.
# It must reference the tool call ID of the call we are responding to,
# so that the model treats the feedback as the "result" of that call.
new_message = {
    "role": "tool",
    # This is our natural language feedback
    "content": "User requested changes: pass in the country as well",
    "name": "weather_search",
    "tool_call_id": tool_call_id,
}
graph.update_state(
    # This is the config which represents this thread
    thread,
    # This is the updated value we want to push
    {"messages": [new_message]},
    # We push this update acting as our human_review_node
    as_node="human_review_node",
)
# Let's now continue executing from here
for event in graph.stream(None, thread, stream_mode="values"):
    print(event)
Current State: {'messages': [HumanMessage(content="what's the weather in sf?", id='601c4c75-f506-4d91-896d-5e382123de24'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get the most accurate and up-to-date information, I'll use the weather search tool. Let me fetch that for you right away.", 'type': 'text'}, {'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_013nHyPYxNXFSoXeS6q4oWua', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 98}}, id='run-0537e15e-86a4-4c6f-8dfb-6e4c160812c4-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 98, 'total_tokens': 458})]} Current Tool Call ID: toolu_014UTKh5uqfc885Fj4RRqGdg {'messages': [HumanMessage(content="what's the weather in sf?", id='601c4c75-f506-4d91-896d-5e382123de24'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get the most accurate and up-to-date information, I'll use the weather search tool. Let me fetch that for you right away.", 'type': 'text'}, {'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_013nHyPYxNXFSoXeS6q4oWua', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 98}}, id='run-0537e15e-86a4-4c6f-8dfb-6e4c160812c4-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 98, 'total_tokens': 458}), ToolMessage(content='User requested changes: pass in the country as well', name='weather_search', id='e20ceddc-a0d3-469d-b31e-512f3042a07e', tool_call_id='toolu_014UTKh5uqfc885Fj4RRqGdg'), AIMessage(content=[{'text': "I apologize for the oversight. It seems that the weather search function requires more specific information. Let's try again with a more detailed search, including the country. Since San Francisco is commonly associated with the one in California, USA, I'll use that. Here's the updated search:", 'type': 'text'}, {'id': 'toolu_01AaipBbWDLjHnPcoApx8wRq', 'input': {'city': 'San Francisco, USA'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_018rErqC2cLe2VVhebdJf81e', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 480, 'output_tokens': 116}}, id='run-fcba65ed-400a-4783-9ecd-e22051682399-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco, USA'}, 'id': 'toolu_01AaipBbWDLjHnPcoApx8wRq', 'type': 'tool_call'}], usage_metadata={'input_tokens': 480, 'output_tokens': 116, 'total_tokens': 596})]}
We can see that we have now hit another breakpoint, because the graph went back to the model and it produced a brand-new prediction of what to call. Let's now approve this one and continue.
print("Pending Executions!")
print(graph.get_state(thread).next)
for event in graph.stream(None, thread, stream_mode="values"):
    print(event)
Pending Executions! ('human_review_node',) ---- Searching for: San Francisco, USA ---- {'messages': [HumanMessage(content="what's the weather in sf?", id='601c4c75-f506-4d91-896d-5e382123de24'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get the most accurate and up-to-date information, I'll use the weather search tool. Let me fetch that for you right away.", 'type': 'text'}, {'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_013nHyPYxNXFSoXeS6q4oWua', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 98}}, id='run-0537e15e-86a4-4c6f-8dfb-6e4c160812c4-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 98, 'total_tokens': 458}), ToolMessage(content='User requested changes: pass in the country as well', name='weather_search', id='e20ceddc-a0d3-469d-b31e-512f3042a07e', tool_call_id='toolu_014UTKh5uqfc885Fj4RRqGdg'), AIMessage(content=[{'text': "I apologize for the oversight. It seems that the weather search function requires more specific information. Let's try again with a more detailed search, including the country. Since San Francisco is commonly associated with the one in California, USA, I'll use that. Here's the updated search:", 'type': 'text'}, {'id': 'toolu_01AaipBbWDLjHnPcoApx8wRq', 'input': {'city': 'San Francisco, USA'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_018rErqC2cLe2VVhebdJf81e', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 480, 'output_tokens': 116}}, id='run-fcba65ed-400a-4783-9ecd-e22051682399-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco, USA'}, 'id': 'toolu_01AaipBbWDLjHnPcoApx8wRq', 'type': 'tool_call'}], usage_metadata={'input_tokens': 480, 'output_tokens': 116, 'total_tokens': 596}), ToolMessage(content='Sunny!', name='weather_search', id='3f3ee262-70f5-422c-8e3f-6a9af758514d', tool_call_id='toolu_01AaipBbWDLjHnPcoApx8wRq')]} {'messages': [HumanMessage(content="what's the weather in sf?", id='601c4c75-f506-4d91-896d-5e382123de24'), AIMessage(content=[{'text': "Certainly! I can help you check the weather in San Francisco. To get the most accurate and up-to-date information, I'll use the weather search tool. Let me fetch that for you right away.", 'type': 'text'}, {'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'input': {'city': 'San Francisco'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_013nHyPYxNXFSoXeS6q4oWua', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 360, 'output_tokens': 98}}, id='run-0537e15e-86a4-4c6f-8dfb-6e4c160812c4-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'toolu_014UTKh5uqfc885Fj4RRqGdg', 'type': 'tool_call'}], usage_metadata={'input_tokens': 360, 'output_tokens': 98, 'total_tokens': 458}), ToolMessage(content='User requested changes: pass in the country as well', name='weather_search', id='e20ceddc-a0d3-469d-b31e-512f3042a07e', tool_call_id='toolu_014UTKh5uqfc885Fj4RRqGdg'), AIMessage(content=[{'text': "I apologize for the oversight. 
It seems that the weather search function requires more specific information. Let's try again with a more detailed search, including the country. Since San Francisco is commonly associated with the one in California, USA, I'll use that. Here's the updated search:", 'type': 'text'}, {'id': 'toolu_01AaipBbWDLjHnPcoApx8wRq', 'input': {'city': 'San Francisco, USA'}, 'name': 'weather_search', 'type': 'tool_use'}], response_metadata={'id': 'msg_018rErqC2cLe2VVhebdJf81e', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 480, 'output_tokens': 116}}, id='run-fcba65ed-400a-4783-9ecd-e22051682399-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco, USA'}, 'id': 'toolu_01AaipBbWDLjHnPcoApx8wRq', 'type': 'tool_call'}], usage_metadata={'input_tokens': 480, 'output_tokens': 116, 'total_tokens': 596}), ToolMessage(content='Sunny!', name='weather_search', id='3f3ee262-70f5-422c-8e3f-6a9af758514d', tool_call_id='toolu_01AaipBbWDLjHnPcoApx8wRq'), AIMessage(content="Great news! The weather in San Francisco, USA is currently sunny. \n\nHere's a summary of the weather information:\n- Location: San Francisco, USA\n- Current conditions: Sunny\n\nIt's a beautiful day in San Francisco! The sunny weather is perfect for outdoor activities or simply enjoying the city. Remember to wear sunscreen and stay hydrated if you plan to spend time outside. \n\nIs there anything else you'd like to know about the weather in San Francisco or any other location?", response_metadata={'id': 'msg_017Pnjyte2ZXAREgUvEqbUVt', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 609, 'output_tokens': 107}}, id='run-30c0d0ef-09a3-40ad-b410-80019b284983-0', usage_metadata={'input_tokens': 609, 'output_tokens': 107, 'total_tokens': 716})]}