%%capture --no-stderr
%pip install -U langgraph
接下来,我们需要设置 OpenAI 的 API 密钥(我们将使用的 LLM)
import getpass
import os
def _set_env(var: str):
    """Prompt for and store an environment variable if it is not already set."""
    # Guard clause: any existing non-empty value is kept untouched.
    if os.environ.get(var):
        return
    os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")
from langgraph.graph import StateGraph, END, START, MessagesState
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
@tool
def get_weather(city: str):
    """Get the weather for a specific city"""
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM by @tool — keep it meaningful. Stub implementation: always sunny.
    return f"It's sunny in {city}!"
# Base chat model, plus a variant whose output is constrained to the
# get_weather tool schema, so invoking it yields a dict like {"city": ...}.
raw_model = ChatOpenAI()
model = raw_model.with_structured_output(get_weather)
class SubGraphState(MessagesState):
    # City extracted from the conversation by model_node; read by weather_node.
    city: str
def model_node(state: SubGraphState):
    """Extract the city mentioned in the conversation via structured output."""
    extraction = model.invoke(state["messages"])
    return {"city": extraction["city"]}
def weather_node(state: SubGraphState):
    """Look up the weather for the extracted city and reply as the assistant."""
    forecast = get_weather.invoke({"city": state["city"]})
    reply = {"role": "assistant", "content": forecast}
    return {"messages": [reply]}
# Wire up the weather subgraph: START -> model_node -> weather_node -> END.
subgraph = StateGraph(SubGraphState)
subgraph.add_node(model_node)
subgraph.add_node(weather_node)
subgraph.add_edge(START, "model_node")
subgraph.add_edge("model_node", "weather_node")
subgraph.add_edge("weather_node", END)
# Pause just before weather_node so a human can inspect/edit the state.
subgraph = subgraph.compile(interrupt_before=["weather_node"])
定义父图¶
我们现在可以设置整个图。此图将首先路由到子图(如果需要获取 weather),否则将路由到正常的 LLM。
from typing import Literal
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
# In-memory checkpointer for the parent graph (enables interrupt/resume).
memory = MemorySaver()
class RouterState(MessagesState):
    # Routing decision produced by router_node.
    route: Literal["weather", "other"]
class Router(TypedDict):
    # Structured-output schema the router LLM must fill in.
    route: Literal["weather", "other"]
# LLM constrained to emit a Router-shaped dict.
router_model = raw_model.with_structured_output(Router)
def router_node(state: RouterState):
    """Classify the latest user query as weather-related or not."""
    prompt = [
        {
            "role": "system",
            "content": "Classify the incoming query as either about weather or not.",
        }
    ]
    decision = router_model.invoke(prompt + state["messages"])
    return {"route": decision["route"]}
def normal_llm_node(state: RouterState):
    """Answer non-weather queries with the plain (unstructured) chat model."""
    reply = raw_model.invoke(state["messages"])
    return {"messages": [reply]}
def route_after_prediction(
    state: RouterState,
) -> Literal["weather_graph", "normal_llm_node"]:
    """Send weather queries to the subgraph; everything else to the plain LLM."""
    return "weather_graph" if state["route"] == "weather" else "normal_llm_node"
# Parent graph: route first, then either the weather subgraph or a plain LLM.
graph = StateGraph(RouterState)
graph.add_node(router_node)
graph.add_node(normal_llm_node)
# Nest the compiled weather subgraph as a single parent-level node.
graph.add_node("weather_graph", subgraph)
graph.add_edge(START, "router_node")
graph.add_conditional_edges("router_node", route_after_prediction)
graph.add_edge("normal_llm_node", END)
graph.add_edge("weather_graph", END)
# The checkpointer enables interrupts, resume, and state inspection.
graph = graph.compile(checkpointer=memory)
from IPython.display import Image, display
# Setting xray to 1 will show the internal structure of the nested graph
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))
让我们使用正常的查询来测试一下,以确保它按预期工作!
# Smoke test on a non-weather query: should route to normal_llm_node.
config = {"configurable": {"thread_id": "1"}}
inputs = {"messages": [{"role": "user", "content": "hi!"}]}
for update in graph.stream(inputs, config=config, stream_mode="updates"):
    print(update)
{'router_node': {'route': 'other'}} {'normal_llm_node': {'messages': [AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 9, 'total_tokens': 18, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-35de4577-2117-40e4-ab3b-cd2ac6e27b4c-0', usage_metadata={'input_tokens': 9, 'output_tokens': 9, 'total_tokens': 18})]}}
太棒了!我们没有询问 weather,所以我们从 LLM 收到了正常的响应。
从断点恢复¶
现在让我们看看断点会发生什么。让我们使用一个应该被路由到我们有中断节点的 weather 子图的查询来调用它。
# Weather query on a fresh thread: routes into the subgraph and stops at
# the interrupt before weather_node.
config = {"configurable": {"thread_id": "2"}}
inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
for update in graph.stream(inputs, config=config, stream_mode="updates"):
    print(update)
{'router_node': {'route': 'weather'}}
请注意,图流不包含子图事件。如果我们想要流式传输子图事件,我们可以传递 subgraphs=True
并像这样获取子图事件
# subgraphs=True also surfaces events emitted inside the nested weather graph.
config = {"configurable": {"thread_id": "3"}}
inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
for update in graph.stream(inputs, config=config, stream_mode="values", subgraphs=True):
    print(update)
((), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')]}) ((), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'route': 'weather'}) (('weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20',), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')]}) (('weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20',), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'city': 'San Francisco'})
如果我们现在获取状态,我们可以看到它在 weather_graph
上暂停
# The parent graph is paused; .next names the pending node (the subgraph).
state = graph.get_state(config)
state.next
('weather_graph',)
如果我们查看当前状态的待处理任务,我们可以看到我们有一个名为 weather_graph
的任务,它对应于子图任务。
state.tasks
(PregelTask(id='0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20', name='weather_graph', path=('__pregel_pull', 'weather_graph'), error=None, interrupts=(), state={'configurable': {'thread_id': '3', 'checkpoint_ns': 'weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20'}}),)
但是,由于我们使用父图的配置获取了状态,因此我们无法访问子图状态。如果您查看上面 PregelTask
的 state
值,您会注意到它只是父图的配置。如果我们想要真正填充子图状态,我们可以像这样将 subgraphs=True
传递给 get_state
# subgraphs=True populates each task's .state with the full subgraph snapshot.
state = graph.get_state(config, subgraphs=True)
state.tasks[0]
PregelTask(id='0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20', name='weather_graph', path=('__pregel_pull', 'weather_graph'), error=None, interrupts=(), state=StateSnapshot(values={'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'city': 'San Francisco'}, next=('weather_node',), config={'configurable': {'thread_id': '3', 'checkpoint_ns': 'weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20', 'checkpoint_id': '1ef75ee0-d9c3-6242-8001-440e7a3fb19f', 'checkpoint_map': {'': '1ef75ee0-d4e8-6ede-8001-2542067239ef', 'weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20': '1ef75ee0-d9c3-6242-8001-440e7a3fb19f'}}}, metadata={'source': 'loop', 'writes': {'model_node': {'city': 'San Francisco'}}, 'step': 1, 'parents': {'': '1ef75ee0-d4e8-6ede-8001-2542067239ef'}}, created_at='2024-09-18T18:44:36.278105+00:00', parent_config={'configurable': {'thread_id': '3', 'checkpoint_ns': 'weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20', 'checkpoint_id': '1ef75ee0-d4ef-6dec-8000-5d5724f3ef73'}}, tasks=(PregelTask(id='26f4384a-41d7-5ca9-cb94-4001de62e8aa', name='weather_node', path=('__pregel_pull', 'weather_node'), error=None, interrupts=(), state=None),)))
现在我们可以访问子图状态了!如果您查看 PregelTask
的 state
值,您可以看到它包含了我们所需的所有信息,例如下一个节点 (weather_node
) 和当前状态值(例如 city
)。
要恢复执行,我们只需像往常一样调用外部图即可
# Streaming with None as input resumes from the saved checkpoint.
for update in graph.stream(None, config=config, stream_mode="values", subgraphs=True):
    print(update)
((), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'route': 'weather'}) (('weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20',), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'city': 'San Francisco'}) (('weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20',), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc'), AIMessage(content="It's sunny in San Francisco!", additional_kwargs={}, response_metadata={}, id='c996ce37-438c-44f4-9e60-5aed8bcdae8a')], 'city': 'San Francisco'}) ((), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc'), AIMessage(content="It's sunny in San Francisco!", additional_kwargs={}, response_metadata={}, id='c996ce37-438c-44f4-9e60-5aed8bcdae8a')], 'route': 'weather'})
从特定子图节点恢复¶
在上面的示例中,我们从外部图中重播——这会自动从子图的任何状态(在本例中,我们暂停在 weather_node
之前)重播子图,但也可以从子图内部重播。为此,我们需要从我们想要重播的精确子图状态获取配置。
我们可以通过探索子图的状态历史记录并选择 model_node
之前的状态来做到这一点——我们可以通过过滤 .next
参数来完成此操作。
要获取子图的状态历史记录,我们需要首先找到父图在进入子图之前的状态快照,然后将该任务中的子图状态配置传递给 get_state_history:
# Find the parent-graph checkpoint taken just before the subgraph ran.
parent_graph_state_before_subgraph = next(
    h for h in graph.get_state_history(config) if h.next == ("weather_graph",)
)
# Walk the subgraph's own history (via the task's state config) to the
# checkpoint taken just before model_node ran.
subgraph_state_before_model_node = next(
    h
    for h in graph.get_state_history(parent_graph_state_before_subgraph.tasks[0].state)
    if h.next == ("model_node",)
)
# This pattern can be extended no matter how many levels deep - imagine model_node was another subgraph in this case
# subsubgraph_state_history = next(h for h in graph.get_state_history(subgraph_state_before_model_node.tasks[0].state) if h.next == ('my_subsubgraph_node',))
我们可以通过比较 subgraph_state_before_model_node
的 .next
参数来确认我们已获取了正确状态。
subgraph_state_before_model_node.next
('model_node',)
完美!我们已经获得了正确状态快照,现在我们可以从子图中的 model_node
恢复
# Resume from inside the subgraph by passing that snapshot's config.
for value in graph.stream(
    None,
    config=subgraph_state_before_model_node.config,
    stream_mode="values",
    subgraphs=True,
):
    print(value)
((), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'route': 'weather'}) (('weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20',), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')]}) (('weather_graph:0c47aeb3-6f4d-5e68-ccf4-42bd48e8ef20',), {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='108eb27a-2cbf-48d2-a6e7-6e07e82eafbc')], 'city': 'San Francisco'})
太好了,本节展示了如何从任何节点(无论它在图中嵌套多深)重播——这是一种测试您的代理确定性程度的强大工具。
# Fresh thread; run until the interrupt before weather_node.
config = {"configurable": {"thread_id": "4"}}
inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
for update in graph.stream(inputs, config=config, stream_mode="updates"):
    print(update)
{'router_node': {'route': 'weather'}}
# Inspect the interrupted state, including the nested subgraph snapshot.
state = graph.get_state(config, subgraphs=True)
state.values["messages"]
[HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='05ee2159-3b25-4d6c-97d6-82beda3cabd4')]
为了更新 **内部** 图的状态,我们需要传递 **内部** 图的配置,我们可以通过访问 state.tasks[0].state.config
来获取它——由于我们在子图内部中断,因此任务的状态只是子图的状态。
graph.update_state(state.tasks[0].state.config, {"city": "la"})
{'configurable': {'thread_id': '4', 'checkpoint_ns': 'weather_graph:67f32ef7-aee0-8a20-0eb0-eeea0fd6de6e', 'checkpoint_id': '1ef75e5a-0b00-6bc0-8002-5726e210fef4', 'checkpoint_map': {'': '1ef75e59-1b13-6ffe-8001-0844ae748fd5', 'weather_graph:67f32ef7-aee0-8a20-0eb0-eeea0fd6de6e': '1ef75e5a-0b00-6bc0-8002-5726e210fef4'}}}
现在我们可以继续流式传输外部图(这将恢复子图!),并检查我们是否将搜索更新为使用 LA 而不是 SF。
# Resume the outer graph; the subgraph continues with the edited city.
for update in graph.stream(None, config=config, stream_mode="updates", subgraphs=True):
    print(update)
(('weather_graph:9e512e8e-bac5-5412-babe-fe5c12a47cc2',), {'weather_node': {'messages': [{'role': 'assistant', 'content': "It's sunny in la!"}]}}) ((), {'weather_graph': {'messages': [HumanMessage(content="what's the weather in sf", id='35e331c6-eb47-483c-a63c-585877b12f5d'), AIMessage(content="It's sunny in la!", id='c3d6b224-9642-4b21-94d5-eef8dc3f2cc9')]}})
太棒了!正如预期的那样,AI 回应了“It's sunny in LA!”。
充当子图节点¶
另一种更新状态的方式是,我们可以自己充当weather_node
的角色,而不是像上面那样在运行weather_node
之前编辑状态。我们可以通过传递子图配置和as_node
参数来实现这一点,它允许我们像指定节点一样更新状态。因此,通过在weather_node
之前设置一个中断,然后使用更新状态函数作为weather_node
,图本身永远不会直接调用weather_node
,而是由我们来决定weather_node
的输出应该是什么。
# Run until the interrupt, then supply weather_node's output ourselves.
config = {"configurable": {"thread_id": "14"}}
inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
for update in graph.stream(
    inputs, config=config, stream_mode="updates", subgraphs=True
):
    print(update)
# Graph execution should stop before the weather node
print("interrupted!")
state = graph.get_state(config, subgraphs=True)
# We update the state by passing in the message we want returned from the weather node, and make sure to use as_node
graph.update_state(
    state.tasks[0].state.config,
    {"messages": [{"role": "assistant", "content": "rainy"}]},
    as_node="weather_node",
)
# Because the write is attributed to weather_node, the graph treats that node
# as already executed and skips it on resume.
for update in graph.stream(None, config=config, stream_mode="updates", subgraphs=True):
    print(update)
print(graph.get_state(config).values["messages"])
((), {'router_node': {'route': 'weather'}}) (('weather_graph:c7eb1fc7-efab-b0e3-12ed-8586f37bc7a2',), {'model_node': {'city': 'San Francisco'}}) interrupted! ((), {'weather_graph': {'messages': [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='ad694c4e-8aac-4e1f-b5ca-790c60c1775b'), AIMessage(content='rainy', additional_kwargs={}, response_metadata={}, id='98a73aaf-3524-482a-9d07-971407df0389')]}}) [HumanMessage(content="what's the weather in sf", additional_kwargs={}, response_metadata={}, id='ad694c4e-8aac-4e1f-b5ca-790c60c1775b'), AIMessage(content='rainy', additional_kwargs={}, response_metadata={}, id='98a73aaf-3524-482a-9d07-971407df0389')]
完美!AI 回复了我们自己传递的消息。
充当整个子图¶
最后,我们还可以通过充当整个子图的角色来更新图。这与上面的情况类似,但与仅仅充当weather_node
不同,我们充当整个子图。这可以通过传递正常的图配置以及as_node
参数来实现,我们指定自己充当整个子图节点。
# Act as the entire subgraph: write its final output under the parent's config.
config = {"configurable": {"thread_id": "8"}}
inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
for update in graph.stream(
    inputs, config=config, stream_mode="updates", subgraphs=True
):
    print(update)
# Graph execution should stop before the weather node
print("interrupted!")
# We update the state by passing in the message we want returned from the weather graph, making sure to use as_node
# Note that we don't need to pass in the subgraph config, since we aren't updating the state inside the subgraph
graph.update_state(
    config,
    {"messages": [{"role": "assistant", "content": "rainy"}]},
    as_node="weather_graph",
)
for update in graph.stream(None, config=config, stream_mode="updates"):
    print(update)
print(graph.get_state(config).values["messages"])
((), {'router_node': {'route': 'weather'}}) (('weather_graph:53ab3fb1-23e8-5de0-acc6-9fb904fd4dc4',), {'model_node': {'city': 'San Francisco'}}) interrupted! [HumanMessage(content="what's the weather in sf", id='64b1b683-778b-4623-b783-4a8f81322ec8'), AIMessage(content='rainy', id='c1d1a2f3-c117-41e9-8c1f-8fb0a02a3b70')]
同样,AI 如预期地回复了“rainy”。
双重嵌套子图¶
无论嵌套级别如何,此功能都继续有效。以下是在双重嵌套子图上执行相同操作的示例(尽管任何嵌套级别都将起作用)。我们在已定义的图之上添加另一个路由器。
# Rebuild the router graph from scratch so it can be nested one level deeper.
from typing import Literal
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
# NOTE(review): this MemorySaver is never used below — the compile() call at
# the end deliberately takes no checkpointer (the grandparent supplies one).
memory = MemorySaver()
class RouterState(MessagesState):
    # Routing decision produced by router_node.
    route: Literal["weather", "other"]
class Router(TypedDict):
    # Structured-output schema the router LLM must fill in.
    route: Literal["weather", "other"]
router_model = raw_model.with_structured_output(Router)
def router_node(state: RouterState):
    """Classify the latest user query as weather-related or not."""
    system_message = "Classify the incoming query as either about weather or not."
    messages = [{"role": "system", "content": system_message}] + state["messages"]
    route = router_model.invoke(messages)
    return {"route": route["route"]}
def normal_llm_node(state: RouterState):
    """Answer non-weather queries with the plain chat model."""
    response = raw_model.invoke(state["messages"])
    return {"messages": [response]}
def route_after_prediction(
    state: RouterState,
) -> Literal["weather_graph", "normal_llm_node"]:
    """Send weather queries to the subgraph; everything else to the plain LLM."""
    if state["route"] == "weather":
        return "weather_graph"
    else:
        return "normal_llm_node"
graph = StateGraph(RouterState)
graph.add_node(router_node)
graph.add_node(normal_llm_node)
graph.add_node("weather_graph", subgraph)
graph.add_edge(START, "router_node")
graph.add_conditional_edges("router_node", route_after_prediction)
graph.add_edge("normal_llm_node", END)
graph.add_edge("weather_graph", END)
# Compiled WITHOUT a checkpointer: as a nested subgraph it inherits
# persistence from the grandparent graph compiled below.
graph = graph.compile()
from langgraph.checkpoint.memory import MemorySaver
# NOTE(review): `memory` is unused — a fresh MemorySaver() is passed to
# compile() below.
memory = MemorySaver()
class GrandfatherState(MessagesState):
    # Whether to continue into the nested router graph.
    to_continue: bool
def router_node(state: GrandfatherState):
    """Top-level router (shadows the earlier router_node definition)."""
    # Dummy logic that will always continue
    return {"to_continue": True}
def route_after_prediction(state: GrandfatherState):
    """Continue into the nested graph, or end the run."""
    if state["to_continue"]:
        return "graph"
    else:
        return END
grandparent_graph = StateGraph(GrandfatherState)
grandparent_graph.add_node(router_node)
# The full router graph (which itself nests the weather subgraph) as one node.
grandparent_graph.add_node("graph", graph)
grandparent_graph.add_edge(START, "router_node")
grandparent_graph.add_conditional_edges(
    "router_node", route_after_prediction, ["graph", END]
)
grandparent_graph.add_edge("graph", END)
# Only the outermost graph carries the checkpointer.
grandparent_graph = grandparent_graph.compile(checkpointer=MemorySaver())
from IPython.display import Image, display
# Setting xray to 1 will show the internal structure of the nested graph
display(Image(grandparent_graph.get_graph(xray=2).draw_mermaid_png()))
如果我们运行到中断,现在可以看出所有三个图的状态都有快照
# Run the triple-nested graph until the innermost interrupt.
config = {"configurable": {"thread_id": "2"}}
inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
for update in grandparent_graph.stream(
    inputs, config=config, stream_mode="updates", subgraphs=True
):
    print(update)
((), {'router_node': {'to_continue': True}}) (('graph:e18ecd45-5dfb-53b0-bcb7-db793924e9a8',), {'router_node': {'route': 'weather'}}) (('graph:e18ecd45-5dfb-53b0-bcb7-db793924e9a8', 'weather_graph:12bd3069-de24-5bc6-b4f1-f39527605781'), {'model_node': {'city': 'San Francisco'}})
# With subgraphs=True we can drill down through task.state at every level.
state = grandparent_graph.get_state(config, subgraphs=True)
print("Grandparent State:")
print(state.values)
print("---------------")
print("Parent Graph State:")
print(state.tasks[0].state.values)
print("---------------")
print("Subgraph State:")
print(state.tasks[0].state.tasks[0].state.values)
Grandparent State: {'messages': [HumanMessage(content="what's the weather in sf", id='3bb28060-3d30-49a7-9f84-c90b6ada7848')], 'to_continue': True} --------------- Parent Graph State: {'messages': [HumanMessage(content="what's the weather in sf", id='3bb28060-3d30-49a7-9f84-c90b6ada7848')], 'route': 'weather'} --------------- Subgraph State: {'messages': [HumanMessage(content="what's the weather in sf", id='3bb28060-3d30-49a7-9f84-c90b6ada7848')], 'city': 'San Francisco'}
现在我们可以继续,充当三层之下的节点
# Drill down two levels to the innermost snapshot, then act as weather_node.
grandparent_graph_state = state
parent_graph_state = grandparent_graph_state.tasks[0].state
subgraph_state = parent_graph_state.tasks[0].state
grandparent_graph.update_state(
    subgraph_state.config,
    {"messages": [{"role": "assistant", "content": "rainy"}]},
    as_node="weather_node",
)
# Resume from the outermost graph; all levels pick up the edited state.
for update in grandparent_graph.stream(
    None, config=config, stream_mode="updates", subgraphs=True
):
    print(update)
print(grandparent_graph.get_state(config).values["messages"])
(('graph:e18ecd45-5dfb-53b0-bcb7-db793924e9a8',), {'weather_graph': {'messages': [HumanMessage(content="what's the weather in sf", id='3bb28060-3d30-49a7-9f84-c90b6ada7848'), AIMessage(content='rainy', id='be926b59-c647-4355-88fd-a429b9e2b420')]}}) ((), {'graph': {'messages': [HumanMessage(content="what's the weather in sf", id='3bb28060-3d30-49a7-9f84-c90b6ada7848'), AIMessage(content='rainy', id='be926b59-c647-4355-88fd-a429b9e2b420')]}}) [HumanMessage(content="what's the weather in sf", id='3bb28060-3d30-49a7-9f84-c90b6ada7848'), AIMessage(content='rainy', id='be926b59-c647-4355-88fd-a429b9e2b420')]
与上面的情况一样,我们可以看到 AI 如预期地回复了“rainy”。
我们可以探索状态历史,查看祖父母图的状态在每个步骤中是如何更新的。
# Replay the outermost thread's checkpoint history, newest first.
for state in grandparent_graph.get_state_history(config):
    print(state)
    print("-----")
StateSnapshot(values={'messages': [HumanMessage(content="what's the weather in sf", id='5ff89e4d-8255-4d23-8b55-01633c112720'), AIMessage(content='rainy', id='7c80f847-248d-4b8f-8238-633ed757b353')], 'to_continue': True}, next=(), config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f40-7a2c-6f9e-8002-a37a61b26709'}}, metadata={'source': 'loop', 'writes': {'graph': {'messages': [HumanMessage(content="what's the weather in sf", id='5ff89e4d-8255-4d23-8b55-01633c112720'), AIMessage(content='rainy', id='7c80f847-248d-4b8f-8238-633ed757b353')]}}, 'step': 2, 'parents': {}}, created_at='2024-08-30T17:19:35.793847+00:00', parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f3f-f312-6338-8001-766acddc781e'}}, tasks=()) ----- StateSnapshot(values={'messages': [HumanMessage(content="what's the weather in sf", id='5ff89e4d-8255-4d23-8b55-01633c112720')], 'to_continue': True}, next=('graph',), config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f3f-f312-6338-8001-766acddc781e'}}, metadata={'source': 'loop', 'writes': {'router_node': {'to_continue': True}}, 'step': 1, 'parents': {}}, created_at='2024-08-30T17:19:21.627097+00:00', parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f3f-f303-61d0-8000-1945c8a74e9e'}}, tasks=(PregelTask(id='b59fe96f-fdce-5afe-aa58-bd2876a0d592', name='graph', error=None, interrupts=(), state={'configurable': {'thread_id': '2', 'checkpoint_ns': 'graph:b59fe96f-fdce-5afe-aa58-bd2876a0d592'}}),)) ----- StateSnapshot(values={'messages': [HumanMessage(content="what's the weather in sf", id='5ff89e4d-8255-4d23-8b55-01633c112720')]}, next=('router_node',), config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f3f-f303-61d0-8000-1945c8a74e9e'}}, metadata={'source': 'loop', 'writes': None, 'step': 0, 'parents': {}}, created_at='2024-08-30T17:19:21.620923+00:00', 
parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f3f-f2f9-6d6a-bfff-c8b76e5b2462'}}, tasks=(PregelTask(id='e3d4a97a-f4ca-5260-801e-e65b02907825', name='router_node', error=None, interrupts=(), state=None),)) ----- StateSnapshot(values={'messages': []}, next=('__start__',), config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef66f3f-f2f9-6d6a-bfff-c8b76e5b2462'}}, metadata={'source': 'input', 'writes': {'messages': [{'role': 'user', 'content': "what's the weather in sf"}]}, 'step': -1, 'parents': {}}, created_at='2024-08-30T17:19:21.617127+00:00', parent_config=None, tasks=(PregelTask(id='f0538638-b794-58fc-a406-980d2fea28a1', name='__start__', error=None, interrupts=(), state=None),)) -----