%%capture --no-stderr
%pip install -U langgraph langsmith langchain_anthropic
Next, set your API key:
import getpass
import os


def _set_env(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")


_set_env("ANTHROPIC_API_KEY")
ANTHROPIC_API_KEY: ········
Part 1: Build a Basic Chatbot
We'll first create a simple chatbot using LangGraph. This chatbot will respond directly to user messages. Though simple, it will illustrate the core concepts of building with LangGraph. By the end of this section, you will have built a basic chatbot.
Start by creating a StateGraph. A StateGraph object defines the structure of our chatbot as a "state machine". We'll add nodes to represent the LLM and the functions our chatbot can call, and edges to specify how the bot should transition between these functions.
from typing import Annotated

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages


class State(TypedDict):
    # Messages have the type "list". The `add_messages` function
    # in the annotation defines how this state key should be updated
    # (in this case, it appends messages to the list, rather than overwriting them)
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)
Note
The first thing you do when you define a graph is define its State. The State consists of the schema of the graph as well as reducer functions that specify how to apply updates to the state. In our example, State is a TypedDict with a single key: messages. The messages key is annotated with the add_messages reducer function, which tells LangGraph to append new messages to the existing list, rather than overwriting it. State keys without an annotation will be overwritten by each update, storing the most recent value. Check out this conceptual guide to learn more about state, reducers, and other low-level concepts.
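To make the reducer's behavior concrete, here is a minimal hedged sketch (not part of the original tutorial) that calls add_messages directly on two small lists:
from langgraph.graph.message import add_messages

existing = [("user", "Hi")]
update = [("assistant", "Hello!")]

# `add_messages` appends the update (and assigns message IDs); a state key
# without a reducer annotation would instead be replaced by the new value.
merged = add_messages(existing, update)
print(len(merged))  # 2 -- both messages are kept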
So now our graph knows two things:
- Every node we define will receive the current State as input and return a value that updates that state.
- messages will be appended to the current list, rather than directly overwritten. This is communicated via the prebuilt add_messages function in the Annotated syntax.
Next, add a "chatbot" node. Nodes represent units of work. They are typically regular python functions.
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")


def chatbot(state: State):
    return {"messages": [llm.invoke(state["messages"])]}


# The first argument is the unique node name
# The second argument is the function or object that will be called whenever
# the node is used.
graph_builder.add_node("chatbot", chatbot)
Notice how the chatbot node function takes the current State as input and returns a dictionary containing an updated messages list under the key "messages". This is the basic pattern for all LangGraph node functions.
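Because node functions are plain Python callables, you can sanity-check one outside the graph. A hedged sketch (it makes a real model call, and the message text is arbitrary):
out = chatbot({"messages": [("user", "Hello!")]})
print(out["messages"][-1].content)  # the AIMessage produced by the LLM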
The add_messages function in our State will append the LLM's response messages to whatever messages are already in the state.
Next, add an entry point. This tells our graph where to start its work each time we run it.
graph_builder.add_edge(START, "chatbot")
Similarly, set a finish point. This instructs the graph "any time this node is run, you can exit."
graph_builder.add_edge("chatbot", END)
Finally, we'll want to be able to run our graph. To do so, call "compile()" on the graph builder. This creates a "CompiledGraph" we can invoke on our state.
graph = graph_builder.compile()
You can visualize the graph using the get_graph method and one of the "draw" methods, like draw_ascii or draw_png. Each of the draw methods requires additional dependencies.
from IPython.display import Image, display
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
Now let's run the chatbot!
Tip: You can exit the chat loop at any time by typing "quit", "exit", or "q".
def stream_graph_updates(user_input: str):
    for event in graph.stream({"messages": [("user", user_input)]}):
        for value in event.values():
            print("Assistant:", value["messages"][-1].content)


while True:
    try:
        user_input = input("User: ")
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break
        stream_graph_updates(user_input)
    except:
        # fallback if input() is not available
        user_input = "What do you know about LangGraph?"
        print("User: " + user_input)
        stream_graph_updates(user_input)
        break
Assistant: LangGraph is a library designed to help build stateful multi-agent applications using language models. It provides tools for creating workflows and state machines to coordinate multiple AI agents or language model interactions. LangGraph is built on top of LangChain, leveraging its components while adding graph-based coordination capabilities. It's particularly useful for developing more complex, stateful AI applications that go beyond simple query-response interactions.
Goodbye!
Congratulations! You've built your first chatbot using LangGraph. This bot can engage in basic conversation by taking user input and generating responses with an LLM. You can inspect a LangSmith trace for the call above at the provided link.
However, you may have noticed that the bot's knowledge is limited to what's in its training data. In the next part, we'll add a web search tool to expand the bot's knowledge and make it more capable.
Below is the full code for this section for your reference:
Full Code
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from typing_extensions import TypedDict

from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages


class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")


def chatbot(state: State):
    return {"messages": [llm.invoke(state["messages"])]}


# The first argument is the unique node name
# The second argument is the function or object that will be called whenever
# the node is used.
graph_builder.add_node("chatbot", chatbot)
graph_builder.set_entry_point("chatbot")
graph_builder.set_finish_point("chatbot")
graph = graph_builder.compile()
Part 2: Enhancing the Chatbot with Tools
To handle queries our chatbot can't answer "from memory", we'll integrate a web search tool. Our bot can use this tool to find relevant information and provide better responses.
Requirements
Before we start, make sure you have the necessary packages installed and API keys set up.
First, install the requirements to use the Tavily Search Engine, and set your TAVILY_API_KEY.
%%capture --no-stderr
%pip install -U tavily-python langchain_community
_set_env("TAVILY_API_KEY")
TAVILY_API_KEY: ········
Next, define the tool:
from langchain_community.tools.tavily_search import TavilySearchResults
tool = TavilySearchResults(max_results=2)
tools = [tool]
tool.invoke("What's a 'node' in LangGraph?")
[{'url': 'https://medium.com/@cplog/introduction-to-langgraph-a-beginners-guide-14f9be027141', 'content': 'Nodes: Nodes are the building blocks of your LangGraph. Each node represents a function or a computation step. You define nodes to perform specific tasks, such as processing input, making ...'}, {'url': 'https://saksheepatil05.medium.com/demystifying-langgraph-a-beginner-friendly-dive-into-langgraph-concepts-5ffe890ddac0', 'content': 'Nodes (Tasks): Nodes are like the workstations on the assembly line. Each node performs a specific task on the product. In LangGraph, nodes are Python functions that take the current state, do some work, and return an updated state. Next, we define the nodes, each representing a task in our sandwich-making process.'}]
The results are page summaries our chatbot can use to answer questions.
Next, we'll start defining our graph. The following is all the same as in Part 1, except we have added bind_tools on our LLM. This lets the LLM know the correct JSON format to use if it wants to use our search engine.
from typing import Annotated
from langchain_anthropic import ChatAnthropic
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)

llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
# Modification: tell the LLM which tools it can call
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}
graph_builder.add_node("chatbot", chatbot)
Next, we need to create a function to actually run the tools if they are called. We'll do this by adding the tools to a new node.
Below, we implement a BasicToolNode that checks the most recent message in the state and calls the tools if the message contains tool_calls. It relies on the LLM's tool_calling support, which is available in Anthropic, OpenAI, Google Gemini, and a number of other LLM providers.
We will later replace this with LangGraph's prebuilt ToolNode to speed things up, but building it ourselves first is instructive.
import json
from langchain_core.messages import ToolMessage
class BasicToolNode:
    """A node that runs the tools requested in the last AIMessage."""

    def __init__(self, tools: list) -> None:
        self.tools_by_name = {tool.name: tool for tool in tools}

    def __call__(self, inputs: dict):
        if messages := inputs.get("messages", []):
            message = messages[-1]
        else:
            raise ValueError("No message found in input")
        outputs = []
        for tool_call in message.tool_calls:
            tool_result = self.tools_by_name[tool_call["name"]].invoke(
                tool_call["args"]
            )
            outputs.append(
                ToolMessage(
                    content=json.dumps(tool_result),
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
            )
        return {"messages": outputs}


tool_node = BasicToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)
With the tool node added, we can define the conditional_edges.
Recall that edges route the control flow from one node to the next. Conditional edges usually contain "if" statements to route to different nodes depending on the current graph state. These functions receive the current graph state and return a string or a list of strings indicating which node(s) to call next.
Below, define a router function called route_tools that checks for tool_calls in the chatbot's output. Provide this function to the graph by calling add_conditional_edges, which tells the graph that whenever the chatbot node completes, check this function to see where to go next.
The condition will route to tools if tool calls are present, and to END if not.
Later, we will replace this with the prebuilt tools_condition to be more concise, but implementing it ourselves first makes things clearer.
from typing import Literal
def route_tools(
    state: State,
):
    """
    Use in the conditional_edge to route to the ToolNode if the last message
    has tool calls. Otherwise, route to the end.
    """
    if isinstance(state, list):
        ai_message = state[-1]
    elif messages := state.get("messages", []):
        ai_message = messages[-1]
    else:
        raise ValueError(f"No messages found in input state to tool_edge: {state}")
    if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
        return "tools"
    return END


# The `route_tools` function returns "tools" if the chatbot asks to use a tool, and "END" if
# it is fine directly responding. This conditional routing defines the main agent loop.
graph_builder.add_conditional_edges(
    "chatbot",
    route_tools,
    # The following dictionary lets you tell the graph to interpret the condition's outputs as a specific node
    # It defaults to the identity function, but if you
    # want to use a node named something else apart from "tools",
    # You can update the value of the dictionary to something else
    # e.g., "tools": "my_tools"
    {"tools": "tools", END: END},
)
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph = graph_builder.compile()
Notice that conditional edges start from a single node. This tells the graph "any time the 'chatbot' node runs, either go to 'tools' if it calls a tool, or end the loop if it responds directly."
Like the prebuilt tools_condition, our function returns the END string if no tool calls are made. When the graph transitions to END, it has no more tasks to complete and ceases execution. Because the condition can return END, we don't need to explicitly set a finish_point this time. Our graph already has a way to finish!
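To make the routing concrete, here is a small hedged sketch that calls route_tools directly on hand-built messages (the tool call ID below is made up):
from langchain_core.messages import AIMessage

plain_reply = AIMessage(content="Hi there!")
print(route_tools({"messages": [plain_reply]}))  # "__end__" (the END constant)

tool_request = AIMessage(
    content="",
    tool_calls=[
        {"name": "tavily_search_results_json", "args": {"query": "x"}, "id": "c1", "type": "tool_call"}
    ],
)
print(route_tools({"messages": [tool_request]}))  # "tools"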
Let's visualize the graph we've built. The following function has some additional dependencies to run that are unimportant for this tutorial.
from IPython.display import Image, display
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
Now we can ask the bot questions outside its training data.
while True:
    try:
        user_input = input("User: ")
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break
        stream_graph_updates(user_input)
    except:
        # fallback if input() is not available
        user_input = "What do you know about LangGraph?"
        print("User: " + user_input)
        stream_graph_updates(user_input)
        break
Assistant: [{'text': "To provide you with accurate and up-to-date information about LangGraph, I'll need to search for the latest details. Let me do that for you.", 'type': 'text'}, {'id': 'toolu_01Q588CszHaSvvP2MxRq9zRD', 'input': {'query': 'LangGraph AI tool information'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]
Assistant: [{"url": "https://www.langchain.ac.cn/langgraph", "content": "LangGraph sets the foundation for how we can build and scale AI workloads \u2014 from conversational agents, complex task automation, to custom LLM-backed experiences that 'just work'. The next chapter in building complex production-ready features with LLMs is agentic, and with LangGraph and LangSmith, LangChain delivers an out-of-the-box solution ..."}, {"url": "https://github.com/langchain-ai/langgraph", "content": "Overview. LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows. Compared to other LLM frameworks, it offers these core benefits: cycles, controllability, and persistence. LangGraph allows you to define flows that involve cycles, essential for most agentic architectures ..."}]
Assistant: Based on the search results, I can provide you with information about LangGraph:
1. Purpose: LangGraph is a library designed for building stateful, multi-actor applications with Large Language Models (LLMs). It's particularly useful for creating agent and multi-agent workflows.
2. Developer: LangGraph is developed by LangChain, a company known for its tools and frameworks in the AI and LLM space.
3. Key Features:
- Cycles: LangGraph allows the definition of flows that involve cycles, which is essential for most agentic architectures.
- Controllability: It offers enhanced control over the application flow.
- Persistence: The library provides ways to maintain state and persistence in LLM-based applications.
4. Use Cases: LangGraph can be used for various applications, including:
- Conversational agents
- Complex task automation
- Custom LLM-backed experiences
5. Integration: LangGraph works in conjunction with LangSmith, another tool by LangChain, to provide an out-of-the-box solution for building complex, production-ready features with LLMs.
6. Significance: LangGraph is described as setting the foundation for building and scaling AI workloads. It's positioned as a key tool in the next chapter of LLM-based application development, particularly in the realm of agentic AI.
7. Availability: LangGraph is open-source and available on GitHub, which suggests that developers can access and contribute to its codebase.
8. Comparison to Other Frameworks: LangGraph is noted to offer unique benefits compared to other LLM frameworks, particularly in its ability to handle cycles, provide controllability, and maintain persistence.
LangGraph appears to be a significant tool in the evolving landscape of LLM-based application development, offering developers new ways to create more complex, stateful, and interactive AI systems.
Goodbye!
Congrats! You've created a conversational agent in LangGraph that can use a search engine to retrieve updated information when needed. Now it can handle a wider range of user queries. To inspect all the steps your agent just took, check out this LangSmith trace.
Our chatbot still can't remember past interactions on its own, limiting its ability to have coherent, multi-turn conversations. In the next part, we'll add memory to address this.
The full code for the graph we've created in this section is reproduced below, replacing our BasicToolNode with the prebuilt ToolNode, and our route_tools condition with the prebuilt tools_condition.
Full Code
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from typing_extensions import TypedDict

from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition


class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.set_entry_point("chatbot")
graph = graph_builder.compile()
Part 3: Adding Memory to the Chatbot
Our chatbot can now use tools to answer user questions, but it doesn't remember the context of previous interactions. This limits its ability to have coherent, multi-turn conversations.
LangGraph solves this problem through persistent checkpointing. If you provide a checkpointer when compiling the graph and a thread_id when calling the graph, LangGraph automatically saves the state after each step. When you invoke the graph again using the same thread_id, the graph loads its saved state, allowing the chatbot to pick up where it left off.
We will see later that checkpointing is much more powerful than simple chat memory - it lets you save and resume complex state at any time for error recovery, human-in-the-loop workflows, time travel interactions, and more. But before we get too far ahead of ourselves, let's add checkpointing to enable multi-turn conversations.
To get started, create a MemorySaver checkpointer.
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
Notice that we're using an in-memory checkpointer. This is convenient for our tutorial (it saves it all in memory). In a production application, you would likely change this to use SqliteSaver or PostgresSaver and connect to your own database.
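For example, a SQLite-backed checkpointer might look like the hedged sketch below. It is not part of this tutorial: it assumes the optional langgraph-checkpoint-sqlite package is installed, and the file name is arbitrary.
import sqlite3

from langgraph.checkpoint.sqlite import SqliteSaver  # optional extra package

conn = sqlite3.connect("checkpoints.sqlite", check_same_thread=False)
memory = SqliteSaver(conn)  # a drop-in replacement for MemorySaver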
Next define the graph. Now that you've already built your own BasicToolNode, we'll replace it with LangGraph's prebuilt ToolNode and tools_condition, since these do some nice things like parallel API execution. Apart from that, the following is all copied from Part 2.
from typing import Annotated
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
Finally, compile the graph with the provided checkpointer.
graph = graph_builder.compile(checkpointer=memory)
Notice that the connectivity of the graph hasn't changed since Part 2. All we are doing is checkpointing the State as the graph works through each node.
from IPython.display import Image, display
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
Now you can interact with your bot! First, pick a thread to use as the key for this conversation.
config = {"configurable": {"thread_id": "1"}}
Next, call your chatbot.
user_input = "Hi there! My name is Will."
# The config is the **second positional argument** to stream() or invoke()!
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    event["messages"][-1].pretty_print()
================================ Human Message =================================

Hi there! My name is Will.
================================== Ai Message ==================================

Hello Will! It's nice to meet you. How can I assist you today? Is there anything specific you'd like to know or discuss?
Note: The config was provided as the second positional argument when calling the graph. Importantly, it is not nested within the graph inputs ({'messages': []}).
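The same positioning applies to invoke. A minimal hedged sketch (the message text is arbitrary):
# `invoke` takes the config in the same position and returns the final state
# instead of streaming intermediate events.
final_state = graph.invoke({"messages": [("user", "Hi again!")]}, config)
print(final_state["messages"][-1].content)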
Let's ask a follow-up question: see if it remembers your name.
user_input = "Remember my name?"
# The config is the **second positional argument** to stream() or invoke()!
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    event["messages"][-1].pretty_print()
================================ Human Message =================================

Remember my name?
================================== Ai Message ==================================

Of course, I remember your name, Will. I always try to pay attention to important details that users share with me. Is there anything else you'd like to talk about or any questions you have? I'm here to help with a wide range of topics or tasks.
Notice that we aren't using an external list for memory: it's all handled by the checkpointer! You can inspect the full execution in this LangSmith trace to see what's going on.
Don't believe me? Try this with a different config.
# The only difference is we change the `thread_id` here to "2" instead of "1"
events = graph.stream(
    {"messages": [("user", user_input)]},
    {"configurable": {"thread_id": "2"}},
    stream_mode="values",
)
for event in events:
    event["messages"][-1].pretty_print()
================================ Human Message =================================

Remember my name?
================================== Ai Message ==================================

I apologize, but I don't have any previous context or memory of your name. As an AI assistant, I don't retain information from past conversations. Each interaction starts fresh. Could you please tell me your name so I can address you properly in this conversation?
Notice that the only change we've made is to modify the thread_id in the config. Compare this call's LangSmith trace with the previous one.
By now, we have made a few checkpoints across two different threads. But what goes into a checkpoint? To inspect a graph's state for a given config at any time, call get_state(config).
snapshot = graph.get_state(config)
snapshot
StateSnapshot(values={'messages': [HumanMessage(content='Hi there! My name is Will.', additional_kwargs={}, response_metadata={}, id='8c1ca919-c553-4ebf-95d4-b59a2d61e078'), AIMessage(content="Hello Will! It's nice to meet you. How can I assist you today? Is there anything specific you'd like to know or discuss?", additional_kwargs={}, response_metadata={'id': 'msg_01WTQebPhNwmMrmmWojJ9KXJ', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 405, 'output_tokens': 32}}, id='run-58587b77-8c82-41e6-8a90-d62c444a261d-0', usage_metadata={'input_tokens': 405, 'output_tokens': 32, 'total_tokens': 437}), HumanMessage(content='Remember my name?', additional_kwargs={}, response_metadata={}, id='daba7df6-ad75-4d6b-8057-745881cea1ca'), AIMessage(content="Of course, I remember your name, Will. I always try to pay attention to important details that users share with me. Is there anything else you'd like to talk about or any questions you have? I'm here to help with a wide range of topics or tasks.", additional_kwargs={}, response_metadata={'id': 'msg_01E41KitY74HpENRgXx94vag', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 444, 'output_tokens': 58}}, id='run-ffeaae5c-4d2d-4ddb-bd59-5d5cbf2a5af8-0', usage_metadata={'input_tokens': 444, 'output_tokens': 58, 'total_tokens': 502})]}, next=(), config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef7d06e-93e0-6acc-8004-f2ac846575d2'}}, metadata={'source': 'loop', 'writes': {'chatbot': {'messages': [AIMessage(content="Of course, I remember your name, Will. I always try to pay attention to important details that users share with me. Is there anything else you'd like to talk about or any questions you have? I'm here to help with a wide range of topics or tasks.", additional_kwargs={}, response_metadata={'id': 'msg_01E41KitY74HpENRgXx94vag', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 444, 'output_tokens': 58}}, id='run-ffeaae5c-4d2d-4ddb-bd59-5d5cbf2a5af8-0', usage_metadata={'input_tokens': 444, 'output_tokens': 58, 'total_tokens': 502})]}}, 'step': 4, 'parents': {}}, created_at='2024-09-27T19:30:10.820758+00:00', parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef7d06e-859f-6206-8003-e1bd3c264b8f'}}, tasks=())
snapshot.next # (since the graph ended this turn, `next` is empty. If you fetch a state from within a graph invocation, next tells which node will execute next)
()
The snapshot above contains the current state values, the corresponding config, and the next node to process. In our case, the graph has reached an END state, so next is empty.
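A few fields on the snapshot are worth knowing; the hedged sketch below just prints them for the snapshot we fetched above:
print(snapshot.next)  # () because the graph finished this turn
print(snapshot.config["configurable"]["checkpoint_id"])  # ID of this checkpoint
print(len(snapshot.values["messages"]))  # messages accumulated on thread "1"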
Congratulations! Your chatbot can now maintain conversational state across sessions thanks to LangGraph's checkpointing system. This opens up exciting possibilities for more natural, contextual interactions. LangGraph's checkpointing even handles arbitrarily complex graph states, which is much more expressive and powerful than simple chat memory.
In the next part, we'll introduce human oversight to our bot to handle situations where it may need guidance or verification before proceeding.
Check out the code snippet below to review our graph from this section.
Full Code
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from typing_extensions import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition


class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.set_entry_point("chatbot")
memory = MemorySaver()  # instantiate the checkpointer (implicit in the earlier cell)
graph = graph_builder.compile(checkpointer=memory)
Part 4: Human-in-the-Loop
Agents can be unreliable and may need human input to successfully accomplish tasks. Similarly, for some actions, you may want to require human approval before running to ensure that everything runs as intended.
LangGraph supports human-in-the-loop workflows in a number of ways. In this section, we will use LangGraph's interrupt_before functionality to always break before the tools node.
First, start from our existing code. The following is copied from Part 3.
from typing import Annotated
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
memory = MemorySaver()
class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
Now, compile the graph, specifying to interrupt_before the tools node.
graph = graph_builder.compile(
    checkpointer=memory,
    # This is new!
    interrupt_before=["tools"],
    # Note: can also interrupt __after__ tools, if desired.
    # interrupt_after=["tools"]
)
user_input = "I'm learning LangGraph. Could you do some research on it for me?"
config = {"configurable": {"thread_id": "1"}}
# The config is the **second positional argument** to stream() or invoke()!
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================ Human Message =================================

I'm learning LangGraph. Could you do some research on it for me?
================================== Ai Message ==================================

[{'text': "Certainly! I'd be happy to research LangGraph for you. To get the most up-to-date and comprehensive information, I'll use the Tavily search engine to look this up. Let me do that for you now.", 'type': 'text'}, {'id': 'toolu_01R4ZFcb5hohpiVZwr88Bxhc', 'input': {'query': 'LangGraph framework for building language model applications'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]
Tool Calls:
  tavily_search_results_json (toolu_01R4ZFcb5hohpiVZwr88Bxhc)
 Call ID: toolu_01R4ZFcb5hohpiVZwr88Bxhc
  Args:
    query: LangGraph framework for building language model applications
Let's inspect the graph state to confirm it worked.
snapshot = graph.get_state(config)
snapshot.next
('tools',)
Notice that unlike last time, the "next" node is set to 'tools'. We've interrupted here! Let's check the tool invocation.
existing_message = snapshot.values["messages"][-1]
existing_message.tool_calls
[{'name': 'tavily_search_results_json', 'args': {'query': 'LangGraph framework for building language model applications'}, 'id': 'toolu_01R4ZFcb5hohpiVZwr88Bxhc', 'type': 'tool_call'}]
This query seems reasonable. Nothing to filter here. The simplest thing the human can do is just let the graph continue executing. Let's do that below.
Next, continue the graph! Passing in None will just let the graph continue from where it left off, without adding anything new to the state.
# `None` will append nothing new to the current state, letting it resume as if it had never been interrupted
events = graph.stream(None, config, stream_mode="values")
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================== Ai Message ==================================

[{'text': "Certainly! I'd be happy to research LangGraph for you. To get the most up-to-date and comprehensive information, I'll use the Tavily search engine to look this up. Let me do that for you now.", 'type': 'text'}, {'id': 'toolu_01R4ZFcb5hohpiVZwr88Bxhc', 'input': {'query': 'LangGraph framework for building language model applications'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]
Tool Calls:
  tavily_search_results_json (toolu_01R4ZFcb5hohpiVZwr88Bxhc)
 Call ID: toolu_01R4ZFcb5hohpiVZwr88Bxhc
  Args:
    query: LangGraph framework for building language model applications
================================= Tool Message =================================
Name: tavily_search_results_json

[{"url": "https://towardsdatascience.com/from-basics-to-advanced-exploring-langgraph-e8c1cf4db787", "content": "LangChain is one of the leading frameworks for building applications powered by Lardge Language Models. With the LangChain Expression Language (LCEL), defining and executing step-by-step action sequences — also known as chains — becomes much simpler. In more technical terms, LangChain allows us to create DAGs (directed acyclic graphs). As LLM applications, particularly LLM agents, have ..."}, {"url": "https://github.com/langchain-ai/langgraph", "content": "Overview. LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows. Compared to other LLM frameworks, it offers these core benefits: cycles, controllability, and persistence. LangGraph allows you to define flows that involve cycles, essential for most agentic architectures ..."}]
================================== Ai Message ==================================

Thank you for your patience. I've found some valuable information about LangGraph for you. Let me summarize the key points:
1. LangGraph is a library for building stateful, multi-actor applications with Large Language Models (LLMs).
2. It is particularly useful for creating agent and multi-agent workflows.
3. LangGraph is built on top of LangChain, which is one of the leading frameworks for building LLM-powered applications.
4. Key benefits of LangGraph compared to other LLM frameworks include:
a) Cycles: It allows you to define flows that involve cycles, which is essential for most agent architectures.
b) Controllability: Offers more control over the application flow.
c) Persistence: Provides ways to maintain state across interactions.
5. LangGraph works well with the LangChain Expression Language (LCEL), which simplifies the process of defining and executing step-by-step action sequences (chains).
6. In technical terms, LangGraph enables the creation of Directed Acyclic Graphs (DAGs) for LLM applications.
7. It's particularly useful for building more complex LLM agents and multi-agent systems.
LangGraph seems to be an advanced tool that builds upon LangChain to provide more sophisticated capabilities for creating stateful and multi-actor LLM applications. It's especially valuable if you're looking to create complex agent systems or applications that require maintaining state across interactions.
Is there any specific aspect of LangGraph you'd like to know more about? I'd be happy to dive deeper into any particular area of interest.
Review this call's LangSmith trace to see the exact work that was done in the above call. Notice that the state is loaded in the first step so that your chatbot can continue where it left off.
Congrats! You've used an interrupt to add human-in-the-loop execution to your chatbot, allowing for human oversight and intervention when needed. This opens up the potential UIs you can create with your AI systems. Since we have already added a checkpointer, the graph can be paused indefinitely and resumed at any time as if nothing had happened.
Next, we'll explore how to further customize the bot's behavior using custom state updates.
Below is a copy of the code you used in this section. The only difference between this and the previous parts is the addition of the interrupt_before argument.
Full Code
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from typing_extensions import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition


class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.set_entry_point("chatbot")
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    # This is new!
    interrupt_before=["tools"],
    # Note: can also interrupt __after__ actions, if desired.
    # interrupt_after=["tools"]
)
Part 5: Manually Updating the State
In the previous section, we showed how to interrupt a graph so that a human could inspect its actions. This lets the human read the state, but if they want to change their agent's course, they'll need to have write access.
Thankfully, LangGraph lets you manually update state! Updating the state lets you control the agent's trajectory by modifying its actions (even modifying the past!). This capability is particularly useful when you want to correct the agent's mistakes, explore alternative paths, or guide the agent towards a specific goal.
We'll show how to update a checkpointed state below. As before, first, define your graph. We'll reuse the exact same graph as before.
from typing import Annotated
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    # This is new!
    interrupt_before=["tools"],
    # Note: can also interrupt **after** actions, if desired.
    # interrupt_after=["tools"]
)
user_input = "I'm learning LangGraph. Could you do some research on it for me?"
config = {"configurable": {"thread_id": "1"}}
# The config is the **second positional argument** to stream() or invoke()!
events = graph.stream({"messages": [("user", user_input)]}, config)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()

snapshot = graph.get_state(config)
existing_message = snapshot.values["messages"][-1]
existing_message.pretty_print()
================================== Ai Message ==================================
[{'text': "Certainly! I'd be happy to research LangGraph for you. To get the most up-to-date and comprehensive information, I'll use the Tavily search engine to look this up. Let me do that for you now.", 'type': 'text'}, {'id': 'toolu_018YcbFR37CG8RRXnavH5fxZ', 'input': {'query': 'LangGraph: what is it, how is it used in AI development'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]
Tool Calls:
tavily_search_results_json (toolu_018YcbFR37CG8RRXnavH5fxZ)
Call ID: toolu_018YcbFR37CG8RRXnavH5fxZ
Args:
query: LangGraph: what is it, how is it used in AI development
So far, all of this is an exact repeat of the previous section. The LLM just requested to use the search engine tool and our graph was interrupted. If we proceed as before, the tool will be called to search the web.
But what if the user wants to intercede? What if we think the chatbot doesn't need to use the tool?
Let's directly provide the correct response!
from langchain_core.messages import AIMessage, ToolMessage

answer = (
    "LangGraph is a library for building stateful, multi-actor applications with LLMs."
)
new_messages = [
    # The LLM API expects some ToolMessage to match its tool call. We'll satisfy that here.
    ToolMessage(content=answer, tool_call_id=existing_message.tool_calls[0]["id"]),
    # And then directly "put words in the LLM's mouth" by populating its response.
    AIMessage(content=answer),
]

new_messages[-1].pretty_print()
graph.update_state(
    # Which state to update
    config,
    # The updated values to provide. The messages in our `State` are "append-only", meaning this will be appended
    # to the existing state. We will review how to update existing messages in the next section!
    {"messages": new_messages},
)

print("\n\nLast 2 messages;")
print(graph.get_state(config).values["messages"][-2:])
================================== Ai Message ==================================
LangGraph is a library for building stateful, multi-actor applications with LLMs.
Last 2 messages;
[ToolMessage(content='LangGraph is a library for building stateful, multi-actor applications with LLMs.', id='675f7618-367f-44b7-b80e-2834afb02ac5', tool_call_id='toolu_018YcbFR37CG8RRXnavH5fxZ'), AIMessage(content='LangGraph is a library for building stateful, multi-actor applications with LLMs.', additional_kwargs={}, response_metadata={}, id='35fd5682-0c2a-4200-b192-71c59ac6d412')]
Now the graph is complete, since we've provided the final response message! Since state updates simulate a graph step, they even generate corresponding traces. Inspect the LangSmith trace of the update_state call above to see what's going on.
Notice that our new messages are appended to the messages already in the state. Remember how we defined the State type?
class State(TypedDict):
    messages: Annotated[list, add_messages]
We annotated messages with the prebuilt add_messages function. This instructs the graph to always append values to the existing list, rather than overwriting the list directly. The same logic is applied here, so the messages we passed to update_state were appended in the same way!
The update_state function operates as if it were one of the nodes in your graph! By default, the update operation uses the node that was last executed, but you can manually specify it below. Let's add an update and tell the graph to treat it as if it came from the "chatbot".
graph.update_state(
    config,
    {"messages": [AIMessage(content="I'm an AI expert!")]},
    # Which node for this function to act as. It will automatically continue
    # processing as if this node just ran.
    as_node="chatbot",
)
{'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef7d134-3958-6412-8002-3f4b4112062f'}}
Check out the LangSmith trace for this update call at the provided link. Notice from the trace that the graph continues into the tools_condition edge. We just told the graph to treat the update as_node="chatbot". If we follow the diagram below and start from the chatbot node, we naturally end up in the tools_condition edge and then __end__, since our updated message lacks tool calls.
from IPython.display import Image, display
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
Inspect the current state as before to confirm the checkpoint reflects our manual updates.
snapshot = graph.get_state(config)
print(snapshot.values["messages"][-3:])
print(snapshot.next)
[ToolMessage(content='LangGraph is a library for building stateful, multi-actor applications with LLMs.', id='675f7618-367f-44b7-b80e-2834afb02ac5', tool_call_id='toolu_018YcbFR37CG8RRXnavH5fxZ'), AIMessage(content='LangGraph is a library for building stateful, multi-actor applications with LLMs.', additional_kwargs={}, response_metadata={}, id='35fd5682-0c2a-4200-b192-71c59ac6d412'), AIMessage(content="I'm an AI expert!", additional_kwargs={}, response_metadata={}, id='288e2f74-f1cb-4082-8c3c-af4695c83117')]
()
Notice: that we've continued to add AI messages to the state. Since we are acting as the chatbot and responding with an AIMessage that doesn't contain tool_calls, the graph knows that it has entered a finished state (next is empty).
What if you want to overwrite existing messages?
The add_messages function we used to annotate our graph's State above controls how updates are made to the messages key. This function looks at any message IDs in the new messages list. If the ID matches a message in the existing state, add_messages overwrites the existing message with the new content.
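To see this replace-by-ID behavior in isolation, here is a minimal hedged sketch that calls add_messages directly (the message IDs are made up):
from langchain_core.messages import AIMessage
from langgraph.graph.message import add_messages

original = AIMessage(content="old text", id="msg-1")
replacement = AIMessage(content="new text", id="msg-1")  # same ID

merged = add_messages([original], [replacement])
print(len(merged), merged[0].content)  # 1 "new text": replaced, not appended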
As an example, let's update the tool invocation to make sure we get good results from our search engine! First, start a new thread:
user_input = "I'm learning LangGraph. Could you do some research on it for me?"
config = {"configurable": {"thread_id": "2"}} # we'll use thread_id = 2 here
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================ Human Message =================================

I'm learning LangGraph. Could you do some research on it for me?
================================== Ai Message ==================================

[{'text': "Certainly! I'd be happy to research LangGraph for you. To get the most up-to-date and accurate information, I'll use the Tavily search engine to look this up. Let me do that for you now.", 'type': 'text'}, {'id': 'toolu_01TfAeisrpx4ddgJpoAxqrVh', 'input': {'query': 'LangGraph framework for language models'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]
Tool Calls:
  tavily_search_results_json (toolu_01TfAeisrpx4ddgJpoAxqrVh)
 Call ID: toolu_01TfAeisrpx4ddgJpoAxqrVh
  Args:
    query: LangGraph framework for language models
Next, let's update the tool invocation for our agent. Maybe we want to search for human-in-the-loop workflows in particular.
from langchain_core.messages import AIMessage

snapshot = graph.get_state(config)
existing_message = snapshot.values["messages"][-1]
print("Original")
print("Message ID", existing_message.id)
print(existing_message.tool_calls[0])
new_tool_call = existing_message.tool_calls[0].copy()
new_tool_call["args"]["query"] = "LangGraph human-in-the-loop workflow"
new_message = AIMessage(
    content=existing_message.content,
    tool_calls=[new_tool_call],
    # Important! The ID is how LangGraph knows to REPLACE the message in the state rather than APPEND this message
    id=existing_message.id,
)

print("Updated")
print(new_message.tool_calls[0])
print("Message ID", new_message.id)
graph.update_state(config, {"messages": [new_message]})

print("\n\nTool calls")
graph.get_state(config).values["messages"][-1].tool_calls
Original
Message ID run-342f3f54-356b-4cc1-b747-573f6aa31054-0
{'name': 'tavily_search_results_json', 'args': {'query': 'LangGraph framework for language models'}, 'id': 'toolu_01TfAeisrpx4ddgJpoAxqrVh', 'type': 'tool_call'}
Updated
{'name': 'tavily_search_results_json', 'args': {'query': 'LangGraph human-in-the-loop workflow'}, 'id': 'toolu_01TfAeisrpx4ddgJpoAxqrVh', 'type': 'tool_call'}
Message ID run-342f3f54-356b-4cc1-b747-573f6aa31054-0


Tool calls
[{'name': 'tavily_search_results_json', 'args': {'query': 'LangGraph human-in-the-loop workflow'}, 'id': 'toolu_01TfAeisrpx4ddgJpoAxqrVh', 'type': 'tool_call'}]
Notice that we've modified the AI's tool invocation to search for "LangGraph human-in-the-loop workflow" instead of the simple "LangGraph".
Check out the LangSmith trace to see the state update call - you can see that our new message has successfully updated the previous AI message.
Resume the graph by streaming with an input of None and the existing config.
events = graph.stream(None, config, stream_mode="values")
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================== Ai Message ==================================

[{'text': "Certainly! I'd be happy to research LangGraph for you. To get the most up-to-date and accurate information, I'll use the Tavily search engine to look this up. Let me do that for you now.", 'type': 'text'}, {'id': 'toolu_01TfAeisrpx4ddgJpoAxqrVh', 'input': {'query': 'LangGraph framework for language models'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]
Tool Calls:
  tavily_search_results_json (toolu_01TfAeisrpx4ddgJpoAxqrVh)
 Call ID: toolu_01TfAeisrpx4ddgJpoAxqrVh
  Args:
    query: LangGraph human-in-the-loop workflow
================================= Tool Message =================================
Name: tavily_search_results_json

[{"url": "https://www.youtube.com/watch?v=9BPCV5TYPmg", "content": "In this video, I'll show you how to handle persistence with LangGraph, enabling a unique Human-in-the-Loop workflow. This approach allows a human to grant an..."}, {"url": "https://medium.com/@kbdhunga/implementing-human-in-the-loop-with-langgraph-ccfde023385c", "content": "Implementing a Human-in-the-Loop (HIL) framework in LangGraph with the Streamlit app provides a robust mechanism for user engagement and decision-making. By incorporating breakpoints and ..."}]
================================== Ai Message ==================================

Thank you for your patience. I've found some information about LangGraph, particularly focusing on its human-in-the-loop workflow capabilities. Let me summarize what I've learned for you:
1. LangGraph Overview: LangGraph is a framework for building stateful, multi-actor applications with Large Language Models (LLMs). It's particularly useful for creating complex, interactive AI systems.
2. Human-in-the-Loop (HIL) Workflow: One of the key features of LangGraph is its support for human-in-the-loop workflows. This means that it allows for human intervention and decision-making within AI-driven processes.
3. Persistence Handling: LangGraph offers capabilities for handling persistence, which is crucial for maintaining state across interactions in a workflow.
4. Implementation with Streamlit: There are examples of implementing LangGraph's human-in-the-loop functionality using Streamlit, a popular Python library for creating web apps. This combination allows for the creation of interactive user interfaces for AI applications.
5. Breakpoints and User Engagement: LangGraph allows the incorporation of breakpoints in the workflow. These breakpoints are points where the system can pause and wait for human input or decision-making, enhancing user engagement and control over the AI process.
6. Decision-Making Mechanism: The human-in-the-loop framework in LangGraph provides a robust mechanism for integrating user decision-making into AI workflows. This is particularly useful in scenarios where human judgment or expertise is needed to guide or validate AI actions.
7. Flexibility and Customization: From the information available, it seems that LangGraph offers flexibility in how human-in-the-loop processes are implemented, allowing developers to customize the interaction points and the nature of human involvement based on their specific use case.
LangGraph appears to be a powerful tool for developers looking to create more interactive and controllable AI applications, especially those that benefit from human oversight or input at crucial stages of the process.
Would you like me to research any specific aspect of LangGraph in more detail, or do you have any questions about what I've found so far?
Check out the trace to see the tool call and later LLM response. Notice that now the graph queries the search engine using our updated query term - we were able to manually override the LLM's search here!
All of this is reflected in the graph's checkpointed memory, meaning if we continue the conversation, it will recall all the modified state.
events = graph.stream(
    {
        "messages": (
            "user",
            "Remember what I'm learning about?",
        )
    },
    config,
    stream_mode="values",
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================ Human Message =================================

Remember what I'm learning about?
================================== Ai Message ==================================

I apologize for my oversight. You're absolutely right to remind me. You mentioned that you're learning LangGraph. Thank you for bringing that back into focus.
Since you're in the process of learning LangGraph, it would be helpful to know more about your current level of understanding and what specific aspects of LangGraph you're most interested in or finding challenging. This way, I can provide more targeted information or explanations that align with your learning journey.
Are there any particular areas of LangGraph you'd like to explore further? For example:
1. Basic concepts and architecture of LangGraph
2. Setting up and getting started with LangGraph
3. Implementing specific features like the human-in-the-loop workflow
4. Best practices for using LangGraph in projects
5. Comparisons with other similar frameworks
Or if you have any specific questions about what you've learned so far, I'd be happy to help clarify or expand on those topics. Please let me know what would be most useful for your learning process.
Congratulations! You've used interrupt_before and update_state to manually modify the state as a part of a human-in-the-loop workflow. Interruptions and state modifications let you control how the agent behaves. Combined with persistent checkpointing, it means you can pause an action and resume at any point. Your user doesn't have to be available when the graph interrupts!
The graph code for this section is identical to the previous ones. The key snippets to remember are to add .compile(..., interrupt_before=[...]) (or interrupt_after) if you want to explicitly pause the graph whenever it reaches a node. Then you can use update_state to modify the checkpoint and control how the graph should proceed.
Part 6: Customizing State
So far, we've relied on a simple state (it's just a list of messages!). You can go far with this simple state, but if you want to define complex behavior without relying on the message list, you can add additional fields to the state. In this section, we will extend our chatbot with a new node to illustrate this.
In the examples above, we involved a human deterministically: the graph always interrupted whenever a tool was invoked. Suppose we wanted our chatbot to have the choice of relying on a human.
One way to do this is to create a passthrough "human" node, before which the graph will always stop. We will only execute this node if the LLM invokes a "human" tool. For our convenience, we will include an "ask_human" flag in our graph state that we will flip if the LLM invokes this tool.
Below, define this new graph, with an updated State:
from typing import Annotated
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
class State(TypedDict):
    messages: Annotated[list, add_messages]
    # This flag is new
    ask_human: bool
Next, define a schema to show the model to let it decide whether to request assistance.
Using Pydantic with LangChain
This notebook uses Pydantic v2 BaseModel, which requires langchain-core >= 0.3. Using langchain-core < 0.3 will result in errors due to mixing Pydantic v1 and v2 BaseModels.
from pydantic import BaseModel
class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """

    request: str
Next, define the chatbot node. The main modification here is to flip the ask_human flag if we see that the chatbot has invoked the RequestAssistance tool.
tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
# We can bind the llm to a tool definition, a pydantic model, or a json schema
llm_with_tools = llm.bind_tools(tools + [RequestAssistance])


def chatbot(state: State):
    response = llm_with_tools.invoke(state["messages"])
    ask_human = False
    if (
        response.tool_calls
        and response.tool_calls[0]["name"] == RequestAssistance.__name__
    ):
        ask_human = True
    return {"messages": [response], "ask_human": ask_human}
Next, create the graph builder and add the chatbot and tools nodes to the graph, same as before.
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=[tool]))
Next, create the "human" node. This node function is mostly a placeholder in our graph that will trigger an interrupt. If the human does not manually update the state during the interrupt, it inserts a tool message so the LLM knows the user was requested but didn't respond. This node also unsets the ask_human flag so the graph knows not to revisit the node unless further requests are made.
from langchain_core.messages import AIMessage, ToolMessage


def create_response(response: str, ai_message: AIMessage):
    return ToolMessage(
        content=response,
        tool_call_id=ai_message.tool_calls[0]["id"],
    )


def human_node(state: State):
    new_messages = []
    if not isinstance(state["messages"][-1], ToolMessage):
        # Typically, the user will have updated the state during the interrupt.
        # If they choose not to, we will include a placeholder ToolMessage to
        # let the LLM continue.
        new_messages.append(
            create_response("No response from human.", state["messages"][-1])
        )
    return {
        # Append the new messages
        "messages": new_messages,
        # Unset the flag
        "ask_human": False,
    }


graph_builder.add_node("human", human_node)
Next, define the conditional logic. select_next_node will route to the human node if the flag is set. Otherwise, it lets the prebuilt tools_condition function choose the next node.
Recall that the tools_condition function simply checks to see if the chatbot has responded with any tool_calls in its response message. If so, it routes to the action node. Otherwise, it ends the graph.
def select_next_node(state: State):
    if state["ask_human"]:
        return "human"
    # Otherwise, we can route as before
    return tools_condition(state)


graph_builder.add_conditional_edges(
    "chatbot",
    select_next_node,
    {"human": "human", "tools": "tools", END: END},
)
Finally, add the simple directed edges and compile the graph. These edges instruct the graph to always flow from node a->b whenever a finishes executing.
# The rest is the same
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge("human", "chatbot")
graph_builder.add_edge(START, "chatbot")
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    # We interrupt before 'human' here instead.
    interrupt_before=["human"],
)
If you have the visualization dependencies installed, you can see the graph structure below:
from IPython.display import Image, display
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
The chatbot can either request help from a human (chatbot->select->human), invoke the search engine tool (chatbot->select->action), or directly respond (chatbot->select->end). Once an action or request has been made, the graph will transition back to the chatbot node to continue operations.
Let's see this graph in action. We will request expert assistance to illustrate our graph.
user_input = "I need some expert guidance for building this AI agent. Could you request assistance for me?"
config = {"configurable": {"thread_id": "1"}}
# The config is the **second positional argument** to stream() or invoke()!
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================ Human Message =================================

I need some expert guidance for building this AI agent. Could you request assistance for me?
================================== Ai Message ==================================

[{'text': "Certainly! I understand that you need expert guidance for building an AI agent. I'll use the RequestAssistance function to escalate your request to an expert who can provide you with the specialized knowledge and support you need. Let me do that for you right away.", 'type': 'text'}, {'id': 'toolu_01Mo3N2c1byuSZwT1vyJWRia', 'input': {'request': 'The user needs expert guidance for building an AI agent. They require specialized knowledge and support in AI development and implementation.'}, 'name': 'RequestAssistance', 'type': 'tool_use'}]
Tool Calls:
  RequestAssistance (toolu_01Mo3N2c1byuSZwT1vyJWRia)
 Call ID: toolu_01Mo3N2c1byuSZwT1vyJWRia
  Args:
    request: The user needs expert guidance for building an AI agent. They require specialized knowledge and support in AI development and implementation.
Notice: the LLM has invoked the "RequestAssistance" tool we provided it, and the interrupt has been set. Let's inspect the graph state to confirm.
snapshot = graph.get_state(config)
snapshot.next
('human',)
The graph state is indeed interrupted before the 'human' node. We can act as the "expert" in this scenario and manually update the state by adding a new ToolMessage with our input.
Next, respond to the chatbot's request by:
- Creating a ToolMessage with our response. This will be passed back to the chatbot.
- Calling update_state to manually update the graph state.
ai_message = snapshot.values["messages"][-1]
human_response = (
    "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
    " It's much more reliable and extensible than simple autonomous agents."
)
tool_message = create_response(human_response, ai_message)
graph.update_state(config, {"messages": [tool_message]})
{'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef7d092-bb30-6bee-8002-015e7e1c56c0'}}
You can inspect the state to confirm our response was added.
graph.get_state(config).values["messages"]
[HumanMessage(content='I need some expert guidance for building this AI agent. Could you request assistance for me?', additional_kwargs={}, response_metadata={}, id='3f28f959-9ab7-489a-9c58-7ed1b49cedf3'), AIMessage(content=[{'text': "Certainly! I understand that you need expert guidance for building an AI agent. I'll use the RequestAssistance function to escalate your request to an expert who can provide you with the specialized knowledge and support you need. Let me do that for you right away.", 'type': 'text'}, {'id': 'toolu_01Mo3N2c1byuSZwT1vyJWRia', 'input': {'request': 'The user needs expert guidance for building an AI agent. They require specialized knowledge and support in AI development and implementation.'}, 'name': 'RequestAssistance', 'type': 'tool_use'}], additional_kwargs={}, response_metadata={'id': 'msg_01VRnZvVbgsVRbQaQuvsziDx', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 516, 'output_tokens': 130}}, id='run-4e3f7906-5887-40d9-9267-5beefe7b3b76-0', tool_calls=[{'name': 'RequestAssistance', 'args': {'request': 'The user needs expert guidance for building an AI agent. They require specialized knowledge and support in AI development and implementation.'}, 'id': 'toolu_01Mo3N2c1byuSZwT1vyJWRia', 'type': 'tool_call'}], usage_metadata={'input_tokens': 516, 'output_tokens': 130, 'total_tokens': 646}), ToolMessage(content="We, the experts are here to help! We'd recommend you check out LangGraph to build your agent. It's much more reliable and extensible than simple autonomous agents.", id='8583b899-d898-4051-9f36-f5e5d11e9a37', tool_call_id='toolu_01Mo3N2c1byuSZwT1vyJWRia')]
Next, resume the graph by invoking it with None as the input.
events = graph.stream(None, config, stream_mode="values")
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================================= Tool Message =================================

We, the experts are here to help! We'd recommend you check out LangGraph to build your agent. It's much more reliable and extensible than simple autonomous agents.
================================= Tool Message =================================

We, the experts are here to help! We'd recommend you check out LangGraph to build your agent. It's much more reliable and extensible than simple autonomous agents.
================================== Ai Message ==================================

Thank you for your patience. I've escalated your request to our expert team, and they have provided some initial guidance. Here's what they suggest:
The experts recommend that you check out LangGraph for building your AI agent. They mention that LangGraph is a more reliable and extensible option compared to simple autonomous agents.
LangGraph is likely a framework or tool designed specifically for creating complex AI agents. It seems to offer advantages in terms of reliability and extensibility, which are crucial factors when developing sophisticated AI systems.
To further assist you, I can provide some additional context and next steps:
1. Research LangGraph: Look up documentation, tutorials, and examples of LangGraph to understand its features and how it can help you build your AI agent.
2. Compare with other options: While the experts recommend LangGraph, it might be useful to understand how it compares to other AI agent development frameworks or tools you might have been considering.
3. Assess your requirements: Consider your specific needs for the AI agent you want to build. Think about the tasks it needs to perform, the level of complexity required, and how LangGraph's features align with these requirements.
4. Start with a small project: If you decide to use LangGraph, consider beginning with a small, manageable project to familiarize yourself with the framework.
5. Seek community support: Look for LangGraph user communities, forums, or discussion groups where you can ask questions and get additional support as you build your agent.
6. Consider additional training: Depending on your current skill level, you might want to look into courses or workshops that focus on AI agent development, particularly those that cover LangGraph.
Do you have any specific questions about LangGraph or AI agent development that you'd like me to try to answer? Or would you like me to search for more detailed information about LangGraph and its features?
Notice that the chatbot has incorporated the updated state in its final response. Since everything was checkpointed, the "expert" human in the loop could perform the update at any time without impacting the graph's execution.
Congratulations! You've now added an additional node to your assistant graph to let the chatbot decide for itself whether or not it needs to interrupt execution. You did so by updating the graph State with a new ask_human field and modifying the interruption logic when compiling the graph. This lets you dynamically include a human in the loop while maintaining full memory every time you execute the graph.
We're almost done with the tutorial, but there is one more concept we'd like to review before finishing, which connects checkpointing and state updates.
This section's code is reproduced below for your reference.
Full Code
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, BaseMessage, ToolMessage

# NOTE: you must use langchain-core >= 0.3 with Pydantic v2
from pydantic import BaseModel
from typing_extensions import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition


class State(TypedDict):
    messages: Annotated[list, add_messages]
    # This flag is new
    ask_human: bool


class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """

    request: str


tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
# We can bind the llm to a tool definition, a pydantic model, or a json schema
llm_with_tools = llm.bind_tools(tools + [RequestAssistance])


def chatbot(state: State):
    response = llm_with_tools.invoke(state["messages"])
    ask_human = False
    if (
        response.tool_calls
        and response.tool_calls[0]["name"] == RequestAssistance.__name__
    ):
        ask_human = True
    return {"messages": [response], "ask_human": ask_human}


graph_builder = StateGraph(State)

graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=[tool]))


def create_response(response: str, ai_message: AIMessage):
    return ToolMessage(
        content=response,
        tool_call_id=ai_message.tool_calls[0]["id"],
    )


def human_node(state: State):
    new_messages = []
    if not isinstance(state["messages"][-1], ToolMessage):
        # Typically, the user will have updated the state during the interrupt.
        # If they choose not to, we will include a placeholder ToolMessage to
        # let the LLM continue.
        new_messages.append(
            create_response("No response from human.", state["messages"][-1])
        )
    return {
        # Append the new messages
        "messages": new_messages,
        # Unset the flag
        "ask_human": False,
    }


graph_builder.add_node("human", human_node)


def select_next_node(state: State):
    if state["ask_human"]:
        return "human"
    # Otherwise, we can route as before
    return tools_condition(state)


graph_builder.add_conditional_edges(
    "chatbot",
    select_next_node,
    {"human": "human", "tools": "tools", "__end__": "__end__"},
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge("human", "chatbot")
graph_builder.set_entry_point("chatbot")
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    interrupt_before=["human"],
)
Part 7: Time Travel
In a typical chatbot workflow, the user interacts with the bot one or more times to accomplish a task. In the previous sections, we saw how to add memory and a human-in-the-loop to be able to checkpoint our graph state and manually override the state to control future responses.
But what if you want to let your user start from a previous response and "branch off" to explore a separate outcome? Or what if you want users to be able to "rewind" your assistant's work to fix some mistakes or try a different strategy (common in applications like autonomous software engineers)?
You can create both of these experiences and more using LangGraph's built-in "time travel" functionality.
In this section, you will "rewind" your graph by fetching a checkpoint using the graph's get_state_history method. You can then resume execution at this previous point in time.
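The overall flow looks roughly like the hedged sketch below (it assumes the graph, checkpointer, and thread "1" built in the previous sections; the choice of which checkpoint to replay is arbitrary):
# Walk the saved history for the thread (newest checkpoints first).
to_replay = None
for state in graph.get_state_history({"configurable": {"thread_id": "1"}}):
    print("num messages:", len(state.values["messages"]), "next:", state.next)
    if len(state.values["messages"]) == 2:
        to_replay = state  # arbitrarily pick an earlier checkpoint

if to_replay is not None:
    # `to_replay.config` carries a checkpoint_id; streaming with it resumes
    # from that moment instead of from the latest state.
    for event in graph.stream(None, to_replay.config, stream_mode="values"):
        if "messages" in event:
            event["messages"][-1].pretty_print()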
First, recall our chatbot graph. We don't need to make any changes from before:
from typing import Annotated, Literal
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, ToolMessage
# NOTE: you must use langchain-core >= 0.3 with Pydantic v2
from pydantic import BaseModel
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
class State(TypedDict):
    messages: Annotated[list, add_messages]
    # This flag is new
    ask_human: bool


class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """

    request: str
tool = TavilySearchResults(max_results=2)
tools = [tool]
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
# We can bind the llm to a tool definition, a pydantic model, or a json schema
llm_with_tools = llm.bind_tools(tools + [RequestAssistance])
def chatbot(state: State):
    response = llm_with_tools.invoke(state["messages"])
    ask_human = False
    if (
        response.tool_calls
        and response.tool_calls[0]["name"] == RequestAssistance.__name__
    ):
        ask_human = True
    return {"messages": [response], "ask_human": ask_human}
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=[tool]))
def create_response(response: str, ai_message: AIMessage):
    return ToolMessage(
        content=response,
        tool_call_id=ai_message.tool_calls[0]["id"],
    )


def human_node(state: State):
    new_messages = []
    if not isinstance(state["messages"][-1], ToolMessage):
        # Typically, the user will have updated the state during the interrupt.
        # If they choose not to, we will include a placeholder ToolMessage to
        # let the LLM continue.
        new_messages.append(
            create_response("No response from human.", state["messages"][-1])
        )
    return {
        # Append the new messages
        "messages": new_messages,
        # Unset the flag
        "ask_human": False,
    }


graph_builder.add_node("human", human_node)
def select_next_node(state: State):
    if state["ask_human"]:
        return "human"
    # Otherwise, we can route as before
    return tools_condition(state)


graph_builder.add_conditional_edges(
    "chatbot",
    select_next_node,
    {"human": "human", "tools": "tools", END: END},
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge("human", "chatbot")
graph_builder.add_edge(START, "chatbot")
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    interrupt_before=["human"],
)
from IPython.display import Image, display
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass