Setup¶
First, let's set up the packages we're going to use
In [ ]
%%capture --no-stderr
%pip install --quiet -U langgraph langchain_anthropic
Next, we need to set an API key for Anthropic (the LLM we will use)
In [2]
import getpass
import os
def _set_env(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")


_set_env("ANTHROPIC_API_KEY")
Build the agent¶
Now let's build a simple ReAct-style agent.
In [13]
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import MessagesState, StateGraph, START, END
from langgraph.prebuilt import ToolNode

memory = MemorySaver()


@tool
def search(query: str):
    """Call to surf the web."""
    # This is a placeholder for the actual implementation
    # Don't let the LLM know this though 😊
    return "It's sunny in San Francisco, but you better look out if you're a Gemini 😈."


tools = [search]
tool_node = ToolNode(tools)
model = ChatAnthropic(model_name="claude-3-haiku-20240307")
bound_model = model.bind_tools(tools)


def should_continue(state: MessagesState):
    """Return the next node to execute."""
    last_message = state["messages"][-1]
    # If there is no tool call, then we finish
    if not last_message.tool_calls:
        return END
    # Otherwise, we continue to the tool node
    return "action"


# Define the function that calls the model
def call_model(state: MessagesState):
    response = bound_model.invoke(state["messages"])
    # The `messages` reducer appends this response to the existing message list
    return {"messages": response}


# Define a new graph
workflow = StateGraph(MessagesState)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.add_edge(START, "agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Next, we pass in the path map - all the possible nodes this edge could go to
    ["action", END],
)

# We now add a normal edge from `action` to `agent`.
# This means that after the tool node runs, the `agent` node is called next.
workflow.add_edge("action", "agent")

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile(checkpointer=memory)
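If you want to sanity-check the wiring before running the agent, a compiled LangGraph graph can render itself as a Mermaid diagram. A minimal sketch, assuming a Jupyter environment and the optional drawing dependencies:

from IPython.display import Image, display

# Render the compiled graph: START -> agent -> (action <-> agent) -> END
display(Image(app.get_graph().draw_mermaid_png()))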
In [14]
from langchain_core.messages import HumanMessage

config = {"configurable": {"thread_id": "2"}}
input_message = HumanMessage(content="hi! I'm bob")
for event in app.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()

input_message = HumanMessage(content="what's my name?")
for event in app.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()
================================ Human Message =================================

hi! I'm bob
================================== Ai Message ==================================

Nice to meet you, Bob! As an AI assistant, I don't have a physical form, but I'm happy to chat with you and try my best to help out however I can. Please feel free to ask me anything, and I'll do my best to provide useful information or assistance.
================================ Human Message =================================

what's my name?
================================== Ai Message ==================================

You said your name is Bob, so that is the name I have for you.
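The agent remembers "bob" because the MemorySaver checkpointer persists every turn under the given thread_id. To see exactly what has been stored for a thread, you can read the checkpoint back; a minimal sketch using the compiled graph's get_state method:

# Inspect the persisted state for this thread
snapshot = app.get_state(config)
for message in snapshot.values["messages"]:
    message.pretty_print()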
Filter messages¶
The most straightforward way to prevent conversation history from blowing up is to filter the list of messages before they get passed to the LLM. This involves two parts: defining a function to filter messages, and then adding it to the graph. See the example below, which defines a very simple filter_messages function and then uses it.
In [15]
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import MessagesState, StateGraph, START, END
from langgraph.prebuilt import ToolNode

memory = MemorySaver()


@tool
def search(query: str):
    """Call to surf the web."""
    # This is a placeholder for the actual implementation
    # Don't let the LLM know this though 😊
    return "It's sunny in San Francisco, but you better look out if you're a Gemini 😈."


tools = [search]
tool_node = ToolNode(tools)
model = ChatAnthropic(model_name="claude-3-haiku-20240307")
bound_model = model.bind_tools(tools)


def should_continue(state: MessagesState):
    """Return the next node to execute."""
    last_message = state["messages"][-1]
    # If there is no tool call, then we finish
    if not last_message.tool_calls:
        return END
    # Otherwise, we continue to the tool node
    return "action"


def filter_messages(messages: list):
    # This is a very simple helper function which only ever uses the last message
    return messages[-1:]


# Define the function that calls the model
def call_model(state: MessagesState):
    messages = filter_messages(state["messages"])
    response = bound_model.invoke(messages)
    # The `messages` reducer appends this response to the existing message list
    return {"messages": response}


# Define a new graph
workflow = StateGraph(MessagesState)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.add_edge(START, "agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Next, we pass in the path map - all the possible nodes this edge could go to
    ["action", END],
)

# We now add a normal edge from `action` to `agent`.
# This means that after the tool node runs, the `agent` node is called next.
workflow.add_edge("action", "agent")

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile(checkpointer=memory)
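Since filter_messages is a plain function over a list of messages, you can sanity-check it outside the graph. A quick sketch with made-up messages:

from langchain_core.messages import AIMessage, HumanMessage

history = [
    HumanMessage(content="hi! I'm bob"),
    AIMessage(content="Hello Bob!"),
    HumanMessage(content="what's my name?"),
]
# Only the last message survives the filter
print(filter_messages(history))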
In [16]
from langchain_core.messages import HumanMessage

config = {"configurable": {"thread_id": "2"}}
input_message = HumanMessage(content="hi! I'm bob")
for event in app.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()

# This will now not remember the previous messages
# (because filter_messages only keeps the last message)
input_message = HumanMessage(content="what's my name?")
for event in app.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()
================================ Human Message =================================

hi! I'm bob
================================== Ai Message ==================================

Nice to meet you, Bob! I'm Claude, an AI assistant created by Anthropic. It's a pleasure to chat with you. Feel free to ask me anything, I'm here to help!
================================ Human Message =================================

what's my name?
================================== Ai Message ==================================

I'm afraid I don't actually know your name. As an AI assistant, I don't have information about the specific identities of the people I talk to. I only know what is provided to me during our conversation.
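Keeping only the last message is deliberately crude, and as the output shows, it costs the agent its memory of earlier turns. Note that the full history is still persisted by the checkpointer; the filter only changes what the model sees. A slightly more realistic variant (a sketch, not part of the tutorial code above) might preserve any system message plus the last few turns:

from langchain_core.messages import SystemMessage

def filter_messages(messages: list):
    # Keep any system messages, plus the last four conversational messages
    system = [m for m in messages if isinstance(m, SystemMessage)]
    rest = [m for m in messages if not isinstance(m, SystemMessage)]
    return system + rest[-4:]

For token-aware truncation, langchain_core also provides a trim_messages utility.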