如何将配置传递给工具¶
设置¶
首先,让我们安装所需的软件包,并设置我们的 API 密钥。
在 [1]
已复制!
%%capture --no-stderr
%pip install --quiet -U langgraph langchain_anthropic
%%capture --no-stderr %pip install --quiet -U langgraph langchain_anthropic
在 [2]
已复制!
import getpass
import os


def _set_env(var: str):
    """Prompt for *var* and store it in os.environ, unless it is already set."""
    if os.environ.get(var):
        return
    os.environ[var] = getpass.getpass(f"{var}: ")


_set_env("ANTHROPIC_API_KEY")
import getpass import os def _set_env(var: str): if not os.environ.get(var): os.environ[var] = getpass.getpass(f"{var}: ") _set_env("ANTHROPIC_API_KEY")
定义工具和模型¶
在 [3]
已复制!
from typing import List

from langchain_core.tools import tool
from langchain_core.runnables.config import RunnableConfig
from langgraph.prebuilt import ToolNode

# In-memory store mapping user_id -> that user's list of favorite pets.
user_to_pets = {}


@tool(parse_docstring=True)
def update_favorite_pets(
    # NOTE: the config arg is intentionally absent from the docstring, so it is
    # not included in the function signature attached to the LLM.
    pets: List[str],
    config: RunnableConfig,
) -> None:
    """Add the list of favorite pets.

    Args:
        pets: List of favorite pets to set.
    """
    # The user id is supplied at invocation time via config["configurable"].
    user_id = config.get("configurable", {}).get("user_id")
    user_to_pets[user_id] = pets


@tool
def delete_favorite_pets(config: RunnableConfig) -> None:
    """Delete the list of favorite pets."""
    user_id = config.get("configurable", {}).get("user_id")
    # pop() is a no-op when this user has nothing stored.
    user_to_pets.pop(user_id, None)


@tool
def list_favorite_pets(config: RunnableConfig) -> str:
    # BUGFIX: was annotated "-> None" although it returns a string.
    """List favorite pets if any."""
    user_id = config.get("configurable", {}).get("user_id")
    return ", ".join(user_to_pets.get(user_id, []))
from typing import List from langchain_core.tools import tool from langchain_core.runnables.config import RunnableConfig from langgraph.prebuilt import ToolNode user_to_pets = {} @tool(parse_docstring=True) def update_favorite_pets( # NOTE: config 参数不需要添加到文档字符串中,因为我们不希望它包含在附加到 LLM 的函数签名中 pets: List[str], config: RunnableConfig, ) -> None: """添加宠物列表。参数:pets: 要设置的宠物列表。""" user_id = config.get("configurable", {}).get("user_id") user_to_pets[user_id] = pets @tool def delete_favorite_pets(config: RunnableConfig) -> None: """删除宠物列表。""" user_id = config.get("configurable", {}).get("user_id") if user_id in user_to_pets: del user_to_pets[user_id] @tool def list_favorite_pets(config: RunnableConfig) -> None: """列出宠物(如果有)。""" user_id = config.get("configurable", {}).get("user_id") return ", ".join(user_to_pets.get(user_id, []))
在 [4]
已复制!
# Collect the tools and wrap them in a ToolNode so they can run as a graph node.
tools = [update_favorite_pets, delete_favorite_pets, list_favorite_pets]
tool_node = ToolNode(tools)
tools = [update_favorite_pets, delete_favorite_pets, list_favorite_pets] tool_node = ToolNode(tools)
在我们的示例中,我们将使用 Anthropic 的小型聊天模型。要将聊天模型与工具调用一起使用,我们需要首先确保模型了解可用的工具。我们通过在 ChatAnthropic 模型上调用 .bind_tools 方法来做到这一点。
在 [5]
已复制!
from langchain_anthropic import ChatAnthropic
from langgraph.graph import StateGraph, MessagesState
from langgraph.prebuilt import ToolNode

# Deterministic (temperature=0) Haiku model, made aware of the available tools.
_model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)
model_with_tools = _model.bind_tools(tools)
from langchain_anthropic import ChatAnthropic from langgraph.graph import StateGraph, MessagesState from langgraph.prebuilt import ToolNode model_with_tools = ChatAnthropic( model="claude-3-haiku-20240307", temperature=0 ).bind_tools(tools)
在 [6]
已复制!
from typing import Literal

from langgraph.graph import StateGraph, MessagesState, START, END


def should_continue(state: MessagesState):
    """Route to the tool node when the last message requested tool calls, else end."""
    last = state["messages"][-1]
    return "tools" if last.tool_calls else END


def call_model(state: MessagesState):
    """Invoke the tool-aware model on the conversation and append its reply."""
    reply = model_with_tools.invoke(state["messages"])
    return {"messages": [reply]}


builder = StateGraph(MessagesState)

# The two nodes we cycle between: the model and the tool executor.
builder.add_node("agent", call_model)
builder.add_node("tools", tool_node)

builder.add_edge(START, "agent")
builder.add_conditional_edges("agent", should_continue, ["tools", END])
builder.add_edge("tools", "agent")

graph = builder.compile()
from typing import Literal from langgraph.graph import StateGraph, MessagesState, START, END def should_continue(state: MessagesState): messages = state["messages"] last_message = messages[-1] if last_message.tool_calls: return "tools" return END def call_model(state: MessagesState): messages = state["messages"] response = model_with_tools.invoke(messages) return {"messages": [response]} builder = StateGraph(MessagesState) # 定义我们将循环遍历的两个节点 builder.add_node("agent", call_model) builder.add_node("tools", tool_node) builder.add_edge(START, "agent") builder.add_conditional_edges("agent", should_continue, ["tools", END]) builder.add_edge("tools", "agent") graph = builder.compile()
在 [7]
已复制!
from IPython.display import Image, display

try:
    # Mermaid PNG rendering needs optional extra dependencies; skip if unavailable.
    png = graph.get_graph().draw_mermaid_png()
    display(Image(png))
except Exception:
    pass
from IPython.display import Image, display try: display(Image(graph.get_graph().draw_mermaid_png())) except Exception: # 这需要一些额外的依赖项,并且是可选的 pass
开始使用!¶
在 [8]
已复制!
from langchain_core.messages import HumanMessage

user_to_pets.clear()  # start from an empty store
print(f"User information prior to run: {user_to_pets}")

inputs = {"messages": [HumanMessage(content="my favorite pets are cats and dogs")]}
config = {"configurable": {"user_id": "123"}}
for step in graph.stream(inputs, config, stream_mode="values"):
    step["messages"][-1].pretty_print()

print(f"User information after the run: {user_to_pets}")
from langchain_core.messages import HumanMessage user_to_pets.clear() # 清除状态 print(f"运行前的用户信息:{user_to_pets}") inputs = {"messages": [HumanMessage(content="我最喜欢的宠物是猫和狗")]} for chunk in graph.stream( inputs, {"configurable": {"user_id": "123"}}, stream_mode="values" ): chunk["messages"][-1].pretty_print() print(f"运行后的用户信息:{user_to_pets}")
User information prior to run: {} ================================ Human Message ================================= my favorite pets are cats and dogs ================================== Ai Message ================================== [{'text': "Okay, let's update your favorite pets:", 'type': 'text'}, {'id': 'toolu_01SU6vhbKDjSsPj2z86QA3wy', 'input': {'pets': ['cats', 'dogs']}, 'name': 'update_favorite_pets', 'type': 'tool_use'}] Tool Calls: update_favorite_pets (toolu_01SU6vhbKDjSsPj2z86QA3wy) Call ID: toolu_01SU6vhbKDjSsPj2z86QA3wy Args: pets: ['cats', 'dogs'] ================================= Tool Message ================================= Name: update_favorite_pets null ================================== Ai Message ================================== Your favorite pets have been updated to cats and dogs. User information after the run: {'123': ['cats', 'dogs']}
在 [9]
已复制!
from langchain_core.messages import HumanMessage

print(f"User information prior to run: {user_to_pets}")

inputs = {"messages": [HumanMessage(content="what are my favorite pets")]}
for chunk in graph.stream(
    inputs, {"configurable": {"user_id": "123"}}, stream_mode="values"
):
    chunk["messages"][-1].pretty_print()

# BUGFIX: this print runs AFTER the graph; the label said "prior to run"
# (copy-paste slip — the first example correctly says "after the run").
print(f"User information after the run: {user_to_pets}")
from langchain_core.messages import HumanMessage print(f"运行前的用户信息:{user_to_pets}") inputs = {"messages": [HumanMessage(content="我最喜欢的宠物是什么")]} for chunk in graph.stream( inputs, {"configurable": {"user_id": "123"}}, stream_mode="values" ): chunk["messages"][-1].pretty_print() print(f"运行前的用户信息:{user_to_pets}")
User information prior to run: {'123': ['cats', 'dogs']} ================================ Human Message ================================= what are my favorite pets ================================== Ai Message ================================== [{'id': 'toolu_01DdpiqiCxzbR4RjQdEoR6mJ', 'input': {}, 'name': 'list_favorite_pets', 'type': 'tool_use'}] Tool Calls: list_favorite_pets (toolu_01DdpiqiCxzbR4RjQdEoR6mJ) Call ID: toolu_01DdpiqiCxzbR4RjQdEoR6mJ Args: ================================= Tool Message ================================= Name: list_favorite_pets cats, dogs ================================== Ai Message ================================== Based on the list_favorite_pets tool, your favorite pets are cats and dogs. User information prior to run: {'123': ['cats', 'dogs']}
在 [10]
已复制!
print(f"User information prior to run: {user_to_pets}")

inputs = {
    "messages": [
        HumanMessage(content="please forget what i told you about my favorite animals")
    ]
}
for chunk in graph.stream(
    inputs, {"configurable": {"user_id": "123"}}, stream_mode="values"
):
    chunk["messages"][-1].pretty_print()

# BUGFIX: this print reports state AFTER the run; the label said "prior to run"
# (same copy-paste slip as the previous cell).
print(f"User information after the run: {user_to_pets}")
print(f"运行前的用户信息:{user_to_pets}") inputs = { "messages": [ HumanMessage(content="请忘记我告诉你的关于我最喜欢的动物的信息") ] } for chunk in graph.stream( inputs, {"configurable": {"user_id": "123"}}, stream_mode="values" ): chunk["messages"][-1].pretty_print() print(f"运行前的用户信息:{user_to_pets}")
User information prior to run: {'123': ['cats', 'dogs']} ================================ Human Message ================================= please forget what i told you about my favorite animals ================================== Ai Message ================================== [{'id': 'toolu_013TXG6yTxvuWiugbdKGTKSF', 'input': {}, 'name': 'delete_favorite_pets', 'type': 'tool_use'}] Tool Calls: delete_favorite_pets (toolu_013TXG6yTxvuWiugbdKGTKSF) Call ID: toolu_013TXG6yTxvuWiugbdKGTKSF Args: ================================= Tool Message ================================= Name: delete_favorite_pets null ================================== Ai Message ================================== I have deleted the information about your favorite pets. The list of favorite pets has been cleared. User information prior to run: {}