在 [1]
已复制!
%%capture --no-stderr
%pip install -U langchain_openai langgraph
%%capture --no-stderr %pip install -U langchain_openai langgraph
在 [2]
已复制!
import getpass
import os


def _set_env(var: str):
    """Prompt for the environment variable *var* and set it, unless already set."""
    if os.environ.get(var):
        return
    os.environ[var] = getpass.getpass(f"{var}: ")


_set_env("ANTHROPIC_API_KEY")
import getpass import os def _set_env(var: str): if not os.environ.get(var): os.environ[var] = getpass.getpass(f"{var}: ") _set_env("ANTHROPIC_API_KEY")
定义存储¶
在本例中,我们将创建一个能够检索有关用户偏好的信息的图。我们将通过定义一个 InMemoryStore
来做到这一点 - 一个可以将数据存储在内存中并查询该数据的对象。然后,我们将在编译图时传递存储对象。这允许图中的每个节点访问存储:当你定义节点函数时,你可以定义 store
关键字参数,LangGraph 将自动传递你用来编译图的存储对象。
使用 Store
接口存储对象时,你定义两件事
- 对象的命名空间,一个元组(类似于目录)
- 对象键(类似于文件名)
在本例中,我们将使用 ("memories", <user_id>)
作为命名空间,并将随机 UUID 作为每个新内存的键。
重要的是,为了确定用户,我们将通过节点函数的 config 关键字参数传递 user_id
。
让我们首先定义一个 InMemoryStore
,它已经填充了一些关于用户的记忆。
在 [3]
已复制!
from langgraph.store.memory import InMemoryStore

# Shared in-memory key-value store passed to the graph at compile time;
# nodes store/search user memories here under a ("memories", <user_id>) namespace.
in_memory_store = InMemoryStore()
from langgraph.store.memory import InMemoryStore in_memory_store = InMemoryStore()
创建图¶
在 [4]
已复制!
import uuid

from typing import Annotated

from typing_extensions import TypedDict

from langchain_anthropic import ChatAnthropic
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, MessagesState, START
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.base import BaseStore

# Chat model used by the graph's single node.
model = ChatAnthropic(model="claude-3-5-sonnet-20240620")
# NOTE: we're passing the Store param to the node --
# this is the Store we compile the graph with
def call_model(state: MessagesState, config: RunnableConfig, *, store: BaseStore):
    """Single chat node: recall this user's memories, optionally record a new
    one, then invoke the model with the memories injected as a system message.
    """
    uid = config["configurable"]["user_id"]
    ns = ("memories", uid)

    # Gather everything previously stored for this user into the system prompt.
    recalled = store.search(ns)
    info = "\n".join(item.value["data"] for item in recalled)
    system_msg = f"You are a helpful assistant talking to the user. User info: {info}"

    # Store new memories if the user asks the model to remember
    latest = state["messages"][-1]
    if "remember" in latest.content.lower():
        store.put(ns, str(uuid.uuid4()), {"data": "User name is Bob"})

    reply = model.invoke(
        [{"type": "system", "content": system_msg}] + state["messages"]
    )
    return {"messages": reply}
# Wire up a one-node graph: START -> call_model.
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
# NOTE: we're passing the store object here when compiling the graph
graph = builder.compile(checkpointer=MemorySaver(), store=in_memory_store)
# If you're using LangGraph Cloud or LangGraph Studio, you don't need to pass the store or checkpointer when compiling the graph, since it's done automatically.
import uuid from typing import Annotated from typing_extensions import TypedDict from langchain_anthropic import ChatAnthropic from langchain_core.runnables import RunnableConfig from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.memory import MemorySaver from langgraph.store.base import BaseStore model = ChatAnthropic(model="claude-3-5-sonnet-20240620") # 注意:我们将 Store 参数传递给节点 - # 这是我们用来编译图的 Store def call_model(state: MessagesState, config: RunnableConfig, *, store: BaseStore): user_id = config["configurable"]["user_id"] namespace = ("memories", user_id) memories = store.search(namespace) info = "\n".join([d.value["data"] for d in memories]) system_msg = f"你是一个帮助用户说话的助手。用户信息:{info}" # 如果用户要求模型记住,则存储新的记忆 last_message = state["messages"][-1] if "remember" in last_message.content.lower(): memory = "用户名是 Bob" store.put(namespace, str(uuid.uuid4()), {"data": memory}) response = model.invoke( [{"type": "system", "content": system_msg}] + state["messages"] ) return {"messages": response} builder = StateGraph(MessagesState) builder.add_node("call_model", call_model) builder.add_edge(START, "call_model") # 注意:我们在编译图时在此处传递存储对象 graph = builder.compile(checkpointer=MemorySaver(), store=in_memory_store) # 如果你使用的是 LangGraph Cloud 或 LangGraph Studio,则在编译图时无需传递存储或检查点,因为它是自动完成的。
注意
如果你使用的是 LangGraph Cloud 或 LangGraph Studio,则 **无需** 在编译图时传递存储,因为它会自动完成。
运行图!¶
现在让我们在配置中指定一个用户 ID 并告诉模型我们的姓名
在 [5]
已复制!
config = {"configurable": {"thread_id": "1", "user_id": "1"}}
input_message = {"type": "user", "content": "Hi! Remember: my name is Bob"}
for chunk in graph.stream({"messages": [input_message]}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()
config = {"configurable": {"thread_id": "1", "user_id": "1"}} input_message = {"type": "user", "content": "你好!记住:我的名字是 Bob"} for chunk in graph.stream({"messages": [input_message]}, config, stream_mode="values"): chunk["messages"][-1].pretty_print()
================================ Human Message ================================= Hi! Remember: my name is Bob ================================== Ai Message ================================== Hello Bob! It's nice to meet you. I'll remember that your name is Bob. How can I assist you today?
在 [6]
已复制!
config = {"configurable": {"thread_id": "2", "user_id": "1"}}
input_message = {"type": "user", "content": "what is my name?"}
for chunk in graph.stream({"messages": [input_message]}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()
config = {"configurable": {"thread_id": "2", "user_id": "1"}} input_message = {"type": "user", "content": "我的名字是什么?"} for chunk in graph.stream({"messages": [input_message]}, config, stream_mode="values"): chunk["messages"][-1].pretty_print()
================================ Human Message ================================= what is my name? ================================== Ai Message ================================== Your name is Bob.
现在我们可以检查我们的内存存储并验证我们是否确实保存了用户的记忆
在 [7]
已复制!
# Inspect what was persisted for user "1".
namespace = ("memories", "1")
for item in in_memory_store.search(namespace):
    print(item.value)
for memory in in_memory_store.search(("memories", "1")): print(memory.value)
{'data': 'User name is Bob'}
现在让我们以另一个用户的身份运行图,以验证关于第一个用户的记忆不会泄露给其他用户
在 [8]
已复制!
config = {"configurable": {"thread_id": "3", "user_id": "2"}}
input_message = {"type": "user", "content": "what is my name?"}
for chunk in graph.stream({"messages": [input_message]}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()
config = {"configurable": {"thread_id": "3", "user_id": "2"}} input_message = {"type": "user", "content": "我的名字是什么?"} for chunk in graph.stream({"messages": [input_message]}, config, stream_mode="values"): chunk["messages"][-1].pretty_print()
================================ Human Message ================================= what is my name? ================================== Ai Message ================================== I apologize, but I don't have any information about your name. As an AI assistant, I don't have access to personal information about users unless it has been specifically shared in our conversation. If you'd like, you can tell me your name and I'll be happy to use it in our discussion.