In [1]
%%capture --no-stderr
%pip install -U langgraph langchain_anthropic
In [2]
import getpass
import os


def _set_env(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")


_set_env("ANTHROPIC_API_KEY")
Define graph
First, let's create a very simple graph.
In [3]
import operator
from typing import Annotated, Sequence

from typing_extensions import TypedDict

from langchain_anthropic import ChatAnthropic
from langchain_core.messages import BaseMessage, HumanMessage
from langgraph.graph import END, StateGraph, START

model = ChatAnthropic(model_name="claude-2.1")


class AgentState(TypedDict):
    # operator.add is a reducer: each node's returned messages are
    # appended to the existing list instead of replacing it
    messages: Annotated[Sequence[BaseMessage], operator.add]


def _call_model(state):
    response = model.invoke(state["messages"])
    return {"messages": [response]}


# Define a new graph
builder = StateGraph(AgentState)
builder.add_node("model", _call_model)
builder.add_edge(START, "model")
builder.add_edge("model", END)

graph = builder.compile()
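An aside on the state definition above: the operator.add annotation is a reducer, which tells LangGraph to combine each node's returned messages with the existing ones rather than overwrite them. A minimal standalone illustration of what that reducer does (not part of the notebook's cells):

import operator

from langchain_core.messages import AIMessage, HumanMessage

existing = [HumanMessage(content="hi")]
update = [AIMessage(content="Hello!")]

# LangGraph merges a node's return value into state with the reducer;
# here operator.add, which for lists is plain concatenation
merged = operator.add(existing, update)
assert [m.content for m in merged] == ["hi", "Hello!"]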
Configure the graph
Great! Now suppose we want to extend this example so that the user can choose from multiple LLMs. We can easily do that by passing in a config. Any configuration information needs to be passed inside the configurable key, as shown below. This config is meant to contain things that are not part of the input (and that we therefore don't want to track as part of the state).
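Concretely, a config is just a dict passed alongside the input rather than inside it, with all custom values nested under the configurable key. A sketch of the shape (the model key is the one we wire up below):

config = {"configurable": {"model": "openai"}}
# Passed as a separate argument, next to (not inside) the graph input:
# graph.invoke({"messages": [...]}, config=config)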
In [4]
from langchain_openai import ChatOpenAI
from typing import Optional
from langchain_core.runnables.config import RunnableConfig

openai_model = ChatOpenAI()

models = {
    "anthropic": model,
    "openai": openai_model,
}


def _call_model(state: AgentState, config: RunnableConfig):
    # Access the config through the configurable key
    model_name = config["configurable"].get("model", "anthropic")
    model = models[model_name]
    response = model.invoke(state["messages"])
    return {"messages": [response]}


# Define a new graph
builder = StateGraph(AgentState)
builder.add_node("model", _call_model)
builder.add_edge(START, "model")
builder.add_edge("model", END)

graph = builder.compile()
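One detail worth noting: models[model_name] raises a bare KeyError if a caller passes an unrecognized name. A defensive variant is a small change (the error message wording here is ours, not from the original):

def _call_model(state: AgentState, config: RunnableConfig):
    model_name = config["configurable"].get("model", "anthropic")
    if model_name not in models:
        raise ValueError(
            f"Unknown model {model_name!r}; expected one of {sorted(models)}"
        )
    response = models[model_name].invoke(state["messages"])
    return {"messages": [response]}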
If we call it with no configuration, it will use the default we defined (Anthropic).
In [5]
graph.invoke({"messages": [HumanMessage(content="hi")]})
graph.invoke({"messages": [HumanMessage(content="hi")]})
Out[5]
{'messages': [HumanMessage(content='hi', additional_kwargs={}, response_metadata={}), AIMessage(content='Hello!', additional_kwargs={}, response_metadata={'id': 'msg_01WFXkfgK8AvSckLvYYrHshi', 'model': 'claude-2.1', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 10, 'output_tokens': 6}}, id='run-ece54b16-f8fc-4201-8405-b97122edf8d8-0', usage_metadata={'input_tokens': 10, 'output_tokens': 6, 'total_tokens': 16})]}
We can also call it with a configuration to make it use a different model.
In [6]
config = {"configurable": {"model": "openai"}}
graph.invoke({"messages": [HumanMessage(content="hi")]}, config=config)
config = {"configurable": {"model": "openai"}} graph.invoke({"messages": [HumanMessage(content="hi")]}, config=config)
Out[6]
{'messages': [HumanMessage(content='hi', additional_kwargs={}, response_metadata={}), AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 8, 'total_tokens': 17, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8331964-d811-4b44-afb8-56c30ade7c15-0', usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17})]}
We can also adapt our graph to take in more configuration, such as a system message.
In [7]
from langchain_core.messages import SystemMessage


# We can define a config schema to specify the configuration options for the graph.
# A config schema is useful for indicating which fields are available in the
# configurable dict inside the config.
class ConfigSchema(TypedDict):
    model: Optional[str]
    system_message: Optional[str]


def _call_model(state: AgentState, config: RunnableConfig):
    # Access the config through the configurable key
    model_name = config["configurable"].get("model", "anthropic")
    model = models[model_name]
    messages = state["messages"]
    if "system_message" in config["configurable"]:
        messages = [
            SystemMessage(content=config["configurable"]["system_message"])
        ] + messages
    response = model.invoke(messages)
    return {"messages": [response]}


# Define a new graph - note that we pass in the configuration schema here, but it is not necessary
workflow = StateGraph(AgentState, ConfigSchema)
workflow.add_node("model", _call_model)
workflow.add_edge(START, "model")
workflow.add_edge("model", END)

graph = workflow.compile()
In [8]
graph.invoke({"messages": [HumanMessage(content="hi")]})
Out[8]
{'messages': [HumanMessage(content='hi', additional_kwargs={}, response_metadata={}), AIMessage(content='Hello!', additional_kwargs={}, response_metadata={'id': 'msg_01VgCANVHr14PsHJSXyKkLVh', 'model': 'claude-2.1', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 10, 'output_tokens': 6}}, id='run-f8c5f18c-be58-4e44-9a4e-d43692d7eed1-0', usage_metadata={'input_tokens': 10, 'output_tokens': 6, 'total_tokens': 16})]}
In [9]
config = {"configurable": {"system_message": "respond in italian"}}
graph.invoke({"messages": [HumanMessage(content="hi")]}, config=config)
config = {"configurable": {"system_message": "respond in italian"}} graph.invoke({"messages": [HumanMessage(content="hi")]}, config=config)
Out[9]
{'messages': [HumanMessage(content='hi', additional_kwargs={}, response_metadata={}), AIMessage(content='Ciao!', additional_kwargs={}, response_metadata={'id': 'msg_011YuCYQk1Rzc8PEhVCpQGr6', 'model': 'claude-2.1', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 14, 'output_tokens': 7}}, id='run-a583341e-5868-4e8c-a536-881338f21252-0', usage_metadata={'input_tokens': 14, 'output_tokens': 7, 'total_tokens': 21})]}
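Because both options live under the same configurable key, they compose freely. For example (output not shown here, but the call shape follows directly from the cells above):

config = {"configurable": {"model": "openai", "system_message": "respond in italian"}}
graph.invoke({"messages": [HumanMessage(content="hi")]}, config=config)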