Setup
First, let's install the packages we're going to use:
yarn add langchain @langchain/anthropic @langchain/core
Next, we need to set an API key for Anthropic, the LLM we will use:
export ANTHROPIC_API_KEY=your_api_key
Optionally, we can set the API key for LangSmith tracing, which will give us best-in-class observability:
export LANGCHAIN_TRACING_V2="true"
export LANGCHAIN_CALLBACKS_BACKGROUND="true"
export LANGCHAIN_API_KEY=your_api_key
Build the agent
Now let's build a simple ReAct-style agent.
import { ChatAnthropic } from "@langchain/anthropic";
import { tool } from "@langchain/core/tools";
import { BaseMessage, AIMessage } from "@langchain/core/messages";
import { StateGraph, Annotation, START, END } from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import { MemorySaver } from "@langchain/langgraph";
import { z } from "zod";
const AgentState = Annotation.Root({
messages: Annotation<BaseMessage[]>({
reducer: (x, y) => x.concat(y),
}),
});
const memory = new MemorySaver();
const searchTool = tool((_): string => {
// This is a placeholder for the actual implementation
// Don't let the LLM know this though 😊
return "It's sunny in San Francisco, but you better look out if you're a Gemini 😈."
}, {
name: "search",
description: "Call to surf the web.",
schema: z.object({
query: z.string()
})
})
const tools = [searchTool]
const toolNode = new ToolNode<typeof AgentState.State>(tools)
const model = new ChatAnthropic({ model: "claude-3-haiku-20240307" })
const boundModel = model.bindTools(tools)
function shouldContinue(state: typeof AgentState.State): "action" | typeof END {
const lastMessage = state.messages[state.messages.length - 1];
// If there is no function call, then we finish
if (lastMessage && !(lastMessage as AIMessage).tool_calls?.length) {
return END;
}
// Otherwise if there is, we continue
return "action";
}
// Define the function that calls the model
async function callModel(state: typeof AgentState.State) {
const response = await boundModel.invoke(state.messages);
// We return an object, because this will get merged with the existing state
return { messages: [response] };
}
// Define a new graph
const workflow = new StateGraph(AgentState)
// Define the two nodes we will cycle between
.addNode("agent", callModel)
.addNode("action", toolNode)
// We now add a conditional edge
.addConditionalEdges(
// First, we define the start node. We use `agent`.
// This means these are the edges taken after the `agent` node is called.
"agent",
// Next, we pass in the function that will determine which node is called next.
shouldContinue
)
// We now add a normal edge from `action` to `agent`.
// This means that after `action` is called, `agent` node is called next.
.addEdge("action", "agent")
// Set the entrypoint as `agent`
// This means that this node is the first one called
.addEdge(START, "agent");
// Finally, we compile it!
// This compiles it into a LangChain Runnable,
// meaning you can use it as you would any other runnable
const app = workflow.compile({
checkpointer: memory,
});
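Let's interact with the agent over multiple turns. We pass the same thread_id in the config each time so the checkpointer can load the saved state for the conversation: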
import { HumanMessage } from "@langchain/core/messages";
const config = { configurable: { thread_id: "2"}, streamMode: "values" as const }
const inputMessage = new HumanMessage("hi! I'm bob");
for await (const event of await app.stream({
messages: [inputMessage]
}, config)) {
const recentMsg = event.messages[event.messages.length - 1];
console.log(`================================ ${recentMsg._getType()} Message (1) =================================`)
console.log(recentMsg.content);
}
console.log("\n\n================================= END =================================\n\n")
const inputMessage2 = new HumanMessage("what's my name?");
for await (const event of await app.stream({
messages: [inputMessage2]
}, config)) {
const recentMsg = event.messages[event.messages.length - 1];
console.log(`================================ ${recentMsg._getType()} Message (2) =================================`)
console.log(recentMsg.content);
}
================================ human Message (1) =================================
hi! I'm bob
================================ ai Message (1) =================================
Hello Bob! It's nice to meet you. I'm an AI assistant created by Anthropic. I'm here to help with any questions or tasks you may have. Please let me know if there's anything I can assist you with.


================================= END =================================


================================ human Message (2) =================================
what's my name?
================================ ai Message (2) =================================
Your name is Bob, as you introduced yourself earlier.
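Because the full message history is passed to the model on every turn (and the checkpointer persists it across invocations on the same thread_id), the agent remembers that we introduced ourselves as Bob. Next, let's see what happens when we filter that history down.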
Filtering messages
The most straightforward way to keep the conversation history from blowing up is to filter the list of messages before passing them to the LLM. This involves two parts: defining a function to filter the messages, and adding it to the graph. See the example below, which defines a very simple filterMessages function and then uses it.
import { ChatAnthropic } from "@langchain/anthropic";
import { tool } from "@langchain/core/tools";
import { BaseMessage, AIMessage } from "@langchain/core/messages";
import { StateGraph, Annotation, START, END } from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import { MemorySaver } from "@langchain/langgraph";
import { z } from "zod";
const MessageFilteringAgentState = Annotation.Root({
messages: Annotation<BaseMessage[]>({
reducer: (x, y) => x.concat(y),
}),
});
const messageFilteringMemory = new MemorySaver();
const messageFilteringSearchTool = tool((_): string => {
// This is a placeholder for the actual implementation
// Don't let the LLM know this though 😊
return "It's sunny in San Francisco, but you better look out if you're a Gemini 😈."
}, {
name: "search",
description: "Call to surf the web.",
schema: z.object({
query: z.string()
})
})
// We can re-use the same search tool as above as we don't need to change it for this example.
const messageFilteringTools = [messageFilteringSearchTool]
const messageFilteringToolNode = new ToolNode<typeof MessageFilteringAgentState.State>(messageFilteringTools)
const messageFilteringModel = new ChatAnthropic({ model: "claude-3-haiku-20240307" })
const boundMessageFilteringModel = messageFilteringModel.bindTools(messageFilteringTools)
async function shouldContinueMessageFiltering(state: typeof MessageFilteringAgentState.State): Promise<"action" | typeof END> {
const lastMessage = state.messages[state.messages.length - 1];
// If there is no function call, then we finish
if (lastMessage && !(lastMessage as AIMessage).tool_calls?.length) {
return END;
}
// Otherwise if there is, we continue
return "action";
}
const filterMessages = (messages: BaseMessage[]): BaseMessage[] => {
// This is very simple helper function which only ever uses the last message
return messages.slice(-1);
}
// Define the function that calls the model
async function callModelMessageFiltering(state: typeof MessageFilteringAgentState.State) {
const response = await boundMessageFilteringModel.invoke(filterMessages(state.messages));
// We return an object, because this will get merged with the existing state
return { messages: [response] };
}
// Define a new graph
const messageFilteringWorkflow = new StateGraph(MessageFilteringAgentState)
// Define the two nodes we will cycle between
.addNode("agent", callModelMessageFiltering)
.addNode("action", messageFilteringToolNode)
// We now add a conditional edge
.addConditionalEdges(
// First, we define the start node. We use `agent`.
// This means these are the edges taken after the `agent` node is called.
"agent",
// Next, we pass in the function that will determine which node is called next.
shouldContinueMessageFiltering
)
// We now add a normal edge from `action` to `agent`.
// This means that after `action` is called, `agent` node is called next.
.addEdge("action", "agent")
// Set the entrypoint as `agent`
// This means that this node is the first one called
.addEdge(START, "agent");
// Finally, we compile it!
// This compiles it into a LangChain Runnable,
// meaning you can use it as you would any other runnable
const messageFilteringApp = messageFilteringWorkflow.compile({
checkpointer: messageFilteringMemory,
});
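Now let's run the same two-turn conversation through the filtering agent: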
import { HumanMessage } from "@langchain/core/messages";
const messageFilteringConfig = { configurable: { thread_id: "2"}, streamMode: "values" as const }
const messageFilteringInput = new HumanMessage("hi! I'm bob");
for await (const event of await messageFilteringApp.stream({
messages: [messageFilteringInput]
}, messageFilteringConfig)) {
const recentMsg = event.messages[event.messages.length - 1];
console.log(`================================ ${recentMsg._getType()} Message (1) =================================`)
console.log(recentMsg.content);
}
console.log("\n\n================================= END =================================\n\n")
const messageFilteringInput2 = new HumanMessage("what's my name?");
for await (const event of await messageFilteringApp.stream(
{
messages: [messageFilteringInput2]
},
messageFilteringConfig
)) {
const recentMsg = event.messages[event.messages.length - 1];
console.log(`================================ ${recentMsg._getType()} Message (2) =================================`)
console.log(recentMsg.content);
}
================================ human Message (1) =================================
hi! I'm bob
================================ ai Message (1) =================================
Hello, nice to meet you Bob! I'm an AI assistant here to help out. Feel free to let me know if you have any questions or if there's anything I can assist with.


================================= END =================================


================================ human Message (2) =================================
what's my name?
================================ ai Message (2) =================================
I'm afraid I don't actually know your name, since you haven't provided that information to me. As an AI assistant, I don't have access to personal details about you unless you share them with me directly. I'm happy to continue our conversation, but I don't have enough context to know your specific name. Please feel free to introduce yourself if you'd like me to address you by name.
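This time the model has no memory of our name: filterMessages only ever passes the single most recent message to the LLM, so the earlier introduction never reaches it. A real filter would usually keep more context. As a rough sketch (not part of the original example; filterRecentMessages is a hypothetical helper), you might preserve system messages and the last few turns instead:

const filterRecentMessages = (messages: BaseMessage[]): BaseMessage[] => {
  // Hypothetical alternative filter: keep all system messages plus the
  // last four non-system messages, instead of only the very last message.
  const system = messages.filter((m) => m._getType() === "system");
  const rest = messages.filter((m) => m._getType() !== "system");
  return [...system, ...rest.slice(-4)];
};

One caveat when tool calling is involved: a naive slice can separate a tool message from the AI message whose tool_calls produced it, which some providers reject, so filters for tool-using agents should take care to keep those pairs together.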