In [1]:
// Optional: load environment variables (e.g. OPENAI_API_KEY) from a .env file
// import dotenv from 'dotenv';
// dotenv.config();
Install dependencies
npm install cheerio zod zod-to-json-schema langchain @langchain/openai @langchain/core @langchain/community @langchain/textsplitters
Retriever
In [2]:
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";
const urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
];
const docs = await Promise.all(
urls.map((url) => new CheerioWebBaseLoader(url).load()),
);
const docsList = docs.flat();
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 50,
});
const docSplits = await textSplitter.splitDocuments(docsList);
// Add to vectorDB
const vectorStore = await MemoryVectorStore.fromDocuments(
docSplits,
new OpenAIEmbeddings(),
);
const retriever = vectorStore.asRetriever();
import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; const urls = [ "https://lilianweng.github.io/posts/2023-06-23-agent/", "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/", "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/", ]; const docs = await Promise.all( urls.map((url) => new CheerioWebBaseLoader(url).load()), ); const docsList = docs.flat(); const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 500, chunkOverlap: 50, }); const docSplits = await textSplitter.splitDocuments(docsList); // 添加到向量数据库 const vectorStore = await MemoryVectorStore.fromDocuments( docSplits, new OpenAIEmbeddings(), ); const retriever = vectorStore.asRetriever();
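Before wiring the retriever into a graph, it can be useful to sanity-check it with a one-off query. The following sketch is not part of the original notebook; the query string is just an example, and it assumes a LangChain version where retrievers expose the Runnable invoke method.

// Optional sanity check: retrievers are Runnables, so invoke() returns matching chunks.
const sampleDocs = await retriever.invoke("types of agent memory");
// Print the first few characters of each retrieved chunk.
console.log(sampleDocs.map((doc) => doc.pageContent.slice(0, 80)));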
In [3]:
import { Annotation } from "@langchain/langgraph";
import { BaseMessage } from "@langchain/core/messages";
const GraphState = Annotation.Root({
messages: Annotation<BaseMessage[]>({
reducer: (x, y) => x.concat(y),
default: () => [],
}),
});
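The reducer controls how each node's partial state update is merged into the running state: returned messages are appended to the list rather than replacing it. A minimal illustration of what the reducer computes (not from the original notebook):

import { HumanMessage, AIMessage } from "@langchain/core/messages";

// When a node returns { messages: [response] }, the reducer concatenates:
const existing = [new HumanMessage("What is agent memory?")];
const update = [new AIMessage("Let me look that up.")];
const merged = existing.concat(update); // state.messages now contains both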
In [4]:
import { createRetrieverTool } from "langchain/tools/retriever";
import { ToolNode } from "@langchain/langgraph/prebuilt";
const tool = createRetrieverTool(
retriever,
{
name: "retrieve_blog_posts",
description:
"Search and return information about Lilian Weng blog posts on LLM agents, prompt engineering, and adversarial attacks on LLMs.",
},
);
const tools = [tool];
const toolNode = new ToolNode<typeof GraphState.State>(tools);
import { createRetrieverTool } from "langchain/tools/retriever"; import { ToolNode } from "@langchain/langgraph/prebuilt"; const tool = createRetrieverTool( retriever, { name: "retrieve_blog_posts", description: "搜索并返回有关 Lilian Weng 关于 LLM 代理、提示工程和对 LLM 的对抗攻击的博客文章的信息。", }, ); const tools = [tool]; const toolNode = new ToolNode(tools);
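The retriever tool can also be invoked directly, which is handy for debugging outside the graph. A hedged sketch, assuming createRetrieverTool's default input schema with a single query field and a string result:

// Illustrative only: call the tool by hand and inspect the retrieved text.
const toolResult = await tool.invoke({ query: "adversarial attacks on LLMs" });
console.log(String(toolResult).slice(0, 200));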
Edges
In [5]:
import { END } from "@langchain/langgraph";
import { pull } from "langchain/hub";
import { z } from "zod";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { AIMessage, BaseMessage } from "@langchain/core/messages";
/**
* Decides whether the agent should retrieve more information or end the process.
* This function checks the last message in the state for a function call. If a tool call is
* present, the process continues to retrieve information. Otherwise, it ends the process.
* @param {typeof GraphState.State} state - The current state of the agent, including all messages.
* @returns {string} - Either "retrieve" to continue with retrieval, or END to finish the process.
*/
function shouldRetrieve(state: typeof GraphState.State): string {
const { messages } = state;
console.log("---DECIDE TO RETRIEVE---");
const lastMessage = messages[messages.length - 1];
if ("tool_calls" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls.length) {
console.log("---DECISION: RETRIEVE---");
return "retrieve";
}
// If there are no tool calls then we finish.
return END;
}
/**
* Grades the relevance of the retrieved documents to the user's original question.
* This function takes the content of the most recent message (the retrieved documents) together
* with the user's initial question, and asks a grading model to call the `give_relevance_score`
* tool with a binary "yes"/"no" verdict. The resulting tool-call message is appended to the
* state so the `checkRelevance` conditional edge can route on it.
* @param {typeof GraphState.State} state - The current state of the agent, including all messages.
* @returns {Promise<Partial<typeof GraphState.State>>} - The updated state with the new message added to the list of messages.
*/
async function gradeDocuments(state: typeof GraphState.State): Promise<Partial<typeof GraphState.State>> {
console.log("---GET RELEVANCE---");
const { messages } = state;
const tool = {
name: "give_relevance_score",
description: "Give a relevance score to the retrieved documents.",
schema: z.object({
binaryScore: z.string().describe("Relevance score 'yes' or 'no'"),
})
}
const prompt = ChatPromptTemplate.fromTemplate(
`You are a grader assessing relevance of retrieved docs to a user question.
Here are the retrieved docs:
\n ------- \n
{context}
\n ------- \n
Here is the user question: {question}
If the content of the docs are relevant to the users question, score them as relevant.
Give a binary score 'yes' or 'no' score to indicate whether the docs are relevant to the question.
Yes: The docs are relevant to the question.
No: The docs are not relevant to the question.`,
);
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
}).bindTools([tool], {
tool_choice: tool.name,
});
const chain = prompt.pipe(model);
const lastMessage = messages[messages.length - 1];
const score = await chain.invoke({
question: messages[0].content as string,
context: lastMessage.content as string,
});
return {
messages: [score]
};
}
/**
* Check the relevance of the previous LLM tool call.
*
* @param {typeof GraphState.State} state - The current state of the agent, including all messages.
* @returns {string} - "yes" or "no", based on the relevance score in the last tool call.
*/
function checkRelevance(state: typeof GraphState.State): string {
console.log("---CHECK RELEVANCE---");
const { messages } = state;
const lastMessage = messages[messages.length - 1];
if (!("tool_calls" in lastMessage)) {
throw new Error("The 'checkRelevance' node requires the most recent message to contain tool calls.")
}
const toolCalls = (lastMessage as AIMessage).tool_calls;
if (!toolCalls || !toolCalls.length) {
throw new Error("Last message was not a function message");
}
if (toolCalls[0].args.binaryScore === "yes") {
console.log("---DECISION: DOCS RELEVANT---");
return "yes";
}
console.log("---DECISION: DOCS NOT RELEVANT---");
return "no";
}
// Nodes
/**
* Invokes the agent model to generate a response based on the current state.
* This function calls the agent model to generate a response to the current conversation state.
* The response is added to the state's messages.
* @param {typeof GraphState.State} state - The current state of the agent, including all messages.
* @returns {Promise<Partial<typeof GraphState.State>>} - The updated state with the new message added to the list of messages.
*/
async function agent(state: typeof GraphState.State): Promise<Partial<typeof GraphState.State>> {
console.log("---CALL AGENT---");
const { messages } = state;
// Find the AIMessage which contains the `give_relevance_score` tool call,
// and remove it if it exists. This is because the agent does not need to know
// the relevance score.
const filteredMessages = messages.filter((message) => {
if ("tool_calls" in message && Array.isArray(message.tool_calls) && message.tool_calls.length > 0) {
return message.tool_calls[0].name !== "give_relevance_score";
}
return true;
});
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
streaming: true,
}).bindTools(tools);
const response = await model.invoke(filteredMessages);
return {
messages: [response],
};
}
/**
* Transform the query to produce a better question.
* @param {typeof GraphState.State} state - The current state of the agent, including all messages.
* @returns {Promise<Partial<typeof GraphState.State>>} - The updated state with the new message added to the list of messages.
*/
async function rewrite(state: typeof GraphState.State): Promise<Partial<typeof GraphState.State>> {
console.log("---TRANSFORM QUERY---");
const { messages } = state;
const question = messages[0].content as string;
const prompt = ChatPromptTemplate.fromTemplate(
`Look at the input and try to reason about the underlying semantic intent / meaning. \n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Formulate an improved question:`,
);
// Rewriting model
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
streaming: true,
});
const response = await prompt.pipe(model).invoke({ question });
return {
messages: [response],
};
}
/**
* Generate answer
* @param {typeof GraphState.State} state - The current state of the agent, including all messages.
* @returns {Promise<Partial<typeof GraphState.State>>} - The updated state with the new message added to the list of messages.
*/
async function generate(state: typeof GraphState.State): Promise<Partial<typeof GraphState.State>> {
console.log("---GENERATE---");
const { messages } = state;
const question = messages[0].content as string;
// Extract the most recent ToolMessage
const lastToolMessage = messages.slice().reverse().find((msg) => msg._getType() === "tool");
if (!lastToolMessage) {
throw new Error("No tool message found in the conversation history");
}
const docs = lastToolMessage.content as string;
const prompt = await pull<ChatPromptTemplate>("rlm/rag-prompt");
const llm = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
streaming: true,
});
const ragChain = prompt.pipe(llm);
const response = await ragChain.invoke({
context: docs,
question,
});
return {
messages: [response],
};
}
import { END } from "@langchain/langgraph"; import { pull } from "langchain/hub"; import { z } from "zod"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { ChatOpenAI } from "@langchain/openai"; import { AIMessage, BaseMessage } from "@langchain/core/messages"; /** * 决定代理是否应该检索更多信息或结束流程。 * 此函数检查状态中的最后一条消息以查找函数调用。如果存在工具调用, * 则流程将继续检索信息。否则,它将结束流程。 * @param {typeof GraphState.State} state - 代理的当前状态,包括所有消息。 * @returns {string} - 决定是“继续”检索过程还是“结束”它。 */ function shouldRetrieve(state: typeof GraphState.State): string { const { messages } = state; console.log("---决定检索---"); const lastMessage = messages[messages.length - 1]; if ("tool_calls" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls.length) { console.log("---决定:检索---"); return "retrieve"; } // 如果没有工具调用,我们将完成。 return END; } /** * 确定代理是否应根据检索到的文档的相关性继续执行。 * 此函数检查对话中的最后一条消息是否为 FunctionMessage 类型,指示 * 是否已执行文档检索。然后,它使用预定义的模型和输出解析器评估这些文档与用户 * 的初始问题的相关性。如果文档相关,则对话 * 被认为已完成。否则,将继续检索过程。 * @param {typeof GraphState.State} state - 代理的当前状态,包括所有消息。 * @returns {Promise>} - 带有新消息添加到消息列表的更新状态。> { console.log("---获取相关性---"); const { messages } = state; const tool = { name: "give_relevance_score", description: "对检索到的文档给出相关性评分。", schema: z.object({ binaryScore: z.string().describe("相关性评分 'yes' 或 'no'"), }) } const prompt = ChatPromptTemplate.fromTemplate( `您是一位评估检索到的文档与用户问题的相关性的评估者。以下是检索到的文档: \n ------- \n {context} \n ------- \n 以下是用户问题:{question} 如果文档的内容与用户问题相关,请对其评分为相关。给出二进制评分“yes”或“no”以指示文档是否与问题相关。Yes: 文档与问题相关。No: 文档与问题无关。`, ); const model = new ChatOpenAI({ model: "gpt-4o", temperature: 0, }).bindTools([tool], { tool_choice: tool.name, }); const chain = prompt.pipe(model); const lastMessage = messages[messages.length - 1]; const score = await chain.invoke({ question: messages[0].content as string, context: lastMessage.content as string, }); return { messages: [score] }; } /** * 检查上一次 LLM 工具调用的相关性。 * * @param {typeof GraphState.State} state - 代理的当前状态,包括所有消息。 * @returns {string} - 根据文档的相关性,指示“yes”或“no”的指令。 */ function checkRelevance(state: typeof GraphState.State): string { console.log("---检查相关性---"); const { messages } = state; const lastMessage = messages[messages.length - 1]; if (!("tool_calls" in lastMessage)) { throw new Error("“checkRelevance”节点需要最近的消息包含工具调用。") } const toolCalls = (lastMessage as AIMessage).tool_calls; if (!toolCalls || !toolCalls.length) { throw new Error("最后一条消息不是函数消息"); } if (toolCalls[0].args.binaryScore === "yes") { console.log("---决定:文档相关---"); return "yes"; } console.log("---决定:文档不相关---"); return "no"; } // 节点 /** * 调用代理模型以根据当前状态生成响应。 * 此函数调用代理模型以生成对当前对话状态的响应。 * 响应将添加到状态的消息中。 * @param {typeof GraphState.State} state - 代理的当前状态,包括所有消息。 * @returns {Promise>} - 带有新消息添加到消息列表的更新状态。> { console.log("---调用代理---"); const { messages } = state; // 查找包含“give_relevance_score”工具调用的 AIMessage, // 并删除它(如果存在)。这是因为代理不需要知道 // 相关性评分。 const filteredMessages = messages.filter((message) => { if ("tool_calls" in message && Array.isArray(message.tool_calls) && message.tool_calls.length > 0) { return message.tool_calls[0].name !== "give_relevance_score"; } return true; }); const model = new ChatOpenAI({ model: "gpt-4o", temperature: 0, streaming: true, }).bindTools(tools); const response = await model.invoke(filteredMessages); return { messages: [response], }; } /** * 转换查询以生成更好的问题。 * @param {typeof GraphState.State} state - 代理的当前状态,包括所有消息。 * @returns {Promise>} - 带有新消息添加到消息列表的更新状态。> { console.log("---转换查询---"); const { messages } = state; const question = messages[0].content as string; const prompt = 
ChatPromptTemplate.fromTemplate( `查看输入并尝试推断底层的语义意图/含义。 \n 以下是初始问题: \n ------- \n {question} \n ------- \n 制定一个改进的问题:`, ); // 评估者 const model = new ChatOpenAI({ model: "gpt-4o", temperature: 0, streaming: true, }); const response = await prompt.pipe(model).invoke({ question }); return { messages: [response], }; } /** * 生成答案 * @param {typeof GraphState.State} state - 代理的当前状态,包括所有消息。 * @returns {Promise>} - 带有新消息添加到消息列表的更新状态。> { console.log("---生成---"); const { messages } = state; const question = messages[0].content as string; // 提取最近的 ToolMessage const lastToolMessage = messages.slice().reverse().find((msg) => msg._getType() === "tool"); if (!lastToolMessage) { throw new Error("在对话历史记录中找不到工具消息"); } const docs = lastToolMessage.content as string; const prompt = await pull("rlm/rag-prompt"); const llm = new ChatOpenAI({ model: "gpt-4o", temperature: 0, streaming: true, }); const ragChain = prompt.pipe(llm); const response = await ragChain.invoke({ context: docs, question, }); return { messages: [response], }; }
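Each node function can be exercised on its own before the graph is assembled. Below is a minimal smoke test for gradeDocuments with a hand-built state; the message contents and tool_call_id are made up for illustration:

import { HumanMessage, ToolMessage } from "@langchain/core/messages";

// Illustrative only: simulate the state right after a retrieval step.
const testState = {
  messages: [
    new HumanMessage("What is task decomposition?"),
    new ToolMessage({
      content: "Task decomposition breaks large tasks into smaller subgoals.",
      tool_call_id: "test-call-id",
    }),
  ],
} as typeof GraphState.State;

const graded = await gradeDocuments(testState);
// Expect a single AIMessage carrying a give_relevance_score tool call.
console.log((graded.messages?.[0] as AIMessage).tool_calls);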
Graph
- Start with an agent, callModel
- The agent decides whether to call a function
- If so, an action node is invoked to call the tool (the retriever)
- The agent is then called again with the tool output appended to the messages (state)
In [6]:
import { StateGraph } from "@langchain/langgraph";
// Define the graph
const workflow = new StateGraph(GraphState)
// Define the nodes which we'll cycle between.
.addNode("agent", agent)
.addNode("retrieve", toolNode)
.addNode("gradeDocuments", gradeDocuments)
.addNode("rewrite", rewrite)
.addNode("generate", generate);
import { StateGraph } from "@langchain/langgraph"; // 定义图 const workflow = new StateGraph(GraphState) // 定义我们将循环遍历的节点。 .addNode("agent", agent) .addNode("retrieve", toolNode) .addNode("gradeDocuments", gradeDocuments) .addNode("rewrite", rewrite) .addNode("generate", generate);
In [7]:
import { START } from "@langchain/langgraph";
// Call agent node to decide to retrieve or not
workflow.addEdge(START, "agent");
// Decide whether to retrieve
workflow.addConditionalEdges(
"agent",
// Assess agent decision
shouldRetrieve,
);
workflow.addEdge("retrieve", "gradeDocuments");
// Edges taken after the `action` node is called.
workflow.addConditionalEdges(
"gradeDocuments",
// Assess agent decision
checkRelevance,
{
// Call tool node
yes: "generate",
no: "rewrite", // placeholder
},
);
workflow.addEdge("generate", END);
workflow.addEdge("rewrite", "agent");
// Compile
const app = workflow.compile();
import { START } from "@langchain/langgraph"; // 调用代理节点以决定是否检索 workflow.addEdge(START, "agent"); // 决定是否检索 workflow.addConditionalEdges( "agent", // 评估代理决定 shouldRetrieve, ); workflow.addEdge("retrieve", "gradeDocuments"); // 在调用“action”节点后执行的边。 workflow.addConditionalEdges( "gradeDocuments", // 评估代理决定 checkRelevance, { // 调用工具节点 yes: "generate", no: "rewrite", // 占位符 }, ); workflow.addEdge("generate", END); workflow.addEdge("rewrite", "agent"); // 编译 const app = workflow.compile();
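Once compiled, the graph topology can be inspected before running anything. This sketch assumes a LangGraph.js version that exposes getGraph() and drawMermaid() on compiled graphs:

// Illustrative only: print a Mermaid diagram of the compiled workflow.
console.log(app.getGraph().drawMermaid());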
In [8]:
import { HumanMessage } from "@langchain/core/messages";
const inputs = {
messages: [
new HumanMessage(
"What are the types of agent memory based on Lilian Weng's blog post?",
),
],
};
let finalState;
for await (const output of await app.stream(inputs)) {
for (const [key, value] of Object.entries(output)) {
const lastMsg = output[key].messages[output[key].messages.length - 1];
console.log(`Output from node: '${key}'`);
console.dir({
type: lastMsg._getType(),
content: lastMsg.content,
tool_calls: lastMsg.tool_calls,
}, { depth: null });
console.log("---\n");
finalState = value;
}
}
console.log(JSON.stringify(finalState, null, 2));
import { HumanMessage } from "@langchain/core/messages"; const inputs = { messages: [ new HumanMessage( "根据 Lilian Weng 的博客文章,有哪些类型的代理内存?", ), ], }; let finalState; for await (const output of await app.stream(inputs)) { for (const [key, value] of Object.entries(output)) { const lastMsg = output[key].messages[output[key].messages.length - 1]; console.log(`Output from node: '${key}'`); console.dir({ type: lastMsg._getType(), content: lastMsg.content, tool_calls: lastMsg.tool_calls, }, { depth: null }); console.log("---\n"); finalState = value; } } console.log(JSON.stringify(finalState, null, 2));
---CALL AGENT---
---DECIDE TO RETRIEVE---
---DECISION: RETRIEVE---
Output from node: 'agent'
{
  type: 'ai',
  content: '',
  tool_calls: [
    {
      name: 'retrieve_blog_posts',
      args: { query: 'types of agent memory' },
      id: 'call_adLYkV7T2ry1EZFboT0jPuwn',
      type: 'tool_call'
    }
  ]
}
---

Output from node: 'retrieve'
{
  type: 'tool',
  content: 'Agent System Overview\n' + ' \n' + ' Component One: Planning\n' + ' \n' + ' \n' +
    ' Task Decomposition\n' + ' \n' + ' Self-Reflection\n' + ' \n' + ' \n' +
    ' Component Two: Memory\n' + ' \n' + ' \n' + ' Types of Memory\n' + ' \n' +
    ' Maximum Inner Product Search (MIPS)\n' + '\n' +
    'Memory stream: is a long-term memory module (external database) that records a comprehensive list of agents’ experience in natural language.\n' + '\n' +
    'Each element is an observation, an event directly provided by the agent.\n' +
    '- Inter-agent communication can trigger new natural language statements.\n' + '\n' + '\n' +
    'Retrieval model: surfaces the context to inform the agent’s behavior, according to relevance, recency and importance.\n' + '\n' +
    'Planning\n' + '\n' +
    'Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\n' +
    'Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\n' + '\n' + '\n' +
    'Memory\n' + '\n' +
    'The design of generative agents combines LLM with memory, planning and reflection mechanisms to enable agents to behave conditioned on past experience, as well as to interact with other agents.',
  tool_calls: undefined
}
---

---GET RELEVANCE---
---CHECK RELEVANCE---
---DECISION: DOCS NOT RELEVANT---
Output from node: 'gradeDocuments'
{
  type: 'ai',
  content: '',
  tool_calls: [
    {
      name: 'give_relevance_score',
      args: { binaryScore: 'no' },
      type: 'tool_call',
      id: 'call_AGE7gORVFubExfJWcjb0C2nV'
    }
  ]
}
---

---TRANSFORM QUERY---
Output from node: 'rewrite'
{
  type: 'ai',
  content: "What are the different types of agent memory described in Lilian Weng's blog post?",
  tool_calls: []
}
---

---CALL AGENT---
---DECIDE TO RETRIEVE---
Output from node: 'agent'
{
  type: 'ai',
  content: "Lilian Weng's blog post describes the following types of agent memory:\n" + '\n' +
    '1. **Memory Stream**:\n' +
    ' - This is a long-term memory module (external database) that records a comprehensive list of agents’ experiences in natural language.\n' +
    ' - Each element in the memory stream is an observation or an event directly provided by the agent.\n' +
    ' - Inter-agent communication can trigger new natural language statements to be added to the memory stream.\n' + '\n' +
    '2. **Retrieval Model**:\n' +
    ' - This model surfaces the context to inform the agent’s behavior based on relevance, recency, and importance.\n' + '\n' +
    'These memory types are part of a broader design that combines generative agents with memory, planning, and reflection mechanisms to enable agents to behave based on past experiences and interact with other agents.',
  tool_calls: []
}
---

{
  "messages": [
    {
      "lc": 1,
      "type": "constructor",
      "id": [
        "langchain_core",
        "messages",
        "AIMessageChunk"
      ],
      "kwargs": {
        "content": "Lilian Weng's blog post describes the following types of agent memory:\n\n1. **Memory Stream**:\n - This is a long-term memory module (external database) that records a comprehensive list of agents’ experiences in natural language.\n - Each element in the memory stream is an observation or an event directly provided by the agent.\n - Inter-agent communication can trigger new natural language statements to be added to the memory stream.\n\n2. **Retrieval Model**:\n - This model surfaces the context to inform the agent’s behavior based on relevance, recency, and importance.\n\nThese memory types are part of a broader design that combines generative agents with memory, planning, and reflection mechanisms to enable agents to behave based on past experiences and interact with other agents.",
        "additional_kwargs": {},
        "response_metadata": {
          "estimatedTokenUsage": {
            "promptTokens": 280,
            "completionTokens": 155,
            "totalTokens": 435
          },
          "prompt": 0,
          "completion": 0,
          "finish_reason": "stop",
          "system_fingerprint": "fp_3cd8b62c3b"
        },
        "tool_call_chunks": [],
        "id": "chatcmpl-9zAaVQGmTLiCaFvtbxUK60qMFsSmU",
        "usage_metadata": {
          "input_tokens": 363,
          "output_tokens": 156,
          "total_tokens": 519
        },
        "tool_calls": [],
        "invalid_tool_calls": []
      }
    }
  ]
}
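If per-node streaming is not needed, the same compiled graph can be run in a single call. A brief sketch (not part of the original notebook), reusing the inputs defined above:

// Illustrative only: run the whole graph at once and read the final answer.
const result = await app.invoke(inputs);
console.log(result.messages[result.messages.length - 1].content);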