{
|
||
"cells": [
|
||
{
|
||
"cell_type": "code",
|
||
"id": "initial_id",
|
||
"metadata": {
|
||
"ExecuteTime": {
|
||
"end_time": "2025-09-12T15:52:56.909385Z",
|
||
"start_time": "2025-09-12T15:52:56.417088Z"
|
||
}
|
||
},
|
||
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"# Step 0: configure credentials and define the model.\n",
"# SECURITY: an API key was previously hardcoded here in plaintext. Treat that\n",
"# key as leaked — revoke it — and supply a fresh one via the environment\n",
"# (or interactively via getpass) instead of committing it to the notebook.\n",
"if not os.environ.get('DASHSCOPE_API_KEY'):\n",
"    os.environ['DASHSCOPE_API_KEY'] = getpass('Enter DASHSCOPE_API_KEY: ')\n",
"\n",
"from langchain_core.tools import tool\n",
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
"\n",
"llm = ChatTongyi(\n",
"    model=\"qwen-max\",  # swap the model name as needed; model list: https://help.aliyun.com/zh/model-studio/getting-started/models\n",
"    streaming=True,\n",
"    # other params...\n",
")"
],
|
||
"outputs": [],
|
||
"execution_count": 1
|
||
},
|
||
{
|
||
"metadata": {
|
||
"ExecuteTime": {
|
||
"end_time": "2025-09-12T15:54:08.425580Z",
|
||
"start_time": "2025-09-12T15:54:08.374623Z"
|
||
}
|
||
},
|
||
"cell_type": "code",
|
||
"source": [
"# Define the arithmetic tools. Each docstring doubles as the tool\n",
"# description the model sees when deciding which tool to call.\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
"    \"\"\"Multiply a and b.\n",
"\n",
"    Args:\n",
"        a: first int\n",
"        b: second int\n",
"    \"\"\"\n",
"    return a * b\n",
"\n",
"\n",
"@tool\n",
"def add(a: int, b: int) -> int:\n",
"    \"\"\"Adds a and b.\n",
"\n",
"    Args:\n",
"        a: first int\n",
"        b: second int\n",
"    \"\"\"\n",
"    return a + b\n",
"\n",
"\n",
"@tool\n",
"def divide(a: int, b: int) -> float:\n",
"    \"\"\"Divide a and b.\n",
"\n",
"    Args:\n",
"        a: first int\n",
"        b: second int\n",
"    \"\"\"\n",
"    return a / b\n",
"\n",
"\n",
"# Augment the LLM with tools.\n",
"tools = [add, multiply, divide]\n",
"# Use a distinct loop variable: the original comprehension's loop variable\n",
"# shadowed the imported `tool` decorator for the rest of the kernel session.\n",
"tools_by_name = {t.name: t for t in tools}\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"\n",
"from langgraph.graph import add_messages\n",
"from langchain_core.messages import (\n",
"    SystemMessage,\n",
"    HumanMessage,\n",
"    BaseMessage,\n",
"    ToolCall,\n",
")\n",
"from langgraph.func import entrypoint, task\n",
"\n",
"# Step 1: model node -- the LLM decides whether to call a tool or answer.\n",
"@task\n",
"def call_llm(messages: list[BaseMessage]):\n",
"    \"\"\"LLM decides whether to call a tool or not\"\"\"\n",
"    return llm_with_tools.invoke(\n",
"        [\n",
"            SystemMessage(\n",
"                content=\"You are a helpful assistant tasked with performing arithmetic on a set of inputs.\"\n",
"            )\n",
"        ]\n",
"        + messages\n",
"    )\n",
"\n",
"\n",
"# Step 2: tool node -- execute one tool call requested by the LLM.\n",
"@task\n",
"def call_tool(tool_call: ToolCall):\n",
"    \"\"\"Performs the tool call\"\"\"\n",
"    # Named `requested` (not `tool`) so the imported decorator is not shadowed.\n",
"    requested = tools_by_name[tool_call[\"name\"]]\n",
"    return requested.invoke(tool_call)\n",
"\n",
"\n",
"# Step 3: agent loop -- call the LLM, run any requested tools, and repeat\n",
"# until the LLM responds without tool calls.\n",
"@entrypoint()\n",
"def agent(messages: list[BaseMessage]):\n",
"    llm_response = call_llm(messages).result()\n",
"\n",
"    while llm_response.tool_calls:\n",
"        # Launch all requested tools, then gather their results.\n",
"        tool_result_futures = [\n",
"            call_tool(tool_call) for tool_call in llm_response.tool_calls\n",
"        ]\n",
"        tool_results = [fut.result() for fut in tool_result_futures]\n",
"        messages = add_messages(messages, [llm_response, *tool_results])\n",
"        llm_response = call_llm(messages).result()\n",
"\n",
"    messages = add_messages(messages, llm_response)\n",
"    return messages"
],
|
||
"id": "8a77a9b24ee9616d",
|
||
"outputs": [],
|
||
"execution_count": 2
|
||
},
|
||
{
|
||
"metadata": {
|
||
"ExecuteTime": {
|
||
"end_time": "2025-09-12T15:54:11.693756Z",
|
||
"start_time": "2025-09-12T15:54:10.101700Z"
|
||
}
|
||
},
|
||
"cell_type": "code",
|
||
"source": [
"# Invoke the agent and stream each node's update as it happens.\n",
"messages = [HumanMessage(content=\"Add 3 and 4.\")]\n",
"update_stream = agent.stream(messages, stream_mode=\"updates\")\n",
"for update in update_stream:\n",
"    print(update)\n",
"    print(\"\\n\")"
],
|
||
"id": "7c4b06da8200b106",
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"{'call_llm': AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'function', 'function': {'name': 'add', 'arguments': '{\"a\": 3, \"b\": 4}'}}]}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'tool_calls', 'request_id': '6d8c6555-1a67-4cc9-a93f-57e94bc20842', 'token_usage': {'input_tokens': 354, 'output_tokens': 22, 'total_tokens': 376, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--afcea3de-940e-45c6-ba96-bbd7e41fa115-0', tool_calls=[{'name': 'add', 'args': {'a': 3, 'b': 4}, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'tool_call'}], chunk_position=None)}\n",
|
||
"\n",
|
||
"\n",
|
||
"{'call_tool': ToolMessage(content='7', name='add', id='aeaf3d29-254b-48ab-a933-814e9ea72394', tool_call_id='call_ef8c897dd4f84afbbf0927')}\n",
|
||
"\n",
|
||
"\n",
|
||
"{'call_llm': AIMessage(content='The sum of 3 and 4 is 7.', additional_kwargs={}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'stop', 'request_id': '310102ab-48dc-4e80-bc57-ca8814239a65', 'token_usage': {'input_tokens': 386, 'output_tokens': 13, 'total_tokens': 399, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--b3dffae8-42c2-492e-a1f4-e659eba6a879-0', chunk_position=None)}\n",
|
||
"\n",
|
||
"\n",
|
||
"{'agent': [HumanMessage(content='Add 3 and 4.', additional_kwargs={}, response_metadata={}, id='40fc3758-a8ab-4302-aff9-8dfbdec16fa0'), AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'function', 'function': {'name': 'add', 'arguments': '{\"a\": 3, \"b\": 4}'}}]}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'tool_calls', 'request_id': '6d8c6555-1a67-4cc9-a93f-57e94bc20842', 'token_usage': {'input_tokens': 354, 'output_tokens': 22, 'total_tokens': 376, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--afcea3de-940e-45c6-ba96-bbd7e41fa115-0', tool_calls=[{'name': 'add', 'args': {'a': 3, 'b': 4}, 'id': 'call_ef8c897dd4f84afbbf0927', 'type': 'tool_call'}], chunk_position=None), ToolMessage(content='7', name='add', id='aeaf3d29-254b-48ab-a933-814e9ea72394', tool_call_id='call_ef8c897dd4f84afbbf0927'), AIMessage(content='The sum of 3 and 4 is 7.', additional_kwargs={}, response_metadata={'model_name': 'qwen-max', 'finish_reason': 'stop', 'request_id': '310102ab-48dc-4e80-bc57-ca8814239a65', 'token_usage': {'input_tokens': 386, 'output_tokens': 13, 'total_tokens': 399, 'prompt_tokens_details': {'cached_tokens': 0}}}, id='lc_run--b3dffae8-42c2-492e-a1f4-e659eba6a879-0', chunk_position=None)]}\n",
|
||
"\n",
|
||
"\n"
|
||
]
|
||
}
|
||
],
|
||
"execution_count": 3
|
||
},
|
||
{
|
||
"metadata": {},
|
||
"cell_type": "code",
|
||
"outputs": [],
|
||
"execution_count": null,
|
||
"source": "",
|
||
"id": "7e55492ae0289f06"
|
||
}
|
||
],
|
||
"metadata": {
|
||
"kernelspec": {
|
||
"display_name": "Python 3",
|
||
"language": "python",
|
||
"name": "python3"
|
||
},
|
||
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3"
}
|
||
},
|
||
"nbformat": 4,
|
||
"nbformat_minor": 5
|
||
}
|