William Fu-Hinthorn
2024-09-13 16:30:31 -07:00
parent 523dc46aff
commit 4e6582bd91
13 changed files with 29 additions and 113 deletions

View File

@@ -7,7 +7,7 @@ This template showcases a [ReAct agent](https://arxiv.org/abs/2210.03629) implem
![Graph view in LangGraph studio UI](./static/studio_ui.png)
The core logic, defined in `src/react_agent/graph.py`, demonstrates a flexible ReAct agent that iteratively reasons about user queries and executes actions, showcasing the power of this approach for complex problem-solving tasks.
The core logic, defined in `src/agent/graph.py`, demonstrates a flexible ReAct agent that iteratively reasons about user queries and executes actions, showcasing the power of this approach for complex problem-solving tasks.
## What it does
@@ -33,7 +33,7 @@ cp .env.example .env
2. Define required API keys in your `.env` file.
The primary [search tool](./src/react_agent/tools.py) [^1] used is [Tavily](https://tavily.com/). Create an API key [here](https://app.tavily.com/sign-in).
The primary [search tool](./src/agent/tools.py) [^1] used is [Tavily](https://tavily.com/). Create an API key [here](https://app.tavily.com/sign-in).
<!--
Setup instruction auto-generated by `langgraph template lock`. DO NOT EDIT MANUALLY.
@@ -50,18 +50,18 @@ End setup instructions
## How to customize
1. **Add new tools**: Extend the agent's capabilities by adding new tools in [tools.py](./src/react_agent/tools.py). These can be any Python functions that perform specific tasks.
1. **Add new tools**: Extend the agent's capabilities by adding new tools in [tools.py](./src/agent/tools.py). These can be any Python functions that perform specific tasks.
2. **Select a different model**: We default to Anthropic's Claude 3 Sonnet. You can select a compatible chat model using `provider/model-name` via configuration. Example: `openai/gpt-4-turbo-preview`.
3. **Customize the prompt**: We provide a default system prompt in [configuration.py](./src/react_agent/configuration.py). You can easily update this via configuration in the studio.
3. **Customize the prompt**: We provide a default system prompt in [configuration.py](./src/agent/configuration.py). You can easily update this via configuration in the studio.
You can also quickly extend this template by:
- Modifying the agent's reasoning process in [graph.py](./src/react_agent/graph.py).
- Modifying the agent's reasoning process in [graph.py](./src/agent/graph.py).
- Adjusting the ReAct loop or adding additional steps to the agent's decision-making process.
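
As a rough illustration of step 1 above, a new tool can be a plain Python function added to `src/agent/tools.py` and included in the `TOOLS` list the graph's `ToolNode` consumes. The function below is a hypothetical example, not part of the template:

```python
from typing import Any, Callable, List


async def get_word_length(word: str) -> int:
    """Return the number of characters in a word.

    Hypothetical example tool; replace it with something useful for your agent.
    """
    return len(word)


# In src/agent/tools.py you would append this to the existing TOOLS list
# instead of redefining it; shown standalone here so the sketch runs on its own.
TOOLS: List[Callable[..., Any]] = [get_word_length]
```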
## Development
While iterating on your graph, you can edit past state and rerun your app from past states to debug specific nodes. Local changes will be automatically applied via hot reload. Try adding an interrupt before the agent calls tools, updating the default system message in `src/react_agent/configuration.py` to take on a persona, or adding additional nodes and edges!
While iterating on your graph, you can edit past state and rerun your app from past states to debug specific nodes. Local changes will be automatically applied via hot reload. Try adding an interrupt before the agent calls tools, updating the default system message in `src/agent/configuration.py` to take on a persona, or adding additional nodes and edges!
Follow up requests will be appended to the same thread. You can create an entirely new thread, clearing previous history, using the `+` button in the top right.
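
A minimal sketch of the interrupt idea above, assuming a `"tools"` node built with the prebuilt `ToolNode` as in `src/agent/graph.py` further down; this is illustrative wiring, not the template's actual graph:

```python
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode

from agent.tools import TOOLS


def call_model(state: MessagesState) -> dict:
    """Placeholder standing in for the real model-calling node in src/agent/graph.py."""
    return {"messages": []}


builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_node("tools", ToolNode(TOOLS))
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "tools")
builder.add_edge("tools", END)

# The relevant argument is interrupt_before: execution pauses before the
# "tools" node so state can be inspected or edited before any tool runs.
# A checkpointer is needed locally to persist the paused run; in LangGraph
# Studio / the API, persistence is supplied for you.
graph = builder.compile(checkpointer=MemorySaver(), interrupt_before=["tools"])
```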

View File

@@ -1,7 +1,7 @@
{
"dependencies": ["."],
"graphs": {
"agent": "./src/react_agent/graph.py:graph"
"agent": "./src/agent/graph.py:graph"
},
"env": ".env"
}

View File

@@ -1,23 +1,14 @@
[project]
name = "react-agent"
name = "agent"
version = "0.0.1"
description = "Starter template for making a custom Reasoning and Action agent (using tool calling) in LangGraph."
description = "Starter template for making a new agent LangGraph."
authors = [
{ name = "William Fu-Hinthorn", email = "13333726+hinthornw@users.noreply.github.com" },
]
readme = "README.md"
license = { text = "MIT" }
requires-python = ">=3.9"
dependencies = [
"langgraph>=0.2.6",
"langchain-openai>=0.1.22",
"langchain-anthropic>=0.1.23",
"langchain>=0.2.14",
"langchain-fireworks>=0.1.7",
"python-dotenv>=1.0.1",
"langchain-community>=0.2.17",
"tavily-python>=0.4.0",
]
dependencies = ["langgraph>=0.2.6", "python-dotenv>=1.0.1"]
[project.optional-dependencies]
@@ -28,10 +19,10 @@ requires = ["setuptools>=73.0.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.setuptools]
packages = ["langgraph.templates.react_agent", "react_agent"]
packages = ["langgraph.templates.agent", "agent"]
[tool.setuptools.package-dir]
"langgraph.templates.react_agent" = "src/react_agent"
"react_agent" = "src/react_agent"
"langgraph.templates.agent" = "src/agent"
"agent" = "src/agent"
[tool.setuptools.package-data]

src/agent/__init__.py (new file)
View File

@@ -0,0 +1,8 @@
"""New LangGraph Agent.
This module defines a custom graph.
"""
from agent.graph import graph
__all__ = ["graph"]

View File

@@ -7,7 +7,7 @@ from typing import Annotated, Optional
from langchain_core.runnables import RunnableConfig, ensure_config
from react_agent import prompts
from agent import prompts
@dataclass(kw_only=True)

View File

@@ -12,10 +12,10 @@ from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode
from react_agent.configuration import Configuration
from react_agent.state import InputState, State
from react_agent.tools import TOOLS
from react_agent.utils import load_chat_model
from agent.configuration import Configuration
from agent.state import InputState, State
from agent.tools import TOOLS
from agent.utils import load_chat_model
# Define the function that calls the model
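
The hunk ends just before the node definitions. Purely as a hedged sketch of how these imports typically come together (the node name `call_model`, the `configuration.model_name` field, and the `state.messages` attribute are assumptions, not taken from this diff), a model-calling node might look like:

```python
from langchain_core.runnables import RunnableConfig

from agent.configuration import Configuration
from agent.state import State
from agent.tools import TOOLS
from agent.utils import load_chat_model


async def call_model(state: State, config: RunnableConfig) -> dict:
    """Sketch of a node that invokes the configured chat model with tool access.

    Assumes Configuration exposes the configured model name and State carries
    the running message list; the template's real node lives in src/agent/graph.py.
    """
    configuration = Configuration.from_runnable_config(config)
    model = load_chat_model(configuration.model_name).bind_tools(TOOLS)
    response = await model.ainvoke(state.messages)
    return {"messages": [response]}
```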

View File

@@ -1,9 +0,0 @@
"""React Agent.
This module defines a custom reasoning and action agent graph.
It invokes tools in a simple loop.
"""
from react_agent.graph import graph
__all__ = ["graph"]

View File

@@ -1,74 +0,0 @@
"""This module provides example tools for web scraping and search functionality.
It includes:
- A web scraper that uses an LLM to summarize content based on instructions
- A basic Tavily search function
These tools are intended as free examples to get started. For production use,
consider implementing more robust and specialized tools tailored to your needs.
"""
from datetime import datetime, timezone
from typing import Any, Callable, List, Optional, cast
import httpx
from langchain.chat_models import init_chat_model
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import InjectedToolArg
from typing_extensions import Annotated
from react_agent.configuration import Configuration
from react_agent.utils import get_message_text
# note that arguments typed as "RunnableConfig" in tools will be excluded from the schema generated
# for the model.
# They are treated as "injected arguments"
async def scrape_webpage(url: str, instructions: str, *, config: RunnableConfig) -> str:
    """Scrape the given webpage and return a summary of text based on the instructions.

    Args:
        url: The URL of the webpage to scrape.
        instructions: The instructions to give to the scraper. An LLM will be used to respond using the
            instructions and the scraped text.
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
        web_text = response.text

    configuration = Configuration.from_runnable_config(config)
    model = init_chat_model(configuration.model_name)
    response_msg = await model.ainvoke(
        [
            (
                "system",
                "You are a helpful web scraper AI assistant. You are working in extractive Q&A mode, meaning you refrain from making overly abstractive responses."
                "Respond to the user's instructions."
                " Based on the provided webpage. If you are unable to answer the question, let the user know. Do not guess."
                " Provide citations and direct quotes when possible."
                f" \n\n<webpage_text>\n{web_text}\n</webpage_text>"
                f"\n\nSystem time: {datetime.now(tz=timezone.utc)}",
            ),
            ("user", instructions),
        ]
    )
    return get_message_text(response_msg)


async def search(
    query: str, *, config: Annotated[RunnableConfig, InjectedToolArg]
) -> Optional[list[dict[str, Any]]]:
    """Search for general web results.

    This function performs a search using the Tavily search engine, which is designed
    to provide comprehensive, accurate, and trusted results. It's particularly useful
    for answering questions about current events.
    """
    configuration = Configuration.from_runnable_config(config)
    wrapped = TavilySearchResults(max_results=configuration.max_search_results)
    result = await wrapped.ainvoke({"query": query})
    return cast(list[dict[str, Any]], result)


TOOLS: List[Callable[..., Any]] = [scrape_webpage, search]

View File

@@ -1,12 +1,12 @@
import pytest
from langsmith import unit
from react_agent import graph
from agent import graph
@pytest.mark.asyncio
@unit
async def test_react_agent_simple_passthrough() -> None:
async def test_agent_simple_passthrough() -> None:
    res = await graph.ainvoke(
        {"messages": [("user", "Who is the founder of LangChain?")]}
    )

View File

@@ -1,4 +1,4 @@
from react_agent.configuration import Configuration
from agent.configuration import Configuration
def test_configuration_empty():