Initial commit

0  .codespellignore  Normal file

11  .env.example  Normal file
@@ -0,0 +1,11 @@
TAVILY_API_KEY=...

# To separate your traces from other applications
LANGSMITH_PROJECT=retrieval-agent

# The following depend on your selected configuration

## LLM choice:
ANTHROPIC_API_KEY=....
FIREWORKS_API_KEY=...
OPENAI_API_KEY=...

43  .github/workflows/integration-tests.yml  vendored  Normal file
@@ -0,0 +1,43 @@
# This workflow will run integration tests for the current project once per day

name: Integration Tests

on:
  schedule:
    - cron: "37 14 * * *" # Run at 7:37 AM Pacific Time (14:37 UTC) every day
  workflow_dispatch: # Allows triggering the workflow manually in GitHub UI

# If another scheduled run starts while this workflow is still running,
# cancel the earlier run in favor of the next run.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  integration-tests:
    name: Integration Tests
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: ["3.11", "3.12"]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          uv venv
          uv pip install -r pyproject.toml
          uv pip install -U pytest-asyncio
      - name: Run integration tests
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
          LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }}
          LANGSMITH_TRACING: true
        run: |
          uv run pytest tests/integration_tests

57  .github/workflows/unit-tests.yml  vendored  Normal file
@@ -0,0 +1,57 @@
# This workflow will run unit tests for the current project

name: CI

on:
  push:
    branches: ["main"]
  pull_request:
  workflow_dispatch: # Allows triggering the workflow manually in GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  unit-tests:
    name: Unit Tests
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: ["3.11", "3.12"]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          uv venv
          uv pip install -r pyproject.toml
      - name: Lint with ruff
        run: |
          uv pip install ruff
          uv run ruff check .
      - name: Lint with mypy
        run: |
          uv pip install mypy
          uv run mypy --strict src/
      - name: Check README spelling
        uses: codespell-project/actions-codespell@v2
        with:
          ignore_words_file: .codespellignore
          path: README.md
      - name: Check code spelling
        uses: codespell-project/actions-codespell@v2
        with:
          ignore_words_file: .codespellignore
          path: src/
      - name: Run tests with pytest
        run: |
          uv pip install pytest
          uv run pytest tests/unit_tests

162  .gitignore  vendored  Normal file
@@ -0,0 +1,162 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

21  LICENSE  Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 LangChain

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

64  Makefile  Normal file
@@ -0,0 +1,64 @@
.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests

# Default target executed when no arguments are given to make.
all: help

# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/

test:
	python -m pytest $(TEST_FILE)

test_watch:
	python -m ptw --snapshot-update --now . -- -vv tests/unit_tests

test_profile:
	python -m pytest -vv tests/unit_tests/ --profile-svg

extended_tests:
	python -m pytest --only-extended $(TEST_FILE)


######################
# LINTING AND FORMATTING
######################

# Define a variable for Python and notebook files.
PYTHON_FILES=src/
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d main | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=src
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test

lint lint_diff lint_package lint_tests:
	python -m ruff check .
	[ "$(PYTHON_FILES)" = "" ] || python -m ruff format $(PYTHON_FILES) --diff
	[ "$(PYTHON_FILES)" = "" ] || python -m ruff check --select I $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || python -m mypy --strict $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && python -m mypy --strict $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)

format format_diff:
	ruff format $(PYTHON_FILES)
	ruff check --select I --fix $(PYTHON_FILES)

spell_check:
	codespell --toml pyproject.toml

spell_fix:
	codespell --toml pyproject.toml -w

######################
# HELP
######################

help:
	@echo '----'
	@echo 'format                       - run code formatters'
	@echo 'lint                         - run linters'
	@echo 'test                         - run unit tests'
	@echo 'tests                        - run unit tests'
	@echo 'test TEST_FILE=<test_file>   - run all tests in file'
	@echo 'test_watch                   - run unit tests in watch mode'

463  README.md  Normal file
@@ -0,0 +1,463 @@
# LangGraph ReAct Agent Template

[![CI](https://github.com/langchain-ai/react-agent/actions/workflows/unit-tests.yml/badge.svg)](https://github.com/langchain-ai/react-agent/actions/workflows/unit-tests.yml)
[![Integration Tests](https://github.com/langchain-ai/react-agent/actions/workflows/integration-tests.yml/badge.svg)](https://github.com/langchain-ai/react-agent/actions/workflows/integration-tests.yml)

This template showcases a [ReAct agent](https://arxiv.org/abs/2210.03629) implemented using [LangGraph](https://github.com/langchain-ai/langgraph), designed for [LangGraph Studio](https://github.com/langchain-ai/langgraph-studio). ReAct agents are uncomplicated, prototypical agents that can be flexibly extended to many tools.

![Graph view in LangGraph Studio UI](./static/studio_ui.png)

The core logic, defined in `src/react_agent/graph.py`, demonstrates a flexible ReAct agent that iteratively reasons about user queries and executes actions, showcasing the power of this approach for complex problem-solving tasks.

## What it does

The ReAct agent:

1. Takes a user **query** as input
2. Reasons about the query and decides on an action
3. Executes the chosen action using available tools
4. Observes the result of the action
5. Repeats steps 2-4 until it can provide a final answer

By default, it's set up with a basic set of tools, but it can be easily extended with custom tools to suit various use cases.
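
Outside Studio, the compiled graph can also be invoked directly. A minimal sketch (assumes dependencies are installed and the required API keys are exported in your environment; the query mirrors the integration test):

```python
import asyncio

from react_agent import graph


async def main() -> None:
    # The graph takes the conversation so far and returns the updated state.
    result = await graph.ainvoke(
        {"messages": [("user", "Who is the founder of LangChain?")]}
    )
    print(result["messages"][-1].content)


asyncio.run(main())
```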

## Getting Started

Assuming you have already [installed LangGraph Studio](https://github.com/langchain-ai/langgraph-studio?tab=readme-ov-file#download), to set up:

1. Create a `.env` file.

   ```bash
   cp .env.example .env
   ```

2. Define required API keys in your `.env` file.

   The primary [search tool](./src/react_agent/tools.py) [^1] used is [Tavily](https://tavily.com/). Create an API key [here](https://app.tavily.com/sign-in).

   <!--
   Setup instruction auto-generated by `langgraph template lock`. DO NOT EDIT MANUALLY.
   -->

   Set up your LLM API keys. This repo defaults to using [Claude](https://console.anthropic.com/login).

   <!--
   End setup instructions
   -->

3. Customize whatever you'd like in the code.
4. Open the folder in LangGraph Studio!

## How to customize

1. **Add new tools**: Extend the agent's capabilities by adding new tools in [tools.py](./src/react_agent/tools.py). These can be any Python functions that perform specific tasks; see the sketch below.
2. **Select a different model**: We default to Anthropic's Claude 3.5 Sonnet. You can select a compatible chat model using `provider/model-name` via configuration. Example: `openai/gpt-4-turbo-preview`.
3. **Customize the prompt**: We provide a default system prompt in [configuration.py](./src/react_agent/configuration.py). You can easily update this via configuration in the studio.
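
For instance, a new tool is just another async function appended to `TOOLS`. A minimal sketch (the `current_time` tool is hypothetical, not part of the template):

```python
from datetime import datetime, timezone


async def current_time() -> str:
    """Return the current UTC time in ISO-8601 format."""
    return datetime.now(tz=timezone.utc).isoformat()


# Then register it in src/react_agent/tools.py alongside the existing tools:
# TOOLS: List[Callable[..., Any]] = [scrape_webpage, search, current_time]
```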

You can also quickly extend this template by:

- Modifying the agent's reasoning process in [graph.py](./src/react_agent/graph.py).
- Adjusting the ReAct loop or adding additional steps to the agent's decision-making process.

## Development

While iterating on your graph, you can edit past state and rerun your app from past states to debug specific nodes. Local changes will be automatically applied via hot reload. Try adding an interrupt before the agent calls tools, updating the default system message in `src/react_agent/configuration.py` to take on a persona, or adding additional nodes and edges!

Follow-up requests will be appended to the same thread. You can create an entirely new thread, clearing previous history, using the `+` button in the top right.

You can find the latest (under construction) docs on [LangGraph](https://github.com/langchain-ai/langgraph), including examples and other references. Using those guides can help you pick the right patterns to adapt here for your use case.

LangGraph Studio also integrates with [LangSmith](https://smith.langchain.com/) for more in-depth tracing and collaboration with teammates.

<!--
Configuration auto-generated by `langgraph template lock`. DO NOT EDIT MANUALLY.
{
  "config_schemas": {
    "agent": {
      "type": "object",
      "properties": {
        "system_prompt": {
          "type": "string",
          "default": "You are a helpful AI assistant.\n\nSystem time: {system_time}"
        },
        "model_name": {
          "type": "string",
          "default": "anthropic/claude-3-5-sonnet-20240620",
          "environment": [
            { "value": "anthropic/claude-1.2", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-2.0", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-2.1", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-5-sonnet-20240620", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-haiku-20240307", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-opus-20240229", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-sonnet-20240229", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-instant-1.2", "variables": "ANTHROPIC_API_KEY" },
            { "value": "fireworks/gemma2-9b-it", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-70b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-70b-instruct-hf", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-8b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-8b-instruct-hf", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-405b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-405b-instruct-long", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-70b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-8b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mixtral-8x22b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mixtral-8x7b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mixtral-8x7b-instruct-hf", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mythomax-l2-13b", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/phi-3-vision-128k-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/phi-3p5-vision-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/starcoder-16b", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/yi-large", "variables": "FIREWORKS_API_KEY" },
            { "value": "openai/gpt-3.5-turbo", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-0125", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-0301", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-1106", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-16k", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-16k-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-0125-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-0314", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-1106-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-32k", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-32k-0314", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-32k-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-turbo", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-turbo-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-vision-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4o", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4o-mini", "variables": "OPENAI_API_KEY" }
          ]
        },
        "scraper_tool_model_name": {
          "type": "string",
          "default": "accounts/fireworks/models/firefunction-v2",
          "environment": [
            { "value": "anthropic/claude-1.2", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-2.0", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-2.1", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-5-sonnet-20240620", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-haiku-20240307", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-opus-20240229", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-3-sonnet-20240229", "variables": "ANTHROPIC_API_KEY" },
            { "value": "anthropic/claude-instant-1.2", "variables": "ANTHROPIC_API_KEY" },
            { "value": "fireworks/gemma2-9b-it", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-70b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-70b-instruct-hf", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-8b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3-8b-instruct-hf", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-405b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-405b-instruct-long", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-70b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/llama-v3p1-8b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mixtral-8x22b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mixtral-8x7b-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mixtral-8x7b-instruct-hf", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/mythomax-l2-13b", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/phi-3-vision-128k-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/phi-3p5-vision-instruct", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/starcoder-16b", "variables": "FIREWORKS_API_KEY" },
            { "value": "fireworks/yi-large", "variables": "FIREWORKS_API_KEY" },
            { "value": "openai/gpt-3.5-turbo", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-0125", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-0301", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-1106", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-16k", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-3.5-turbo-16k-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-0125-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-0314", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-1106-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-32k", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-32k-0314", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-32k-0613", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-turbo", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-turbo-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4-vision-preview", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4o", "variables": "OPENAI_API_KEY" },
            { "value": "openai/gpt-4o-mini", "variables": "OPENAI_API_KEY" }
          ]
        },
        "max_search_results": {
          "type": "integer",
          "default": 10
        }
      }
    }
  }
}
-->

7  langgraph.json  Normal file
@@ -0,0 +1,7 @@
{
  "dependencies": ["."],
  "graphs": {
    "agent": "./src/react_agent/graph.py:graph"
  },
  "env": ".env"
}

2166  poetry.lock  generated  Normal file
File diff suppressed because it is too large

62  pyproject.toml  Normal file
@@ -0,0 +1,62 @@
[project]
name = "react-agent"
version = "0.0.1"
description = "Starter template for making a custom Reasoning and Action agent (using tool calling) in LangGraph."
authors = [
    { name = "William Fu-Hinthorn", email = "13333726+hinthornw@users.noreply.github.com" },
]
readme = "README.md"
license = { text = "MIT" }
requires-python = ">=3.9"
dependencies = [
    "langgraph>=0.2.6",
    "langchain-openai>=0.1.22",
    "langchain-anthropic>=0.1.23",
    "langchain>=0.2.14",
    "langchain-fireworks>=0.1.7",
    "python-dotenv>=1.0.1",
    "langchain-community>=0.2.17",
    "tavily-python>=0.4.0",
]

[project.optional-dependencies]
dev = ["mypy>=1.11.1", "ruff>=0.6.1"]

[build-system]
requires = ["setuptools>=73.0.0", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools]
packages = ["langgraph.templates.react_agent", "react_agent"]

[tool.setuptools.package-dir]
"langgraph.templates.react_agent" = "src/react_agent"
"react_agent" = "src/react_agent"

[tool.setuptools.package-data]
"*" = ["py.typed"]

[tool.ruff]
lint.select = [
    "E",    # pycodestyle
    "F",    # pyflakes
    "I",    # isort
    "D",    # pydocstyle
    "D401", # First line should be in imperative mood
    "T201",
    "UP",
]
lint.ignore = [
    "UP006",
    "UP007",
    # We actually do want to import from typing_extensions
    "UP035",
    # Relax the convention by _not_ requiring documentation for every function parameter.
    "D417",
    "E501",
]

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["D", "UP"]

[tool.ruff.lint.pydocstyle]
convention = "google"

9  src/react_agent/__init__.py  Normal file
@@ -0,0 +1,9 @@
"""React Agent.

This module defines a custom reasoning and action agent graph.
It invokes tools in a simple loop.
"""

from react_agent.graph import graph

__all__ = ["graph"]

49  src/react_agent/configuration.py  Normal file
@@ -0,0 +1,49 @@
"""Define the configurable parameters for the agent."""

from __future__ import annotations

from dataclasses import dataclass, field, fields
from typing import Annotated, Optional

from langchain_core.runnables import RunnableConfig, ensure_config

from react_agent import prompts


@dataclass(kw_only=True)
class Configuration:
    """The configuration for the agent."""

    system_prompt: str = field(default=prompts.SYSTEM_PROMPT)
    """The system prompt to use for the agent's interactions.

    This prompt sets the context and behavior for the agent.
    """

    model_name: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = (
        "anthropic/claude-3-5-sonnet-20240620"
    )
    """The name of the language model to use for the agent's main interactions.

    Should be in the form: provider/model-name.
    """

    scraper_tool_model_name: Annotated[
        str, {"__template_metadata__": {"kind": "llm"}}
    ] = "accounts/fireworks/models/firefunction-v2"
    """The name of the language model to use for the web scraping tool.

    This model is specifically used for summarizing and extracting information from web pages.
    """

    max_search_results: int = 10
    """The maximum number of search results to return for each search query."""

    @classmethod
    def from_runnable_config(
        cls, config: Optional[RunnableConfig] = None
    ) -> Configuration:
        """Create a Configuration instance from a RunnableConfig object."""
        config = ensure_config(config)
        configurable = config.get("configurable") or {}
        _fields = {f.name for f in fields(cls) if f.init}
        return cls(**{k: v for k, v in configurable.items() if k in _fields})
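
Because `from_runnable_config` filters the `configurable` mapping down to the dataclass fields above, any of them can be overridden per invocation. A minimal sketch (override values chosen for illustration):

```python
import asyncio

from react_agent import graph

# Per-run overrides travel in the standard "configurable" key of the config.
config = {
    "configurable": {
        "model_name": "openai/gpt-4o-mini",
        "max_search_results": 3,
    }
}
result = asyncio.run(graph.ainvoke({"messages": [("user", "hi")]}, config))
```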

127  src/react_agent/graph.py  Normal file
@@ -0,0 +1,127 @@
"""Define a custom Reasoning and Action agent.

Works with a chat model with tool calling support.
"""

from datetime import datetime, timezone
from typing import Dict, List, Literal, cast

from langchain_core.messages import AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode

from react_agent.configuration import Configuration
from react_agent.state import InputState, State
from react_agent.tools import TOOLS
from react_agent.utils import load_chat_model

# Define the function that calls the model


async def call_model(
    state: State, config: RunnableConfig
) -> Dict[str, List[AIMessage]]:
    """Call the LLM powering our "agent".

    This function prepares the prompt, initializes the model, and processes the response.

    Args:
        state (State): The current state of the conversation.
        config (RunnableConfig): Configuration for the model run.

    Returns:
        dict: A dictionary containing the model's response message.
    """
    configuration = Configuration.from_runnable_config(config)

    # Create a prompt template. Customize this to change the agent's behavior.
    prompt = ChatPromptTemplate.from_messages(
        [("system", configuration.system_prompt), ("placeholder", "{messages}")]
    )

    # Initialize the model with tool binding. Change the model or add more tools here.
    model = load_chat_model(configuration.model_name).bind_tools(TOOLS)

    # Prepare the input for the model, including the current system time
    message_value = await prompt.ainvoke(
        {
            "messages": state.messages,
            "system_time": datetime.now(tz=timezone.utc).isoformat(),
        },
        config,
    )

    # Get the model's response
    response = cast(AIMessage, await model.ainvoke(message_value, config))

    # Handle the case when it's the last step and the model still wants to use a tool
    if state.is_last_step and response.tool_calls:
        return {
            "messages": [
                AIMessage(
                    id=response.id,
                    content="Sorry, I could not find an answer to your question in the specified number of steps.",
                )
            ]
        }

    # Return the model's response as a list to be added to existing messages
    return {"messages": [response]}


# Define a new graph

workflow = StateGraph(State, input=InputState, config_schema=Configuration)

# Define the two nodes we will cycle between
workflow.add_node(call_model)
workflow.add_node("tools", ToolNode(TOOLS))

# Set the entrypoint as `call_model`
# This means that this node is the first one called
workflow.add_edge("__start__", "call_model")


def route_model_output(state: State) -> Literal["__end__", "tools"]:
    """Determine the next node based on the model's output.

    This function checks if the model's last message contains tool calls.

    Args:
        state (State): The current state of the conversation.

    Returns:
        str: The name of the next node to call ("__end__" or "tools").
    """
    last_message = state.messages[-1]
    if not isinstance(last_message, AIMessage):
        raise ValueError(
            f"Expected AIMessage in output edges, but got {type(last_message).__name__}"
        )
    # If there is no tool call, then we finish
    if not last_message.tool_calls:
        return "__end__"
    # Otherwise we execute the requested actions
    return "tools"


# Add a conditional edge to determine the next step after `call_model`
workflow.add_conditional_edges(
    "call_model",
    # After call_model finishes running, the next node(s) are scheduled
    # based on the output from route_model_output
    route_model_output,
)

# Add a normal edge from `tools` to `call_model`
# This creates a cycle: after using tools, we always return to the model
workflow.add_edge("tools", "call_model")

# Compile the workflow into an executable graph
# You can customize this by adding interrupt points for state updates
graph = workflow.compile(
    interrupt_before=[],  # Add node names here to update state before they're called
    interrupt_after=[],  # Add node names here to update state after they're called
)
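
As the compile-time comments suggest, pausing the graph around tool execution only takes naming the node. A minimal sketch (an alternative compilation, not the template default):

```python
# Pause for review (e.g. a human-in-the-loop state update) before each tool call.
graph = workflow.compile(interrupt_before=["tools"])
```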

5  src/react_agent/prompts.py  Normal file
@@ -0,0 +1,5 @@
"""Default prompts used by the agent."""

SYSTEM_PROMPT = """You are a helpful AI assistant.

System time: {system_time}"""
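
The `{system_time}` placeholder is filled in at call time by the prompt template in `graph.py`; plain `str.format` demonstrates the same substitution. A minimal sketch:

```python
from datetime import datetime, timezone

from react_agent import prompts

# Produces the fully rendered system message the agent would receive.
system_message = prompts.SYSTEM_PROMPT.format(
    system_time=datetime.now(tz=timezone.utc).isoformat()
)
```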

60  src/react_agent/state.py  Normal file
@@ -0,0 +1,60 @@
"""Define the state structures for the agent."""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Sequence

from langchain_core.messages import AnyMessage
from langgraph.graph import add_messages
from langgraph.managed import IsLastStep
from typing_extensions import Annotated


@dataclass
class InputState:
    """Defines the input state for the agent, representing a narrower interface to the outside world.

    This class is used to define the initial state and structure of incoming data.
    """

    messages: Annotated[Sequence[AnyMessage], add_messages] = field(
        default_factory=list
    )
    """
    Messages tracking the primary execution state of the agent.

    Typically accumulates a pattern of:
    1. HumanMessage - user input
    2. AIMessage with .tool_calls - agent picking tool(s) to use to collect information
    3. ToolMessage(s) - the responses (or errors) from the executed tools
    4. AIMessage without .tool_calls - agent responding in unstructured format to the user
    5. HumanMessage - user responds with the next conversational turn

    Steps 2-5 may repeat as needed.

    The `add_messages` annotation ensures that new messages are merged with existing ones,
    updating by ID to maintain an "append-only" state unless a message with the same ID is provided.
    """


@dataclass
class State(InputState):
    """Represents the complete state of the agent, extending InputState with additional attributes.

    This class can be used to store any information needed throughout the agent's lifecycle.
    """

    is_last_step: IsLastStep = field(default=False)
    """
    Indicates whether the current step is the last one before the graph raises an error.

    This is a 'managed' variable, controlled by the state machine rather than user code.
    It is set to 'True' when the step count reaches recursion_limit - 1.
    """

    # Additional attributes can be added here as needed.
    # Common examples include:
    # retrieved_documents: List[Document] = field(default_factory=list)
    # extracted_entities: Dict[str, Any] = field(default_factory=dict)
    # api_connections: Dict[str, Any] = field(default_factory=dict)
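
The merge semantics described in the `messages` docstring can be exercised directly. A minimal sketch (message contents are illustrative):

```python
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph import add_messages

existing = [HumanMessage(id="1", content="What is LangGraph?")]
# A message with a new ID is appended...
merged = add_messages(existing, [AIMessage(id="2", content="A graph framework.")])
# ...while reusing ID "2" replaces the earlier message instead of appending.
updated = add_messages(merged, [AIMessage(id="2", content="Revised answer.")])
```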

74  src/react_agent/tools.py  Normal file
@@ -0,0 +1,74 @@
"""This module provides example tools for web scraping and search functionality.

It includes:
- A web scraper that uses an LLM to summarize content based on instructions
- A basic Tavily search function

These tools are intended as free examples to get started. For production use,
consider implementing more robust and specialized tools tailored to your needs.
"""

from datetime import datetime, timezone
from typing import Any, Callable, List, Optional, cast

import httpx
from langchain.chat_models import init_chat_model
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import InjectedToolArg
from typing_extensions import Annotated

from react_agent.configuration import Configuration
from react_agent.utils import get_message_text


# Note that arguments typed as "RunnableConfig" in tools will be excluded from the schema
# generated for the model. They are treated as "injected arguments".
async def scrape_webpage(url: str, instructions: str, *, config: RunnableConfig) -> str:
    """Scrape the given webpage and return a summary of text based on the instructions.

    Args:
        url: The URL of the webpage to scrape.
        instructions: The instructions to give to the scraper. An LLM will be used to
            respond using the instructions and the scraped text.
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
        web_text = response.text

    configuration = Configuration.from_runnable_config(config)
    # Use the dedicated scraper model (see Configuration.scraper_tool_model_name).
    model = init_chat_model(configuration.scraper_tool_model_name)
    response_msg = await model.ainvoke(
        [
            (
                "system",
                "You are a helpful web scraper AI assistant. You are working in extractive"
                " Q&A mode, meaning you refrain from making overly abstractive responses."
                " Respond to the user's instructions based on the provided webpage."
                " If you are unable to answer the question, let the user know. Do not guess."
                " Provide citations and direct quotes when possible."
                f"\n\n<webpage_text>\n{web_text}\n</webpage_text>"
                f"\n\nSystem time: {datetime.now(tz=timezone.utc)}",
            ),
            ("user", instructions),
        ]
    )
    return get_message_text(response_msg)


async def search(
    query: str, *, config: Annotated[RunnableConfig, InjectedToolArg]
) -> Optional[list[dict[str, Any]]]:
    """Search for general web results.

    This function performs a search using the Tavily search engine, which is designed
    to provide comprehensive, accurate, and trusted results. It's particularly useful
    for answering questions about current events.
    """
    configuration = Configuration.from_runnable_config(config)
    wrapped = TavilySearchResults(max_results=configuration.max_search_results)
    result = await wrapped.ainvoke({"query": query})
    return cast(list[dict[str, Any]], result)


TOOLS: List[Callable[..., Any]] = [scrape_webpage, search]
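
Both tools are ordinary async functions, so they can be tried outside the graph. A minimal sketch (assumes `TAVILY_API_KEY` is set; the injected `config` can be an empty dict for a direct call, which falls back to the `Configuration` defaults):

```python
import asyncio

from react_agent.tools import search

results = asyncio.run(search("LangGraph release notes", config={}))
```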

27  src/react_agent/utils.py  Normal file
@@ -0,0 +1,27 @@
"""Utility & helper functions."""

from langchain.chat_models import init_chat_model
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import BaseMessage


def get_message_text(msg: BaseMessage) -> str:
    """Get the text content of a message."""
    content = msg.content
    if isinstance(content, str):
        return content
    elif isinstance(content, dict):
        return content.get("text", "")
    else:
        txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content]
        return "".join(txts).strip()


def load_chat_model(fully_specified_name: str) -> BaseChatModel:
    """Load a chat model from a fully specified name.

    Args:
        fully_specified_name (str): String in the format 'provider/model'.
    """
    provider, model = fully_specified_name.split("/", maxsplit=1)
    return init_chat_model(model, model_provider=provider)
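
`load_chat_model` splits on the first `/` and defers to `init_chat_model`. A minimal usage sketch (assumes the matching API key is set in your environment):

```python
from react_agent.utils import load_chat_model

# "provider/model" is split once and forwarded as (model, model_provider).
model = load_chat_model("anthropic/claude-3-5-sonnet-20240620")
response = model.invoke("Hello!")
```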

BIN  static/studio_ui.png  Normal file
Binary file not shown.
After: 902 KiB

1  tests/integration_tests/__init__.py  Normal file
@@ -0,0 +1 @@
"""Define any integration tests you want in this directory."""

14  tests/integration_tests/test_graph.py  Normal file
@@ -0,0 +1,14 @@
import pytest
from langsmith import unit

from react_agent import graph


@pytest.mark.asyncio
@unit
async def test_react_agent_simple_passthrough() -> None:
    res = await graph.ainvoke(
        {"messages": [("user", "Who is the founder of LangChain?")]}
    )

    assert "harrison" in str(res["messages"][-1].content).lower()

1  tests/unit_tests/__init__.py  Normal file
@@ -0,0 +1 @@
"""Define any unit tests you may want in this directory."""

5  tests/unit_tests/test_configuration.py  Normal file
@@ -0,0 +1,5 @@
from react_agent.configuration import Configuration


def test_configuration_empty():
    Configuration.from_runnable_config({})