Simplify the boilerplate

Signed-off-by: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
William Fu-Hinthorn committed 2025-05-08 13:48:39 -07:00
parent 049ec8952e
commit 6627a2a05f
7 changed files with 52 additions and 74 deletions

agent/configuration.py (deleted)

@@ -1,28 +0,0 @@
"""Define the configurable parameters for the agent."""
from __future__ import annotations
from dataclasses import dataclass, fields
from typing import Optional
from langchain_core.runnables import RunnableConfig
@dataclass(kw_only=True)
class Configuration:
"""The configuration for the agent."""
# Changeme: Add configurable values here!
# these values can be pre-set when you
# create assistants (https://langchain-ai.github.io/langgraph/cloud/how-tos/configuration_cloud/)
# and when you invoke the graph
my_configurable_param: str = "changeme"
@classmethod
def from_runnable_config(
cls, config: Optional[RunnableConfig] = None
) -> Configuration:
"""Create a Configuration instance from a RunnableConfig object."""
configurable = (config.get("configurable") or {}) if config else {}
_fields = {f.name for f in fields(cls) if f.init}
return cls(**{k: v for k, v in configurable.items() if k in _fields})
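For context on what boilerplate goes away: the deleted from_runnable_config helper rebuilt a Configuration dataclass from the "configurable" mapping of the incoming RunnableConfig; after this commit a node reads that mapping directly. A minimal before/after sketch of a node body (hypothetical node names and placeholder default, not part of the diff):

from typing import Any, Dict

from langchain_core.runnables import RunnableConfig


async def node_before(state: Any, config: RunnableConfig) -> Dict[str, Any]:
    # Old pattern: depends on the agent.configuration module deleted above.
    from agent.configuration import Configuration

    configuration = Configuration.from_runnable_config(config)
    return {"changeme": configuration.my_configurable_param}


async def node_after(state: Any, config: RunnableConfig) -> Dict[str, Any]:
    # New pattern: read the "configurable" dict off the RunnableConfig directly.
    configuration = config["configurable"]
    return {"changeme": configuration.get("my_configurable_param", "changeme")}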

agent/graph.py

@@ -1,38 +1,52 @@
"""Define a simple chatbot agent.
"""LangGraph single-node graph template.
This agent returns a predefined response without using an actual LLM.
Returns a predefined response. Replace logic and configuration as needed.
"""
from typing import Any, Dict
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, TypedDict
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph
from agent.configuration import Configuration
from agent.state import State
class Configuration(TypedDict):
"""Configurable parameters for the agent.
Set these when creating assistants OR when invoking the graph.
See: https://langchain-ai.github.io/langgraph/cloud/how-tos/configuration_cloud/
"""
my_configurable_param: str
@dataclass
class State:
"""Input state for the agent.
Defines the initial structure of incoming data.
See: https://langchain-ai.github.io/langgraph/concepts/low_level/#state
"""
changeme: str = "example"
async def my_node(state: State, config: RunnableConfig) -> Dict[str, Any]:
"""Each node does work."""
configuration = Configuration.from_runnable_config(config)
# configuration = Configuration.from_runnable_config(config)
# You can use runtime configuration to alter the behavior of your
# graph.
"""Example node: processes input and returns output.
Can use runtime configuration to alter behavior.
"""
configuration = config["configurable"]
return {
"changeme": "output from my_node. "
f"Configured with {configuration.my_configurable_param}"
f'Configured with {configuration.get("my_configurable_param")}'
}
# Define a new graph
workflow = StateGraph(State, config_schema=Configuration)
# Add the node to the graph
workflow.add_node("my_node", my_node)
# Set the entrypoint as `call_model`
workflow.add_edge("__start__", "my_node")
# Compile the workflow into an executable graph
graph = workflow.compile()
graph.name = "New Graph" # This defines the custom name in LangSmith
# Define the graph
graph = (
StateGraph(State, config_schema=Configuration)
.add_node(my_node)
.add_edge("__start__", "my_node")
.compile(name="New Graph")
)
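A quick invocation sketch for the simplified graph (assumes the template is installed so the module above is importable as agent.graph; the input value and configurable value are placeholders):

import asyncio

from agent.graph import graph  # module path assumed from the template layout


async def main() -> None:
    result = await graph.ainvoke(
        {"changeme": "hello"},  # coerced into the State dataclass
        config={"configurable": {"my_configurable_param": "set at runtime"}},
    )
    # Expected shape: {"changeme": "output from my_node. Configured with set at runtime"}
    print(result)


asyncio.run(main())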

agent/state.py (deleted)

@@ -1,17 +0,0 @@
"""Define the state structures for the agent."""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class State:
"""Defines the input state for the agent, representing a narrower interface to the outside world.
This class is used to define the initial state and structure of incoming data.
See: https://langchain-ai.github.io/langgraph/concepts/low_level/#state
for more information.
"""
changeme: str = "example"