LangChain provides chain primitives and integrations — model wrappers, prompt templates, vector stores, output parsers. LangGraph builds on LangChain to define stateful agent graphs: nodes are LLM calls or tool executions, edges route based on output, and state persists across steps. The result is controllable agent workflows where you specify the possible paths rather than letting the model loop freely. Claude Code writes LangGraph state definitions, node functions, conditional edge logic, and the LangSmith tracing that makes agent runs debuggable.
CLAUDE.md for LangChain/LangGraph Projects
## Agent Stack
- LangChain 0.3.x + LangGraph 0.2.x
- Model: claude-sonnet-4-6 via langchain-anthropic
- Tracing: LangSmith (LANGCHAIN_TRACING_V2=true) for all production runs
- State: TypedDict with explicit Annotated[list, add_messages] for conversation history
- Tools: define with @tool decorator; always add type hints + docstring for schema
- Human-in-loop: interrupt_before nodes for high-stakes actions
- Checkpointer: use MemorySaver for dev, PostgresSaver for production
LangGraph: Stateful Agent
# agents/order_agent.py — LangGraph agent with tool calling
from typing import Annotated, TypedDict
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.tools import tool
from langgraph.graph import StateGraph, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver
# State: what persists across nodes in the graph
class AgentState(TypedDict):
    """Shared graph state threaded through every node in the agent graph.

    Nodes return partial updates (dicts keyed by these fields); the
    `add_messages` reducer on `messages` appends rather than replaces.
    """

    messages: Annotated[list, add_messages]  # Thread history, auto-appended
    customer_id: str  # Customer this conversation thread belongs to
    order_id: str | None  # Order currently under discussion, if any
# Tools: these become Claude's function calls
@tool
def search_orders(customer_id: str, status: str | None = None) -> list[dict]:
    """Search orders for a customer. Returns a list of orders with id, status, and total."""
    # NOTE(review): `db` is never imported in this snippet — presumably a
    # module-level data-access client; confirm where it comes from.
    # The docstring above doubles as the tool description Claude sees.
    return db.search_orders(customer_id=customer_id, status=status)
@tool
def get_order_details(order_id: str) -> dict:
    """Get full details for a specific order including items, tracking, and timeline."""
    # A falsy lookup result (missing order) maps to a structured error payload
    # so Claude can relay the failure instead of the graph raising.
    order = db.get_order(order_id)
    return order or {"error": f"Order {order_id} not found"}
@tool
def request_cancellation(order_id: str, reason: str) -> dict:
    """Request cancellation of an order. Only for orders in 'pending' or 'processing' status."""
    cancellable_statuses = ("pending", "processing")

    order = db.get_order(order_id)
    if not order:
        return {"error": "Order not found"}

    status = order["status"]
    if status not in cancellable_statuses:
        return {"error": f"Cannot cancel order in '{status}' status"}

    # Create cancellation request — actual cancellation requires human approval
    ticket = db.create_cancellation_request(order_id, reason)
    return {"ticket_id": ticket["id"], "message": "Cancellation request submitted, pending approval"}
# Tool registry shared by the executor node and the model binding — keep the
# two in sync so Claude can only request tools the graph can actually run.
TOOLS = [search_orders, get_order_details, request_cancellation]
tool_node = ToolNode(TOOLS)
# bind_tools exposes each @tool's schema (name, type hints, docstring) to Claude.
llm = ChatAnthropic(model="claude-sonnet-4-6").bind_tools(TOOLS)
# Prepended to every LLM call by call_llm; not stored in graph state.
SYSTEM_PROMPT = """You are a helpful customer support agent for an e-commerce store.
Help customers with order status, tracking, and cancellation requests.
Always look up the actual order data before responding — don't guess.
Be concise and empathetic."""
# Graph nodes
def call_llm(state: AgentState) -> dict:
    """Invoke Claude on the running conversation, prefixed with the system prompt."""
    convo = [SystemMessage(content=SYSTEM_PROMPT), *state["messages"]]
    # Returning a dict keyed by "messages" lets the add_messages reducer
    # append the reply to the thread history instead of replacing it.
    return {"messages": [llm.invoke(convo)]}
def should_continue(state: AgentState) -> str:
    """Conditional edge: run tools when the last reply requested them, else stop."""
    latest = state["messages"][-1]
    # getattr covers message types that have no tool_calls attribute at all,
    # matching the original hasattr-plus-truthiness check.
    return "tools" if getattr(latest, "tool_calls", None) else END
# Build graph: llm -> (tools -> llm)* -> END
workflow = StateGraph(AgentState)
workflow.add_node("llm", call_llm)
workflow.add_node("tools", tool_node)
workflow.set_entry_point("llm")
# should_continue returns either "tools" (a node name) or END, so no explicit
# path map is needed for this conditional edge.
workflow.add_conditional_edges("llm", should_continue)
workflow.add_edge("tools", "llm")  # After tools, go back to LLM
# Compile with memory checkpointer (stores thread history)
# NOTE(review): MemorySaver is in-process only — per the CLAUDE.md above,
# swap in PostgresSaver for production.
checkpointer = MemorySaver()
agent = workflow.compile(checkpointer=checkpointer)
Human-in-the-Loop
# agents/order_agent_hil.py — interrupt before high-stakes actions
from langgraph.checkpoint.memory import MemorySaver

# Compile with interrupt: pause before executing these nodes.
# A checkpointer is required for interrupts — the paused run is resumed
# from the stored checkpoint for the same thread_id.
agent_with_interrupt = workflow.compile(
    checkpointer=MemorySaver(),
    interrupt_before=["tools"],  # Show tool call to human before executing
)
def run_with_human_approval(customer_message: str, thread_id: str) -> None:
    """Run the agent, pausing for human approval before any tool executes.

    The graph is compiled with interrupt_before=["tools"], so streaming stops
    right before the tools node runs. The pending tool call is shown to a
    human. On approval the run resumes from the checkpoint and the tool
    executes. On rejection we answer the tool call *as the tools node* with a
    denial ToolMessage, then resume so the LLM can respond to the refusal.

    Args:
        customer_message: The customer's incoming message.
        thread_id: Conversation thread key for the checkpointer.
    """
    from langchain_core.messages import ToolMessage  # not in the file-top imports

    config = {"configurable": {"thread_id": thread_id}}
    # Run until the interrupt (or to completion if no tool call is made).
    events = list(agent_with_interrupt.stream(
        {"messages": [HumanMessage(content=customer_message)]},
        config,
        stream_mode="values",
    ))
    last_state = events[-1]
    pending = last_state["messages"][-1]

    if not (hasattr(pending, "tool_calls") and pending.tool_calls):
        # No tool requested — the model answered directly; nothing to approve.
        print(pending.content)
        return

    tool_call = pending.tool_calls[0]
    # Show to human for approval
    print(f"Agent wants to call: {tool_call['name']}")
    print(f"With args: {tool_call['args']}")
    approved = input("Approve? [y/N] ").lower() == 'y'

    if not approved:
        # Fix: the original injected a HumanMessage and never resumed — and
        # resuming after that would STILL execute the rejected tool, because
        # the interrupt sits *before* the tools node. Instead, satisfy the
        # pending tool call with a denial ToolMessage written as the tools
        # node, so resumption skips straight to the LLM.
        agent_with_interrupt.update_state(
            config,
            {"messages": [ToolMessage(
                content="Action rejected by the human operator. Do not retry it.",
                tool_call_id=tool_call["id"],
            )]},
            as_node="tools",
        )

    # Resume from the checkpoint: approved runs execute the tool; rejected
    # runs continue at the LLM node with the denial in context.
    for event in agent_with_interrupt.stream(None, config, stream_mode="values"):
        print(event["messages"][-1].content)
Multi-Agent Graph
# agents/multi_agent.py — orchestrator + specialist agents
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import StateGraph, END
# Specialist agents — the factories are defined elsewhere; presumably each
# returns a compiled LangGraph agent compatible with AgentState (confirm).
billing_agent = create_billing_agent()
shipping_agent = create_shipping_agent()
returns_agent = create_returns_agent()
def route_to_specialist(state: dict) -> str:
    """Classify the request and route to the right agent."""
    # Cheap, fast model is enough for one-word classification.
    classifier = ChatAnthropic(model="claude-haiku-4-5")
    response = classifier.invoke([
        SystemMessage(content="Classify this customer message. Reply with ONE word: billing, shipping, returns, or general"),
        HumanMessage(content=state["messages"][-1].content),
    ])
    category = response.content.strip().lower()
    # Dispatch table replaces the if/elif chain; any unrecognized label falls
    # back to the general agent, matching the original else branch.
    routes = {
        "billing": "billing_agent",
        "shipping": "shipping_agent",
        "returns": "returns_agent",
    }
    return routes.get(category, "general_agent")
supervisor = StateGraph(AgentState)
# Fix: the original registered route_to_specialist as the "router" node and
# then routed on state["next_agent"] — but a node must return a state update
# (route_to_specialist returns a route string), and nothing ever set
# "next_agent", so the lambda raised KeyError on every run. Use a no-op
# passthrough node and let route_to_specialist itself be the conditional-edge
# function, which is exactly what its return values are shaped for.
supervisor.add_node("router", lambda state: {})
supervisor.add_node("billing_agent", billing_agent)
supervisor.add_node("shipping_agent", shipping_agent)
supervisor.add_node("returns_agent", returns_agent)
supervisor.set_entry_point("router")
# Router conditionally edges to each specialist. There is no dedicated
# general_agent node, so "general" requests end the run (the original mapping
# omitted that route entirely, which would also have errored at runtime).
supervisor.add_conditional_edges(
    "router",
    route_to_specialist,
    {
        "billing_agent": "billing_agent",
        "shipping_agent": "shipping_agent",
        "returns_agent": "returns_agent",
        "general_agent": END,
    },
)
# Specialists are terminal in this graph.
supervisor.add_edge("billing_agent", END)
supervisor.add_edge("shipping_agent", END)
supervisor.add_edge("returns_agent", END)
For the Anthropic SDK tool use patterns that underpin LangGraph’s tool nodes, the Anthropic SDK guide covers the raw tool use API. For the LLM evaluation framework that tests LangGraph agent quality, the LLM evals guide covers agent output evaluation patterns. The Claude Skills 360 bundle includes LangGraph skill sets covering state definitions, conditional routing, human-in-the-loop, and multi-agent graphs. Start with the free tier to try LangGraph agent generation.