structlog emits structured, context-rich logs. pip install structlog. Configure: import structlog; structlog.configure(processors=[...], wrapper_class=structlog.BoundLogger, context_class=dict, logger_factory=structlog.PrintLoggerFactory()). Get logger: log = structlog.get_logger(). Log: log.info("user_created", user_id=42, email="[email protected]"). Bind: log = log.bind(request_id="abc123"). All subsequent calls include request_id. log.unbind("request_id"). log.try_unbind("x") — no error if absent. Processors: from structlog.processors import JSONRenderer, TimeStamper, add_log_level, format_exc_info, UnicodeDecoder. stdlib integration: structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger) — structlog wraps stdlib. structlog.stdlib.filter_by_level — honour stdlib level. structlog.stdlib.add_logger_name — adds logger name. contextvars: from structlog.contextvars import bind_contextvars, clear_contextvars, merge_contextvars. FastAPI middleware: bind_contextvars(request_id=str(uuid4())) at start of request. clear_contextvars() after. Async: from structlog import get_logger; log = get_logger(); await log.ainfo("msg", key=val). AsyncBoundLogger: structlog.configure(wrapper_class=structlog.stdlib.AsyncBoundLogger). Testing: from structlog.testing import capture_logs. with capture_logs() as cap: log.info("hello", x=1); assert cap[0]["event"] == "hello" and cap[0]["x"] == 1. ProcessorFormatter for Django/Flask stdlib integration. ExceptionRenderer — pluggable exception formatting; use structlog.processors.dict_tracebacks (ExceptionRenderer with ExceptionDictTransformer) for a structured exception dict instead of a traceback string. CallsiteParameterAdder — adds filename, lineno, function_name automatically. Claude Code generates structlog configurations, middleware context propagation, and capture_logs test assertions.
CLAUDE.md for structlog
## structlog Stack
- Version: structlog >= 24.1 | pip install structlog
- Configure: structlog.configure(processors=[...]) — call once at app startup
- Logger: log = structlog.get_logger().bind(service="api") — immutable context
- Context: bind_contextvars(request_id=...) — automatic ContextVar propagation
- Production: JSONRenderer() processor — machine-readable one-line JSON
- Dev: ConsoleRenderer() — colorised human-readable output
- Test: with capture_logs() as cap: ... assert cap[0]["event"] == "..."
structlog Logging Pipeline
# app/logging_setup.py — structlog configuration
from __future__ import annotations
import logging
import sys
import uuid
from contextlib import asynccontextmanager
from typing import Any
import structlog
from structlog.contextvars import (
bind_contextvars,
clear_contextvars,
merge_contextvars,
)
from structlog.processors import (
CallsiteParameter,
CallsiteParameterAdder,
ExceptionRenderer,
JSONRenderer,
TimeStamper,
UnicodeDecoder,
add_log_level,
)
from structlog.stdlib import add_logger_name, filter_by_level
from structlog.testing import capture_logs
# ─────────────────────────────────────────────────────────────────────────────
# 1. Configure structlog — call once at app startup
# ─────────────────────────────────────────────────────────────────────────────
def configure_logging(
    log_level: str = "INFO",
    json_output: bool = True,
) -> None:
    """
    Set up structlog with a stdlib bridge so legacy `logging.getLogger()`
    calls also emit structured JSON.

    Args:
        log_level: stdlib level name ("DEBUG", "INFO", ...), case-insensitive.
        json_output: True renders one-line JSON (production); False renders
            colorised console output (development).

    Processor order matters:
        1. filter_by_level   — drop below-threshold log calls early (fast)
        2. merge_contextvars — merge ContextVar-bound fields (request_id etc.)
                               before later processors read or add keys
        3. add_logger_name   — from stdlib, adds "logger" key
        4. add_log_level     — adds "level" key
        5. TimeStamper       — adds "timestamp" ISO key
        6. CallsiteParameterAdder — adds filename, lineno, func_name
        7. dict_tracebacks   — convert exc_info to a structured dict
        8. UnicodeDecoder    — ensure strings
        9. renderer          — final step: JSON or console string
    """
    level = getattr(logging, log_level.upper())
    shared_processors = [
        filter_by_level,
        merge_contextvars,
        add_logger_name,
        add_log_level,
        TimeStamper(fmt="iso"),
        CallsiteParameterAdder(
            [
                CallsiteParameter.FILENAME,
                CallsiteParameter.LINENO,
                CallsiteParameter.FUNC_NAME,
            ]
        ),
        # dict_tracebacks == ExceptionRenderer(ExceptionDictTransformer()):
        # renders exc_info as a structured dict.  A bare ExceptionRenderer()
        # would render a flat traceback *string*, contradicting the contract
        # documented above.
        structlog.processors.dict_tracebacks,
        UnicodeDecoder(),
    ]
    renderer = JSONRenderer() if json_output else structlog.dev.ConsoleRenderer(colors=True)
    structlog.configure(
        processors=shared_processors + [renderer],
        wrapper_class=structlog.stdlib.BoundLogger,
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=True,
    )
    # Route stdlib logging through structlog.  force=True removes any
    # previously installed root handlers first — without it, basicConfig is
    # a silent no-op on reconfiguration, so switching dev <-> prod settings
    # at runtime would never take effect.
    logging.basicConfig(
        format="%(message)s",
        stream=sys.stdout,
        level=level,
        force=True,
    )
    # Align every already-created stdlib logger with the requested level.
    for name in logging.root.manager.loggerDict:
        logging.getLogger(name).setLevel(level)
# Development configuration — pretty console output
def configure_dev_logging() -> None:
    """Development preset: DEBUG level with colorised console rendering."""
    configure_logging(json_output=False, log_level="DEBUG")
# Production configuration — JSON for log aggregators
def configure_prod_logging() -> None:
    """Production preset: INFO level with machine-readable JSON rendering."""
    configure_logging(json_output=True, log_level="INFO")
# ─────────────────────────────────────────────────────────────────────────────
# 2. Module-level logger usage
# ─────────────────────────────────────────────────────────────────────────────
# Module-level logger — a lazy proxy that binds to whatever configuration
# is active at the time of its first log call.
log = structlog.get_logger(__name__)
class UserService:
def __init__(self) -> None:
# Bind service context — all log calls include service="users"
self._log = structlog.get_logger().bind(service="users")
def create_user(self, email: str, role: str = "user") -> dict:
self._log.info("creating_user", email=email, role=role)
try:
user = {"id": 1, "email": email, "role": role}
self._log.info("user_created", user_id=user["id"], email=email)
return user
except Exception as exc:
self._log.exception("user_creation_failed", email=email)
raise
def delete_user(self, user_id: int) -> None:
bound = self._log.bind(user_id=user_id)
bound.info("deleting_user")
bound.info("user_deleted")
def get_user(self, user_id: int) -> dict | None:
self._log.debug("fetching_user", user_id=user_id)
if user_id == 1:
return {"id": 1, "email": "[email protected]"}
self._log.warning("user_not_found", user_id=user_id)
return None
# ─────────────────────────────────────────────────────────────────────────────
# 3. contextvars — automatic request context propagation
# ─────────────────────────────────────────────────────────────────────────────
def process_request(path: str, method: str) -> None:
    """
    Handle one request with ContextVar-scoped logging context.

    bind_contextvars() stores the fields in a ContextVar, so every log call
    made while this request is handled automatically carries request_id,
    http_method, and http_path — no logger object is passed around.

    NOTE(review): a failure is logged and swallowed rather than re-raised —
    confirm this best-effort behavior is intended.
    """
    request_ctx = {
        "request_id": str(uuid.uuid4()),
        "http_method": method,
        "http_path": path,
    }
    clear_contextvars()
    bind_contextvars(**request_ctx)
    try:
        log.info("request_started")
        # ... handle request ...
        log.info("request_completed", status_code=200)
    except Exception:
        log.exception("request_failed", status_code=500)
    finally:
        # Always drop the request context so it cannot leak elsewhere.
        clear_contextvars()
# ─────────────────────────────────────────────────────────────────────────────
# 4. FastAPI / Starlette middleware
# ─────────────────────────────────────────────────────────────────────────────
try:
    from fastapi import FastAPI, Request, Response
    from fastapi.routing import APIRouter
    import time
    demo_app = FastAPI(lifespan=None)
    @demo_app.middleware("http")
    async def structlog_middleware(request: Request, call_next) -> Response:
        """
        Bind request context for every log call during this request.
        clear_contextvars() in the finally ensures no leakage between requests.
        """
        clear_contextvars()
        # Honour an upstream X-Request-Id header if a client/proxy set one;
        # otherwise mint a fresh UUID for this request.
        bind_contextvars(
            request_id=request.headers.get("X-Request-Id", str(uuid.uuid4())),
            http_method=request.method,
            http_path=str(request.url.path),
            http_host=request.headers.get("host", ""),
        )
        start_ns = time.perf_counter_ns()
        try:
            response = await call_next(request)
            # Attach response metadata before emitting the access-log event.
            bind_contextvars(
                status_code=response.status_code,
                duration_ms=round((time.perf_counter_ns() - start_ns) / 1e6, 2),
            )
            log.info("http_request")
            return response
        except Exception:
            # Log with traceback, then let FastAPI's error handling take over.
            log.exception("http_request_error")
            raise
        finally:
            clear_contextvars()
    @demo_app.get("/health")
    async def health() -> dict:
        log.debug("health_check_called")
        return {"status": "ok"}
    @demo_app.get("/users/{user_id}")
    async def get_user(user_id: int) -> dict:
        svc = UserService()
        user = svc.get_user(user_id)
        if user is None:
            from fastapi import HTTPException
            raise HTTPException(status_code=404, detail="Not found")
        return user
except ImportError:
    # FastAPI is optional — the rest of this module works without it.
    demo_app = None  # type: ignore[assignment]
# ─────────────────────────────────────────────────────────────────────────────
# 5. Testing — capture_logs for assertion-friendly log checking
# ─────────────────────────────────────────────────────────────────────────────
def test_example_capture() -> None:
    """
    capture_logs() intercepts log calls and hands them back as a list of
    dicts — no configuration or sink setup required; it works independently
    of configure().
    """
    svc = UserService()
    with capture_logs() as cap:
        svc.create_user("[email protected]", role="admin")
    # Each captured entry is one dict per log call.
    creating = [entry for entry in cap if entry["event"] == "creating_user"]
    assert len(creating) == 1
    first = creating[0]
    assert first["email"] == "[email protected]"
    assert first["role"] == "admin"
    assert sum(1 for entry in cap if entry["event"] == "user_created") == 1
    print(f"Captured {len(cap)} log entries:")
    for entry in cap:
        print(f"  [{entry.get('log_level','?')}] {entry['event']} {dict(entry)}")
def test_warning_on_missing_user() -> None:
    """get_user() on an unknown id returns None and emits exactly one warning."""
    svc = UserService()
    with capture_logs() as cap:
        result = svc.get_user(999)
    assert result is None
    warning_entries = [entry for entry in cap if entry.get("log_level") == "warning"]
    assert len(warning_entries) == 1
    assert warning_entries[0]["user_id"] == 999
def test_context_variables_in_logs() -> None:
    """
    Assert that ContextVar-bound fields show up in captured log entries.

    capture_logs() replaces the *entire* processor chain with a bare
    LogCapture, so merge_contextvars never runs and ContextVar-bound fields
    (request_id here) would be missing from the captured dicts — the naive
    `with capture_logs()` version raises KeyError.  Instead, configure a
    LogCapture *behind* merge_contextvars for the duration of the test, and
    use a fresh logger so a cached module-level logger (see
    cache_logger_on_first_use=True in configure_logging) cannot keep the old
    processor chain alive.
    """
    clear_contextvars()
    bind_contextvars(request_id="req-test-001", service="test")
    cap = structlog.testing.LogCapture()
    previous = structlog.get_config()
    try:
        structlog.configure(processors=[merge_contextvars, cap])
        # Fresh lazy logger: binds to the capture config on first use.
        structlog.get_logger().info("test_event", key="value")
    finally:
        # Restore whatever configuration was active before the test.
        structlog.configure(**previous)
        clear_contextvars()
    assert cap.entries[0]["request_id"] == "req-test-001"
    assert cap.entries[0]["key"] == "value"
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Demo run: pretty console output, a few service calls, then the tests.
    configure_dev_logging()
    service = UserService()
    service.create_user("[email protected]", role="admin")
    service.get_user(999)  # triggers warning
    bind_contextvars(request_id="req-demo-001")
    log.info("demo_complete", status="ok")
    clear_contextvars()
    # Tests
    for check in (
        test_example_capture,
        test_warning_on_missing_user,
        test_context_variables_in_logs,
    ):
        check()
    print("All structlog tests passed.")
For the logging.getLogger() alternative — the stdlib logging module formats messages as unstructured strings: logging.info("User %s created with role %s", user_id, role) produces a text line that a log aggregator must parse with a regex, while log.info("user_created", user_id=user_id, role=role) with structlog’s JSONRenderer produces {"event":"user_created","user_id":42,"role":"admin","timestamp":"...","level":"info"} — a JSON object that Datadog, Loki, or Splunk can index, filter, and alert on without parsing. For the loguru alternative — loguru’s logger.info("msg {key}", key=val) format-string API is convenient but doesn’t produce structured dicts for downstream processing unless you add a custom sink, while structlog’s processor pipeline is composable: merge_contextvars automatically injects the request’s ContextVar-bound fields (request_id, user_id, trace_id) into every log call in that async task without passing any object around. The Claude Skills 360 bundle includes structlog skill sets covering configure() processor chains, JSONRenderer vs ConsoleRenderer, TimeStamper/add_log_level/add_logger_name, merge_contextvars and bind_contextvars for request context, ExceptionRenderer for structured exceptions, CallsiteParameterAdder for source location, FastAPI middleware integration, stdlib logging bridge, capture_logs for unit testing, and BoundLogger.bind for service-level context. Start with the free tier to try structured logging code generation.