Python’s logging stdlib is the standard structured logging foundation. import logging. basicConfig: logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(name)s: %(message)s"). Logger: log = logging.getLogger(__name__). log levels: log.debug/info/warning/error/critical("msg"). exception: log.exception("error occurred") — includes traceback. extra: log.info("user login", extra={"user_id": uid}). Lazy: log.debug("result: %s", value) — %-style args defer string formatting until a handler actually emits the record (note: the arguments themselves are still evaluated at call time); use % not f-string. StreamHandler: h = logging.StreamHandler(); h.setLevel(logging.DEBUG). FileHandler: h = logging.FileHandler("app.log"). RotatingFileHandler: from logging.handlers import RotatingFileHandler; h = RotatingFileHandler("app.log", maxBytes=10*1024*1024, backupCount=5). TimedRotating: from logging.handlers import TimedRotatingFileHandler; h = TimedRotatingFileHandler("app.log", when="midnight", backupCount=30). Formatter: fmt = logging.Formatter("%(asctime)s %(levelname)-8s %(name)s: %(message)s"). setFormatter: h.setFormatter(fmt). addHandler: log.addHandler(h). propagate: log.propagate = False — stop bubbling to root. dictConfig: logging.config.dictConfig(config_dict). fileConfig: logging.config.fileConfig("logging.ini"). Filter: subclass with filter(record) -> bool. LoggerAdapter: adapter = logging.LoggerAdapter(log, {"request_id": rid}). captureWarnings: logging.captureWarnings(True). QueueHandler: offload record emission to a QueueListener thread so handlers' blocking I/O never stalls the logging call site. Claude Code generates logging configurations, JSON formatters, rotating handlers, and request-context adapters.
CLAUDE.md for logging
## logging Stack
- Stdlib: import logging | log = logging.getLogger(__name__)
- Setup: setup_logging(level="INFO", json=True) in app entry point
- Use: log.info("msg") | log.error("msg", exc_info=True) | log.debug("val: %s", val)
- Never: log.debug(f"val: {expensive_fn()}") # always use % lazy formatting
- JSON: custom Formatter that outputs json.dumps(record_dict) per line
- Rotate: RotatingFileHandler(path, maxBytes=10MB, backupCount=5)
logging Configuration Pipeline
# app/log_config.py — logging setup, JSON formatter, rotating handler, context adapter
from __future__ import annotations

import json
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
# ── Context variable for request-scoped log fields ────────────────────────────
_log_context: ContextVar[dict] = ContextVar("log_context", default={})
# ─────────────────────────────────────────────────────────────────────────────
# 1. JSON formatter
# ─────────────────────────────────────────────────────────────────────────────
class JsonFormatter(logging.Formatter):
"""
Emit a single JSON object per log line.
Includes timestamp, level, logger name, message, and any extra fields.
Context vars from _log_context are merged automatically.
Example output:
{"ts":"2024-01-15T10:30:00.123Z","level":"INFO","logger":"app","msg":"user login","user_id":42}
"""
RESERVED = frozenset(logging.LogRecord(
"n", 0, "p", 0, "", [], None
).__dict__.keys())
def format(self, record: logging.LogRecord) -> str:
log_dict: dict[str, Any] = {
"ts": self.formatTime(record, self.datefmt or "%Y-%m-%dT%H:%M:%S.%f"),
"level": record.levelname,
"logger": record.name,
"msg": record.getMessage(),
}
# Merge thread-local context
log_dict.update(_log_context.get({}))
# Extra fields passed via extra={} parameter
for key, val in record.__dict__.items():
if key not in self.RESERVED and not key.startswith("_"):
log_dict[key] = val
# Exception info
if record.exc_info:
log_dict["exc"] = self.formatException(record.exc_info)
if record.stack_info:
log_dict["stack"] = self.formatStack(record.stack_info)
return json.dumps(log_dict, default=str)
# ─────────────────────────────────────────────────────────────────────────────
# 2. Setup helpers
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class LogConfig:
    """Declarative settings consumed by setup_logging()."""
    level: str = "INFO"  # root logger level name, e.g. "DEBUG"
    json: bool = False  # True -> JsonFormatter output, False -> plain text Formatter
    console: bool = True  # attach a StreamHandler writing to stdout
    file: str | None = None  # log file path; None disables file logging
    max_bytes: int = 10 * 1024 * 1024  # 10 MB rotation threshold for RotatingFileHandler
    backup_count: int = 5  # number of rotated files kept alongside the live log
    third_party_level: str = "WARNING"  # level applied to known noisy library loggers
    capture_warnings: bool = True  # route warnings.warn() output into logging
def setup_logging(cfg: LogConfig | None = None, **kwargs) -> None:
    """
    Configure the root logger with console and/or file handlers.
    Call once at application startup; safe to call again to reconfigure.
    Example:
        setup_logging(LogConfig(level="DEBUG", json=True, file="logs/app.log"))
        setup_logging(level="INFO", json=True)  # via kwargs
    """
    if cfg is None:
        # Keep only kwargs that are actual LogConfig fields. __dataclass_fields__
        # is stricter than hasattr(), which would also match methods/class attrs.
        cfg = LogConfig(**{k: v for k, v in kwargs.items()
                           if k in LogConfig.__dataclass_fields__})
    root = logging.getLogger()
    root.setLevel(cfg.level)
    # Detach AND close existing handlers — clear() alone leaks open file
    # descriptors when setup_logging() is called more than once.
    for old in root.handlers[:]:
        root.removeHandler(old)
        old.close()
    if cfg.json:
        formatter: logging.Formatter = JsonFormatter()
    else:
        formatter = logging.Formatter(
            "%(asctime)s %(levelname)-8s %(name)-30s %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    if cfg.console:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        root.addHandler(handler)
    if cfg.file:
        log_path = Path(cfg.file)
        log_path.parent.mkdir(parents=True, exist_ok=True)  # ensure log dir exists
        file_handler = logging.handlers.RotatingFileHandler(
            str(log_path),
            maxBytes=cfg.max_bytes,
            backupCount=cfg.backup_count,
            encoding="utf-8",
        )
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
    # Quiet noisy third-party loggers
    for noisy in ("urllib3", "boto3", "botocore", "httpcore", "httpx", "asyncio"):
        logging.getLogger(noisy).setLevel(cfg.third_party_level)
    if cfg.capture_warnings:
        logging.captureWarnings(True)  # route warnings.warn() through logging
def setup_logging_from_env() -> None:
    """
    Configure logging from environment variables.
    Recognized variables: LOG_LEVEL, LOG_JSON, LOG_FILE.
    Example:
        LOG_LEVEL=DEBUG LOG_JSON=true LOG_FILE=logs/app.log python app.py
    """
    level_name = os.getenv("LOG_LEVEL", "INFO").upper()
    # LOG_JSON accepts the usual truthy spellings; anything else means False.
    json_flag = os.getenv("LOG_JSON", "false").lower() in ("1", "true", "yes")
    setup_logging(LogConfig(level=level_name, json=json_flag, file=os.getenv("LOG_FILE")))
# ─────────────────────────────────────────────────────────────────────────────
# 3. Context adapter
# ─────────────────────────────────────────────────────────────────────────────
class ContextLogger(logging.LoggerAdapter):
    """
    Logger adapter that merges a static context dict into every record.
    Call-site extra={} keys win over the adapter's static context on collision.
    Example:
        request_log = ContextLogger(log, {"request_id": req_id, "user_id": user.id})
        request_log.info("Processing started")
        # Emits: ... request_id=abc123 user_id=42 Processing started
    """
    def process(self, msg: str, kwargs: dict) -> tuple[str, dict]:
        # self.extra may be None when the adapter was constructed without a
        # context dict (allowed by LoggerAdapter); guard so merging never raises.
        extra = {**(self.extra or {}), **kwargs.pop("extra", {})}
        kwargs["extra"] = extra
        return msg, kwargs
@contextmanager
def log_context(**fields):
    """
    Temporarily add fields to every log record emitted inside the block.
    Safe across threads and asyncio tasks because the state lives in a
    ContextVar rather than in module globals.
    Example:
        with log_context(request_id="abc123", user_id=42):
            log.info("Starting request")  # includes request_id + user_id
            do_work()
        log.info("Done")
    """
    merged = dict(_log_context.get({}))
    merged.update(fields)
    restore_token = _log_context.set(merged)
    try:
        yield
    finally:
        _log_context.reset(restore_token)
# ─────────────────────────────────────────────────────────────────────────────
# 4. Per-module logger factory
# ─────────────────────────────────────────────────────────────────────────────
def get_logger(name: str, context: dict | None = None) -> logging.Logger | ContextLogger:
    """
    Return the named logger, wrapped in a ContextLogger when context is given.
    Example:
        log = get_logger(__name__)
        log = get_logger(__name__, context={"service": "payments"})
    """
    base = logging.getLogger(name)
    return ContextLogger(base, context) if context else base
# ─────────────────────────────────────────────────────────────────────────────
# 5. dictConfig helper
# ─────────────────────────────────────────────────────────────────────────────
def dictconfig_for_app(
app_name: str,
log_level: str = "INFO",
json: bool = False,
log_file: str | None = None,
) -> dict:
"""
Return a logging dictConfig for use with logging.config.dictConfig().
Example:
logging.config.dictConfig(dictconfig_for_app("myapp", log_level="DEBUG", json=True))
"""
formatter_class = f"{JsonFormatter.__module__}.JsonFormatter" if json else "logging.Formatter"
formatter_fmt = None if json else "%(asctime)s %(levelname)-8s %(name)s: %(message)s"
handlers = {
"console": {
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
"formatter": "default",
}
}
if log_file:
handlers["file"] = {
"class": "logging.handlers.RotatingFileHandler",
"filename": log_file,
"maxBytes": 10 * 1024 * 1024,
"backupCount": 5,
"encoding": "utf-8",
"formatter": "default",
}
config: dict = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"()": formatter_class,
**({"format": formatter_fmt} if formatter_fmt else {}),
}
},
"handlers": handlers,
"loggers": {
app_name: {
"level": log_level,
"handlers": list(handlers.keys()),
"propagate": False,
}
},
"root": {
"level": "WARNING",
"handlers": ["console"],
},
}
return config
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Demo script: exercises text logging, JSON logging with request context,
    # and the ContextLogger adapter. Output goes to stdout.
    import io  # local import: only the demo needs it
    print("=== logging demo ===")
    # 1. Text format
    setup_logging(LogConfig(level="DEBUG", json=False, console=True))
    log = logging.getLogger("demo")
    log.debug("debug message")
    log.info("info message")
    log.warning("warning message")
    print()
    # 2. JSON format (capture to string for display)
    buf = io.StringIO()
    root = logging.getLogger()
    # Swap the console handler for an in-memory JSON handler so the JSON lines
    # can be parsed and pretty-printed below instead of going to stdout.
    root.handlers.clear()
    h = logging.StreamHandler(buf)
    h.setFormatter(JsonFormatter())
    root.addHandler(h)
    with log_context(request_id="req-abc123", user_id=42):
        log.info("Processing order", extra={"order_id": 999, "amount": 49.99})
        try:
            1 / 0
        except ZeroDivisionError:
            # log.exception attaches exc_info -> JsonFormatter emits an "exc" key
            log.exception("Unexpected error during processing")
    print("JSON log lines:")
    for line in buf.getvalue().splitlines():
        parsed = json.loads(line)
        print(f" level={parsed['level']:8s} msg={parsed['msg']!r}"
        f" request_id={parsed.get('request_id','?')} "
        f"{'exc=...' if 'exc' in parsed else ''}")
    # 3. ContextLogger
    # Restore console logging before demonstrating the adapter.
    root.handlers.clear()
    setup_logging(LogConfig(level="INFO", json=False, console=True))
    svc_log = ContextLogger(logging.getLogger("service"), {"component": "payments"})
    svc_log.info("Payment processed", extra={"payment_id": "pay_123"})
    print("\n=== done ===")
For the loguru alternative — loguru provides a single pre-configured logger object with color output, automatic exception formatting, rotation/compression in one call, and decorator-based function tracing — no manual handler/formatter setup required; Python’s stdlib logging requires explicit handler, formatter, and level configuration but is universally available, integrates with all third-party tools, and lets you use logging.getLogger(__name__) across every module without importing custom logger instances — use loguru for scripts and personal projects where zero-config beauty matters, stdlib logging for production applications, libraries, and any code that needs to interoperate with Flask, Django, FastAPI, Celery, and other frameworks that hook into the logging hierarchy. For the structlog alternative — structlog is a third-party structured logging library that processes log events through a pipeline of processors (JSON serialization, timestamp injection, exception formatting), supports both stdlib logging and standalone operation, and makes adding key-value context via log.bind(key=val) natural; stdlib logging uses extra={} for ad-hoc fields — use structlog when structured, context-bound logging is a first-class concern (distributed systems, event-driven apps), stdlib logging when you need universal compatibility and the JsonFormatter pattern above is sufficient. The Claude Skills 360 bundle includes logging skill sets covering JsonFormatter, LogConfig/setup_logging()/setup_logging_from_env(), ContextLogger adapter, log_context() contextvar-based request scoping, get_logger() factory, and dictconfig_for_app() production configuration. Start with the free tier to try logging configuration and structured observability code generation.