Loguru is a Python logging library that makes logging simple and powerful with zero configuration. pip install loguru. from loguru import logger. Basic: logger.info("Server started on port {port}", port=8080). Levels: logger.debug/info/warning/error/critical("msg"). Exception: logger.exception("DB failed") — auto-captures stack. File sink: logger.add("app.log", rotation="10 MB", retention="30 days", compression="gz"). Daily rotation: logger.add("logs/{time:YYYY-MM-DD}.log", rotation="00:00"). JSON logs: logger.add("logs/json.log", serialize=True). Format: logger.add(sys.stderr, format="{time:HH:mm:ss} | {level:<8} | {name}:{line} | {message}"). Bind context: log = logger.bind(request_id="abc", user_id=42), log.info("Processing"). Opt: logger.opt(lazy=True).debug("Heavy: {x}", x=lambda: expensive()). logger.opt(depth=1).info("Called from caller"). Catch decorator: @logger.catch — wraps function, logs exceptions. Context: with logger.catch(): risky_code(). Filter: logger.add("errors.log", level="ERROR"). Filter fn: logger.add(sink, filter=lambda r: "DB" in r["name"]). Remove: logger.remove() — removes default stderr sink. Intercept stdlib: subclass logging.Handler, call logger.opt(depth=6).log(record.levelname, record.getMessage()). enqueue: logger.add("app.log", enqueue=True) — thread-safe, async-safe. Colorize: logger.add(sys.stderr, colorize=True). Claude Code generates Loguru structured logging setups, JSON log pipelines, and exception capture handlers.
CLAUDE.md for Loguru
## Loguru Stack
- Version: loguru >= 0.7
- Import: from loguru import logger — singleton, ready to use
- Sinks: logger.add(path/sink, level, format, rotation, retention, serialize)
- Context: logger.bind(key=value) — returns scoped logger with fields
- Exceptions: @logger.catch | with logger.catch() | logger.exception()
- JSON: logger.add(path, serialize=True) for structured log output
- Intercept: subclass logging.Handler to redirect stdlib logs to Loguru
- Async: logger.add(..., enqueue=True) for non-blocking writes
Loguru Logging Pipeline
# observability/loguru_pipeline.py — structured logging with Loguru
from __future__ import annotations
import logging
import sys
import time
import traceback
from contextlib import contextmanager
from functools import wraps
from pathlib import Path
from typing import Any, Callable
from loguru import logger
# ── 0. Setup functions ────────────────────────────────────────────────────────
def setup_logging(
    log_dir: str = "logs",
    app_name: str = "app",
    level: str = "INFO",
    json_enabled: bool = True,
    stderr_enabled: bool = True,
    rotation: str = "50 MB",
    retention: str = "14 days",
    compression: str = "gz",
    enqueue: bool = True,
) -> None:
    """
    Install the application's Loguru sinks.

    Sinks created (in order): colorized stderr (optional), a rotating
    human-readable file, a rotating JSON file (optional), and an
    ERROR-and-above file kept longer for alerting.

    Call exactly once at application startup.  With enqueue=True each file
    sink writes through a background queue, making it non-blocking and
    async-safe.
    """
    # Drop Loguru's built-in stderr handler so only our sinks remain.
    logger.remove()
    base_dir = Path(log_dir)
    base_dir.mkdir(parents=True, exist_ok=True)
    # Colorized, human-oriented console output.
    if stderr_enabled:
        console_format = (
            "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
            "<level>{level:<8}</level> | "
            "<cyan>{name}</cyan>:<cyan>{line}</cyan> | "
            "{message}"
        )
        logger.add(
            sys.stderr,
            level=level,
            colorize=True,
            format=console_format,
            backtrace=True,
            diagnose=True,
        )
    # Plain-text rotating file.  diagnose stays False so local variable
    # values are never written to disk (security).
    logger.add(
        base_dir / f"{app_name}.log",
        level=level,
        format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{line} | {message}",
        rotation=rotation,
        retention=retention,
        compression=compression,
        backtrace=True,
        diagnose=False,
        enqueue=enqueue,
    )
    # Newline-delimited JSON for log aggregators (Datadog, Loki, ELK).
    if json_enabled:
        logger.add(
            base_dir / f"{app_name}.json.log",
            level=level,
            serialize=True,  # one JSON object per line
            rotation=rotation,
            retention=retention,
            compression=compression,
            enqueue=enqueue,
        )
    # ERROR-only sink with a longer retention window for alerting/forensics.
    logger.add(
        base_dir / f"{app_name}.errors.log",
        level="ERROR",
        format="{time} | {level} | {name}:{function}:{line} | {message}",
        rotation="10 MB",
        retention="90 days",
        backtrace=True,
        enqueue=enqueue,
    )
    logger.info(
        "Logging configured: level={level}, sinks=[stderr, {app}.log, {app}.json.log, errors.log]",
        level=level, app=app_name,
    )
# ── 1. Intercepting stdlib logging ────────────────────────────────────────────
class InterceptHandler(logging.Handler):
    """
    Redirect all stdlib logging calls (uvicorn, SQLAlchemy, etc.) to Loguru.
    Usage: logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)

    Follows the "intercept standard logging" recipe from the Loguru docs:
    each stdlib LogRecord is re-emitted through the Loguru logger with the
    caller's original file/line attribution preserved via opt(depth=...).
    """
    def emit(self, record: logging.LogRecord) -> None:
        # Map stdlib level to Loguru level
        try:
            # Prefer the level *name* so custom Loguru levels with the same
            # name (colors, icons) are honored.
            level = logger.level(record.levelname).name
        except ValueError:
            # Unknown name (e.g. a custom numeric stdlib level): fall back
            # to the raw numeric level, which Loguru also accepts.
            level = record.levelno
        # Walk up call stack to find the real caller (skip logging internals)
        # so Loguru reports the user's {name}:{line}, not logging/__init__.py.
        frame, depth = logging.currentframe(), 2
        while frame and frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        # exception=record.exc_info forwards any traceback captured by
        # stdlib logging.exception()/exc_info=True calls.
        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
def intercept_stdlib_logging(
    loggers: list[str] | None = None,  # fixed: default None requires Optional
    level: int = logging.DEBUG,
) -> None:
    """
    Route all stdlib logging through Loguru via InterceptHandler.

    Installs the handler on the root logger (force=True replaces any
    pre-existing handlers).  Each name in *loggers* (e.g. "uvicorn",
    "sqlalchemy", "httpx") additionally gets the handler attached directly
    with propagation disabled, so its records are not duplicated through
    the root logger.

    Args:
        loggers: optional stdlib logger names to configure individually.
        level:   minimum stdlib level captured (default: logging.DEBUG).
    """
    handler = InterceptHandler()
    logging.basicConfig(handlers=[handler], level=level, force=True)
    # Also configure named loggers (uvicorn, sqlalchemy, httpx, etc.)
    for name in (loggers or []):
        named = logging.getLogger(name)
        named.handlers = [handler]
        named.propagate = False  # avoid double-emission via the root logger
# ── 2. Contextual logging ─────────────────────────────────────────────────────
def get_request_logger(
    request_id: str,
    user_id: str | int | None = None,
    **extra,
) -> "loguru.Logger":
    """
    Build a child logger carrying per-request context.

    Every record emitted through the returned logger includes request_id,
    user_id (stringified; "anon" when missing/falsy), and any *extra* fields.
    """
    context = {"request_id": request_id, "user_id": str(user_id or "anon")}
    context.update(extra)
    return logger.bind(**context)
@contextmanager
def log_context(**fields):
    """
    Bind *fields* to every log record emitted inside the block.

    Useful for wrapping a request handler or background task.

    Bug fix: the previous implementation installed a GLOBAL patcher via
    logger.configure(patcher=...) and never removed it, so the fields
    leaked into every record logged after the block exited (configure()
    returns sink ids, not a restore token, so there was nothing to undo
    with).  logger.contextualize() is the correct Loguru API: it scopes
    the extra fields to this block, restores the previous context on exit,
    and is task-local under asyncio.

    Yields the logger so callers can use `with log_context(...) as log:`.
    """
    with logger.contextualize(**fields):
        yield logger
# ── 3. Decorators ─────────────────────────────────────────────────────────────
def log_calls(
    level: str = "DEBUG",
    log_args: bool = False,
    log_time: bool = True,
) -> Callable:
    """
    Decorator: log function entry ("→"), exit ("←" with elapsed ms when
    log_time is set), and failures ("✗" with full traceback, re-raised).
    """
    def decorator(fn: Callable) -> Callable:
        @wraps(fn)
        def wrapper(*args, **kwargs):
            qualified = f"{fn.__module__}.{fn.__qualname__}"
            # Entry line, optionally including the call arguments.
            if log_args:
                logger.log(level, "→ {} args={} kwargs={}", qualified, args, kwargs)
            else:
                logger.log(level, "→ {}", qualified)
            started = time.perf_counter()
            try:
                result = fn(*args, **kwargs)
            except Exception:
                ms = (time.perf_counter() - started) * 1000
                # Full traceback via opt(exception=True); exception re-raised.
                logger.opt(exception=True).error("✗ {} failed ({:.1f}ms)", qualified, ms)
                raise
            ms = (time.perf_counter() - started) * 1000
            if log_time:
                logger.log(level, "← {} ({:.1f}ms)", qualified, ms)
            return result
        return wrapper
    return decorator
def retry_with_logging(
    max_attempts: int = 3,
    delay: float = 1.0,
    exceptions: tuple = (Exception,),
) -> Callable:
    """
    Decorator: retry on the given exception types with exponential back-off
    (delay * 2**(attempt-1)), logging a warning per retry and an error when
    the final attempt fails (the last exception is re-raised).
    """
    def decorator(fn: Callable) -> Callable:
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # Attempts are numbered 1..max_attempts.
            for attempt in range(1, max_attempts + 1):
                try:
                    return fn(*args, **kwargs)
                except exceptions as exc:
                    out_of_attempts = attempt == max_attempts
                    if out_of_attempts:
                        logger.error(
                            "{} failed after {} attempts: {}",
                            fn.__qualname__, max_attempts, exc
                        )
                        raise
                    backoff = delay * (2 ** (attempt - 1))
                    logger.warning(
                        "{} attempt {}/{} failed, retrying in {:.1f}s: {}",
                        fn.__qualname__, attempt, max_attempts, backoff, exc
                    )
                    time.sleep(backoff)
        return wrapper
    return decorator
# ── 4. Performance and audit logging ─────────────────────────────────────────
@contextmanager
def timed_block(name: str, level: str = "INFO", **extra):
    """
    Log start/completion (or failure) of a code block with elapsed ms.
    Extra keyword fields are bound onto every record emitted here.
    """
    start = time.perf_counter()
    scoped = logger.bind(**extra)
    scoped.log(level, "{} started", name)
    try:
        yield
    except Exception:
        ms = (time.perf_counter() - start) * 1000
        scoped.error("{} failed after {:.1f}ms", name, ms)
        raise  # caller still sees the exception
    else:
        ms = (time.perf_counter() - start) * 1000
        scoped.log(level, "{} completed in {:.1f}ms", name, ms)
def log_audit_event(
    action: str,
    actor_id: str,
    resource: str,
    resource_id: str,
    outcome: str = "success",
    **details,
) -> None:
    """
    Emit one structured audit record at INFO level.

    The record is tagged with event_type="audit" plus actor/resource/outcome
    fields so a serialize=True sink yields machine-parseable entries; use a
    dedicated JSON sink for audit events in compliance contexts.
    """
    audit_logger = logger.bind(
        event_type="audit",
        actor_id=actor_id,
        resource=resource,
        resource_id=resource_id,
        outcome=outcome,
        **details,
    )
    audit_logger.info("AUDIT: {} {} {} → {}", action, resource, resource_id, outcome)
# ── 5. Exception logging helpers ──────────────────────────────────────────────
def log_exception(
    exc: Exception,
    message: str = "Unhandled exception",
    level: str = "ERROR",
    **context,
) -> None:
    """
    Log *exc* with its traceback at *level*, binding any extra context fields.
    """
    contextual = logger.bind(**context)
    # opt(exception=exc) attaches the exception's traceback to the record.
    contextual.opt(exception=exc).log(level, message)
def safe_call(fn: Callable, *args, default=None, **kwargs) -> Any:
    """
    Call fn(*args, **kwargs), returning *default* and logging on exception.

    Useful for non-critical operations (metrics, caching) that must not crash.

    Fix: callables such as functools.partial objects and instances with
    __call__ have no __qualname__ attribute, so the old handler could itself
    raise AttributeError — defeating the "must not crash" contract.  The
    display name is now resolved with getattr and a repr() fallback.
    """
    try:
        return fn(*args, **kwargs)
    except Exception as exc:
        # repr(fn) fallback keeps logging safe for name-less callables.
        fn_name = getattr(fn, "__qualname__", repr(fn))
        logger.opt(exception=True).warning(
            "safe_call: {} raised {}: {}", fn_name, type(exc).__name__, exc
        )
        return default
# ── Demo ──────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # One-time sink setup; enqueue=False so the demo output flushes inline.
    setup_logging(log_dir="/tmp/demo_logs", app_name="demo", level="DEBUG",
                  json_enabled=True, stderr_enabled=True, enqueue=False)
    # Plain level calls with deferred {field} formatting
    logger.debug("Config loaded: db_host={host}", host="localhost")
    logger.info("Application started, version=1.0.0")
    logger.warning("Cache miss rate high: {rate:.1%}", rate=0.42)
    # Context-bound logger: every record carries the request fields
    request_log = get_request_logger("req-abc123", user_id=99, endpoint="/api/orders")
    request_log.info("Request received")
    request_log.info("Query returned {n} rows", n=42)
    # Elapsed-time measurement around a block
    with timed_block("database_query", table="users"):
        time.sleep(0.01)  # simulate work
    # @logger.catch logs the exception and suppresses it
    @logger.catch
    def risky():
        raise ValueError("something went wrong")
    risky()  # exception logged, execution continues
    # Structured audit record
    log_audit_event(
        action="DELETE", actor_id="user_99", resource="post",
        resource_id="post_17", reason="user_request"
    )
    # Entry/exit/timing decorator
    @log_calls(level="DEBUG", log_time=True)
    def process_batch(n: int) -> int:
        time.sleep(0.005)
        return n * 2
    batch_result = process_batch(50)
    logger.info("Result: {}", batch_result)
    logger.success("Demo complete.")
For the stdlib logging module alternative — Python’s logging requires basicConfig, getLogger, Formatter, FileHandler, and RotatingFileHandler setup across 20+ lines while Loguru’s logger.add("app.log", rotation="50 MB", retention="14 days", serialize=True, enqueue=True) configures a thread-safe compressed rotating structured JSON sink in a single call, logger.bind(request_id=id) injects fields into every subsequent call without a LoggerAdapter, and @logger.catch wraps any function with full stack capture including local variable values when diagnose=True. For the structlog alternative for structured logging — structlog adds a processor pipeline but requires configuring bound loggers and renderer chains while Loguru’s serialize=True emits newline-delimited JSON with timestamp, level, module, function, line, and message as native fields that Datadog, Loki, and ELK parse without custom parsers, and InterceptHandler redirects uvicorn/SQLAlchemy/httpx stdlib loggers into Loguru in five lines. The Claude Skills 360 bundle includes Loguru skill sets covering logger.add with rotation and retention, serialize JSON output, bind contextual fields, opt lazy evaluation, catch decorator and context manager, InterceptHandler for stdlib redirect, log_calls timing decorator, retry with logging, timed_block context manager, and audit event logging. Start with the free tier to try structured logging code generation.