Python’s cProfile module profiles Python programs by counting function calls and measuring time. import cProfile, pstats. cProfile.run: cProfile.run("main()", "output.prof") — profile a code string; optional second arg saves results to file. Profile class: pr = cProfile.Profile() → pr.enable() / pr.disable() / pr.runcall(fn, *args) / pr.create_stats() / pr.dump_stats("file.prof"). pstats.Stats: stats = pstats.Stats(pr) or pstats.Stats("file.prof") — stats.sort_stats("cumulative") / stats.print_stats(20) / stats.print_callers("fn_name") / stats.print_callees("fn_name"). Sort keys: "cumtime" (cumulative including callees), "tottime" (own time only), "ncalls", "pcalls" (primitive calls), "name", "file", "module". strip_dirs: stats.strip_dirs() removes full path prefixes. print_stats(n): show top n lines; print_stats("pattern") filter by string. Context manager pattern: with cProfile.Profile() as pr: ... → profile a block. CLI: python -m cProfile -s cumtime -o out.prof script.py. snakeviz out.prof for visual flamegraph (PyPI). Column meanings: ncalls = call count, tottime = time in this function (not callees), cumtime = tottime + callees, percall = per-call avg. Claude Code generates hotspot reports, regression detectors, profiling decorators, and automated performance dashboards.
CLAUDE.md for cProfile
## cProfile Stack
- Stdlib: import cProfile, pstats, io
- Quick: cProfile.run("fn()", sort="cumulative")
- Save: pr = cProfile.Profile(); pr.runcall(fn); pr.dump_stats("out.prof")
- Read: stats = pstats.Stats("out.prof").sort_stats("tottime")
- stats.print_stats(20)
- Filter: stats.print_stats("mymodule")
- Callers: stats.print_callers("slow_fn")
cProfile Profiling Pipeline
# app/profutil.py — profile, report, compare, decorator, hotspot finder
from __future__ import annotations

import cProfile
import functools
import io
import pstats
import time
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Any, Callable, Generator, TypeVar
T = TypeVar("T")
# ─────────────────────────────────────────────────────────────────────────────
# 1. Profiling context managers and helpers
# ─────────────────────────────────────────────────────────────────────────────
@contextmanager
def profile(
    sort: str = "cumulative",
    limit: int = 20,
    filter: str = "",
    print_result: bool = True,
) -> Generator[cProfile.Profile, None, None]:
    """
    Context manager that profiles the enclosed block and prints stats.

    Args:
        sort: pstats sort key ("cumulative", "tottime", "ncalls", ...).
        limit: maximum number of report lines to print.
        filter: optional substring/regex restriction on function names.
        print_result: if False, print nothing; inspect the yielded
            Profile yourself.

    Yields:
        The active cProfile.Profile collecting data for the block.

    Example:
        with profile(sort="tottime", limit=10) as pr:
            heavy_computation()
        # top 10 functions by own time are printed
    """
    pr = cProfile.Profile()
    pr.enable()
    try:
        yield pr
    finally:
        # Always stop profiling, even if the block raised.
        pr.disable()
        if print_result:
            s = io.StringIO()
            stats = pstats.Stats(pr, stream=s).strip_dirs().sort_stats(sort)
            if filter:
                # pstats applies restrictions in argument order, so the
                # pattern must come before the line cap. The original
                # order (limit, filter) kept only the top `limit` rows
                # and then filtered them, silently dropping matches
                # outside the top-N.
                stats.print_stats(filter, limit)
            else:
                stats.print_stats(limit)
            print(s.getvalue())
def profile_fn(fn: Callable[..., T], *args: Any, sort: str = "cumulative", limit: int = 20, **kwargs: Any) -> tuple[T, cProfile.Profile]:
    """
    Run *fn* once under a dedicated profiler and return ``(result, profiler)``.

    Args:
        fn: callable to profile.
        *args, **kwargs: forwarded unchanged to *fn*.
        sort, limit: NOTE(review): accepted but currently unused here; kept
            for interface stability. Apply them when formatting, e.g.
            ``profile_to_string(pr, sort=..., limit=...)``.

    Returns:
        Tuple of *fn*'s return value and the Profile holding its stats.

    Example:
        result, pr = profile_fn(sorted, list(range(10000, 0, -1)))
        stats = pstats.Stats(pr).sort_stats("tottime")
        stats.print_stats(5)
    """
    profiler = cProfile.Profile()
    value = profiler.runcall(fn, *args, **kwargs)
    return value, profiler
def profile_to_string(pr: cProfile.Profile, sort: str = "cumulative", limit: int = 30, filter: str = "") -> str:
    """
    Render a Profile as a formatted report string (for logging or CI artifacts).

    Args:
        pr: a finished cProfile.Profile.
        sort: pstats sort key ("cumulative", "tottime", "ncalls", ...).
        limit: maximum number of report lines.
        filter: optional substring/regex restriction on function names.

    Returns:
        The pstats report text.

    Example:
        result, pr = profile_fn(main)
        report = profile_to_string(pr, sort="tottime")
        Path("profile.txt").write_text(report)
    """
    buf = io.StringIO()
    stats = pstats.Stats(pr, stream=buf).strip_dirs().sort_stats(sort)
    if filter:
        # Restrictions are applied in order: filter to matching functions
        # first, then cap the line count. (Previously `limit` came first,
        # so matches outside the top `limit` rows were dropped.)
        stats.print_stats(filter, limit)
    else:
        stats.print_stats(limit)
    return buf.getvalue()
def save_profile(pr: cProfile.Profile, path: str) -> None:
    """Persist *pr* to *path* as a binary .prof dump (readable by pstats, SnakeViz, etc.)."""
    pr.dump_stats(path)
def load_profile(path: str) -> pstats.Stats:
    """Read a .prof dump from *path*; directory prefixes are stripped for readability."""
    stats = pstats.Stats(path)
    return stats.strip_dirs()
# ─────────────────────────────────────────────────────────────────────────────
# 2. Profiling decorators
# ─────────────────────────────────────────────────────────────────────────────
def profiled(sort: str = "cumulative", limit: int = 15, filter: str = "") -> Callable:
    """
    Decorator that profiles the wrapped function and prints stats after each call.

    Args:
        sort: pstats sort key for the printed report.
        limit: maximum number of report lines.
        filter: optional restriction pattern forwarded to the report.

    Example:
        @profiled(sort="tottime", limit=10)
        def slow_fn():
            ...
    """
    # `functools` is now imported at module level instead of inside every
    # decorator invocation.
    def decorator(fn: Callable) -> Callable:
        @functools.wraps(fn)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            result, pr = profile_fn(fn, *args, **kwargs)
            print(f"\n--- Profile: {fn.__name__} ---")
            print(profile_to_string(pr, sort=sort, limit=limit, filter=filter))
            return result
        return wrapper
    return decorator
def profile_once(fn: Callable[..., T]) -> Callable[..., T]:
    """
    Decorator that profiles only the FIRST call (printing stats); all
    subsequent calls run normally, without profiling overhead.

    Example:
        @profile_once
        def build_index(data):
            ...
    """
    has_profiled = False

    @functools.wraps(fn)
    def wrapper(*args: Any, **kwargs: Any) -> T:
        # `nonlocal` flag replaces the original one-element-list hack;
        # functools is imported at module level.
        nonlocal has_profiled
        if not has_profiled:
            has_profiled = True
            result, pr = profile_fn(fn, *args, **kwargs)
            print(f"\n--- First-call profile: {fn.__name__} ---")
            print(profile_to_string(pr, limit=10))
            return result
        return fn(*args, **kwargs)

    return wrapper  # type: ignore[return-value]
# ─────────────────────────────────────────────────────────────────────────────
# 3. Hotspot analysis
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class HotspotEntry:
    """One profiled function: identity plus call-count and timing figures."""

    func: str      # function name as reported by pstats
    ncalls: int    # number of calls recorded
    tottime: float  # own time in seconds (excluding callees)
    cumtime: float  # cumulative time in seconds (including callees)
    file: str      # source file name
    lineno: int    # line number of the definition

    @property
    def tottime_µs(self) -> float:
        """Own time expressed in microseconds."""
        return 1e6 * self.tottime

    @property
    def cumtime_µs(self) -> float:
        """Cumulative time expressed in microseconds."""
        return 1e6 * self.cumtime

    def __str__(self) -> str:
        columns = [
            f"{self.func:40s}",
            f"ncalls={self.ncalls:8d}",
            f"tot={self.tottime_µs:10.1f}µs",
            f"cum={self.cumtime_µs:10.1f}µs",
        ]
        return " ".join(columns)
def extract_hotspots(pr: cProfile.Profile, n: int = 20, sort: str = "tottime") -> list[HotspotEntry]:
    """
    Extract the top *n* hotspot functions from a Profile as HotspotEntry objects.

    Args:
        pr: a finished cProfile.Profile.
        n: number of entries to return.
        sort: ranking criterion — "tottime", "cumtime", or "ncalls"
            (unknown keys fall back to "tottime").

    Returns:
        Entries sorted descending by the chosen criterion.

    Example:
        _, pr = profile_fn(main)
        for h in extract_hotspots(pr, n=10):
            print(h)
    """
    stats = pstats.Stats(pr).strip_dirs()
    # BUG FIX: the internal `stats.stats` dict is NOT reordered by
    # sort_stats() (that only affects the print list), so slicing its items
    # before sorting picked n arbitrary functions. Build entries for every
    # function first, then sort and slice.
    #
    # Internal layout: {(file, lineno, func): (cc, nc, tt, ct, callers)}
    # where cc = primitive call count and nc = total call count; "ncalls"
    # conventionally means nc (the original unpacked cc instead).
    entries = []
    for (file, lineno, func), (cc, nc, tottime, cumtime, _callers) in stats.stats.items():
        entries.append(HotspotEntry(
            func=func, ncalls=nc, tottime=tottime, cumtime=cumtime,
            file=file, lineno=lineno,
        ))
    key_fn = {
        "tottime": lambda e: -e.tottime,
        "cumtime": lambda e: -e.cumtime,
        "ncalls": lambda e: -e.ncalls,
    }.get(sort, lambda e: -e.tottime)
    return sorted(entries, key=key_fn)[:n]
# ─────────────────────────────────────────────────────────────────────────────
# 4. Comparison profiling
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class ProfileComparison:
    """Side-by-side hotspot comparison between two profiled runs (A vs B)."""

    label_a: str
    label_b: str
    hotspots_a: list[HotspotEntry]
    hotspots_b: list[HotspotEntry]

    def report(self) -> str:
        """Render a table of the 20 largest hotspots, ranked by max own time."""
        by_a = {h.func: h for h in self.hotspots_a}
        by_b = {h.func: h for h in self.hotspots_b}

        def own_time(table, name):
            # Own time in seconds; 0.0 when the function appears in only
            # one of the two runs.
            hit = table.get(name)
            return hit.tottime if hit is not None else 0.0

        out = [f"Profile comparison: {self.label_a} vs {self.label_b}"]
        out.append(f"\n{'Function':40s} {'A totµs':>12} {'B totµs':>12} {'Δ':>10}")
        out.append("-" * 80)
        every_func = set(by_a) | set(by_b)
        ranked = sorted(
            every_func,
            key=lambda name: -max(own_time(by_a, name), own_time(by_b, name)),
        )
        for name in ranked[:20]:
            micro_a = own_time(by_a, name) * 1e6
            micro_b = own_time(by_b, name) * 1e6
            out.append(f"{name:40s} {micro_a:12.1f} {micro_b:12.1f} {micro_b - micro_a:+10.1f}")
        return "\n".join(out)
def compare_profiles(
    fn_a: Callable, label_a: str,
    fn_b: Callable, label_b: str,
    *args: Any, **kwargs: Any,
) -> ProfileComparison:
    """
    Profile two callables with identical arguments and package their top
    hotspots for side-by-side comparison.

    Args:
        fn_a, fn_b: the two implementations to compare.
        label_a, label_b: display names used in the report.
        *args, **kwargs: forwarded unchanged to both callables.

    Example:
        cmp = compare_profiles(old_sort, "old", new_sort, "new", data=big_list)
        print(cmp.report())
    """
    _, first = profile_fn(fn_a, *args, **kwargs)
    _, second = profile_fn(fn_b, *args, **kwargs)
    return ProfileComparison(
        label_a=label_a,
        label_b=label_b,
        hotspots_a=extract_hotspots(first),
        hotspots_b=extract_hotspots(second),
    )
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    print("=== cProfile demo ===")

    def slow_a(n):
        """Many tiny sum() calls accumulated in a Python-level loop."""
        acc = 0
        for k in range(n):
            acc += sum(range(k))
        return acc

    def slow_b(n):
        """The same total computed via a single generator expression."""
        return sum(sum(range(k)) for k in range(n))

    print("\n--- profile context manager ---")
    with profile(sort="tottime", limit=8) as prof:
        slow_a(200)

    print("\n--- profile_to_string ---")
    _, prof = profile_fn(slow_a, 300)
    text = profile_to_string(prof, sort="tottime", limit=5)
    print(text)

    print("\n--- extract_hotspots ---")
    _, prof = profile_fn(slow_b, 300)
    for entry in extract_hotspots(prof, n=5):
        print(f"  {entry}")

    print("\n--- compare_profiles ---")
    comparison = compare_profiles(slow_a, "slow_a", slow_b, "slow_b", 300)
    print(comparison.report())

    print("\n=== done ===")
For the line_profiler alternative — line_profiler (PyPI via kernprof) instruments individual lines within functions, showing exactly which line costs the most time; cProfile instruments at the function granularity — use line_profiler with @profile decorator when cProfile has already told you which function is slow and you need to pinpoint the hot line inside it; use cProfile as the first step to identify which functions are worth line-level profiling. For the py-spy / pyinstrument alternative — py-spy (PyPI) is a sampling profiler that can attach to a running Python process without modifying the source and produces flamegraph SVGs with near-zero overhead; pyinstrument (PyPI) wraps time.perf_counter sampling and renders call trees with live HTML output; they are ideal for profiling long-running servers and web requests without code modification — use py-spy for low-overhead production sampling; use pyinstrument for request-level profiling in web frameworks; use cProfile for deterministic (every-call-counted) analysis of offline batch jobs and scripts. The Claude Skills 360 bundle includes cProfile skill sets covering profile()/profile_fn()/profile_to_string()/save_profile()/load_profile() core helpers, profiled()/profile_once() decorators, HotspotEntry dataclass with extract_hotspots(), ProfileComparison/compare_profiles() for before-and-after comparison, and full demo comparing two implementations. Start with the free tier to try performance profiling patterns and cProfile pipeline code generation.