Python’s profile module is the pure-Python deterministic profiler — slower than cProfile but fully introspectable and subclassable. import profile. Quick profile: profile.run("my_function()") — profile a statement, print sorted stats. Save: profile.run("fn()", "output.prof") — save to file for pstats. Class: p = profile.Profile() — create a profiler; p.runcall(fn, *args, **kwargs) → result; p.run("code") → exec then profile; p.runctx("code", globals(), locals()). Stats: p.print_stats(sort="cumulative") — print sorted summary; p.dump_stats("file.prof") — save; p.create_stats() — finalise internal dict without printing. profile.Profile is subclassable: override calibrate() to measure overhead and set bias. Key difference from cProfile: profile is pure Python (measurable overhead per call) but lets you subclass and intercept dispatch_* methods; cProfile is a C extension and is always preferred for production profiling. Both write the same .prof binary format readable by pstats.Stats. Claude Code generates function hotspot finders, report formatters, call count auditors, top-N slow call reporters, and reproducible profiling harnesses.
CLAUDE.md for profile
## profile Stack
- Stdlib: import profile, pstats, cProfile, io
- Quick: profile.run("fn(arg)", sort="cumulative")
- Class: p = cProfile.Profile() # use cProfile for low overhead
- p.enable(); fn(); p.disable()
- p.print_stats(sort="cumulative")
- Save: p.dump_stats("run.prof")
- Load: s = pstats.Stats("run.prof").sort_stats("cumulative")
- s.print_stats(20)
- Note: prefer cProfile over profile for production; same .prof format
## profile Profiling Pipeline
# app/profileutil.py — profile, capture, compare, hotspot, report
from __future__ import annotations
import cProfile
import io
import pstats
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable
# ─────────────────────────────────────────────────────────────────────────────
# 1. Profile run helpers
# ─────────────────────────────────────────────────────────────────────────────
def profile_call(
    fn: Callable,
    *args,
    sort: str = "cumulative",
    limit: int = 20,
    **kwargs,
) -> tuple[object, str]:
    """
    Run fn(*args, **kwargs) under cProfile.

    Returns a (result, report) pair: fn's return value plus a text report
    of the top `limit` entries sorted by `sort` (any pstats sort key).

    Example:
        result, report = profile_call(my_function, x, y, sort="tottime")
        print(report)
    """
    profiler = cProfile.Profile()
    # runcall enables the profiler, invokes fn, and disables it in a
    # finally block — even if fn raises.
    result = profiler.runcall(fn, *args, **kwargs)
    report = io.StringIO()
    pstats.Stats(profiler, stream=report).sort_stats(sort).print_stats(limit)
    return result, report.getvalue()
def profile_to_file(
    fn: Callable,
    dest: "str | Path",
    *args,
    **kwargs,
) -> object:
    """
    Profile fn(*args, **kwargs) and save the stats to a .prof file.

    Returns fn's return value. If fn raises, the exception propagates
    and no stats file is written.

    Example:
        profile_to_file(run_pipeline, "reports/pipeline.prof", data)
        # Analyse with: python -m pstats reports/pipeline.prof
    """
    profiler = cProfile.Profile()
    # runcall disables the profiler even on exception; dump_stats only
    # runs on the success path, matching the original control flow.
    result = profiler.runcall(fn, *args, **kwargs)
    profiler.dump_stats(str(dest))
    return result
def stats_from_file(path: "str | Path") -> pstats.Stats:
    """
    Load a .prof file and return a pstats.Stats object.

    The returned Stats writes to sys.stdout by default (assign its
    ``stream`` attribute to redirect), so print_stats() output is visible.

    Example:
        s = stats_from_file("run.prof")
        s.sort_stats("cumulative").print_stats(10)
    """
    # Bug fix: previously a throwaway io.StringIO was passed as `stream`,
    # which silently swallowed all print_stats() output — the docstring
    # example printed nothing. Omitting `stream` defaults to sys.stdout.
    return pstats.Stats(str(path))
# ─────────────────────────────────────────────────────────────────────────────
# 2. Top-N hotspot finder
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class FunctionStat:
    """
    One function's entry from a profiling run.

    Fields:
        filename    — source file path (may be "<string>" for eval'd code)
        lineno      — line number of the function definition
        funcname    — function name
        ncalls      — call count; kept as str so recursive entries can
                      render as "total/primitive" (e.g. "3/1")
        tottime     — seconds spent in the function itself (excl. subcalls)
        cumtime     — seconds including subcalls
        percall_tot — tottime divided by call count
        percall_cum — cumtime divided by call count
    """
    filename: str
    lineno: int
    funcname: str
    ncalls: str
    tottime: float
    cumtime: float
    percall_tot: float
    percall_cum: float

    def __str__(self) -> str:
        # One space between columns; identical layout to pstats-style rows.
        columns = [
            f"{self.funcname:30s}",
            f"ncalls={self.ncalls:8s}",
            f"tot={self.tottime:.4f}s",
            f"cum={self.cumtime:.4f}s",
            f"{self.filename}:{self.lineno}",
        ]
        return " ".join(columns)
def top_hotspots(
    fn: Callable,
    *args,
    n: int = 10,
    sort: str = "cumulative",
    **kwargs,
) -> list[FunctionStat]:
    """
    Profile fn(*args, **kwargs) and return its top-N hotspot entries.

    `sort` accepts "cumulative", "tottime", "calls", or "pcalls";
    anything else falls back to "cumulative".

    Example:
        spots = top_hotspots(process_data, dataset, n=5)
        for s in spots:
            print(s)
    """
    profiler = cProfile.Profile()
    # Result is discarded — only the collected stats matter here.
    profiler.runcall(fn, *args, **kwargs)
    profiler.create_stats()
    # Route pstats output into a sink; we only read the raw stats dict.
    sink = io.StringIO()
    stats = pstats.Stats(profiler, stream=sink).sort_stats(sort)

    entries = [
        FunctionStat(
            filename=filename,
            lineno=lineno,
            funcname=funcname,
            # Recursive entries render as "total/primitive", like pstats.
            ncalls=str(cc) if cc == nc else f"{nc}/{cc}",
            tottime=tt,
            cumtime=ct,
            percall_tot=tt / nc if nc else 0.0,
            percall_cum=ct / nc if nc else 0.0,
        )
        for (filename, lineno, funcname), (cc, nc, tt, ct, _) in stats.stats.items()
    ]

    # Re-sort locally; the stats dict itself carries no ordering.
    sort_keys = {
        "cumulative": lambda s: -s.cumtime,
        "tottime": lambda s: -s.tottime,
        "calls": lambda s: -int(s.ncalls.split("/")[0]),
        "pcalls": lambda s: -s.percall_cum,
    }
    entries.sort(key=sort_keys.get(sort, sort_keys["cumulative"]))
    return entries[:n]
# ─────────────────────────────────────────────────────────────────────────────
# 3. Profiling context manager
# ─────────────────────────────────────────────────────────────────────────────
class Profiler:
    """
    Context manager for profiling a block of code with cProfile.

    Example:
        with Profiler(sort="tottime") as prof:
            run_heavy_computation()
        prof.print(limit=15)
        prof.save("reports/heavy.prof")
    """
    def __init__(self, sort: str = "cumulative") -> None:
        self._pr = cProfile.Profile()
        self._sort = sort
        # Populated on __exit__; None means the block has not finished yet.
        self._stats: "pstats.Stats | None" = None

    def __enter__(self) -> "Profiler":
        self._pr.enable()
        return self

    def __exit__(self, *_: object) -> None:
        # Returning None never suppresses exceptions from the block.
        self._pr.disable()
        # Stream is a sink: _stats is only used as a parsed-stats holder.
        self._stats = pstats.Stats(self._pr, stream=io.StringIO())

    def print(self, limit: int = 20, stream=None) -> None:
        """Print top-N stats to stream (default: stdout). No-op before exit."""
        if self._stats is None:
            return
        buf = io.StringIO()
        ps = pstats.Stats(self._pr, stream=buf).sort_stats(self._sort)
        ps.print_stats(limit)
        output = buf.getvalue()
        if stream:
            stream.write(output)
        else:
            print(output)

    def save(self, path: "str | Path") -> None:
        """Save profile data to a .prof file readable by pstats."""
        self._pr.dump_stats(str(path))

    def hotspots(self, n: int = 10) -> list[FunctionStat]:
        """Return top-N FunctionStat records sorted by the configured key."""
        if self._stats is None:
            return []
        results: list[FunctionStat] = []
        for (fname, ln, func), (cc, nc, tt, ct, _) in self._stats.stats.items():
            results.append(FunctionStat(
                filename=fname, lineno=ln, funcname=func,
                ncalls=str(cc) if cc == nc else f"{nc}/{cc}",
                tottime=tt, cumtime=ct,
                percall_tot=tt / nc if nc else 0.0,
                percall_cum=ct / nc if nc else 0.0,
            ))
        # Consistency fix: support the same sort keys as top_hotspots()
        # ("calls" and "pcalls" were silently falling back to "cumulative").
        # The previous pointless sort_stats() call on the unsorted stats
        # dict has been dropped; ordering is applied locally below.
        key_map = {
            "cumulative": lambda s: -s.cumtime,
            "tottime": lambda s: -s.tottime,
            "calls": lambda s: -int(s.ncalls.split("/")[0]),
            "pcalls": lambda s: -s.percall_cum,
        }
        results.sort(key=key_map.get(self._sort, key_map["cumulative"]))
        return results[:n]
# ─────────────────────────────────────────────────────────────────────────────
# 4. Profile comparison
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class ProfileComparison:
    """
    Side-by-side hotspot comparison of two profiling runs.

    Example:
        comp = ProfileComparison.run(old_fn, new_fn, data)
        print(f"speedup: {comp.speedup:.2f}x")
        comp.print_diff()
    """
    before_time: float   # top hotspot cumtime of the "before" run (seconds)
    after_time: float    # top hotspot cumtime of the "after" run (seconds)
    before_stats: list[FunctionStat]
    after_stats: list[FunctionStat]

    @classmethod
    def run(
        cls,
        before_fn: Callable,
        after_fn: Callable,
        *args,
        n: int = 10,
        **kwargs,
    ) -> "ProfileComparison":
        """Profile both callables on identical arguments and compare them."""
        spots_before = top_hotspots(before_fn, *args, n=n, **kwargs)
        spots_after = top_hotspots(after_fn, *args, n=n, **kwargs)

        def head_cumtime(spots: list[FunctionStat]) -> float:
            # The top entry's cumtime stands in for total runtime; the
            # 1e-9 floor keeps the later division well-defined.
            return (spots[0].cumtime if spots else 0.0) or 1e-9

        return cls(
            before_time=head_cumtime(spots_before),
            after_time=head_cumtime(spots_after),
            before_stats=spots_before,
            after_stats=spots_after,
        )

    @property
    def speedup(self) -> float:
        """Ratio before/after; 0.0 when after_time is not positive."""
        if self.after_time <= 0:
            return 0.0
        return self.before_time / self.after_time

    def print_diff(self, n: int = 5) -> None:
        """Print the top-n hotspots of both runs plus the speedup factor."""
        for label, spots in (("before", self.before_stats),
                             ("after", self.after_stats)):
            print(f"  {label} top-{n} hotspots:")
            for spot in spots[:n]:
                print(f"    {spot}")
        print(f"  speedup: {self.speedup:.2f}x")
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    import tempfile
    print("=== profile demo ===")
    # ── define a subject ──────────────────────────────────────────────────────
    def slow_sum(n: int) -> int:
        # Deliberately naive O(n) loop so the profiler has work to observe.
        acc = 0
        for value in range(n):
            acc += value
        return acc
    def fast_sum(n: int) -> int:
        # Gauss closed form — same answer as slow_sum, in O(1).
        return n * (n - 1) // 2
    # ── profile_call ──────────────────────────────────────────────────────────
    print("\n--- profile_call ---")
    result, report = profile_call(slow_sum, 50_000, sort="tottime", limit=5)
    print(f"  result: {result}")
    for line in report.strip().splitlines()[:8]:
        print(f"  {line}")
    # ── top_hotspots ──────────────────────────────────────────────────────────
    print("\n--- top_hotspots ---")
    for spot in top_hotspots(slow_sum, 50_000, n=3):
        print(f"  {spot}")
    # ── Profiler context manager ──────────────────────────────────────────────
    print("\n--- Profiler context manager ---")
    with Profiler(sort="tottime") as prof:
        for _ in range(100):
            slow_sum(1_000)
    print("  top hotspots:")
    for spot in prof.hotspots(n=3):
        print(f"    {spot}")
    # ── profile_to_file + stats_from_file ────────────────────────────────────
    print("\n--- profile_to_file + stats_from_file ---")
    with tempfile.TemporaryDirectory() as td:
        prof_path = Path(td) / "slow.prof"
        profile_to_file(slow_sum, prof_path, 10_000)
        loaded = stats_from_file(prof_path)
        capture = io.StringIO()
        loaded.stream = capture  # redirect print_stats output for display
        loaded.sort_stats("tottime").print_stats(3)
        for line in capture.getvalue().strip().splitlines()[:5]:
            print(f"  {line}")
    # ── ProfileComparison ─────────────────────────────────────────────────────
    print("\n--- ProfileComparison ---")
    comparison = ProfileComparison.run(slow_sum, fast_sum, 100_000)
    comparison.print_diff(n=2)
    print("\n=== done ===")
For the cProfile alternative — cProfile is a C extension profiler with the same API as profile but far lower measurement overhead (no Python call overhead per event) — always prefer cProfile in practice; use profile.Profile only when you need to subclass and override dispatch_call/dispatch_return to intercept profiler events in pure Python, or when targeting an environment without C extensions. For the line_profiler (PyPI) alternative — @profile from line_profiler measures time per individual source line rather than per function — use line_profiler when cProfile identifies a hot function and you need to find which specific lines within it are slow; use cProfile/profile for the initial function-level sweep to locate hot spots before drilling into line-level detail. The Claude Skills 360 bundle includes profile skill sets covering profile_call() returning (result, report_string), profile_to_file()/stats_from_file() file I/O helpers, FunctionStat dataclass with ncalls/tottime/cumtime/percall_* fields, top_hotspots() top-N driver, Profiler context manager with print()/save()/hotspots(), and ProfileComparison.run() with speedup property and print_diff(). Start with the free tier to try profiling patterns and profile pipeline code generation.