viztracer records a full timeline of every Python function call. pip install viztracer. CLI: viztracer script.py → result.json. View: vizviewer result.json — opens Chrome trace in browser. Programmatic: from viztracer import VizTracer; with VizTracer(output_file="out.json"): .... Max entries: VizTracer(tracer_entries=1000000). Filter: VizTracer(include_files=["app/"]) or exclude_files. Depth: VizTracer(max_stack_depth=10). Log variable: tracer.log_var("count", 42). Log attribute: tracer.log_attr(obj, "attr_name"). Instant event: tracer.add_instant("checkpoint", args={"step": 1}). Counter: tracer.add_counter("queue_size", {"size": q.qsize()}). Duration event: with tracer.add_duration("step_name"): .... Save: tracer.save() — writes output_file. tracer.save("custom.json"). Subprocess: viztracer --include_children -- python script.py. Async: works with asyncio automatically. Multiprocess: viztracer --log_multiprocess script.py. Flamegraph: viztracer --flamegraph script.py. from viztracer import get_tracer; t = get_tracer(); t.add_instant(...). tracer.get_report() — report object. Claude Code generates viztracer trace contexts, custom event logging, and timeline export pipelines.
CLAUDE.md for viztracer
## viztracer Stack
- Version: viztracer >= 0.16 | pip install viztracer
- Trace: with VizTracer(output_file="out.json"): ... → JSON trace
- View: vizviewer out.json → Chrome trace in browser
- Filter: include_files=["src/"] or max_stack_depth=10 to reduce size
- Events: tracer.log_var("x", val) | add_instant("step") | add_counter()
- Flamegraph: viztracer --flamegraph script.py
viztracer Execution Timeline Pipeline
# app/tracer.py — viztracer context manager, events, filtering, and async tracing
from __future__ import annotations
import asyncio
import subprocess
import sys
from contextlib import asynccontextmanager, contextmanager
from pathlib import Path
from typing import Any
from viztracer import VizTracer, get_tracer
# ─────────────────────────────────────────────────────────────────────────────
# 1. Tracer context managers
# ─────────────────────────────────────────────────────────────────────────────
@contextmanager
def trace(
    output_file: str | Path = "/tmp/trace.json",
    max_stack_depth: int = -1,
    tracer_entries: int = 1_000_000,
    include_files: list[str] | None = None,
    exclude_files: list[str] | None = None,
    log_async: bool = True,
    verbose: int = 0,
):
    """
    Context manager that records a full execution timeline.

    Usage:
        with trace("/tmp/my_app.json") as t:
            run_pipeline()
        # Then: vizviewer /tmp/my_app.json

    Parameters:
        output_file: path of the JSON trace written on exit.
        max_stack_depth: -1 = unlimited. Set to e.g. 10 to reduce trace size.
        tracer_entries: circular buffer size. 1M entries ~ 50 MB JSON.
        include_files: only trace files matching these path substrings.
        exclude_files: skip files matching these path substrings.
        log_async: record async task switches as trace events.
        verbose: viztracer verbosity level (0 = quiet).

    Yields the live VizTracer instance so callers can log custom events.
    The trace is stopped and saved even if the body raises.
    """
    kwargs: dict[str, Any] = {
        "output_file": str(output_file),
        "tracer_entries": tracer_entries,
        "verbose": verbose,
        # BUG FIX: log_async was accepted but never forwarded, so async task
        # switches were silently not recorded despite the documented default.
        "log_async": log_async,
    }
    if max_stack_depth > 0:
        kwargs["max_stack_depth"] = max_stack_depth
    if include_files:
        kwargs["include_files"] = include_files
    if exclude_files:
        kwargs["exclude_files"] = exclude_files
    tracer = VizTracer(**kwargs)
    tracer.start()
    try:
        yield tracer
    finally:
        # Always stop and persist, even on exceptions in the traced body.
        tracer.stop()
        tracer.save()
@asynccontextmanager
async def async_trace(
    output_file: str | Path = "/tmp/async_trace.json",
    max_stack_depth: int = -1,
    tracer_entries: int = 1_000_000,
    include_files: list[str] | None = None,
    log_async: bool = True,
):
    """
    Async context manager for tracing asyncio code.

    Records coroutine frame switches alongside regular Python calls.

    Usage:
        async with async_trace("/tmp/api_trace.json"):
            await fetch_all_pages()

    Parameters:
        output_file: path of the JSON trace written on exit.
        max_stack_depth: -1 = unlimited; positive values cap call depth.
        tracer_entries: circular buffer size (see trace()).
        include_files: only trace files matching these path substrings.
        log_async: record async task switches as trace events (new,
            backward-compatible keyword; previously the docstring promised
            this but the flag was never passed to VizTracer).
    """
    kwargs: dict[str, Any] = {
        "output_file": str(output_file),
        "tracer_entries": tracer_entries,
        # FIX: actually enable async task-switch logging, matching trace().
        "log_async": log_async,
    }
    if max_stack_depth > 0:
        kwargs["max_stack_depth"] = max_stack_depth
    if include_files:
        kwargs["include_files"] = include_files
    tracer = VizTracer(**kwargs)
    tracer.start()
    try:
        yield tracer
    finally:
        # Stop and persist even if the awaited body raises.
        tracer.stop()
        tracer.save()
# ─────────────────────────────────────────────────────────────────────────────
# 2. Custom event helpers
# ─────────────────────────────────────────────────────────────────────────────
def log_instant(name: str, args: dict[str, Any] | None = None) -> None:
    """Record a zero-duration instant event on the active tracer.

    Appears as a vertical marker in the vizviewer timeline. Silently does
    nothing when called outside a trace context (no active tracer).

    Usage (inside a trace context):
        log_instant("cache_miss", {"key": cache_key})
    """
    active = get_tracer()
    if active is None:
        return
    active.add_instant(name, args=args or {})
def log_counter(name: str, values: dict[str, int | float]) -> None:
    """Record counter values — rendered as a graph track in the timeline.

    Silently does nothing when no tracer is active.

    Example:
        log_counter("queue", {"depth": queue.qsize(), "errors": err_count})
    """
    active = get_tracer()
    if active is None:
        return
    active.add_counter(name, values)
def log_var(name: str, value: Any) -> None:
    """Record a variable's value as an instant event in the active trace.

    No-op when called outside a trace context.
    """
    active = get_tracer()
    if active is None:
        return
    active.log_var(name, value)
@contextmanager
def duration_event(name: str, args: dict[str, Any] | None = None):
    """
    Mark a named duration event in the timeline.

    Renders as a colored bar alongside function-call bars. Degrades to a
    plain pass-through when no tracer is active, so call sites never need
    to check for an active trace themselves.

    Usage:
        with duration_event("db_query", {"table": "users"}):
            result = db.execute(sql)
    """
    active = get_tracer()
    if active is None:
        # Outside a trace context: just run the body.
        yield
        return
    with active.add_duration(name, args=args or {}):
        yield
# ─────────────────────────────────────────────────────────────────────────────
# 3. Filtered tracing — application frames only
# ─────────────────────────────────────────────────────────────────────────────
def trace_app(
    output_file: str | Path = "/tmp/app_trace.json",
    app_dirs: list[str] | None = None,
    max_stack_depth: int = 15,
    tracer_entries: int = 500_000,
):
    """
    Trace only application code (exclude stdlib and site-packages).

    Returns the same context manager as trace().

    Parameters:
        output_file: path of the JSON trace written on exit.
        app_dirs: path substrings to include, e.g. ["src/", "app/"].
            If None, uses the current working directory.
        max_stack_depth: cap on recorded call depth (default 15).
        tracer_entries: circular buffer size (default 500k).
    """
    if app_dirs is None:
        # Use the already-imported pathlib instead of a function-local
        # `import os`; Path.cwd() is equivalent to os.getcwd().
        app_dirs = [str(Path.cwd())]
    return trace(
        output_file=output_file,
        include_files=app_dirs,
        max_stack_depth=max_stack_depth,
        tracer_entries=tracer_entries,
    )
# ─────────────────────────────────────────────────────────────────────────────
# 4. CLI wrapper for subprocess tracing
# ─────────────────────────────────────────────────────────────────────────────
def trace_subprocess(
    command: list[str],
    output_file: str | Path = "/tmp/subprocess_trace.json",
    flamegraph: bool = False,
    max_stack_depth: int = -1,
) -> int:
    """
    Run a command under the viztracer CLI and save the trace.

    Returns the subprocess exit code.

    Example:
        exit_code = trace_subprocess(
            ["python", "benchmark.py", "--iterations", "100"],
            output_file="/tmp/bench.json",
        )
        # Then open with: vizviewer /tmp/bench.json
    """
    # Invoke viztracer via the current interpreter so the same environment
    # (and viztracer install) is used for the child process.
    viz_cmd: list[str] = [sys.executable, "-m", "viztracer", "-o", str(output_file)]
    if flamegraph:
        viz_cmd += ["--flamegraph"]
    if max_stack_depth > 0:
        viz_cmd += ["--max_stack_depth", str(max_stack_depth)]
    # "--" separates viztracer's own options from the traced command line.
    viz_cmd += ["--", *command]
    return subprocess.run(viz_cmd).returncode
def open_trace(trace_file: str | Path) -> None:
    """Open a trace file in vizviewer without blocking the caller.

    Launches the viewer as a detached-ish subprocess with its output
    discarded; the function returns immediately.
    """
    viewer_cmd = [sys.executable, "-m", "vizviewer", str(trace_file)]
    subprocess.Popen(
        viewer_cmd,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":

    def fib_recursive(n: int) -> int:
        # Deliberately slow doubly-recursive fib: gives the tracer a deep,
        # busy call tree to record.
        return n if n <= 1 else fib_recursive(n - 1) + fib_recursive(n - 2)

    def sort_then_fib(n: int) -> tuple[list[int], int]:
        import random

        sample = random.sample(range(1000), 100)
        log_instant("before_sort")
        sample.sort()
        log_instant("after_sort")
        log_counter("data", {"size": len(sample)})
        with duration_event("fib_computation", {"n": n}):
            result = fib_recursive(n)
        return sample, result

    print("=== Tracing sync code ===")
    # The yielded tracer is unused here; custom events go through the
    # module-level helpers, which find the active tracer themselves.
    with trace("/tmp/demo_trace.json", max_stack_depth=20):
        data, fib = sort_then_fib(20)
        log_var("fib_result", fib)
    print(f" Trace saved: /tmp/demo_trace.json")
    print(f" fib(20) = {fib}")
    print(" Run: vizviewer /tmp/demo_trace.json")

    print("\n=== Tracing async code ===")

    async def async_pipeline():
        async with async_trace("/tmp/async_trace.json", max_stack_depth=15):
            # Five concurrent sleeps show up as interleaved task switches.
            await asyncio.gather(*(asyncio.sleep(0.01) for _ in range(5)))
            log_instant("all_tasks_done")

    asyncio.run(async_pipeline())
    print(" Async trace saved: /tmp/async_trace.json")
    print(" Run: vizviewer /tmp/async_trace.json")
For the cProfile / pstats alternative — cProfile records cumulative and per-call times with deterministic tracing; viztracer records a complete timeline showing the exact sequence and interleaving of all function calls — viztracer is the right tool when you need to understand when something happened relative to other events (e.g., why a coroutine was delayed), while cProfile is better for aggregated “total time in function” reports. For the pyinstrument alternative — pyinstrument uses statistical sampling to produce a call tree with low overhead; viztracer uses complete deterministic tracing to produce a scroll-and-zoom Chrome trace timeline — use pyinstrument for production profiling with minimal overhead, viztracer for detailed investigation of short-running code where you want to inspect every call in sequence. The Claude Skills 360 bundle includes viztracer skill sets covering trace() sync context manager, async_trace() coroutine tracing, log_instant()/log_counter()/log_var() custom events, duration_event() named span, trace_app() application-only filtering, trace_subprocess() CLI wrapper, open_trace() vizviewer launcher, max_stack_depth/tracer_entries tuning, and include_files selective tracing. Start with the free tier to try execution timeline tracing code generation.