Numba JIT-compiles Python functions to native machine code for NumPy-heavy loops. pip install numba. from numba import jit, njit, vectorize, cuda, prange. Basic JIT: @njit — compiles to native code, no Python objects. @jit(nopython=True) is equivalent. Cache: @njit(cache=True) — skips recompile on subsequent runs. Parallel: @njit(parallel=True), use prange for parallel loops: for i in prange(n) — auto-parallelizes. @njit(fastmath=True) for 20-50% extra speedup via relaxed floating point. First call triggers compilation (warm-up); subsequent calls are fast. ufunc: @vectorize(["float64(float64, float64)"], target="cpu") — target “cpu”, “parallel”, “cuda”. gufunc: @guvectorize(["void(float64[:], float64[:])"], "(n)->(n)"). CUDA kernel: @cuda.jit, threads_per_block = 256, blocks = (n + threads_per_block - 1) // threads_per_block, kernel[blocks, threads_per_block](d_array). CUDA device: cuda.grid(1) — current thread index. Typed List: from numba.typed import List as NList; my_list = NList(). Stencil: @stencil decorator for convolution-style neighborhood access. Explicit signature: @njit("float64[:](float64[:], float64)"). objmode: with numba.objmode(result="float64[:]"): for Python-fallback sections. AOT: from numba.pycc import CC. Claude Code generates Numba-accelerated numerical loops, GPU kernels, parallel ufuncs, and Monte Carlo simulation scripts.
CLAUDE.md for Numba
## Numba Stack
- Version: numba >= 0.59
- Basic: @njit (= @jit(nopython=True)) | add cache=True to persist compilation
- Parallel: @njit(parallel=True) + prange(n) for loop parallelism
- Fast: @njit(fastmath=True) for relaxed FP (20-50% gain)
- ufunc: @vectorize(["float64(float64,float64)"], target="cpu"/"parallel"/"cuda")
- GPU: @cuda.jit → kernel[blocks_per_grid, threads_per_block](d_array)
- Types: float32/float64/int32/int64 — annotate for overloaded dispatch
- Warm-up: first call compiles; call with dummy data during startup
Numba Acceleration Pipeline
# perf/numba_pipeline.py — JIT-accelerated numerical computing with Numba
from __future__ import annotations
import time
import numpy as np
from numba import njit, vectorize, guvectorize, prange, cuda
from numba import float32, float64, int32, int64, boolean
import numba
# ── 1. Basic JIT acceleration ─────────────────────────────────────────────────
@njit(cache=True)
def sum_squared_diff(a: np.ndarray, b: np.ndarray) -> float:
    """Return the sum of squared element-wise differences between a and b.

    A tight scalar loop that Numba compiles to native code — typically
    10-100x faster than the equivalent interpreted Python loop.
    """
    acc = 0.0
    for idx in range(a.shape[0]):
        gap = a[idx] - b[idx]
        acc += gap * gap
    return acc
@njit(parallel=True, fastmath=True, cache=True)
def matrix_row_norms(X: np.ndarray) -> np.ndarray:
    """Return the L2 norm of every row of X as a float64 vector.

    The outer row loop uses prange, so rows are processed on all CPU cores;
    fastmath=True permits FMA contraction and relaxed IEEE-754 ordering.
    """
    rows, cols = X.shape
    result = np.empty(rows, dtype=np.float64)
    for r in prange(rows):  # parallel over rows
        acc = 0.0
        for c in range(cols):
            value = X[r, c]
            acc += value * value
        result[r] = np.sqrt(acc)
    return result
@njit(parallel=True, fastmath=True, cache=True)
def pairwise_distances_l2(
    X: np.ndarray,  # (N, D)
    Y: np.ndarray,  # (M, D)
) -> np.ndarray:
    """Return the full (N, M) matrix of Euclidean distances between rows.

    Complexity is O(N*M*D) like the pure-Python version, but the compiled
    triple loop (outer loop parallelized via prange) runs roughly two
    orders of magnitude faster.
    """
    n_x, n_dim = X.shape
    n_y = Y.shape[0]
    dist = np.empty((n_x, n_y), dtype=np.float64)
    for i in prange(n_x):
        for j in range(n_y):
            acc = 0.0
            for k in range(n_dim):
                gap = X[i, k] - Y[j, k]
                acc += gap * gap
            dist[i, j] = np.sqrt(acc)
    return dist
@njit(cache=True)
def sliding_window_stats(
    data: np.ndarray,
    window_size: int,
) -> tuple:
    """Compute rolling mean and std (population, ddof=0) without Pandas overhead.

    Parameters:
        data: 1-D float64 array.
        window_size: samples per window; must satisfy 1 <= window_size <= len(data).

    Returns:
        (means, stds) arrays, each of length len(data) - window_size + 1.

    Raises:
        ValueError: if window_size is outside [1, len(data)] — previously an
            out-of-range window produced a negative allocation size.
    """
    n = len(data)
    # Guard against a negative number of windows (e.g. window larger than data).
    if window_size < 1 or window_size > n:
        raise ValueError("window_size must be between 1 and len(data)")
    n_windows = n - window_size + 1
    means = np.empty(n_windows, dtype=np.float64)
    stds = np.empty(n_windows, dtype=np.float64)
    for i in range(n_windows):
        window = data[i : i + window_size]
        means[i] = window.mean()
        stds[i] = window.std()
    return means, stds
# ── 2. Monte Carlo simulation ─────────────────────────────────────────────────
@njit(parallel=True, fastmath=True, cache=True)
def monte_carlo_pi(n_samples: int) -> float:
    """Estimate π by sampling uniform points in the unit square.

    The fraction landing inside the unit quarter-circle, scaled by 4,
    converges to π. The prange trial loop parallelizes across cores and
    Numba turns the scalar counter into a cross-thread reduction —
    a classic ~4x speedup on a 4-core CPU over the serial version.
    """
    inside = 0
    for _ in prange(n_samples):
        px = np.random.random()
        py = np.random.random()
        if px * px + py * py <= 1.0:
            inside += 1
    return 4.0 * inside / n_samples
@njit(parallel=True, fastmath=True, cache=True)
def black_scholes_paths(
    S0: float,
    r: float,
    sigma: float,
    T: float,
    n_steps: int,
    n_paths: int,
) -> np.ndarray:
    """Simulate Geometric Brownian Motion paths (Black-Scholes dynamics).

    Each of the n_paths rows is an independent path simulated in parallel,
    discretized into n_steps increments of dt = T / n_steps.

    Returns:
        (n_paths, n_steps + 1) float64 array; column 0 holds S0.
    """
    dt = T / n_steps
    root_dt = np.sqrt(dt)
    # Drift term is loop-invariant — compute it once.
    drift = (r - 0.5 * sigma * sigma) * dt
    paths = np.empty((n_paths, n_steps + 1), dtype=np.float64)
    for p in prange(n_paths):
        price = S0
        paths[p, 0] = price
        for step in range(n_steps):
            shock = sigma * (np.random.randn() * root_dt)
            price *= np.exp(drift + shock)
            paths[p, step + 1] = price
    return paths
# ── 3. Vectorized ufuncs ──────────────────────────────────────────────────────
@vectorize(["float64(float64, float64)", "float32(float32, float32)"],
           target="parallel", cache=True)
def fast_sigmoid_vectorized(x, scale):
    """Scaled logistic sigmoid 1 / (1 + e^(-x*scale)) as a parallel ufunc.

    Compiled for both float32 and float64 inputs; broadcasts like any
    NumPy ufunc.
    """
    return 1.0 / (1.0 + np.exp(-(x * scale)))
@vectorize(["float64(float64, float64, float64)"], target="parallel", cache=True)
def clamp(x, lo, hi):
    """Restrict x to the closed interval [lo, hi]; broadcasts over arrays."""
    if x < lo:
        return lo
    return hi if x > hi else x
@guvectorize(
    ["void(float64[:], float64[:], float64[:])"],
    "(n),(n)->(n)",
    target="parallel",
    cache=True,
)
def weighted_running_mean(data, weights, out):
    """Generalized ufunc: weighted mean of `data` reduced over the last axis.

    Every element of `out` receives the same scalar (the reduction is
    broadcast back to shape (n)). A non-positive weight sum yields 0.0
    rather than a division by zero.
    """
    length = data.shape[0]
    acc = 0.0
    wacc = 0.0
    for idx in range(length):
        acc += data[idx] * weights[idx]
        wacc += weights[idx]
    if wacc > 0:
        mean = acc / wacc
    else:
        mean = 0.0
    for idx in range(length):
        out[idx] = mean
# ── 4. GPU kernels (requires CUDA) ────────────────────────────────────────────
def check_cuda() -> bool:
    """Return True when a usable CUDA runtime is detected, else False.

    Any probe failure (missing driver, no GPU, broken CUDA install) is
    treated as "CUDA unavailable" rather than propagated.
    """
    try:
        available = cuda.is_available()
    except Exception:
        return False
    return available
@cuda.jit
def cuda_vector_add(a, b, out):
    """GPU kernel: out = a + b, one element per thread.

    Threads past the end of the array (the last, partially-filled block)
    return without touching memory.
    """
    i = cuda.grid(1)
    if i >= out.size:
        return
    out[i] = a[i] + b[i]
@cuda.jit
def cuda_matrix_multiply(A, B, C):
    """Naive GPU kernel computing C = A @ B, one output element per thread.

    Each (row, col) thread accumulates the dot product of A's row with B's
    column. For production workloads prefer cuBLAS (e.g. via CuPy).
    """
    row, col = cuda.grid(2)
    if row >= C.shape[0] or col >= C.shape[1]:
        return  # thread is outside the output matrix
    acc = 0.0
    for k in range(A.shape[1]):
        acc += A[row, k] * B[k, col]
    C[row, col] = acc
def gpu_vector_add(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Element-wise a + b on the GPU, with a CPU fallback.

    The GPU path always casts inputs to float64, so the CPU fallback now
    casts as well — previously the two paths could return different dtypes.

    Returns:
        float64 array of the same length as the inputs.
    """
    if not check_cuda():
        # CPU fallback — cast to float64 to match the GPU path's output dtype.
        return a.astype(np.float64) + b.astype(np.float64)
    n = len(a)
    d_a = cuda.to_device(a.astype(np.float64))
    d_b = cuda.to_device(b.astype(np.float64))
    d_out = cuda.device_array(n, dtype=np.float64)
    threads = 256
    blocks = (n + threads - 1) // threads  # ceil(n / threads)
    cuda_vector_add[blocks, threads](d_a, d_b, d_out)
    return d_out.copy_to_host()  # implicit synchronization on copy-back
# ── 5. Numba stencil ──────────────────────────────────────────────────────────
# NOTE: @numba.stencil does not accept cache=True — its only supported options
# are neighborhood, cval, and standard_indexing; an unknown option is rejected
# at decoration time. Caching is handled by the @njit wrapper that calls this.
@numba.stencil
def _blur_kernel(a):
    """3x3 box blur kernel using stencil relative-index notation."""
    return (a[-1, -1] + a[-1, 0] + a[-1, 1] +
            a[0, -1] + a[0, 0] + a[0, 1] +
            a[1, -1] + a[1, 0] + a[1, 1]) / 9.0
@njit(parallel=True, cache=True)
def box_blur(image: np.ndarray) -> np.ndarray:
    """Apply a 3x3 box blur to a 2D image using the stencil kernel.

    Calling a @stencil function from inside @njit(parallel=True) lets Numba
    parallelize the neighborhood sweep. Border elements, whose 3x3
    neighborhood falls outside the image, take the stencil's constant
    fill value.
    """
    return _blur_kernel(image)
# ── 6. Warm-up and benchmarking ───────────────────────────────────────────────
def warmup_all() -> None:
    """Trigger JIT compilation for all @njit functions in this module.

    Call once at startup to avoid compilation latency during inference.
    The @vectorize/@guvectorize ufuncs declare explicit signatures and are
    compiled eagerly at import, but we exercise one anyway as a sanity check.
    """
    dummy_1d = np.ones(10, dtype=np.float64)
    dummy_2d = np.ones((10, 5), dtype=np.float64)
    sum_squared_diff(dummy_1d, dummy_1d)
    matrix_row_norms(dummy_2d)
    pairwise_distances_l2(dummy_2d, dummy_2d)  # previously missing from warm-up
    sliding_window_stats(dummy_1d, 3)
    monte_carlo_pi(100)
    black_scholes_paths(100.0, 0.05, 0.2, 1.0, 4, 4)  # previously missing
    box_blur(dummy_2d)  # previously missing
    fast_sigmoid_vectorized(dummy_1d, dummy_1d)
    print("Numba JIT warm-up complete")
def benchmark(fn, *args, n_runs: int = 20, label: str = "") -> float:
    """Time fn(*args) over n_runs calls and return the mean in milliseconds.

    One untimed call is made first so JIT compilation (or other one-off
    setup) does not pollute the measurements. Prints a formatted line
    using `label` (or the function's name) before returning.
    """
    fn(*args)  # untimed warm-up call
    samples = []
    for _ in range(n_runs):
        start = time.perf_counter()
        fn(*args)
        samples.append(time.perf_counter() - start)
    mean_ms = np.mean(samples) * 1000
    name = label or fn.__name__
    print(f" {name:<40} {mean_ms:8.3f} ms")
    return mean_ms
# ── Demo ──────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    print("Numba Performance Demo")
    print("="*50)
    # Compile every JIT function up front so the benchmarks below measure
    # steady-state execution, not first-call compilation.
    warmup_all()
    # Pairwise distance benchmark
    N, M, D = 500, 500, 128
    X = np.random.rand(N, D).astype(np.float64)
    Y = np.random.rand(M, D).astype(np.float64)
    print("\nPairwise L2 distance benchmark (500x500, D=128):")
    def numpy_distances(X, Y):
        # NumPy baseline: broadcasting materializes a full (N, M, D)
        # intermediate array — fast per-op, but memory-heavy.
        diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
        return np.sqrt((diff**2).sum(axis=-1))
    ms_np = benchmark(numpy_distances, X, Y, label="NumPy broadcasting")
    ms_nb = benchmark(pairwise_distances_l2, X, Y, label="Numba parallel")
    print(f"\nSpeedup: {ms_np/ms_nb:.1f}x")
    # Monte Carlo
    print("\nMonte Carlo π (10M samples):")
    ms_mc = benchmark(monte_carlo_pi, 10_000_000, label="Monte Carlo π (parallel)")
    pi_est = monte_carlo_pi(10_000_000)
    print(f"  π ≈ {pi_est:.5f}  (error: {abs(pi_est - np.pi):.5f})")
    # Black-Scholes
    print("\nBlack-Scholes paths (10K paths, 252 steps):")
    ms_bs = benchmark(black_scholes_paths, 100.0, 0.05, 0.2, 1.0, 252, 10000,
                      label="Black-Scholes GBM")
    paths = black_scholes_paths(100.0, 0.05, 0.2, 1.0, 252, 10000)
    # Terminal-price statistics summarize the simulated distribution at T.
    print(f"  Terminal price mean: {paths[:, -1].mean():.2f}, std: {paths[:, -1].std():.2f}")
Choose the Cython alternative when you need fine-grained C interoperability, C extension modules, or to wrap existing C++ libraries — but note that Cython requires writing annotated .pyx files and a build system, while Numba's @njit decorator compiles standard Python functions without any file changes, and its prange parallel loops with automatic OpenMP threading scale to all CPU cores without manual thread management, making Numba roughly 10x easier to add to existing NumPy code. Choose the PyPy alternative when speeding up general Python code with objects, strings, and dynamic data structures — PyPy's tracing JIT handles idiomatic Python well, while Numba specifically targets the numerical hot path (tight loops over arrays), achieves C/Fortran-level performance on those paths via LLVM, and uniquely supports GPU offloading with @cuda.jit for the same code path, enabling seamless CPU-to-GPU migration without rewriting algorithms. The Claude Skills 360 bundle includes Numba skill sets covering @njit and cache, parallel=True with prange, fastmath acceleration, vectorized ufuncs, guvectorize for array reductions, CUDA GPU kernels, stencil neighborhood operations, warm-up patterns, and Monte Carlo simulation benchmarks. Start with the free tier to try JIT acceleration code generation.