Mojo is a Python superset targeting AI infrastructure: the same syntax, full Python interop, but with fn functions that compile to LLVM and run at C++ speed. struct is a stack-allocated value type with explicit memory ownership. SIMD types (SIMD[DType.float32, 8]) map directly to hardware vector instructions. Parametric types use alias for compile-time constants. The ownership system (owned, borrowed, inout) eliminates data races without a garbage collector. Python objects interop via PythonObject — gradually migrate hot paths from Python to Mojo. Claude Code generates Mojo structs, SIMD kernels, parametric algorithms, and the Python interop bridges for high-performance AI infrastructure.
CLAUDE.md for Mojo Projects
## Mojo Stack
- Version: Mojo >= 24.4 (via Magic or modular.com)
- Functions: fn for compiled (type-checked, no implicit conversion), def for Python-compatible
- Types: struct (value, stack-allocated), Int/Float32/Bool (primitives); reference-semantics class is planned but not yet available in Mojo 24.4
- SIMD: SIMD[DType.float32, 8] for vectorized math
- Ownership: borrowed (read-only ref), inout (mutable ref), owned (move semantics)
- Python interop: Python.import_module("numpy") — PythonObject wraps Python values
- Testing: mojo test or pytest with Mojo extension
fn Functions vs def Functions
# src/basics.mojo — core Mojo patterns
# fn: fully compiled, strict type checking, no implicit conversion
fn add(x: Int, y: Int) -> Int:
    """Return the sum of two integers (strictly typed, fully compiled)."""
    return y + x
# fn with borrowed reference — zero-copy read access
fn sum_list(borrowed data: List[Float32]) -> Float32:
    """Accumulate every element of `data` and return the total (zero-copy, read-only access)."""
    var acc: Float32 = 0.0
    for i in range(len(data)):
        acc += data[i]
    return acc
# fn with inout — mutates caller's value in-place (no copy)
fn normalize_inout(inout data: List[Float32]) -> None:
    """Scale `data` in place so its elements sum to 1; no-op when the sum is zero."""
    var total: Float32 = 0.0
    for i in range(len(data)):
        total += data[i]
    # Avoid division by zero: an all-zero vector is left untouched.
    if total != 0.0:
        for i in range(len(data)):
            data[i] /= total
# fn with owned — takes ownership, caller loses access
fn consume_and_process(owned data: List[Float32]) -> Float32:
    """Take ownership of `data`, normalize it in place, and return the normalized sum.

    Because the parameter is `owned`, the caller loses access and the
    mutation happens on our private copy of the list.
    """
    normalize_inout(data)
    return sum_list(data)
# def: Python-compatible, dynamic, can raise
def load_file(path: String) raises -> String:
    """Read the whole file at `path` and return its contents as a String."""
    # `with` guarantees the file handle is closed even if read() raises.
    with open(path, "r") as f:
        var contents = f.read()
        return contents
# Generic parametric function — specialized at compile time
fn max_of[T: Comparable](a: T, b: T) -> T:
    """Return the larger of `a` and `b` (returns `b` on a tie); specialized per T at compile time."""
    return a if a > b else b
# Usage: T inferred from arguments.
# NOTE: `let` was removed from the Mojo language; locals are declared with `var`.
var bigger = max_of(3.14, 2.71)  # T = Float64
var bigger_int = max_of(42, 17)  # T = Int
Structs
# src/models/order.mojo — value types with Mojo structs
@value  # synthesizes __copyinit__/__moveinit__ (and __init__ only when not user-defined)
struct Money:
    """Exact currency amount stored as integer cents to avoid float rounding."""
    var cents: Int64
    var currency: String

    fn __init__(inout self, cents: Int64, currency: String = "USD"):
        self.cents = cents
        self.currency = currency

    fn __add__(self, other: Money) raises -> Money:
        # Adding across currencies is a logic error, not a silent conversion.
        if self.currency != other.currency:
            raise Error("Currency mismatch: " + self.currency + " vs " + other.currency)
        return Money(self.cents + other.cents, self.currency)

    fn __lt__(self, other: Money) -> Bool:
        # Compares magnitude only; currency is deliberately ignored here.
        return self.cents < other.cents

    fn formatted(self) -> String:
        """Render as a dollar string, e.g. 150 -> "$1.50"."""
        # `let` was removed from Mojo (>= 24.4); locals use `var`.
        var dollars = self.cents // 100
        var remainder = self.cents % 100
        return "$" + str(dollars) + "." + str(remainder).rjust(2, "0")
@value
struct OrderItem:
    """A single order line: product identity, quantity, and per-unit price."""
    var product_id: String
    var product_name: String
    var quantity: Int32
    var unit_price: Money

    fn subtotal(self) -> Money:
        # Propagate the unit price's currency — the previous version omitted
        # it, so every subtotal silently defaulted to USD.
        return Money(self.unit_price.cents * Int64(self.quantity), self.unit_price.currency)
struct Order:
    """A customer order aggregating OrderItem lines; created in "pending" status."""
    var id: String
    var customer_id: String
    var items: List[OrderItem]
    var status: String

    fn __init__(
        inout self,
        id: String,
        customer_id: String,
        items: List[OrderItem],
    ):
        self.id = id
        self.customer_id = customer_id
        self.items = items
        self.status = "pending"

    fn total(self) raises -> Money:
        """Sum all line subtotals into one Money value.

        Declared `raises` because Money.__add__ raises on a currency
        mismatch; without the annotation this method does not compile.
        """
        var acc = Money(0)
        for item_ref in self.items:
            # List iteration yields references; [] dereferences.
            acc = acc + item_ref[].subtotal()
        return acc

    fn is_high_value(self) raises -> Bool:
        # Threshold is $1000, expressed in cents. `raises` propagates from total().
        return self.total().cents > 100_000

    fn item_count(self) -> Int:
        """Total unit count across all line items."""
        var count: Int = 0
        for item_ref in self.items:
            count += Int(item_ref[].quantity)
        return count
SIMD Vectorization
# src/compute/simd_ops.mojo — hardware SIMD acceleration
from math import exp, sqrt
from sys.info import simdwidthof
alias FLOAT_TYPE = DType.float32
alias SIMD_WIDTH = simdwidthof[FLOAT_TYPE]()  # Hardware SIMD width (e.g. 8 for AVX)

# Process SIMD_WIDTH floats at once with hardware vector instructions
fn dot_product_simd(a: DTypePointer[DType.float32], b: DTypePointer[DType.float32], n: Int) -> Float32:
    """SIMD dot product — processes SIMD_WIDTH elements per iteration.

    The tail (n % SIMD_WIDTH elements) is handled by a scalar loop.
    """
    var acc = SIMD[FLOAT_TYPE, SIMD_WIDTH](0)
    var simd_end = n - (n % SIMD_WIDTH)
    # Vectorized loop — compiles to AVX/SSE instructions.
    for i in range(0, simd_end, SIMD_WIDTH):
        var va = a.load[width=SIMD_WIDTH](i)
        var vb = b.load[width=SIMD_WIDTH](i)
        # Fused multiply-add: acc += va * vb. Receiver order matters:
        # x.fma(y, z) computes x * y + z, so the running sum must be the
        # addend. The old `sum.fma(va, vb)` computed sum * va + vb — wrong.
        acc = va.fma(vb, acc)
    # Reduce the vector lanes, then add the scalar remainder.
    var scalar_sum: Float32 = acc.reduce_add()
    for i in range(simd_end, n):
        scalar_sum += a[i] * b[i]
    return scalar_sum
# Vectorized L2 normalization
fn normalize_l2_simd(inout data: DTypePointer[DType.float32], n: Int) -> None:
    """Normalize data[0..n) to unit L2 length in place using SIMD.

    Leaves the buffer untouched when the norm is below 1e-8 so we never
    divide by (nearly) zero.
    """
    # The squared norm is the vector's dot product with itself.
    var norm = sqrt(dot_product_simd(data, data, n))
    if norm < 1e-8:
        return
    var inv_norm: Float32 = 1.0 / norm
    var simd_end = n - (n % SIMD_WIDTH)
    # Scale SIMD_WIDTH elements per iteration via the pointer store API
    # (`let` removed; SIMD values do not write themselves to a pointer).
    for i in range(0, simd_end, SIMD_WIDTH):
        var v = data.load[width=SIMD_WIDTH](i)
        data.store[width=SIMD_WIDTH](i, v * inv_norm)
    # Scalar tail.
    for i in range(simd_end, n):
        data[i] *= inv_norm
# Numerically stable softmax
fn softmax_simd(inout x: DTypePointer[DType.float32], n: Int) -> None:
    """Numerically stable softmax over x[0..n) in place.

    Subtracts the running max before exponentiating so exp() cannot
    overflow. NOTE(review): despite the name, these loops are scalar —
    rely on auto-vectorization or rewrite with explicit SIMD loads.
    """
    # Guard the x[0] read below against an empty buffer.
    if n == 0:
        return
    # Find the max for numerical stability.
    var max_val = x[0]
    for i in range(1, n):
        if x[i] > max_val:
            max_val = x[i]
    # exp(x - max) and accumulate the partition sum.
    var total: Float32 = 0.0
    for i in range(n):
        # `exp` is imported from math; the bare `math.exp` module reference
        # was unresolved (only sqrt was imported).
        x[i] = exp(x[i] - max_val)
        total += x[i]
    # Scale so the outputs sum to 1.
    var inv_sum: Float32 = 1.0 / total
    for i in range(n):
        x[i] *= inv_sum
Parametric Types
# src/containers/typed_stack.mojo — generic containers with alias
struct Stack[ElementType: CollectionElement]:
    """Generic LIFO stack — specialized at compile time for each ElementType.

    The parameter is bound by CollectionElement (copyable + movable)
    because the backing List requires it; AnyType is too weak a bound
    for List storage.
    """
    var data: List[ElementType]

    fn __init__(inout self):
        self.data = List[ElementType]()

    fn push(inout self, owned item: ElementType) -> None:
        self.data.append(item^)  # ^ transfers ownership into the list

    fn pop(inout self) raises -> ElementType:
        """Remove and return the top element; raises when empty."""
        if len(self.data) == 0:
            raise Error("Stack is empty")
        return self.data.pop()

    fn peek(self) raises -> ElementType:
        """Return a copy of the top element without removing it; raises when empty."""
        if len(self.data) == 0:
            raise Error("Stack is empty")
        return self.data[len(self.data) - 1]

    fn is_empty(self) -> Bool:
        return len(self.data) == 0

    fn size(self) -> Int:
        return len(self.data)
# Parametric function with trait constraints
fn find_min[T: ComparableCollectionElement](borrowed data: List[T]) raises -> T:
    """Return the smallest element of `data`; raises on an empty list.

    T is bound by ComparableCollectionElement: List storage needs
    copy/move semantics and `<` needs Comparable. (The `A & B`
    trait-composition syntax is not available in Mojo 24.4.)
    """
    if len(data) == 0:
        raise Error("Cannot find min of empty list")
    var minimum = data[0]
    for i in range(1, len(data)):
        if data[i] < minimum:
            minimum = data[i]
    return minimum
Python Interop
# src/interop/numpy_bridge.mojo — use Python libraries from Mojo
from python import Python, PythonObject
fn compute_with_numpy(data: List[Float32]) raises -> Float32:
    """Compute the mean of `data` using NumPy via Python interop.

    Raises if the numpy module cannot be imported.
    """
    # `let` was removed from Mojo; all locals use `var`.
    var np = Python.import_module("numpy")
    # Build a Python list, widening each element to float64 (the width
    # PythonObject stores floats at); np.array narrows back to float32.
    var py_list = Python.list()
    for val in data:
        py_list.append(val[].cast[DType.float64]())
    var arr = np.array(py_list, dtype=np.float32)
    # NumPy operations return PythonObject values.
    var result: PythonObject = np.mean(arr)
    # Narrow the Python float back to the Mojo return type.
    return result.to_float64().cast[DType.float32]()
fn load_model_weights(path: String) raises -> List[Float32]:
    """Load the first tensor from a PyTorch checkpoint via Python interop.

    NOTE(review): only the first entry of the state dict is returned —
    confirm that is intended for multi-tensor checkpoints.
    Raises if torch cannot be imported or the file cannot be loaded.
    """
    # `let` was removed from Mojo; all locals use `var`.
    var torch = Python.import_module("torch")
    var state_dict = torch.load(path, map_location="cpu")
    # Grab the first weight tensor from the (ordered) state dict.
    var first_weight = state_dict.values().__iter__().__next__()
    var flat = first_weight.flatten()
    var weights = List[Float32]()
    for i in range(int(flat.numel())):
        # .item() yields a Python float; narrow it to Float32 for Mojo.
        weights.append(flat[i].item().to_float64().cast[DType.float32]())
    return weights
For the Python data science ecosystem that Mojo is designed to accelerate and interoperate with, see the Python data science guide for NumPy, pandas, and scikit-learn patterns. For the Zig systems language that also eliminates GC and provides explicit memory control without a Python heritage, see the Zig guide for comptime generics and C interop. The Claude Skills 360 bundle includes Mojo skill sets covering SIMD kernels, parametric types, and Python interop bridges. Start with the free tier to try Mojo program generation.