SAM2 segments any object in images and videos from point or box prompts. pip install "git+https://github.com/facebookresearch/sam2.git". from sam2.build_sam import build_sam2, build_sam2_video_predictor. from sam2.sam2_image_predictor import SAM2ImagePredictor. Load: model = build_sam2("sam2.1_hiera_base_plus.yaml", "/path/to/sam2.1_hiera_base_plus.pt", device="cuda"). Model sizes (parameters): hiera_tiny (38M), hiera_small (46M), hiera_base_plus (80M), hiera_large (224M). Image predictor: predictor = SAM2ImagePredictor(model), predictor.set_image(np.array(PIL_image)). Point prompt: masks, scores, logits = predictor.predict(point_coords=np.array([[x, y]]), point_labels=np.array([1])) — label 1=positive, 0=negative. Multi-point: point_coords=np.array([[x1,y1],[x2,y2]]), point_labels=np.array([1,0]). Box prompt: masks, scores, _ = predictor.predict(box=np.array([x1, y1, x2, y2])). Best mask: masks[scores.argmax()]. Video predictor: video_predictor = build_sam2_video_predictor("sam2.1_hiera_base_plus.yaml", checkpoint). with torch.inference_mode(): inference_state = video_predictor.init_state(video_path="video.mp4"). Add object: _, _, out_mask_logits = video_predictor.add_new_points_or_box(inference_state, frame_idx=0, obj_id=1, points=np.array([[x,y]]), labels=np.array([1])). Propagate: for frame_idx, obj_ids, masks in video_predictor.propagate_in_video(inference_state): .... Automatic: from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator, mask_gen = SAM2AutomaticMaskGenerator(model), masks = mask_gen.generate(image) — returns list of dicts with segmentation, area, stability_score. Claude Code generates SAM2 segmentation pipelines, video trackers, automatic segmenters, and annotation tools.
CLAUDE.md for SAM2
## SAM2 Stack
- Install: pip install "git+https://github.com/facebookresearch/sam2.git" + download .pt checkpoints
- Models: hiera_tiny | hiera_small | hiera_base_plus | hiera_large
- Image: SAM2ImagePredictor(build_sam2(cfg, ckpt)) → set_image(np.array) → predict(...)
- Point: predict(point_coords=np.array([[x,y]]), point_labels=np.array([1])) → masks, scores
- Box: predict(box=np.array([x1,y1,x2,y2])) → masks, scores
- Video: build_sam2_video_predictor → init_state → add_new_points_or_box → propagate_in_video
- Auto: SAM2AutomaticMaskGenerator(model).generate(image) → [{segmentation, area, bbox, predicted_iou, stability_score}, ...]
- Device: build_sam2(..., device="cuda") for GPU | device="cpu" for CPU
SAM2 Segmentation Pipeline
# vision/sam2_pipeline.py — segment anything with SAM2
from __future__ import annotations
import os
import cv2
import numpy as np
from pathlib import Path
from typing import Optional
from PIL import Image
import torch
from sam2.build_sam import build_sam2, build_sam2_video_predictor
from sam2.sam2_image_predictor import SAM2ImagePredictor
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator
# ── 1. Model loading ──────────────────────────────────────────────────────────
# Hydra config names from the official SAM2 repo. build_sam2 resolves these
# relative to the sam2 package's config search path; the release ships them
# under configs/sam2.1/ with abbreviated model tags (t, s, b+, l), so the
# previous bare "sam2.1_hiera_base_plus.yaml" names would not resolve.
SAM2_CONFIGS = {
    "tiny": "configs/sam2.1/sam2.1_hiera_t.yaml",
    "small": "configs/sam2.1/sam2.1_hiera_s.yaml",
    "base_plus": "configs/sam2.1/sam2.1_hiera_b+.yaml",
    "large": "configs/sam2.1/sam2.1_hiera_l.yaml",
}
def load_sam2_image(
    model_size: str = "base_plus",
    checkpoint: Optional[str] = None,
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
) -> "SAM2ImagePredictor":
    """
    Load a SAM2 image predictor.

    Args:
        model_size: one of "tiny" | "small" | "base_plus" | "large".
        checkpoint: path to a .pt checkpoint. Defaults to the official
            release filename under ./checkpoints/ (e.g.
            checkpoints/sam2.1_hiera_base_plus.pt).
        device: "cuda" or "cpu".

    Returns:
        A ready-to-use SAM2ImagePredictor.

    Raises:
        KeyError: if model_size is not a key of SAM2_CONFIGS.
    """
    config = SAM2_CONFIGS[model_size]
    if checkpoint is None:
        # Official checkpoint filenames keep the underscore
        # (sam2.1_hiera_base_plus.pt). The previous replace('_', '+')
        # produced "base+plus", a filename that does not exist in the
        # release — and disagreed with the demo's checkpoint path below.
        checkpoint = f"checkpoints/sam2.1_hiera_{model_size}.pt"
    model = build_sam2(config, checkpoint, device=device)
    predictor = SAM2ImagePredictor(model)
    print(f"SAM2-{model_size} image predictor ready on {device}")
    return predictor
def load_sam2_video(
    model_size: str = "base_plus",
    checkpoint: Optional[str] = None,
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
) -> object:
    """
    Load the SAM2 video predictor used for temporal object tracking.

    Args:
        model_size: one of "tiny" | "small" | "base_plus" | "large".
        checkpoint: path to a .pt checkpoint. Defaults to the official
            release filename under ./checkpoints/.
        device: "cuda" or "cpu".

    Raises:
        KeyError: if model_size is not a key of SAM2_CONFIGS.
    """
    config = SAM2_CONFIGS[model_size]
    if checkpoint is None:
        # Official checkpoint filenames keep the underscore; the old
        # replace('_', '+') built a non-existent "base+plus" filename.
        checkpoint = f"checkpoints/sam2.1_hiera_{model_size}.pt"
    predictor = build_sam2_video_predictor(config, checkpoint, device=device)
    print(f"SAM2-{model_size} video predictor ready")
    return predictor
def load_auto_mask_generator(
    model_size: str = "base_plus",
    checkpoint: Optional[str] = None,
    points_per_side: int = 32,
    pred_iou_thresh: float = 0.86,
    stability_thresh: float = 0.92,
    min_mask_region_area: int = 200,
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
) -> "SAM2AutomaticMaskGenerator":
    """
    Load the SAM2 automatic mask generator for "segment everything" use.

    Args:
        model_size: one of "tiny" | "small" | "base_plus" | "large".
        checkpoint: path to a .pt checkpoint. Defaults to the official
            release filename under ./checkpoints/.
        points_per_side: density of the point grid sampled over the image.
        pred_iou_thresh: minimum predicted IoU to keep a mask.
        stability_thresh: minimum stability score to keep a mask.
        min_mask_region_area: masks with fewer pixels are removed.
        device: "cuda" or "cpu".

    Raises:
        KeyError: if model_size is not a key of SAM2_CONFIGS.
    """
    config = SAM2_CONFIGS[model_size]
    if checkpoint is None:
        # Official checkpoint filenames keep the underscore; the old
        # replace('_', '+') built a non-existent "base+plus" filename.
        checkpoint = f"checkpoints/sam2.1_hiera_{model_size}.pt"
    model = build_sam2(config, checkpoint, device=device)
    generator = SAM2AutomaticMaskGenerator(
        model=model,
        points_per_side=points_per_side,
        pred_iou_thresh=pred_iou_thresh,
        stability_score_thresh=stability_thresh,
        min_mask_region_area=min_mask_region_area,
    )
    print("SAM2 automatic mask generator ready")
    return generator
# ── 2. Image segmentation with prompts ───────────────────────────────────────
def segment_with_points(
    predictor,
    image: np.ndarray,
    positive_points: list[tuple[int, int]],
    negative_points: list[tuple[int, int]] = None,
    multimask: bool = True,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Segment one object from positive/negative click prompts.

    Labels follow the SAM convention: 1 = foreground click, 0 = background
    click. Returns (masks, scores, logits) — pick masks[scores.argmax()]
    for the highest-confidence mask.
    """
    predictor.set_image(image)
    negatives = negative_points or []
    coords = [*positive_points, *negatives]
    labels = [1] * len(positive_points) + [0] * len(negatives)
    return predictor.predict(
        point_coords=np.array(coords, dtype=np.float32),
        point_labels=np.array(labels, dtype=np.int32),
        multimask_output=multimask,
    )
def segment_with_box(
    predictor,
    image: np.ndarray,
    box: tuple[int, int, int, int],  # (x1, y1, x2, y2)
    multimask: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
    """Segment the object enclosed by an (x1, y1, x2, y2) box prompt.

    Returns (masks, scores); with the default multimask=False the
    predictor produces a single mask.
    """
    predictor.set_image(image)
    box_prompt = np.array(box, dtype=np.float32)
    masks, scores, _logits = predictor.predict(
        box=box_prompt,
        multimask_output=multimask,
    )
    return masks, scores
def segment_with_points_and_box(
    predictor,
    image: np.ndarray,
    box: tuple[int, int, int, int],
    positive_points: list[tuple[int, int]],
    negative_points: list[tuple[int, int]] = None,
) -> np.ndarray:
    """Segment with a box prompt refined by point clicks.

    Combines the (x1, y1, x2, y2) box with positive (label 1) and negative
    (label 0) clicks, requesting a single mask, and returns it.
    """
    predictor.set_image(image)
    negatives = [] if negative_points is None else negative_points
    labels = [1] * len(positive_points) + [0] * len(negatives)
    masks, _, _ = predictor.predict(
        point_coords=np.array(positive_points + negatives, dtype=np.float32),
        point_labels=np.array(labels, dtype=np.int32),
        box=np.array(box, dtype=np.float32),
        multimask_output=False,
    )
    return masks[0]  # multimask_output=False → exactly one mask
# ── 3. Automatic everything segmentation ─────────────────────────────────────
def segment_everything(
    generator,
    image: np.ndarray,
    min_area: int = 500,
    max_masks: int = 100,
    sort_by: str = "area",  # "area" | "score" | "stability"
) -> list[dict]:
    """
    Auto-segment all objects in an image.

    Drops masks smaller than min_area, sorts the rest (descending) by the
    chosen criterion, and returns at most max_masks entries. Each entry is
    a dict: {segmentation, area, bbox, predicted_iou, stability_score}.
    """
    # Dispatch table: criterion name → sort key. An unknown sort_by leaves
    # the generator's original ordering untouched, as before.
    sort_keys = {
        "area": lambda m: m["area"],
        "score": lambda m: m["predicted_iou"],
        "stability": lambda m: m["stability_score"],
    }
    kept = [m for m in generator.generate(image) if m["area"] >= min_area]
    key = sort_keys.get(sort_by)
    if key is not None:
        kept.sort(key=key, reverse=True)
    return kept[:max_masks]
def visualize_masks(
    image: np.ndarray,
    masks: list[dict],
    alpha: float = 0.4,
    random_seed: int = 42,
) -> np.ndarray:
    """Blend each mask onto the image with a reproducible random color.

    Colors are drawn from a seeded generator in list order, so repeated
    calls with the same seed and mask list produce identical overlays.
    Returns a uint8 image of the same shape as the input.
    """
    rng = np.random.default_rng(random_seed)
    base = image.copy().astype(np.float32)
    overlay = np.zeros_like(base)
    for entry in masks:
        seg = entry["segmentation"]
        # One RGB color per mask; later masks overwrite earlier ones
        # where they overlap, as in the original draw order.
        overlay[seg] = rng.integers(50, 255, size=3).astype(np.float32)
    blended = base * (1 - alpha) + overlay * alpha
    return np.clip(blended, 0, 255).astype(np.uint8)
# ── 4. Video object tracking ──────────────────────────────────────────────────
def track_object_in_video(
    predictor,
    video_path: str,
    init_frame_idx: int = 0,
    point_prompts: list[tuple[int, int]] = None,
    box_prompt: tuple[int, int, int, int] = None,
    output_dir: str = "./tracked_frames",
) -> dict[int, np.ndarray]:
    """
    Track a single object across all video frames with SAM2's video predictor.

    Provide either point_prompts (each treated as a positive click) or
    box_prompt=(x1, y1, x2, y2) to initialize the object on init_frame_idx.

    Returns:
        {frame_idx: binary_mask} for every frame visited by propagation.

    Raises:
        ValueError: if neither point_prompts nor box_prompt is given.

    NOTE(review): output_dir is created but no frames are written to it by
    this function — kept for backward compatibility; confirm callers need it.
    """
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    obj_id = 1  # single-object tracking: one fixed id throughout
    with torch.inference_mode():
        inference_state = predictor.init_state(video_path=video_path)
        predictor.reset_state(inference_state)  # drop any prior prompts/state
        # Register the initial prompt. The returned mask logits were never
        # used here (propagation below re-yields them), so the return value
        # is intentionally discarded.
        if point_prompts:
            predictor.add_new_points_or_box(
                inference_state=inference_state,
                frame_idx=init_frame_idx,
                obj_id=obj_id,
                points=np.array(point_prompts, dtype=np.float32),
                labels=np.array([1] * len(point_prompts), dtype=np.int32),
            )
        elif box_prompt:
            predictor.add_new_points_or_box(
                inference_state=inference_state,
                frame_idx=init_frame_idx,
                obj_id=obj_id,
                box=np.array(box_prompt, dtype=np.float32),
            )
        else:
            raise ValueError("Provide either point_prompts or box_prompt")
        # Propagate through the video; logits > 0 is the binary decision.
        tracked_masks: dict[int, np.ndarray] = {}
        for frame_idx, obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state):
            if obj_id in obj_ids:
                idx = obj_ids.index(obj_id)
                tracked_masks[frame_idx] = (
                    out_mask_logits[idx] > 0.0
                ).squeeze().cpu().numpy()
    print(f"Tracked {len(tracked_masks)} frames")
    return tracked_masks
def save_tracked_video(
    video_path: str,
    tracked_masks: dict[int, np.ndarray],
    output_path: str = "tracked_output.mp4",
    mask_color: tuple[int,int,int] = (0, 255, 0),
    alpha: float = 0.5,
):
    """Blend per-frame masks onto the source video and write the result.

    Frames without an entry in tracked_masks are copied through unchanged.
    """
    reader = cv2.VideoCapture(video_path)
    frame_size = (
        int(reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )
    writer = cv2.VideoWriter(
        output_path,
        cv2.VideoWriter_fourcc(*"mp4v"),
        reader.get(cv2.CAP_PROP_FPS),
        frame_size,
    )
    idx = 0
    while True:
        ok, frame = reader.read()
        if not ok:
            break
        mask = tracked_masks.get(idx)
        if mask is not None:
            # Paint the mask region solid, then alpha-blend with the frame.
            tinted = frame.copy()
            tinted[mask] = mask_color
            frame = cv2.addWeighted(frame, 1 - alpha, tinted, alpha, 0)
        writer.write(frame)
        idx += 1
    reader.release()
    writer.release()
    print(f"Saved: {output_path}")
# ── 5. Batch image segmentation ───────────────────────────────────────────────
def segment_batch(
    predictor,
    images: list[np.ndarray],
    prompt_type: str = "center",  # "center" | "grid" | "custom"
    custom_points: list[list[tuple]] = None,
) -> list[np.ndarray]:
    """
    Segment one object per image using automatic point sampling.

    "center" prompts with the image midpoint, "grid" with four points at
    the third-lines, "custom" with custom_points[i] (falling back to the
    midpoint when custom_points is not given). Returns the best mask per
    image (highest predictor score).
    """
    best_masks: list[np.ndarray] = []
    for idx, img in enumerate(images):
        h, w = img.shape[:2]
        if prompt_type == "center":
            pts = [(w // 2, h // 2)]
        elif prompt_type == "grid":
            # Four prompts at the intersections of the third-lines,
            # row by row (top-left, top-right, bottom-left, bottom-right).
            pts = [(x, y)
                   for y in (h // 3, 2 * h // 3)
                   for x in (w // 3, 2 * w // 3)]
        else:
            pts = custom_points[idx] if custom_points else [(w // 2, h // 2)]
        mask_stack, mask_scores, _ = segment_with_points(predictor, img, pts)
        best_masks.append(mask_stack[mask_scores.argmax()])
    return best_masks
# ── Demo ──────────────────────────────────────────────────────────────────────
# Demo entry point: runs only when executed as a script, and only if the
# base_plus checkpoint has already been downloaded to ./checkpoints/.
if __name__ == "__main__":
    print("SAM2 Demo — requires model checkpoints:")
    print(" Download from: https://github.com/facebookresearch/sam2#model-description")
    print(" Place in: ./checkpoints/")
    # Check if checkpoints exist
    ckpt = "checkpoints/sam2.1_hiera_base_plus.pt"
    if not os.path.exists(ckpt):
        print(f"\nCheckpoint not found: {ckpt}")
        print("Skipping demo. Download instructions above.")
    else:
        # Image segmentation demo (CPU, so it runs without a GPU)
        predictor = load_sam2_image("base_plus", device="cpu")
        # Create synthetic test image: random noise with a solid filled square
        test_image = np.random.randint(50, 200, (512, 512, 3), dtype=np.uint8)
        cv2.rectangle(test_image, (150, 150), (350, 350), (0, 120, 255), -1)
        # Segment with a single positive click at the square's center
        masks, scores, _ = segment_with_points(
            predictor, test_image,
            positive_points=[(250, 250)],
        )
        best_mask = masks[scores.argmax()]
        print(f"Segmented: mask shape {best_mask.shape}, area {best_mask.sum()} px")
        # Save result: tint the segmented region green at 50% opacity
        # (float blend is cast back to uint8 on assignment)
        vis = test_image.copy()
        vis[best_mask] = vis[best_mask] * 0.5 + np.array([0, 255, 0]) * 0.5
        cv2.imwrite("sam2_demo.jpg", vis.astype(np.uint8))
        print("Saved: sam2_demo.jpg")
Use the original SAM (Segment Anything Model 1) as an alternative when running in memory-constrained environments or when you need SAM1's well-documented deployment path with ONNX export and OpenCV/SAHI integration — SAM1 has a more mature deployment ecosystem, while SAM2's temporal coherence model for video tracking (propagate_in_video, backward and forward) eliminates frame-by-frame manual prompting and adds memory attention that maintains object identity across occlusions, making it far superior for video annotation and tracking applications. Use the Segment Everything alternatives (FastSAM, MobileSAM) when you need real-time inference at 5-40ms per image on CPU/mobile with minimal accuracy loss — FastSAM/MobileSAM are 50-100x faster at the cost of accuracy, while SAM2 Hiera Tiny (38M params) already achieves a 5-6x speed improvement over SAM1 while maintaining higher mask quality, and streaming video inference with in-memory state management is exclusive to SAM2. The Claude Skills 360 bundle includes SAM2 skill sets covering image predictor setup, point and box prompting, automatic everything segmentation, video object tracking, mask visualization, and batch segmentation pipelines. Start with the free tier to try segmentation model code generation.