pytest is the standard Python testing framework. pip install pytest. Test function: def test_add(): assert add(2, 3) == 5. Run: pytest (auto-discovers test_*.py). Verbose: pytest -v. Run one: pytest tests/test_math.py::test_add. Fixture: @pytest.fixture def db(): conn = connect(); yield conn; conn.close(). Scope: @pytest.fixture(scope="module") — session/module/class/function. conftest.py: shared fixtures loaded automatically. parametrize: @pytest.mark.parametrize("a,b,expected", [(1,2,3),(0,0,0)]); def test_add(a,b,expected): assert add(a,b)==expected. mark: @pytest.mark.slow; @pytest.mark.skip(reason="..."); @pytest.mark.xfail. raises: with pytest.raises(ValueError, match="negative"): sqrt(-1). approx: assert 0.1+0.2 == pytest.approx(0.3). monkeypatch: def test_env(monkeypatch): monkeypatch.setenv("KEY","val"); monkeypatch.setattr(module, "fn", mock_fn). tmp_path: def test_file(tmp_path): f = tmp_path/"out.txt"; f.write_text("hi"); assert f.exists(). capfd: def test_print(capfd): print("hi"); out,_ = capfd.readouterr(); assert "hi" in out. capsys: text capture. coverage: pytest --cov=src --cov-report=term-missing. xdist: pytest -n auto parallel. pyproject.toml: [tool.pytest.ini_options] testpaths=["tests"] markers=["slow: ..."]. Claude Code generates pytest test suites, fixtures, parametrize tables, and coverage reports.
CLAUDE.md for pytest
## pytest Stack
- Version: pytest >= 8.0 | pip install pytest pytest-cov
- Test: def test_name(): assert expr | with pytest.raises(ExcType, match="..."): ...
- Fixture: @pytest.fixture(scope="...") def name(dep): ... yield value ... (teardown)
- Param: @pytest.mark.parametrize("a,b", [(1,2),(3,4)]) def test(a,b): ...
- Mock: monkeypatch.setattr/setenv/setitem | pytest-mock: mocker.patch("mod.fn")
- Run: pytest -v -x --tb=short --cov=src --cov-report=term-missing
pytest Test Pipeline
# tests/ — pytest fixtures, parametrize, marks, monkeypatch, tmp_path, custom helpers
# This file shows patterns; typically split across test files and conftest.py
from __future__ import annotations
import json
import os
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Any
import pytest
# ─────────────────────────────────────────────────────────────────────────────
# 1. System under test (example module — normally in src/)
# ─────────────────────────────────────────────────────────────────────────────
class Calculator:
    """Tiny arithmetic helper serving as the system under test."""

    def add(self, a: float, b: float) -> float:
        """Return the sum of *a* and *b*."""
        return a + b

    def divide(self, a: float, b: float) -> float:
        """Return ``a / b``; raises ZeroDivisionError when *b* is zero."""
        if b != 0:
            return a / b
        raise ZeroDivisionError("Cannot divide by zero")

    def sqrt(self, x: float) -> float:
        """Return the square root of *x*; raises ValueError when *x* < 0."""
        if x >= 0:
            return x ** 0.5
        raise ValueError(f"Cannot take sqrt of negative number: {x}")
@dataclass
class User:
id: int
name: str
email: str
active: bool = True
class UserService:
def __init__(self, db: dict | None = None) -> None:
self._db: dict[int, User] = db or {}
def create(self, name: str, email: str) -> User:
if not email or "@" not in email:
raise ValueError(f"Invalid email: {email!r}")
uid = max(self._db, default=0) + 1
user = User(id=uid, name=name, email=email)
self._db[uid] = user
return user
def get(self, uid: int) -> User | None:
return self._db.get(uid)
def deactivate(self, uid: int) -> bool:
user = self._db.get(uid)
if user is None:
return False
user.active = False
return True
def list_active(self) -> list[User]:
return [u for u in self._db.values() if u.active]
def load_config(path: str) -> dict:
    """Load and parse a JSON config file.

    Args:
        path: Filesystem path to a JSON document.

    Returns:
        The parsed JSON value (typically a dict).
    """
    # Explicit encoding: bare read_text() falls back to the platform default
    # (e.g. cp1252 on Windows), which corrupts non-ASCII config values.
    return json.loads(Path(path).read_text(encoding="utf-8"))
# ─────────────────────────────────────────────────────────────────────────────
# 2. conftest.py patterns (would normally be in tests/conftest.py)
# ─────────────────────────────────────────────────────────────────────────────
# --- Function-scope fixture (default) ---
@pytest.fixture
def calc() -> Calculator:
    """Provide a brand-new Calculator instance for each test (function scope)."""
    instance = Calculator()
    return instance
# --- Module-scope fixture: setup once per test file ---
@pytest.fixture(scope="module")
def user_service_module() -> UserService:
    """Module-scoped UserService preloaded with two demo users."""
    service = UserService()
    for name, email in (("Alice", "[email protected]"), ("Bob", "[email protected]")):
        service.create(name, email)
    return service
# --- Session-scope fixture: setup once per pytest session ---
@pytest.fixture(scope="session")
def shared_data_dir(tmp_path_factory) -> Path:
    """Session-scoped temp directory seeded with one sample JSON file."""
    data_dir = tmp_path_factory.mktemp("shared_data")
    sample = data_dir / "sample.json"
    sample.write_text('{"key": "value", "count": 42}')
    return data_dir
# --- Fixture with teardown via yield ---
@pytest.fixture
def tmp_config_file(tmp_path) -> Path:
    """Yield the path of a freshly written JSON config under tmp_path."""
    config = {"debug": True, "port": 8080}
    path = tmp_path / "config.json"
    path.write_text(json.dumps(config))
    yield path
    # No explicit teardown needed: pytest cleans tmp_path up automatically.
# --- Fixture with factory pattern ---
@pytest.fixture
def make_user():
    """Factory fixture: call the returned function to mint User objects.

    Ids are assigned sequentially per fixture instance (1, 2, 3, ...).
    """
    registry: list[User] = []

    def _make(name: str = "Test", email: str = "[email protected]") -> User:
        user = User(id=len(registry) + 1, name=name, email=email)
        registry.append(user)
        return user

    return _make
# ─────────────────────────────────────────────────────────────────────────────
# 3. Basic tests
# ─────────────────────────────────────────────────────────────────────────────
class TestCalculator:
    """Example-based tests for Calculator, driven by the `calc` fixture."""

    def test_add_integers(self, calc):
        result = calc.add(2, 3)
        assert result == 5

    def test_add_floats(self, calc):
        # approx needed: 0.1 + 0.2 is not exactly 0.3 in binary floats.
        assert calc.add(0.1, 0.2) == pytest.approx(0.3)

    def test_divide(self, calc):
        result = calc.divide(10, 2)
        assert result == 5.0

    def test_divide_by_zero(self, calc):
        with pytest.raises(ZeroDivisionError, match="Cannot divide by zero"):
            calc.divide(1, 0)

    def test_sqrt_positive(self, calc):
        assert calc.sqrt(9) == pytest.approx(3.0)

    def test_sqrt_negative(self, calc):
        with pytest.raises(ValueError, match="negative number"):
            calc.sqrt(-4)

    def test_sqrt_zero(self, calc):
        assert calc.sqrt(0) == 0.0
# ─────────────────────────────────────────────────────────────────────────────
# 4. Parametrize
# ─────────────────────────────────────────────────────────────────────────────
@pytest.mark.parametrize("a, b, expected", [
    (2, 3, 5),
    (0, 0, 0),
    (-1, 1, 0),
    (100, 200, 300),
    (0.5, 0.5, 1.0),
])
def test_add_parametrized(calc, a, b, expected):
    """Each tuple in the table runs as its own test case."""
    result = calc.add(a, b)
    assert result == pytest.approx(expected)
@pytest.mark.parametrize("email, valid", [
    ("[email protected]", True),
    ("[email protected]", True),
    ("not-an-email", False),
    ("", False),
    ("missing-at.com", False),
])
def test_email_validation(email, valid):
    """Valid addresses round-trip through create(); invalid ones raise."""
    service = UserService()
    if not valid:
        with pytest.raises(ValueError):
            service.create("Test", email)
    else:
        created = service.create("Test", email)
        assert created.email == email
# ─────────────────────────────────────────────────────────────────────────────
# 5. Fixtures in action
# ─────────────────────────────────────────────────────────────────────────────
class TestUserService:
    """Tests sharing per-test state via an autouse setup fixture."""

    @pytest.fixture(autouse=True)
    def svc(self) -> None:
        """Attach a fresh UserService to the test instance before each test.

        Fix: this fixture stores the service on ``self`` and returns
        nothing, so the correct return annotation is ``None`` — the old
        ``-> UserService`` annotation was wrong and misleading.
        """
        self.svc = UserService()

    def test_create_user(self):
        user = self.svc.create("Alice", "[email protected]")
        assert user.id == 1
        assert user.name == "Alice"
        assert user.active is True

    def test_get_existing_user(self):
        created = self.svc.create("Bob", "[email protected]")
        found = self.svc.get(created.id)
        assert found == created

    def test_get_missing_user(self):
        assert self.svc.get(9999) is None

    def test_deactivate(self):
        user = self.svc.create("Carol", "[email protected]")
        assert self.svc.deactivate(user.id) is True
        assert self.svc.get(user.id).active is False

    def test_list_active_filters_inactive(self):
        u1 = self.svc.create("A", "[email protected]")
        u2 = self.svc.create("B", "[email protected]")
        self.svc.deactivate(u1.id)
        active = self.svc.list_active()
        assert len(active) == 1
        assert active[0].id == u2.id
# ─────────────────────────────────────────────────────────────────────────────
# 6. monkeypatch patterns
# ─────────────────────────────────────────────────────────────────────────────
def get_greeting(name: str) -> str:
    """Build a greeting whose prefix comes from $GREETING_PREFIX.

    Falls back to "Hello" when the environment variable is unset.
    """
    prefix = os.getenv("GREETING_PREFIX", "Hello")
    return prefix + ", " + name + "!"
def fetch_current_time() -> float:
    """Return the current Unix timestamp (seconds since the epoch)."""
    now = time.time()
    return now
def test_monkeypatch_env(monkeypatch):
    """monkeypatch.setenv overrides $GREETING_PREFIX for this test only."""
    monkeypatch.setenv("GREETING_PREFIX", "Hi")
    greeting = get_greeting("Alice")
    assert greeting == "Hi, Alice!"
def test_monkeypatch_default_env(monkeypatch):
    """With the env var removed, the built-in default prefix applies."""
    monkeypatch.delenv("GREETING_PREFIX", raising=False)
    result = get_greeting("Bob")
    assert result.startswith("Hello")
def test_monkeypatch_function(monkeypatch):
    """Freeze time.time so the wrapper returns a known, stable value."""
    frozen = 1_700_000_000.0
    monkeypatch.setattr("time.time", lambda: frozen)
    assert fetch_current_time() == frozen
# ─────────────────────────────────────────────────────────────────────────────
# 7. File I/O and tmp_path
# ─────────────────────────────────────────────────────────────────────────────
def test_load_config(tmp_config_file):
    """The fixture-written JSON config loads back with the same values."""
    loaded = load_config(str(tmp_config_file))
    assert loaded["debug"] is True
    assert loaded["port"] == 8080
def test_write_and_read(tmp_path):
    """Round-trip a small JSON payload through a pytest-managed temp dir."""
    payload = {"score": 0.95, "label": "positive"}
    target = tmp_path / "results.json"
    target.write_text(json.dumps(payload))
    restored = json.loads(target.read_text())
    assert restored["score"] == pytest.approx(0.95)
    assert restored["label"] == "positive"
def test_shared_data_dir(shared_data_dir):
"""Read from session-scope temp fixture."""
data = json.loads((shared_data_dir / "sample.json").read_text())
assert data["count"] == 42
# ─────────────────────────────────────────────────────────────────────────────
# 8. Marks
# ─────────────────────────────────────────────────────────────────────────────
@pytest.mark.slow
def test_slow_operation():
    """Deselect this test with: pytest -m 'not slow'."""
    time.sleep(0.01)  # stand-in for genuinely expensive work
    assert True
# Skipped unconditionally: the body never executes until the mark is removed,
# so the failing assert documents the pending feature without breaking CI.
@pytest.mark.skip(reason="Feature not yet implemented")
def test_future_feature():
    assert False, "should be skipped"
@pytest.mark.xfail(reason="Known bug in edge case", strict=False)
def test_known_bug():
    """Expected failure: divide(0, 0) raises instead of returning 0."""
    calculator = Calculator()
    # Raises ZeroDivisionError; xfail records it as an expected failure.
    assert calculator.divide(0, 0) == 0
# ─────────────────────────────────────────────────────────────────────────────
# 9. Output capture
# ─────────────────────────────────────────────────────────────────────────────
def test_stdout_capture(capsys):
    """capsys collects text written to sys.stdout during the test."""
    print("Hello, pytest!")
    out = capsys.readouterr().out
    assert "Hello, pytest!" in out
def test_fd_capture(capfd):
    """capfd captures writes made directly to file descriptor 1.

    Fix: capfd.readouterr() always returns decoded text, so the old
    ``b"raw bytes" in out.encode() or "raw bytes" in out`` double-check
    was redundant — a single str membership test is sufficient.
    """
    os.write(1, b"raw bytes\n")
    out, _ = capfd.readouterr()
    assert "raw bytes" in out
# ─────────────────────────────────────────────────────────────────────────────
# Example pyproject.toml configuration (not executed — for reference)
# ─────────────────────────────────────────────────────────────────────────────
# Reference-only sample: paste into pyproject.toml; never executed by this file.
PYPROJECT_PYTEST_CONFIG: str = """
[tool.pytest.ini_options]
testpaths = ["tests"]
addopts = "-v --tb=short"
markers = [
"slow: marks tests as slow (deselect with -m 'not slow')",
"integration: requires external services",
"unit: pure unit tests",
]
[tool.coverage.run]
source = ["src"]
omit = ["*/tests/*", "*/conftest.py"]
[tool.coverage.report]
show_missing = true
fail_under = 80
"""
For the unittest stdlib alternative — Python’s built-in unittest module provides a JUnit-style test framework with TestCase classes, setUp/tearDown methods, and a unittest.mock library; pytest can discover and run both unittest.TestCase and plain function-based tests, and adds fixtures, parametrize, concise assertion output, and a richer plugin ecosystem — use unittest when you need to run in environments with no third-party dependencies or are extending existing TestCase-based suites, pytest for all new test code where fixture reuse and parametrize readability matter. For the hypothesis alternative — Hypothesis is a property-based testing library that generates random inputs to find edge cases — instead of writing @pytest.mark.parametrize tables by hand, you declare properties (@given(st.integers())) and Hypothesis searches for inputs that violate them; pytest handles specific example-based tests — use hypothesis when testing algorithms, parsers, or serializers where you want to discover unexpected edge cases, pytest.parametrize when you have known concrete examples and want deterministic test runs. The Claude Skills 360 bundle includes pytest skill sets covering fixture patterns (function/module/session scope, yield teardown, factory fixtures, autouse), parametrize tables with edge cases, monkeypatch.setattr/setenv patterns, tmp_path file I/O tests, capsys/capfd output capture, @pytest.mark.slow/skip/xfail, and pyproject.toml configuration. Start with the free tier to try automated testing and pytest suite code generation.