Python’s pytest is the standard test framework — its fixture system, parametrize decorator, and plugin ecosystem make it far more powerful than unittest. unittest.mock provides patching for isolating dependencies. Hypothesis generates property-based tests that find edge cases you wouldn’t think to write. Claude Code generates pytest suites, fixture hierarchies, async test patterns, property-based tests, and the CI configuration for coverage enforcement.
CLAUDE.md for Python Testing
## Testing Stack
- Framework: pytest 8.x with pytest-asyncio for async tests
- Async runtime: anyio (works with both asyncio and trio)
- Mocking: unittest.mock (built-in) + pytest-mock for fixtures
- Factory: factory_boy for test data generation
- Property-based: Hypothesis for parsers, validators, data transforms
- Coverage: pytest-cov, minimum 85% enforced in CI
- Database: pytest-postgresql or testcontainers[postgres] for integration tests
- HTTP mocking: responses (for requests) or respx / pytest-httpx (for httpx)
- Mutation testing: mutmut — run weekly in CI
Fixtures and Parametrize
# tests/conftest.py — shared fixtures
import pytest
import pytest_asyncio
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
from httpx import AsyncClient, ASGITransport
from myapp.main import app
from myapp.database import Base, get_db
from myapp.models import User, Order
# Database fixture: test database, auto-rollback per test
@pytest_asyncio.fixture(scope="function")
async def db():
    """Yield a fresh in-memory SQLite ``AsyncSession`` for a single test.

    Builds the full schema before the test, rolls back any uncommitted work
    after it, drops the schema, and disposes the engine so the connection
    pool is closed deterministically instead of at garbage-collection time.
    """
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    async_session = async_sessionmaker(engine, expire_on_commit=False)
    async with async_session() as session:
        yield session
        # Discard anything the test left uncommitted.
        await session.rollback()
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
    # Without this, pooled connections linger until GC — a leak across tests.
    await engine.dispose()
# Override FastAPI dependency with test database
@pytest_asyncio.fixture
async def client(db: AsyncSession):
    """Yield an httpx ``AsyncClient`` wired to the app with the test DB injected."""
    app.dependency_overrides[get_db] = lambda: db
    try:
        async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as ac:
            yield ac
    finally:
        # Clear even when the test (or client teardown) raises; otherwise a
        # later test would silently reuse this test's session.
        app.dependency_overrides.clear()
# Factory fixtures using factory_boy
import factory
class UserFactory(factory.Factory):
    """Build ``User`` instances with unique, mutually consistent ids and emails."""

    class Meta:
        model = User

    # Zero-padded counter keeps generated ids lexicographically sortable.
    id = factory.Sequence(lambda counter: f"user_{counter:04d}")
    # Derived from the generated id so the two fields always agree.
    email = factory.LazyAttribute(lambda u: f"{u.id}@example.com")
    name = factory.Faker("name")
    is_active = True
class OrderFactory(factory.Factory):
    """Build ``Order`` instances, creating an owning ``User`` on demand."""

    class Meta:
        model = Order

    id = factory.Sequence(lambda counter: f"ord_{counter:04d}")
    # Each order gets its own User unless one is passed in explicitly.
    user = factory.SubFactory(UserFactory)
    status = "pending"
    # Random realistic total between $1.00 and $1000.00.
    total_cents = factory.Faker("random_int", min=100, max=100000)
@pytest.fixture
def user():
    """A default, active user freshly built for each test."""
    fresh_user = UserFactory()
    return fresh_user
@pytest.fixture
def order(user):
    """A pending order owned by the ``user`` fixture."""
    return OrderFactory(user=user)
# tests/test_order_service.py
import pytest
from decimal import Decimal
# Parametrize: run test with multiple inputs
@pytest.mark.parametrize(
    "total_cents,expected_discount",
    [
        (0, 0),
        (4999, 0),        # just under the $50 threshold — no discount
        (5000, 250),      # $50 tier — 5%
        (10000, 1000),    # $100 tier — 10%
        (50000, 7500),    # $500 tier — 15%
        (100000, 20000),  # $1000 tier — 20%, the maximum
        (200000, 20000),  # beyond $1000 — still capped at 20%
    ],
)
def test_calculate_discount(total_cents: int, expected_discount: int):
    """Each spend tier maps to its documented discount, capped at 20%."""
    assert calculate_discount(total_cents) == expected_discount
# Parametrize combined with a regular fixture (no indirect= needed)
@pytest.mark.parametrize("status", ["shipped", "delivered"])
def test_order_cannot_be_cancelled_when(status, order):
    """Orders that have already left the warehouse must refuse cancellation."""
    order.status = status
    with pytest.raises(ValueError, match="Cannot cancel"):
        cancel_order(order)
@pytest.mark.parametrize(
    "invalid_email",
    [
        "not-an-email",
        "@nodomain.com",
        "spaces [email protected]",
        "",
        None,  # TypeError path rather than ValueError
    ],
)
def test_invalid_email_raises(invalid_email):
    """Malformed or missing addresses must be rejected, never accepted."""
    with pytest.raises((ValueError, TypeError)):
        validate_email(invalid_email)
Mocking with pytest-mock
# tests/test_email_service.py
import pytest
from unittest.mock import AsyncMock, patch, call
async def test_sends_order_confirmation(mocker, order):
    """The confirmation email goes to the order's owner with the order data."""
    send_mock = mocker.patch('myapp.services.email.send_email', new_callable=AsyncMock)

    await send_order_confirmation(order)

    # Exactly one email, addressed and templated from the order itself.
    send_mock.assert_called_once_with(
        to=order.user.email,
        subject=f"Order #{order.id} Confirmed",
        template="order_confirmation",
        context={"order_id": order.id, "total": order.total_cents / 100},
    )
async def test_does_not_send_email_for_cancelled_orders(mocker, order):
    """A cancelled order must never trigger a confirmation email."""
    send_mock = mocker.patch('myapp.services.email.send_email', new_callable=AsyncMock)
    order.status = "cancelled"

    await send_order_confirmation(order)

    send_mock.assert_not_called()
async def test_retries_on_smtp_error(mocker, order):
    """send_order_confirmation retries SMTP failures and succeeds on attempt 3.

    Instead of a hand-rolled nonlocal counter, give the AsyncMock a
    ``side_effect`` list (fail, fail, succeed) and assert on ``call_count`` —
    the same AsyncMock pattern the sibling tests in this module use.
    """
    mock_send = mocker.patch(
        'myapp.services.email.send_email',
        new_callable=AsyncMock,
        # First two calls raise; the third returns None (success).
        side_effect=[
            SMTPError("Connection refused"),
            SMTPError("Connection refused"),
            None,
        ],
    )

    await send_order_confirmation(order)  # Should succeed on 3rd attempt

    assert mock_send.call_count == 3
# Patching builtins and external services
async def test_stripe_charge_on_order_create(mocker, db, client):
    """Creating an order charges Stripe once, for exactly the order total."""
    stripe_mock = mocker.patch('stripe.PaymentIntent.create')
    stripe_mock.return_value = {'id': 'pi_test', 'status': 'succeeded'}

    payload = {
        "user_id": "user_0001",
        "items": [{"product_id": "prod_abc", "quantity": 1}],
    }
    response = await client.post("/orders", json=payload)

    assert response.status_code == 201
    stripe_mock.assert_called_once()
    # The charged amount must match the total the API reports back.
    assert stripe_mock.call_args.kwargs['amount'] == response.json()['total_cents']
Async Tests with anyio
# tests/test_async_service.py
import pytest
import anyio
@pytest.mark.anyio
async def test_concurrent_order_processing():
    """Ten orders processed concurrently must all complete, independently."""
    processed = []

    async def worker(order_id: str):
        processed.append(await process_order(order_id))

    # Task group waits for all workers before the assertions run.
    async with anyio.create_task_group() as tg:
        for idx in range(10):
            tg.start_soon(worker, f"ord_{idx:04d}")

    assert len(processed) == 10
    assert all(result.status == "processed" for result in processed)
# anyio's pytest plugin has no ini "auto" mode: opt tests in with
# @pytest.mark.anyio (or module-level `pytestmark = pytest.mark.anyio`);
# the backend is selected via the `anyio_backend` fixture (asyncio default).
Property-Based Testing with Hypothesis
# tests/test_order_validation.py
from hypothesis import given, strategies as st, settings, assume
@given(
    amount=st.integers(min_value=1, max_value=10_000_000),
    currency=st.sampled_from(["USD", "EUR", "GBP", "JPY", "CAD"]),
)
def test_format_currency_never_raises(amount: int, currency: str):
    """For every valid amount/currency pair, formatting yields a non-empty str."""
    formatted = format_currency(amount, currency)
    assert isinstance(formatted, str) and formatted
@given(email=st.emails())
def test_valid_emails_accepted(email: str):
    """Every RFC-shaped email Hypothesis generates parses to its lowercase form."""
    assert parse_email(email) == email.lower()
@given(
    items=st.lists(
        st.fixed_dictionaries({
            'product_id': st.text(min_size=1, max_size=50),
            'quantity': st.integers(min_value=1, max_value=100),
            'price_cents': st.integers(min_value=1, max_value=1_000_000),
        }),
        min_size=1,
        max_size=50,
    )
)
def test_calculate_total_is_sum_of_items(items):
    """Invariant: the total is exactly the sum of per-line quantity * price."""
    line_totals = (line['quantity'] * line['price_cents'] for line in items)
    assert calculate_total(items) == sum(line_totals)
@given(st.text())
@settings(max_examples=500)
def test_order_id_parser_never_panics(s: str):
    """For arbitrary text, the parser returns a str or None — it never raises."""
    parsed = parse_order_id(s)
    if parsed is not None:
        assert isinstance(parsed, str)
Coverage Configuration
# pyproject.toml
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
addopts = [
"--cov=myapp",
"--cov-report=term-missing",
"--cov-report=html:htmlcov",
"--cov-fail-under=85",
]
[tool.coverage.run]
branch = true
omit = ["*/migrations/*", "tests/*", "*/conftest.py"]
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"if TYPE_CHECKING:",
"raise NotImplementedError",
]
For the FastAPI endpoints that these tests cover, see the FastAPI advanced guide for dependency injection and error handling patterns. For the Pydantic v2 validation models that parametrized tests exercise, the Pydantic v2 guide covers field validators and model validators. The Claude Skills 360 bundle includes Python testing skill sets covering pytest fixtures, Hypothesis property tests, and async test patterns. Start with the free tier to try pytest fixture generation.