Python’s doctest module finds and runs interactive Python examples embedded in docstrings, ensuring documentation stays accurate by executing it as tests. Import it with `import doctest`. Run module: doctest.testmod(m=None, verbose=False, optionflags=0) — tests all docstrings in the current (or given) module; returns (failure_count, test_count). Run file: doctest.testfile(filename, verbose=False, optionflags=0) — runs examples from a .txt or .rst file. Single docstring: doctest.run_docstring_examples(f, globs, verbose=False, name="NoName", optionflags=0). Extract: doctest.DocTestFinder().find(obj) → list of DocTest objects. Execute: doctest.DocTestRunner(verbose=False, optionflags=0).run(test) → (failure_count, test_count). Unittest integration: doctest.DocTestSuite(module) and doctest.DocFileSuite(filename) return unittest.TestSuite. Directives (in # doctest: comments): +ELLIPSIS — match ... to any text; +NORMALIZE_WHITESPACE — collapse whitespace differences; +SKIP — skip this example; +IGNORE_EXCEPTION_DETAIL — match exception type only; +DONT_ACCEPT_TRUE_FOR_1 — strict bool/int separation. Option flags combine with |: doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE. Claude Code generates embedded test suites, example validators, API contract checkers, and living documentation verifiers.
CLAUDE.md for doctest
## doctest Stack
- Stdlib: import doctest
- Run: doctest.testmod() # tests current module
- doctest.testmod(mymodule, verbose=True)
- doctest.testfile("examples.txt")
- Flags: doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
- Pytest: pytest --doctest-modules (runs all module doctests)
- Note: Put "if __name__ == '__main__': doctest.testmod()" at end of file
doctest Documentation Testing Pipeline
# app/doctestutil.py — run, collect, report, inject, and validate doctests
from __future__ import annotations
import doctest
import importlib
import io
import sys
import textwrap
import types
import unittest
from dataclasses import dataclass, field
from pathlib import Path
# ─────────────────────────────────────────────────────────────────────────────
# 1. Run helpers
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class DocTestResult:
    """Outcome of one doctest run: counts plus the captured runner output."""
    name: str      # module / file / object the examples came from
    passed: int    # examples that succeeded
    failed: int    # examples that failed
    output: str    # text the doctest machinery printed while running

    @property
    def ok(self) -> bool:
        """True when no example failed."""
        return not self.failed

    def __str__(self) -> str:
        total = self.passed + self.failed
        label = "PASS" if not self.failed else "FAIL"
        return f"[{label}] {self.name} {self.passed}/{total} passed"
# Default option flags for all runners in this module: allow "..." wildcards
# in expected output, and ignore whitespace-only differences.
DEFAULT_FLAGS = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
def run_module_tests(
    module: types.ModuleType | str | None = None,
    verbose: bool = False,
    optionflags: int = DEFAULT_FLAGS,
) -> DocTestResult:
    """
    Run all docstring examples in a module and capture the report.

    Args:
        module: A module object, an importable module name, or None.
            NOTE: doctest.testmod(None) tests ``__main__`` (the top-level
            script), not the caller's module; None is resolved the same
            way here so the result carries the right name.
        verbose: Forwarded to doctest.testmod().
        optionflags: doctest option flags (default ELLIPSIS | NORMALIZE_WHITESPACE).

    Returns:
        DocTestResult with pass/fail counts and the captured doctest output.

    Example:
        result = run_module_tests("collections")
        print(result)
    """
    if isinstance(module, str):
        module = importlib.import_module(module)
    elif module is None:
        # testmod(None) falls back to __main__; resolve it here so the
        # result's name is "__main__" rather than the string "None".
        module = sys.modules["__main__"]
    buf = io.StringIO()
    old_out = sys.stdout
    sys.stdout = buf  # testmod prints its report to stdout; capture it
    try:
        failure_count, test_count = doctest.testmod(
            module,
            verbose=verbose,
            optionflags=optionflags,
        )
    finally:
        sys.stdout = old_out  # always restore, even if testmod raises
    return DocTestResult(
        name=getattr(module, "__name__", str(module)),
        passed=test_count - failure_count,
        failed=failure_count,
        output=buf.getvalue(),
    )
def run_file_tests(
    filename: str | Path,
    verbose: bool = False,
    optionflags: int = DEFAULT_FLAGS,
    globs: dict | None = None,
) -> DocTestResult:
    """
    Run all doctest examples in a plain text or .rst file.

    The path is resolved like an ordinary file path (absolute, or
    relative to the current working directory).

    Args:
        filename: Path to the doctest file.
        verbose: Forwarded to doctest.testfile().
        optionflags: doctest option flags.
        globs: Extra names made available to the examples.

    Returns:
        DocTestResult with counts and captured output.

    Example:
        result = run_file_tests("docs/tutorial.rst")
        print(result)
    """
    buf = io.StringIO()
    old_out = sys.stdout
    sys.stdout = buf  # testfile prints its report to stdout; capture it
    try:
        failure_count, test_count = doctest.testfile(
            str(filename),
            # Without this, testfile() resolves relative paths against the
            # *calling module's* directory and raises ValueError on absolute
            # paths — which broke Path/tempfile callers (see the demo).
            module_relative=False,
            verbose=verbose,
            optionflags=optionflags,
            extraglobs=globs or {},
        )
    finally:
        sys.stdout = old_out  # always restore stdout
    return DocTestResult(
        name=str(filename),
        passed=test_count - failure_count,
        failed=failure_count,
        output=buf.getvalue(),
    )
def run_docstring(
    obj: object,
    globs: dict | None = None,
    verbose: bool = False,
    optionflags: int = DEFAULT_FLAGS,
    name: str | None = None,
) -> DocTestResult:
    """
    Run doctest examples from a single function, class, or raw string.

    Args:
        obj: An object with a docstring, or a string that *is* the docstring
            (DocTestFinder treats a plain str as the docstring itself).
        globs: Extra globals visible to the examples.
        verbose: Report every example, not just failures.
        optionflags: doctest option flags.
        name: Display name; defaults to obj.__name__ or "docstring".

    Returns:
        DocTestResult with *exact* pass/fail counts taken from the
        DocTestRunner's TestResults — not estimated by scraping the
        printed report, as run_docstring_examples() would force us to.

    Example:
        def add(a, b):
            '''
            >>> add(1, 2)
            3
            '''
            return a + b

        result = run_docstring(add)
        print(result)
    """
    g = {"__builtins__": __builtins__}
    if globs:
        g.update(globs)
    obj_name = name or getattr(obj, "__name__", "docstring")
    buf = io.StringIO()
    # recurse=False mirrors doctest.run_docstring_examples(): only the
    # object's own docstring is inspected, not contained objects.
    finder = doctest.DocTestFinder(recurse=False)
    runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags)
    attempted = failed = 0
    for test in finder.find(obj, obj_name, globs=g):
        # out=buf.write routes the report into our buffer without having
        # to swap sys.stdout.
        results = runner.run(test, out=buf.write)
        failed += results.failed
        attempted += results.attempted
    return DocTestResult(
        name=obj_name,
        passed=attempted - failed,
        failed=failed,
        output=buf.getvalue(),
    )
# ─────────────────────────────────────────────────────────────────────────────
# 2. DocTest collection
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class DocTestItem:
    """A collected-but-not-run doctest, with enough context to locate it."""
    name: str               # dotted test name, e.g. "pkg.mod.func"
    filename: str           # source file the docstring lives in
    lineno: int             # line where the docstring starts
    example_count: int      # number of ">>>" examples in it
    docstring_snippet: str  # first 80 chars of docstring

    def __str__(self) -> str:
        location = f"{Path(self.filename).name}:{self.lineno}"
        return f"{self.name} [{location}] {self.example_count} examples"
def collect_doctests(
    module: types.ModuleType | str,
    extraglobs: dict | None = None,
) -> list[DocTestItem]:
    """
    Collect every doctest in a module without executing anything.

    Args:
        module: Module object or importable module name.
        extraglobs: Extra globals merged into each collected test.

    Returns:
        DocTestItems (only those with at least one example), ordered
        by source line number.

    Example:
        items = collect_doctests("os.path")
        for item in items:
            print(item)
    """
    if isinstance(module, str):
        module = importlib.import_module(module)
    found = doctest.DocTestFinder().find(module, extraglobs=extraglobs)
    items = [
        DocTestItem(
            name=test.name,
            filename=test.filename or "<unknown>",
            lineno=test.lineno or 0,
            example_count=len(test.examples),
            docstring_snippet=(test.docstring or "")[:80].replace("\n", " "),
        )
        for test in found
        if test.examples  # skip docstrings with no ">>>" examples
    ]
    items.sort(key=lambda item: item.lineno)
    return items
# ─────────────────────────────────────────────────────────────────────────────
# 3. Inline doctest builder
# ─────────────────────────────────────────────────────────────────────────────
def make_doctest_suite(
    module: types.ModuleType | str,
    optionflags: int = DEFAULT_FLAGS,
) -> unittest.TestSuite:
    """
    Wrap every doctest in a module as a unittest.TestSuite.

    Accepts a module object or an importable module name.  Use with
    unittest.TextTestRunner or pytest's --collect-only.

    Example:
        suite = make_doctest_suite("mymodule")
        runner = unittest.TextTestRunner(verbosity=2)
        runner.run(suite)
    """
    mod = importlib.import_module(module) if isinstance(module, str) else module
    return doctest.DocTestSuite(mod, optionflags=optionflags)
def make_file_suite(
    filename: str | Path,
    optionflags: int = DEFAULT_FLAGS,
    globs: dict | None = None,
) -> unittest.TestSuite:
    """
    Return a unittest.TestSuite built from a doctest text file.

    Args:
        filename: Path to the file, resolved like a normal file path
            (absolute, or relative to the current working directory).
        optionflags: doctest option flags.
        globs: Extra names made available to the examples.

    Example:
        suite = make_file_suite("docs/tutorial.txt")
    """
    return doctest.DocFileSuite(
        str(filename),
        # DocFileSuite defaults to module-relative paths, which raises
        # ValueError for absolute paths (tempfile / Path.resolve() callers);
        # resolve relative to the cwd instead, matching run_file_tests.
        module_relative=False,
        optionflags=optionflags,
        extraglobs=globs or {},
    )
# ─────────────────────────────────────────────────────────────────────────────
# 4. Docstring validator / injector
# ─────────────────────────────────────────────────────────────────────────────
def validate_docstring_examples(
    source: str,
    globs: dict | None = None,
    optionflags: int = DEFAULT_FLAGS,
) -> DocTestResult:
    """
    Parse and run doctest examples taken from a raw docstring string.

    Handy for checking auto-generated or templated docstrings before
    attaching them to real objects.

    Example:
        doc = '''
        >>> 1 + 1
        2
        >>> sorted([3, 1, 2])
        [1, 2, 3]
        '''
        result = validate_docstring_examples(doc)
        print(result)
    """
    return run_docstring(
        source,
        globs=globs,
        optionflags=optionflags,
        name="inline",
    )
def build_expects(fn, *args_list, globs: dict | None = None) -> str:
    """
    Capture actual output for calls and format them as doctest snippets.

    Useful for bootstrapping new doctests from known-good calls.  Each
    entry in *args_list* is either a single positional argument or a
    tuple of positional arguments.  A call that raises is rendered as a
    doctest-style traceback expectation (stack elided with "...").

    Args:
        fn: The function to call; its __name__ is used in the snippet.
        *args_list: Argument sets, one per generated example.
        globs: Merged into the namespace dict alongside fn; note the
            calls themselves are made directly, not via this namespace.

    Returns:
        A newline-joined doctest snippet, one blank line after each example.

    Example:
        snippet = build_expects(sorted, [3,1,2], [5,4])
        print(snippet)
        # >>> sorted([3, 1, 2])
        # [1, 2, 3]
        # >>> sorted([5, 4])
        # [4, 5]
    """
    lines: list[str] = []
    g = {"__builtins__": __builtins__, fn.__name__: fn}
    if globs:
        g.update(globs)
    for args in args_list:
        if not isinstance(args, tuple):
            args = (args,)  # a single bare argument becomes a 1-tuple
        call_repr = f"{fn.__name__}({', '.join(repr(a) for a in args)})"
        lines.append(f">>> {call_repr}")
        try:
            lines.append(repr(fn(*args)))
        except Exception as e:
            # Doctest form of an expected exception: header, elided
            # stack, then "ExceptionType: message".
            lines.append("Traceback (most recent call last):")
            lines.append("  ...")
            lines.append(f"{type(e).__name__}: {e}")
        lines.append("")
    return "\n".join(lines)
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Smoke-test demo: exercises each helper in this module end to end
    # and prints a short report for every section.
    import tempfile
    print("=== doctest demo ===")
    # ── run_docstring with passing and failing examples ───────────────────────
    print("\n--- run_docstring (passing) ---")
    def multiply(a, b):
        """
        Multiply two numbers.
        >>> multiply(3, 4)
        12
        >>> multiply(2.5, 2)
        5.0
        >>> multiply('ab', 3)
        'ababab'
        """
        return a * b
    result = run_docstring(multiply)
    print(f" {result}")
    # ── run_docstring with deliberate failure ─────────────────────────────────
    print("\n--- run_docstring (failing example) ---")
    # The expected output "3" is intentionally wrong so the failure path
    # (non-zero failed count, captured report text) is exercised.
    bad_doc = """
    >>> 1 + 1
    3
    """
    result2 = run_docstring(bad_doc, name="bad_example")
    print(f" {result2}")
    if result2.output:
        print(f" output snippet: {result2.output[:120]!r}")
    # ── collect_doctests from stdlib module ───────────────────────────────────
    print("\n--- collect_doctests('textwrap') ---")
    items = collect_doctests("textwrap")
    for item in items:
        print(f" {item}")
    # ── run from text file ────────────────────────────────────────────────────
    print("\n--- run_file_tests ---")
    with tempfile.TemporaryDirectory() as tmp:
        txt = Path(tmp) / "examples.txt"
        # dedent strips the common leading indentation so the file content
        # is flush-left, as doctest text files conventionally are.
        txt.write_text(textwrap.dedent("""\
            Math examples
            =============
            >>> 2 ** 10
            1024
            >>> sorted([3, 1, 2])
            [1, 2, 3]
            >>> list(range(3))
            [0, 1, 2]
            """))
        file_result = run_file_tests(txt)
        print(f" {file_result}")
    # ── build_expects ─────────────────────────────────────────────────────────
    print("\n--- build_expects ---")
    # Mix of single-argument entries and a tuple entry (("z", "a")).
    snippet = build_expects(sorted, [3, 1, 2], [5, 4], ("z", "a"))
    for line in snippet.splitlines():
        print(f" {line}")
    # ── ELLIPSIS directive example ────────────────────────────────────────────
    print("\n--- ELLIPSIS directive ---")
    # With the ELLIPSIS flag, '20...' matches whatever today's date string is.
    ellipsis_doc = """
    >>> import datetime; str(datetime.date.today()) # doctest: +ELLIPSIS
    '20...'
    """
    result3 = run_docstring(ellipsis_doc, name="ellipsis_example",
                            optionflags=doctest.ELLIPSIS)
    print(f" {result3}")
    print("\n=== done ===")
For the pytest alternative — pytest --doctest-modules and pytest --doctest-glob="*.rst" run all doctests alongside the regular test suite, with rich failure output, fixture injection via doctest_namespace, and # doctest: +SKIP support — use pytest for projects that already use pytest as the test runner; doctest.testmod() / DocTestSuite are preferable when you want zero-dependency test running (e.g., inside if __name__ == "__main__":) or when integrating with unittest runners. For the unittest alternative — unittest.TestCase with explicit assertEqual/assertRaises calls provides structured tests with setup/teardown, test isolation, and full failure introspection — use unittest or pytest for complex logic-heavy tests; use doctest for verifying that the examples in your docstrings remain correct, treating the docstring itself as the test specification — the key advantage is that passing doctests are both tests and accurate documentation simultaneously. The Claude Skills 360 bundle includes doctest skill sets covering DocTestResult with run_module_tests()/run_file_tests()/run_docstring() runners, DocTestItem with collect_doctests() finder, make_doctest_suite()/make_file_suite() unittest bridges, and validate_docstring_examples()/build_expects() docstring utilities. Start with the free tier to try documentation testing patterns and doctest pipeline code generation.