Python’s urllib.request module wraps http.client with a handler pipeline for redirect following, cookie management, authentication, and proxy support. from urllib.request import urlopen, Request. urlopen: resp = urllib.request.urlopen(url, timeout=10) — follows redirects, returns http.client.HTTPResponse-like object; resp.read(), resp.status, resp.getheader("Content-Type"). Request: req = urllib.request.Request(url, data=b"body", headers={"Accept": "application/json"}, method="POST"). urlretrieve: path, hdrs = urllib.request.urlretrieve(url, "local.file", reporthook=fn) — downloads to file; fn(block_num, block_size, total_size). build_opener: opener = urllib.request.build_opener(handler1, handler2) — creates a custom opener; opener.open(req). install_opener: urllib.request.install_opener(opener) — installs as global. Auth: auth = urllib.request.HTTPBasicAuthHandler(); auth.add_password(realm, uri, user, pwd). Cookies: urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()). Proxy: urllib.request.ProxyHandler({"http": "http://proxy:8080"}). Errors: urllib.error.HTTPError has .code, .reason, .headers; urllib.error.URLError has .reason. SSL: pass context=ssl.create_default_context() to urlopen. Claude Code generates authenticated API clients, file downloaders with progress, cookie-jar scrapers, and multi-handler opener chains.
CLAUDE.md for urllib.request
## urllib.request Stack
- Stdlib: from urllib.request import urlopen, Request, build_opener
- GET: resp = urlopen(url, timeout=10); data = resp.read()
- POST: req = Request(url, data=json.dumps(p).encode(), headers={...}, method="POST")
- Auth: opener = build_opener(HTTPBasicAuthHandler(mgr))
- Cookies: opener = build_opener(HTTPCookieProcessor(CookieJar()))
- Errors: except urllib.error.HTTPError as e: e.code, e.reason
urllib.request URL Fetching Pipeline
# app/urlfetch.py — GET/POST, JSON, download, auth, cookies, retry
from __future__ import annotations
import http.cookiejar
import io
import json
import os
import ssl
import time
import urllib.error
import urllib.parse
import urllib.request
from contextlib import contextmanager
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Generator
# ─────────────────────────────────────────────────────────────────────────────
# 1. Core fetch helpers
# ─────────────────────────────────────────────────────────────────────────────
# Default socket timeout (seconds) used when a caller does not override it.
_DEFAULT_TIMEOUT = 15.0
# Baseline request headers sent on every call; per-call headers are merged
# over these and take precedence.
_DEFAULT_HEADERS = {
    "User-Agent": "python-urllib.request/stdlib",
    "Accept": "*/*",
}
@dataclass
class FetchResponse:
    """In-memory HTTP response: status code, headers, raw body, final URL."""

    status: int
    headers: dict[str, str]
    body: bytes
    url: str  # final URL after any redirects

    @property
    def ok(self) -> bool:
        """True for any 2xx status code."""
        return self.status // 100 == 2

    def text(self, encoding: str = "utf-8") -> str:
        """Decode the raw body to text (UTF-8 by default)."""
        return str(self.body, encoding)

    def json(self) -> Any:
        """Parse the raw body as JSON."""
        return json.loads(self.body)

    def __str__(self) -> str:
        return "HTTP {} {} ({} bytes)".format(self.status, self.url, len(self.body))
def _exec_request(
    req: urllib.request.Request,
    opener: urllib.request.OpenerDirector | None = None,
    timeout: float = _DEFAULT_TIMEOUT,
) -> FetchResponse:
    """Execute a Request and return a FetchResponse.

    HTTP error statuses (4xx/5xx) are captured into the returned
    FetchResponse instead of raised; network-level failures
    (urllib.error.URLError, timeouts) still propagate to the caller.

    Bug fix: the old body did ``(opener or urllib.request).open(req,
    context=ctx, timeout=...)``, which fails on both paths — the
    urllib.request module has no ``open`` attribute, and
    ``OpenerDirector.open()`` does not accept a ``context`` keyword.
    """
    try:
        if opener is not None:
            # OpenerDirector.open() takes no SSL-context argument; the
            # opener's own HTTPSHandler provides TLS configuration.
            cm = opener.open(req, timeout=timeout)
        else:
            # Default path: urlopen does accept an explicit SSL context.
            cm = urllib.request.urlopen(
                req, context=ssl.create_default_context(), timeout=timeout
            )
        with cm as resp:
            body = resp.read()
            # Normalize header names for case-insensitive lookup.
            headers = {k.lower(): v for k, v in resp.headers.items()}
            return FetchResponse(
                status=resp.status,
                headers=headers,
                body=body,
                url=resp.url,
            )
    except urllib.error.HTTPError as e:
        # Convert HTTP-level errors into a normal response object so
        # callers can inspect status/body uniformly.
        body = e.read() if e.fp else b""
        headers = {k.lower(): v for k, v in (e.headers or {}).items()}
        return FetchResponse(
            status=e.code,
            headers=headers,
            body=body,
            url=req.full_url,
        )
def get(
    url: str,
    *,
    params: dict[str, str] | None = None,
    headers: dict[str, str] | None = None,
    timeout: float = _DEFAULT_TIMEOUT,
    opener: urllib.request.OpenerDirector | None = None,
) -> FetchResponse:
    """
    HTTP GET request.

    Args:
        url: Target URL; may already contain a query string.
        params: Extra query parameters, URL-encoded and appended.
            Fixed: joined with "&" when the URL already has a "?" —
            the old code always used "?", producing a malformed URL.
        headers: Per-call headers merged over _DEFAULT_HEADERS.
        timeout: Socket timeout in seconds.
        opener: Optional custom OpenerDirector (auth/cookies/proxy).

    Example:
        resp = get("https://httpbin.org/get", params={"q": "test"})
        print(resp.json())
    """
    if params:
        sep = "&" if "?" in url else "?"
        url = url + sep + urllib.parse.urlencode(params)
    hdrs = dict(_DEFAULT_HEADERS)
    if headers:
        hdrs.update(headers)
    req = urllib.request.Request(url, headers=hdrs)
    return _exec_request(req, opener=opener, timeout=timeout)
def post(
    url: str,
    payload: Any,
    *,
    content_type: str = "application/json",
    headers: dict[str, str] | None = None,
    timeout: float = _DEFAULT_TIMEOUT,
    opener: urllib.request.OpenerDirector | None = None,
) -> FetchResponse:
    """
    HTTP POST request.

    The payload is serialized according to *content_type*: JSON for
    "application/json", form encoding for
    "application/x-www-form-urlencoded", otherwise raw bytes (or the
    UTF-8 encoding of str(payload)).

    Example:
        resp = post("https://httpbin.org/post", {"key": "val"})
        print(resp.status)
    """
    if content_type == "application/json":
        body = json.dumps(payload).encode()
    elif content_type == "application/x-www-form-urlencoded":
        body = urllib.parse.urlencode(payload).encode()
    elif isinstance(payload, bytes):
        body = payload
    else:
        body = str(payload).encode()
    # Caller-supplied headers win over the defaults and the computed
    # Content-Type/Content-Length entries.
    merged = {
        **_DEFAULT_HEADERS,
        "Content-Type": content_type,
        "Content-Length": str(len(body)),
        **(headers or {}),
    }
    req = urllib.request.Request(url, data=body, headers=merged, method="POST")
    return _exec_request(req, opener=opener, timeout=timeout)
def fetch_json(url: str, **kwargs) -> Any:
    """
    GET *url* and deserialize the JSON body.

    Raises RuntimeError on any non-2xx status.

    Example:
        data = fetch_json("https://httpbin.org/json")
    """
    response = get(url, **kwargs)
    if response.ok:
        return response.json()
    raise RuntimeError(f"HTTP {response.status}: {response.url}")
# ─────────────────────────────────────────────────────────────────────────────
# 2. Download helpers
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class DownloadProgress:
    """Running byte counter handed to download progress callbacks."""

    filename: str
    downloaded: int = 0
    total: int = -1  # -1 when the server sent no Content-Length

    @property
    def percent(self) -> float | None:
        """Completion percentage, or None when the total is unknown."""
        if self.total <= 0:
            return None
        return self.downloaded / self.total * 100

    def __str__(self) -> str:
        if self.total <= 0:
            return f"{self.filename}: {self.downloaded} bytes"
        return f"{self.filename}: {self.downloaded}/{self.total} ({self.percent:.1f}%)"
def download_file(
    url: str,
    dest: str | Path,
    *,
    chunk_size: int = 65536,
    timeout: float = 30.0,
    on_progress: Callable[[DownloadProgress], None] | None = None,
) -> Path:
    """
    Stream *url* into *dest*, creating parent directories as needed.

    When *on_progress* is given it is invoked after every chunk with a
    DownloadProgress carrying the running byte count (total is -1 if the
    server sent no Content-Length).

    Example:
        download_file("https://example.com/file.gz", "/tmp/file.gz",
                      on_progress=lambda p: print(p))
    """
    target = Path(dest)
    tls = ssl.create_default_context()
    request = urllib.request.Request(url, headers=dict(_DEFAULT_HEADERS))
    with urllib.request.urlopen(request, context=tls, timeout=timeout) as resp:
        declared = int(resp.getheader("Content-Length") or "-1")
        tracker = DownloadProgress(filename=target.name, total=declared)
        target.parent.mkdir(parents=True, exist_ok=True)
        with target.open("wb") as sink:
            # read() returns b"" at EOF, ending the loop.
            while chunk := resp.read(chunk_size):
                sink.write(chunk)
                tracker.downloaded += len(chunk)
                if on_progress is not None:
                    on_progress(tracker)
    return target
# ─────────────────────────────────────────────────────────────────────────────
# 3. Opener factories
# ─────────────────────────────────────────────────────────────────────────────
def basic_auth_opener(
    url: str,
    username: str,
    password: str,
    realm: str = "",
) -> urllib.request.OpenerDirector:
    """
    Build an opener that answers HTTP Basic auth challenges for *url*.

    An empty *realm* registers the credentials as the default for any
    realm at that URL.

    Example:
        opener = basic_auth_opener("https://api.example.com", "user", "secret")
        resp = get("https://api.example.com/data", opener=opener)
    """
    password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(realm if realm else None, url, username, password)
    handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
    return urllib.request.build_opener(handler)
def cookie_opener(
    jar: http.cookiejar.CookieJar | None = None,
) -> tuple[urllib.request.OpenerDirector, http.cookiejar.CookieJar]:
    """
    Build an opener with a CookieJar for session persistence.

    Returns (opener, jar) so the jar can be inspected or saved.

    Bug fix: the old code used ``jar or CookieJar()``. Because an empty
    CookieJar is falsy (it defines __len__), a caller-supplied empty jar
    was silently discarded and replaced with a new one, defeating the
    "pass a jar in so you can reuse it" contract. Test identity instead.

    Example:
        opener, jar = cookie_opener()
        get("https://example.com/login", opener=opener)
        print(list(jar))  # inspect stored cookies
    """
    cjar = jar if jar is not None else http.cookiejar.CookieJar()
    handler = urllib.request.HTTPCookieProcessor(cjar)
    return urllib.request.build_opener(handler), cjar
def proxy_opener(proxies: dict[str, str]) -> urllib.request.OpenerDirector:
    """
    Build an opener that routes traffic through the given proxies.

    *proxies* maps scheme -> proxy URL, as expected by ProxyHandler.

    Example:
        opener = proxy_opener({"http": "http://proxy.corp:8080",
                               "https": "http://proxy.corp:8080"})
        resp = get("http://internal.corp/api", opener=opener)
    """
    handler = urllib.request.ProxyHandler(proxies)
    return urllib.request.build_opener(handler)
# ─────────────────────────────────────────────────────────────────────────────
# 4. Retry and error handling
# ─────────────────────────────────────────────────────────────────────────────
def get_with_retry(
    url: str,
    max_attempts: int = 3,
    backoff: float = 1.0,
    retry_statuses: tuple[int, ...] = (429, 500, 502, 503, 504),
    **kwargs,
) -> FetchResponse:
    """
    GET with exponential-backoff retry.

    Retries on network-level failures (URLError/OSError/timeout) and on
    the transient HTTP statuses listed in *retry_statuses*. The wait
    starts at *backoff* seconds and doubles after every attempt. If all
    attempts return a retryable status, the last such response is
    returned; if the final attempt raises, the exception propagates.

    Example:
        resp = get_with_retry("https://api.example.com/data", max_attempts=4)
    """
    wait = backoff
    retryable_resp: FetchResponse | None = None
    for attempt_no in range(1, max_attempts + 1):
        final = attempt_no == max_attempts
        try:
            result = get(url, **kwargs)
        except (urllib.error.URLError, OSError, TimeoutError):
            # Network failure: re-raise only when out of attempts.
            if final:
                raise
        else:
            if result.status not in retry_statuses:
                return result
            retryable_resp = result
        if not final:
            time.sleep(wait)
            wait *= 2.0
    if retryable_resp is not None:
        return retryable_resp
    raise RuntimeError(f"All {max_attempts} attempts failed for {url}")
# ─────────────────────────────────────────────────────────────────────────────
# 5. Utility helpers
# ─────────────────────────────────────────────────────────────────────────────
def check_url(url: str, timeout: float = 5.0) -> tuple[bool, int]:
    """
    Return (reachable, status_code) for *url*.

    Issues a HEAD request first; if the server rejects HEAD with 405 or
    501, retries with GET (the previous version promised this fallback
    in its docstring but never performed it). HTTP error statuses map to
    (code < 400, code); any transport failure (DNS, refused connection,
    timeout) yields (False, 0).

    Example:
        ok, code = check_url("https://example.com")
        print(ok, code)
    """
    def _probe(method: str) -> tuple[bool, int]:
        # One request with the given verb; raises HTTPError on 4xx/5xx.
        ctx = ssl.create_default_context()
        req = urllib.request.Request(url, method=method, headers=dict(_DEFAULT_HEADERS))
        with urllib.request.urlopen(req, context=ctx, timeout=timeout) as resp:
            return True, resp.status

    try:
        try:
            return _probe("HEAD")
        except urllib.error.HTTPError as e:
            # 405/501: server does not support HEAD — fall back to GET.
            if e.code not in (405, 501):
                raise
        return _probe("GET")
    except urllib.error.HTTPError as e:
        return e.code < 400, e.code
    except Exception:
        # Best-effort reachability probe: collapse transport errors.
        return False, 0
def read_url_lines(url: str, encoding: str = "utf-8", timeout: float = 10.0) -> list[str]:
    """
    Fetch a text URL and return its lines, omitting blank ones.

    Raises RuntimeError on any non-2xx status.

    Example:
        lines = read_url_lines("https://example.com/list.txt")
    """
    response = get(url, timeout=timeout)
    if not response.ok:
        raise RuntimeError(f"HTTP {response.status}: {url}")
    content = response.text(encoding)
    return [line for line in content.splitlines() if line.strip()]
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Live smoke test against httpbin.org. Every section traps network
    # errors so the demo still completes (printing "offline?") without
    # connectivity.
    print("=== urllib.request demo ===")
    # ── GET + JSON ────────────────────────────────────────────────────────────
    print("\n--- fetch_json ---")
    try:
        data = fetch_json("https://httpbin.org/json")
        print(f" keys: {list(data.keys())[:4]}")
    except Exception as e:
        print(f" network error: {e} (offline?)")
    # ── POST ──────────────────────────────────────────────────────────────────
    print("\n--- POST ---")
    try:
        resp = post("https://httpbin.org/post", {"name": "urllib.request", "version": 3})
        print(f" status={resp.status} ok={resp.ok}")
        # httpbin echoes the posted JSON payload back under the "json" key.
        echo = resp.json().get("json", {})
        print(f" echo: {echo}")
    except Exception as e:
        print(f" network error: {e} (offline?)")
    # ── check_url ─────────────────────────────────────────────────────────────
    print("\n--- check_url ---")
    for url in ["https://httpbin.org/status/200", "https://httpbin.org/status/404"]:
        try:
            ok, code = check_url(url, timeout=5.0)
            print(f" {url.split('/')[-1]:>4} → ok={ok} code={code}")
        except Exception as e:
            print(f" {url} error: {e}")
    # ── cookie_opener (smoke test) ────────────────────────────────────────────
    print("\n--- cookie_opener ---")
    try:
        opener, jar = cookie_opener()
        # /cookies/set replies with a Set-Cookie header; the jar-backed
        # opener should capture it.
        resp = get("https://httpbin.org/cookies/set?token=abc123", opener=opener)
        print(f" status={resp.status} cookies={len(list(jar))}")
    except Exception as e:
        print(f" network error: {e} (offline?)")
    # ── download to bytes (simulate with StringIO target) ─────────────────────
    print("\n--- download_file ---")
    try:
        import tempfile
        # delete=True: the temp file is removed when the context exits.
        with tempfile.NamedTemporaryFile(suffix=".json", delete=True) as tf:
            path = download_file(
                "https://httpbin.org/json",
                tf.name,
                on_progress=lambda p: None,
            )
            print(f" downloaded {path.name}: ok (file cleaned up)")
    except Exception as e:
        print(f" network error: {e} (offline?)")
    print("\n=== done ===")
For the requests / httpx alternative — requests (PyPI) collapses the entire handler chain into a one-line API (requests.get(url, auth=..., proxies=..., cookies=...)), handles streaming with stream=True and iter_content(), and adds automatic status-code assertion with raise_for_status(); httpx extends this with async support and HTTP/2 — use requests or httpx in any application where adding PyPI dependencies is acceptable; use urllib.request when you need zero external dependencies for a library or container image, or for simple script-level downloading without the overhead of a full HTTP client. For the http.client alternative — http.client is the lower layer that urllib.request builds on top of; it exposes persistent connections, explicit conn.request() / conn.getresponse() calls, and responses before body consumption, which allows HEAD → decision → read patterns that urlopen cannot express — use http.client when you need connection reuse across multiple requests to the same host, byte-level wire control, or batch requests that share a keep-alive session; use urllib.request for single-shot fetches with redirect following, cookie handling, or Basic auth without managing connection lifecycle manually. The Claude Skills 360 bundle includes urllib.request skill sets covering FetchResponse dataclass, get()/post()/fetch_json() core methods, DownloadProgress with download_file() and byte-range streaming, basic_auth_opener()/cookie_opener()/proxy_opener() handler factories, get_with_retry() with exponential backoff, and check_url()/read_url_lines() utilities. Start with the free tier to try URL fetching patterns and urllib.request pipeline code generation.