Python’s urllib.response module defines the response objects returned by urllib.request.urlopen(); import it with `from urllib import response`. The main class is addinfourl — a file-like wrapper around the raw socket with extra HTTP metadata. It is rarely constructed directly; instead you receive it from urlopen(). Key interface: resp.read(amt=-1) → bytes (all or amt bytes); resp.readline() → bytes; resp.readlines() → list[bytes]; resp.info() → http.client.HTTPMessage (response headers); resp.geturl() → str (final URL after redirects); resp.getcode() → int (HTTP status code); resp.status (alias for getcode(), Python 3.9+); resp.url (alias for geturl()). Header shortcut: resp.info()["content-type"]. Context manager: with urllib.request.urlopen(url) as resp:. The addinfo mixin (base class) adds just info(); addinfourl adds geturl() and getcode() on top. Claude Code generates HTTP response readers, header extractors, content-type detectors, redirect tracers, and streaming download pipelines.
CLAUDE.md for urllib.response
## urllib.response Stack
- Stdlib: from urllib import request, response
- Open: resp = request.urlopen(url) # returns addinfourl
- Read: resp.read() / resp.read(n) / resp.readline() / resp.readlines()
- Meta: resp.info() # http.client.HTTPMessage (headers)
- resp.geturl() # final URL (post-redirect)
- resp.getcode() # int status code
- resp.status / resp.url # aliases (3.9+)
- Header: resp.info()["content-type"]
- CM: with request.urlopen(url) as resp: data = resp.read()
urllib.response HTTP Response Pipeline
# app/urllibresponseutil.py — read, headers, redirect, chunked, inspect, mock
from __future__ import annotations
import io
import json
import urllib.request
import urllib.error
from dataclasses import dataclass, field
from typing import Any
from http.client import HTTPMessage
# ─────────────────────────────────────────────────────────────────────────────
# 1. Response metadata helpers
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class ResponseInfo:
    """Metadata snapshot of a urllib response, built by inspect_response()."""
    url: str                 # URL as reported by resp.url (falls back to final_url)
    final_url: str           # resp.geturl() — the URL after any redirects
    status: int              # HTTP status code from resp.getcode()
    content_type: str        # raw Content-Type header value ("" when absent)
    content_len: int | None  # parsed Content-Length; None if header absent
    encoding: str            # charset from Content-Type params; defaults to "utf-8"
    headers: dict[str, str]  # all response headers flattened into a plain dict
    redirected: bool         # True when url differs from final_url
def inspect_response(resp: Any, requested_url: str | None = None) -> ResponseInfo:
    """
    Extract metadata from a urllib addinfourl response object.

    Args:
        resp: response returned by urllib.request.urlopen().
        requested_url: the URL originally passed to urlopen().  Needed for
            redirect detection: ``resp.url`` is an alias for ``geturl()``
            (the *final* URL), so the response alone cannot reveal where
            the request started.

    Returns:
        ResponseInfo with status, headers, charset and redirect flag.

    Example:
        with urllib.request.urlopen("https://httpbin.org/get") as resp:
            info = inspect_response(resp, requested_url="https://httpbin.org/get")
            print(info.status, info.content_type)
    """
    headers_obj: HTTPMessage = resp.info()
    raw_headers = dict(headers_obj.items())
    ct = headers_obj.get("content-type", "")
    final_url = resp.geturl()
    # Charset from the Content-Type parameters; default to UTF-8.
    enc = "utf-8"
    for part in ct.split(";")[1:]:
        part = part.strip()
        if part.lower().startswith("charset="):
            enc = part[8:].strip().strip('"')
            break
    # Content-Length: only trust a purely numeric value.
    cl_str = headers_obj.get("content-length")
    cl_str = cl_str.strip() if cl_str else None
    cl = int(cl_str) if cl_str and cl_str.isdigit() else None
    # BUG FIX: the old code compared resp.url with resp.geturl(), but those
    # are aliases of each other, so `redirected` was always False.  Use the
    # caller-supplied requested_url when available; without it we cannot
    # tell, and conservatively report False (the previous behavior).
    start_url = requested_url if requested_url is not None else final_url
    return ResponseInfo(
        url=start_url,
        final_url=final_url,
        status=resp.getcode(),
        content_type=ct,
        content_len=cl,
        encoding=enc,
        headers=raw_headers,
        redirected=start_url != final_url,
    )
def response_headers(resp: Any) -> dict[str, str]:
    """
    Collapse a response's header message into a plain ``dict``.

    Duplicate header names keep the last value seen, matching dict
    construction semantics.

    Example:
        with urllib.request.urlopen(url) as resp:
            hdrs = response_headers(resp)
            print(hdrs.get("server"))
    """
    return dict(resp.info().items())
# ─────────────────────────────────────────────────────────────────────────────
# 2. Safe fetch helpers
# ─────────────────────────────────────────────────────────────────────────────
@dataclass
class FetchResult:
    """Outcome of safe_fetch(): raw body plus status, headers and error text."""
    url: str
    status: int
    body: bytes
    headers: dict[str, str]
    error: str = ""

    @property
    def ok(self) -> bool:
        """True for any 2xx status."""
        return self.status in range(200, 300)

    def text(self, encoding: str = "utf-8",
             errors: str = "replace") -> str:
        """Decode the body; malformed bytes are replaced by default."""
        return str(self.body, encoding, errors)

    def json(self) -> Any:
        """Parse the body as JSON and return the resulting object."""
        return json.loads(self.body)
def safe_fetch(url: str, timeout: float = 10.0,
               headers: dict[str, str] | None = None) -> FetchResult:
    """
    Fetch a URL and return a FetchResult.  Never raises; errors go to .error.

    Args:
        url: URL to fetch (any scheme urllib supports).
        timeout: socket timeout in seconds.
        headers: optional extra request headers.

    Returns:
        FetchResult; .status is 0 when no HTTP exchange happened at all
        (bad URL, connection failure, timeout).

    Example:
        r = safe_fetch("https://httpbin.org/get")
        if r.ok:
            print(r.json())
    """
    try:
        # BUG FIX: Request() itself raises ValueError on malformed URLs
        # ("unknown url type"), so it must sit inside the try block to
        # honor the "never raises" contract.
        req = urllib.request.Request(url, headers=headers or {})
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            body = resp.read()
            return FetchResult(
                url=resp.geturl(),
                status=resp.getcode(),
                body=body,
                headers=dict(resp.info().items()),
            )
    except urllib.error.HTTPError as e:
        # HTTPError doubles as a response object: salvage the error body
        # and headers instead of discarding them.
        try:
            body = e.read()
        except Exception:
            body = b""
        hdrs = dict(e.headers.items()) if e.headers is not None else {}
        return FetchResult(url=url, status=e.code, body=body,
                           headers=hdrs, error=str(e))
    except Exception as e:
        return FetchResult(url=url, status=0, body=b"",
                           headers={}, error=str(e))
# ─────────────────────────────────────────────────────────────────────────────
# 3. Streaming download with progress
# ─────────────────────────────────────────────────────────────────────────────
def download_stream(url: str,
                    dest: "str | io.RawIOBase",
                    chunk_size: int = 65536,
                    timeout: float = 30.0,
                    on_progress: "Any | None" = None) -> int:
    """
    Stream a URL to a file path or an already-open binary file object.

    Calls on_progress(bytes_downloaded, total_or_None) after each chunk;
    the total is taken from Content-Length and is None when the header is
    absent or non-numeric.

    Args:
        url: source URL (any scheme urllib supports, e.g. file://).
        dest: destination path (opened and closed here) or a writable
            binary file object (left open for the caller).
        chunk_size: bytes requested per read.
        timeout: socket timeout in seconds.
        on_progress: optional callable(bytes_so_far, total_or_None).

    Returns:
        Total bytes written.

    Example:
        n = download_stream(
            "https://example.com/file.gz",
            "/tmp/file.gz",
            on_progress=lambda n, t: print(f"{n}/{t}"),
        )
        print(f"downloaded {n} bytes")
    """
    # (removed an unused `import os` from the original implementation)
    req = urllib.request.Request(url)
    total_written = 0
    # Only close what we opened ourselves; caller-supplied objects stay open.
    close_after = isinstance(dest, str)
    outfile = open(dest, "wb") if close_after else dest
    try:
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            cl_str = resp.info().get("content-length")
            total = int(cl_str) if cl_str and cl_str.isdigit() else None
            while chunk := resp.read(chunk_size):
                outfile.write(chunk)
                total_written += len(chunk)
                if on_progress:
                    on_progress(total_written, total)
    finally:
        if close_after:
            outfile.close()
    return total_written
# ─────────────────────────────────────────────────────────────────────────────
# 4. Content-type and encoding detector
# ─────────────────────────────────────────────────────────────────────────────
def detect_content_type(resp: Any) -> tuple[str, str]:
    """
    Split a response's Content-Type header into (mime_type, charset).

    The charset defaults to "utf-8" when no charset parameter is present.

    Example:
        with urllib.request.urlopen(url) as resp:
            mime, charset = detect_content_type(resp)
            print(mime, charset)  # "application/json", "utf-8"
    """
    header = resp.info().get("content-type", "")
    mime, _, params = header.partition(";")
    charset = "utf-8"
    for param in params.split(";"):
        name, eq, value = param.strip().partition("=")
        if eq and name.lower() == "charset":
            charset = value.strip().strip('"')
            break
    return mime.strip(), charset
# ─────────────────────────────────────────────────────────────────────────────
# 5. Redirect tracer
# ─────────────────────────────────────────────────────────────────────────────
class RedirectRecorder(urllib.request.HTTPRedirectHandler):
    """
    Redirect handler that remembers every redirect target it follows.

    Each redirect's destination URL is appended to ``redirect_chain`` in
    the order encountered; the redirect behavior itself is inherited
    unchanged from HTTPRedirectHandler.

    Example:
        recorder = RedirectRecorder()
        opener = urllib.request.build_opener(recorder)
        with opener.open("https://httpbin.org/redirect/2") as resp:
            print(recorder.redirect_chain)
    """

    def __init__(self) -> None:
        # Destination URLs, oldest first.
        self.redirect_chain: list[str] = []

    def redirect_request(self, req, fp, code, msg, hdrs, newurl):
        # Record the hop, then defer to the stock implementation.
        self.redirect_chain += [newurl]
        return super().redirect_request(req, fp, code, msg, hdrs, newurl)
def trace_redirects(url: str, timeout: float = 10.0) -> dict[str, Any]:
    """
    Follow *url* and record every redirect hop along the way.

    Returns {"start": url, "chain": [...], "final": url, "status": code};
    on failure "final" is "" and "status" is 0, with an extra "error" key.

    Example:
        info = trace_redirects("https://httpbin.org/redirect/2")
        print(info["chain"])
    """
    recorder = RedirectRecorder()
    opener = urllib.request.build_opener(recorder)
    result: dict[str, Any] = {"start": url, "chain": recorder.redirect_chain}
    try:
        with opener.open(url, timeout=timeout) as resp:
            result["final"] = resp.geturl()
            result["status"] = resp.getcode()
    except Exception as e:
        result["final"] = ""
        result["status"] = 0
        result["error"] = str(e)
    return result
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Live demo of each helper. NOTE(review): every section performs real
    # network I/O against httpbin.org, so output varies with connectivity;
    # errors are caught and printed rather than crashing the demo.
    print("=== urllib.response demo ===")
    # Use a reliable, small public URL for demo
    test_url = "https://httpbin.org/get"
    # ── safe_fetch ────────────────────────────────────────────────────────────
    # safe_fetch never raises: failures surface as status 0 + .error text.
    print("\n--- safe_fetch ---")
    r = safe_fetch(test_url, timeout=5.0)
    print(f"  status : {r.status}")
    print(f"  ok     : {r.ok}")
    print(f"  url    : {r.url[:60]}")
    print(f"  bytes  : {len(r.body)}")
    if r.ok:
        print(f"  ct     : {r.headers.get('content-type', '')!r}")
    # ── inspect_response ──────────────────────────────────────────────────────
    # Raw urlopen here, so wrap in try/except to survive offline runs.
    print("\n--- inspect_response ---")
    try:
        with urllib.request.urlopen(test_url, timeout=5) as resp:
            info = inspect_response(resp)
            print(f"  status      : {info.status}")
            print(f"  content_type: {info.content_type!r}")
            print(f"  encoding    : {info.encoding!r}")
            print(f"  content_len : {info.content_len}")
            print(f"  final_url   : {info.final_url[:60]}")
    except Exception as e:
        print(f"  (network unavailable: {e})")
    # ── fetch + json ──────────────────────────────────────────────────────────
    # Demonstrates FetchResult.json() on a JSON endpoint.
    print("\n--- fetch + json ---")
    r = safe_fetch("https://httpbin.org/json", timeout=5.0)
    if r.ok:
        data = r.json()
        print(f"  json keys: {list(data.keys())[:5]}")
    else:
        print(f"  error: {r.error or r.status}")
    # ── detect_content_type ───────────────────────────────────────────────────
    # Splits the Content-Type header into MIME type and charset.
    print("\n--- detect_content_type ---")
    try:
        with urllib.request.urlopen("https://httpbin.org/json", timeout=5) as resp:
            mime, charset = detect_content_type(resp)
            print(f"  mime={mime!r} charset={charset!r}")
    except Exception as e:
        print(f"  (network: {e})")
    print("\n=== done ===")
For the requests (PyPI) alternative — requests.get(url) returns a Response with .text, .json(), .content, .headers, .status_code, .url, and auto-redirect following — use requests for all production HTTP client work; its API is more ergonomic, handles streaming, sessions, auth, retries, and TLS verification with sensible defaults. For the httpx (PyPI) alternative — httpx.get(url) provides an identical requests-compatible API plus native async with httpx.AsyncClient() for asyncio — use httpx when you need both sync and async HTTP, HTTP/2, or advanced connection pooling; use urllib.response/urllib.request for zero-dependency scripts and tools. The Claude Skills 360 bundle includes urllib.response skill sets covering ResponseInfo/inspect_response() metadata extractor, response_headers() header dict converter, FetchResult/safe_fetch() safe wrapper, download_stream() progress-tracking download, detect_content_type() MIME/charset detector, and RedirectRecorder/trace_redirects() redirect tracer. Start with the free tier to try HTTP response patterns and urllib.response pipeline code generation.