aiocache provides async cache backends with a unified API. pip install aiocache. Memory: from aiocache import Cache; cache = Cache(). Redis: Cache(Cache.REDIS, endpoint="localhost", port=6379). Memcached: Cache(Cache.MEMCACHED, endpoint="localhost"). Get: await cache.get("key"). Set: await cache.set("key", value, ttl=60). Delete: await cache.delete("key"). Exists: await cache.exists("key"). Increment: await cache.increment("counter", delta=1). Multiget: await cache.multi_get(["k1","k2"]). Multiset: await cache.multi_set([("k1",v1),("k2",v2)], ttl=60). Clear: await cache.clear(). TTL: set(..., ttl=60) — seconds. Namespace: Cache(namespace="myapp"). Decorator: from aiocache import cached; @cached(ttl=120, key="fixed_key"). Dynamic key: @cached(ttl=60, key_builder=lambda fn, *a, **kw: f"{fn.__name__}:{a}"). Serializer: from aiocache.serializers import JsonSerializer; Cache(serializer=JsonSerializer()). PickleSerializer, MsgPackSerializer. Plugins: from aiocache.plugins import TimingPlugin, HitMissRatioPlugin. Config: caches.set_config({...}). caches.get("default"). Alias: @cached(alias="default"). Close: await cache.close(). Claude Code generates aiocache decorators, async cache layers, and FastAPI response caching.
CLAUDE.md for aiocache
## aiocache Stack
- Version: aiocache >= 0.12 | pip install aiocache[redis,memcached]
- Memory: Cache() | Redis: Cache(Cache.REDIS, endpoint="host", port=6379)
- Get/Set: await cache.get(key) | await cache.set(key, val, ttl=60)
- Decorator: @cached(ttl=60) | @cached(key_builder=lambda fn,*a,**kw: ...)
- Serializer: JsonSerializer | PickleSerializer | MsgPackSerializer
- Namespace: Cache(namespace="prefix") — auto-prefixes all keys
aiocache Async Cache Pipeline
# app/cache.py — aiocache get/set, @cached decorator, TTL, serializers, FastAPI
from __future__ import annotations
import asyncio
import datetime
import functools
import json
import time
from typing import Any, Callable
from aiocache import Cache, cached, multi_cached
from aiocache.serializers import JsonSerializer, PickleSerializer
# ─────────────────────────────────────────────────────────────────────────────
# 1. Cache factory
# ─────────────────────────────────────────────────────────────────────────────
def make_memory_cache(
    ttl: int = 300,
    namespace: str = "",
    max_size: int | None = None,
) -> Cache:
    """
    Build an in-process SimpleMemoryCache with a JSON serializer.

    ttl: default expiry in seconds applied to every set().
    namespace: optional key prefix; omitted from kwargs when empty.
    max_size: advisory only — aiocache's memory backend does not evict by
        size; keep it for documentation or an external eviction wrapper.
    """
    options: dict[str, Any] = {"serializer": JsonSerializer(), "ttl": ttl}
    if namespace:
        options["namespace"] = namespace
    # No backend argument -> aiocache defaults to SimpleMemoryCache.
    return Cache(**options)
def make_redis_cache(
    host: str = "localhost",
    port: int = 6379,
    db: int = 0,
    password: str | None = None,
    ttl: int = 300,
    namespace: str = "",
) -> Cache:
    """
    Build a Redis-backed aiocache instance with a JSON serializer.

    Requires the redis extra: pip install aiocache[redis]

    host/port/db: Redis connection settings (endpoint == host in aiocache).
    password: forwarded only when truthy.
    ttl: default expiry in seconds for every set().
    namespace: optional key prefix; omitted from kwargs when empty.
    """
    options: dict[str, Any] = {
        "endpoint": host,
        "port": port,
        "db": db,
        "serializer": JsonSerializer(),
        "ttl": ttl,
    }
    if password:
        options["password"] = password
    if namespace:
        options["namespace"] = namespace
    return Cache(Cache.REDIS, **options)
# ─────────────────────────────────────────────────────────────────────────────
# 2. Core cache operations
# ─────────────────────────────────────────────────────────────────────────────
async def get(cache: Cache, key: str, default: Any = None) -> Any:
    """Fetch *key* from *cache*; a miss (stored as None) yields *default*."""
    cached_value = await cache.get(key)
    if cached_value is None:
        return default
    return cached_value
async def set(cache: Cache, key: str, value: Any, ttl: int | None = None) -> bool:
    """Store *value* under *key*; forward ttl only when given. True on success."""
    if ttl is None:
        # Omit ttl entirely so the cache's own default applies.
        return await cache.set(key, value)
    return await cache.set(key, value, ttl=ttl)
async def get_or_set(
    cache: Cache,
    key: str,
    loader: Callable,
    ttl: int | None = None,
) -> Any:
    """
    Get *key* from cache; on a miss, call *loader*, cache the result, return it.

    loader may be a plain callable, an async function, or a sync callable that
    returns an awaitable (e.g. ``lambda: fetch_user(uid)`` with fetch_user
    async) — the awaitable is awaited before caching.  The previous
    iscoroutinefunction() check missed the lambda-wrapped case and would have
    cached the raw coroutine object.

    Example:
        user = await get_or_set(cache, f"user:{uid}", lambda: fetch_user(uid), ttl=60)
    """
    value = await cache.get(key)
    if value is None:
        value = loader()
        if asyncio.iscoroutine(value):
            # Covers async-def loaders *and* sync lambdas wrapping async calls.
            value = await value
        # Forward ttl only when given so the cache's default TTL still applies.
        kwargs: dict[str, Any] = {} if ttl is None else {"ttl": ttl}
        await cache.set(key, value, **kwargs)
    return value
async def invalidate(cache: Cache, *keys: str) -> None:
    """Remove every given key from *cache*, one delete at a time."""
    for k in keys:
        await cache.delete(k)
async def invalidate_prefix(cache: Cache, prefix: str, known_keys: list[str]) -> int:
    """
    Delete every key in *known_keys* that starts with *prefix*.

    aiocache has no server-side prefix scan, so the caller supplies the
    candidate key set.  Returns the number of keys deleted.
    """
    matching = [k for k in known_keys if k.startswith(prefix)]
    for k in matching:
        await cache.delete(k)
    return len(matching)
async def mget(cache: Cache, keys: list[str], default: Any = None) -> list[Any]:
    """Fetch several keys in one round trip; a miss (None) becomes *default*."""
    fetched = await cache.multi_get(keys)
    return [default if item is None else item for item in fetched]
async def mset(cache: Cache, mapping: dict[str, Any], ttl: int | None = None) -> None:
    """Store every (key, value) pair of *mapping*; forward ttl only when given."""
    if ttl is None:
        await cache.multi_set(list(mapping.items()))
    else:
        await cache.multi_set(list(mapping.items()), ttl=ttl)
# ─────────────────────────────────────────────────────────────────────────────
# 3. Decorator helpers
# ─────────────────────────────────────────────────────────────────────────────
def cache_result(
    ttl: int = 60,
    key_prefix: str = "",
    namespace: str = "",
):
    """
    Decorator factory: cache an async function's return value via aiocache.

    The generated cache key is "{key_prefix}{fn.__name__}:{args}:{kwargs}"
    with empty segments dropped (kwargs are sorted for a stable key).

    Usage:
        @cache_result(ttl=300, key_prefix="api:")
        async def fetch_user(user_id: int) -> dict:
            return await db.get_user(user_id)
    """
    def build_key(fn, *args, **kwargs):
        segments = [
            key_prefix + fn.__name__,
            ":".join(str(a) for a in args),
            ":".join(f"{k}={v}" for k, v in sorted(kwargs.items())),
        ]
        return ":".join(s for s in segments if s)

    def wrap(fn: Callable) -> Callable:
        extra: dict[str, Any] = {"namespace": namespace} if namespace else {}
        return cached(ttl=ttl, key_builder=build_key, **extra)(fn)

    return wrap
def invalidate_on_call(cache_obj: Cache, *key_templates: str):
    """
    Decorator factory: delete cache keys after the wrapped coroutine runs.

    Each template is expanded with the call's positional/keyword arguments
    via str.format; a template whose placeholders do not match the call is
    used verbatim.

    Usage:
        @invalidate_on_call(cache, "user:{0}", "user_list")
        async def update_user(user_id: int, data: dict):
            ...
        # Calling update_user(42, data) deletes "user:42" and "user_list"
    """
    def decorator(fn: Callable) -> Callable:
        @functools.wraps(fn)
        async def inner(*args, **kwargs):
            outcome = await fn(*args, **kwargs)
            for tpl in key_templates:
                try:
                    resolved = tpl.format(*args, **kwargs)
                except (IndexError, KeyError):
                    # Placeholders don't match this call — delete as-is.
                    resolved = tpl
                await cache_obj.delete(resolved)
            return outcome
        return inner
    return decorator
# ─────────────────────────────────────────────────────────────────────────────
# 4. Cache-aside pattern
# ─────────────────────────────────────────────────────────────────────────────
class CacheAside:
    """
    Cache-aside pattern: read from cache, fall back to a loader, write back.

    Keys are optionally prefixed with "{namespace}:".

    Usage:
        store = CacheAside(cache, ttl=120)
        user = await store.get("user:42", loader=lambda: fetch_user(42))
        await store.set("user:42", updated_user)
        await store.invalidate("user:42")
    """

    def __init__(self, cache: Cache, ttl: int = 60, namespace: str = ""):
        self._cache = cache  # backing aiocache instance
        self._ttl = ttl      # default TTL (seconds) for writes
        self._ns = namespace  # optional key prefix

    def _key(self, key: str) -> str:
        """Apply the namespace prefix, if any."""
        return f"{self._ns}:{key}" if self._ns else key

    async def get(self, key: str, loader: Callable | None = None) -> Any:
        """
        Return the cached value for *key*; on a miss, call *loader* and cache
        a non-None result before returning it.

        loader may be sync, async, or a sync callable returning an awaitable
        (e.g. ``lambda: fetch_user(42)``) — the awaitable is awaited before
        caching.  The previous iscoroutinefunction() check missed the
        lambda-wrapped case and cached the raw coroutine object.
        """
        k = self._key(key)
        value = await self._cache.get(k)
        if value is None and loader is not None:
            value = loader()
            if asyncio.iscoroutine(value):
                # Covers async-def loaders *and* sync lambdas wrapping async calls.
                value = await value
            if value is not None:
                await self._cache.set(k, value, ttl=self._ttl)
        return value

    async def set(self, key: str, value: Any, ttl: int | None = None) -> None:
        """Write *value* through to the cache; ttl falls back to the default."""
        await self._cache.set(self._key(key), value, ttl=ttl or self._ttl)

    async def invalidate(self, *keys: str) -> None:
        """Delete the given (un-prefixed) keys."""
        for key in keys:
            await self._cache.delete(self._key(key))

    async def get_many(self, keys: list[str]) -> dict[str, Any]:
        """Batch get; missing keys are omitted from the result dict."""
        full_keys = [self._key(k) for k in keys]
        values = await self._cache.multi_get(full_keys)
        return {k: v for k, v in zip(keys, values) if v is not None}
# ─────────────────────────────────────────────────────────────────────────────
# Demo
# ─────────────────────────────────────────────────────────────────────────────
async def demo():
    """Exercise the cache helpers above against an in-memory cache."""
    print("=== In-memory cache basics ===")
    cache = make_memory_cache(ttl=60, namespace="demo")
    await set(cache, "greeting", "Hello, World!")
    value = await get(cache, "greeting")
    print(f"get: {value!r}")
    missing = await get(cache, "nothing", default="N/A")
    print(f"missing key: {missing!r}")

    print("\n=== Multi get/set ===")
    await mset(cache, {"a": 1, "b": 2, "c": 3}, ttl=30)
    results = await mget(cache, ["a", "b", "c", "d"])
    print(f"mget: {results}")

    print("\n=== get_or_set ===")
    call_count = [0]

    async def expensive_loader():
        call_count[0] += 1
        await asyncio.sleep(0)  # simulate async work
        return {"computed": True, "ts": time.time()}

    v1 = await get_or_set(cache, "computed", expensive_loader, ttl=60)
    v2 = await get_or_set(cache, "computed", expensive_loader, ttl=60)
    print(f"loader called: {call_count[0]} times (should be 1)")
    print(f"same result: {v1 == v2}")

    print("\n=== @cached decorator ===")

    @cached(ttl=60, serializer=JsonSerializer())
    async def fetch_user(user_id: int) -> dict:
        return {"id": user_id, "name": f"User {user_id}"}

    u1 = await fetch_user(42)
    await fetch_user(42)  # second call is served from the decorator's cache
    print(f"user: {u1}")

    print("\n=== @cache_result decorator ===")
    # cache_result manages its own backing cache, so no extra Cache instance
    # is needed here (a previously created tmp_cache was unused and leaked).
    call_n = [0]

    @cache_result(ttl=120, key_prefix="calc:")
    async def compute(x: int, y: int) -> int:
        call_n[0] += 1
        return x * y + x + y

    r1 = await compute(3, 4)
    await compute(3, 4)  # cache hit — call_n stays at 1
    await compute(5, 6)  # different args -> separate cache entry
    print(f"compute(3,4)={r1}, calls={call_n[0]}")

    print("\n=== CacheAside ===")
    store = CacheAside(cache, ttl=60, namespace="users")
    db_calls = [0]

    async def load_user(uid):
        db_calls[0] += 1
        return {"id": uid, "name": "Alice", "email": "[email protected]"}

    u = await store.get("42", loader=lambda: load_user(42))
    await store.get("42", loader=lambda: load_user(42))  # cache hit
    print(f"DB calls: {db_calls[0]} (should be 1)")
    print(f"user: {u}")
    await store.invalidate("42")
    await store.get("42", loader=lambda: load_user(42))  # reloads from "DB"
    print(f"After invalidate, DB calls: {db_calls[0]} (should be 2)")
    await cache.close()
if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    asyncio.run(demo())
For the cachetools alternative — cachetools provides sync LRU/TTL/LFU caches in memory only, with function decorators for sync code; aiocache is built for asyncio with await cache.get(key) semantics, multiple backends (memory, Redis, Memcached) via a unified API, and async-safe @cached decorators for coroutines. For the redis / aioredis direct alternative — using Redis directly gives you full control but requires writing key management, serialization, and TTL logic yourself; aiocache wraps these into a consistent API with pluggable serializers and backends switchable without changing application code. The Claude Skills 360 bundle includes aiocache skill sets covering make_memory_cache()/make_redis_cache() factory, get/set/get_or_set/invalidate/mget/mset helpers, @cached decorator, cache_result() key-building decorator, invalidate_on_call() write-through invalidation, CacheAside pattern with loader/invalidate, JsonSerializer/PickleSerializer, and async demo with call-count verification. Start with the free tier to try async caching code generation.