|
15 | 15 | * :class:`SubdomainTenantRouter` — runtime-checkable Protocol with |
16 | 16 | one async ``resolve(host: str) -> Tenant | None`` method. |
17 | 17 | * :class:`InMemorySubdomainTenantRouter` — reference impl for |
18 | | - dev/test backed by a static ``host → Tenant`` dict. Production |
19 | | - adopters back the Protocol with their tenant table. |
| 18 | + dev/test backed by a static ``host → Tenant`` dict. |
| 19 | +* :class:`CallableSubdomainTenantRouter` — adopter-callable router |
| 20 | + for DB-backed lookups. Adopter writes a single sync-or-async |
| 21 | + callable mapping a normalized host to a :class:`Tenant`; the |
| 22 | + framework owns host normalization. Optional bounded TTL cache |
| 23 | + for hot-path lookups. **Recommended for production multi-tenant |
| 24 | + deployments** — replaces ~25 LOC of adopter glue with ~5. |
20 | 25 | * :class:`SubdomainTenantMiddleware` — Starlette ASGI middleware |
21 | 26 | that calls the router, stashes the result in a |
22 | 27 | :class:`contextvars.ContextVar`, and ``404``\ s on unknown hosts. |
@@ -84,6 +89,10 @@ def build_context(meta): |
84 | 89 | from __future__ import annotations |
85 | 90 |
|
86 | 91 | import contextvars |
| 92 | +import inspect |
| 93 | +import time |
| 94 | +from collections import OrderedDict |
| 95 | +from collections.abc import Awaitable, Callable |
87 | 96 | from dataclasses import dataclass, field |
88 | 97 | from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable |
89 | 98 |
|
@@ -160,6 +169,172 @@ async def resolve(self, host: str) -> Tenant | None: |
160 | 169 | return self._tenants.get(_normalize_host(host)) |
161 | 170 |
|
162 | 171 |
|
# Signature accepted from adopters for host -> tenant lookups. Both plain
# functions (returning Tenant | None) and coroutine functions (returning
# Awaitable[Tenant | None]) qualify; CallableSubdomainTenantRouter awaits
# the result when needed. The argument is the already-normalized host
# (lower-cased, port-stripped), so adopters never reimplement the parser.
TenantResolver = Callable[[str], "Tenant | None | Awaitable[Tenant | None]"]
| 178 | + |
| 179 | + |
class CallableSubdomainTenantRouter:
    """Adopter-callable :class:`SubdomainTenantRouter` for DB-backed lookups.

    Wraps one adopter-supplied callable that maps an already-normalized
    host (lower-cased, ``:port`` stripped) to a :class:`Tenant`, or to
    ``None`` for a 404. Host normalization lives here, so the adopter
    writes only the lookup itself — typically one SQL query against
    their tenant table. The callable may be sync or async; it is
    awaited at call time when needed.

    Example::

        from sqlalchemy import select
        from adcp.server import CallableSubdomainTenantRouter, Tenant

        async def lookup(host: str) -> Tenant | None:
            subdomain = host.split(".", 1)[0]  # 'acme.example.com' -> 'acme'
            async with my_db.session() as s:
                row = await s.scalar(
                    select(TenantRow).filter_by(subdomain=subdomain, is_active=True)
                )
            return Tenant(id=row.tenant_id, display_name=row.name) if row else None

        router = CallableSubdomainTenantRouter(lookup)

    An optional bounded TTL cache absorbs hot-path lookups without the
    adopter rolling their own — useful when the resolver hits a remote
    DB on every request. Caching is **off** by default
    (``cache_size=0``); opting in requires explicit bounds::

        router = CallableSubdomainTenantRouter(
            lookup,
            cache_size=1024,         # bounded LRU; never grows beyond this
            cache_ttl_seconds=60.0,  # expire entries after 60s
        )

    There is deliberately no "cache forever, unbounded size" mode.
    Tenants get suspended and deactivated; a TTL-less long-lived cache
    would hand adopters a stale-tenant footgun, so
    ``cache_ttl_seconds`` is the mandatory, explicit ceiling.

    Memory profile
    --------------
    Without caching the router holds zero state — every ``resolve()``
    awaits the adopter callable directly. With caching, memory is
    bounded by ``cache_size`` entries of (host string, frozen
    :class:`Tenant`, expiry float); a typical 1024-entry cache stays
    well under 1 MB.
    """

    def __init__(
        self,
        resolver: TenantResolver,
        *,
        cache_size: int = 0,
        cache_ttl_seconds: float = 0.0,
    ) -> None:
        """Construct the router.

        :param resolver: Sync-or-async callable taking a normalized
            host string (lower-cased, ``:port`` stripped) and returning
            ``Tenant | None``.
        :param cache_size: Maximum number of cached lookups. ``0``
            disables caching entirely (the resolver runs on every
            request). Must be ``>= 0``.
        :param cache_ttl_seconds: Per-entry TTL in seconds; required to
            be ``> 0`` whenever ``cache_size > 0``. There is no "cache
            forever" mode — see the class docstring for rationale.
        :raises ValueError: On negative ``cache_size``, or when caching
            is enabled without a positive TTL.
        """
        if cache_size < 0:
            raise ValueError(f"cache_size must be >= 0, got {cache_size}")
        if cache_size > 0 and cache_ttl_seconds <= 0:
            raise ValueError(
                "cache_ttl_seconds must be > 0 when cache_size > 0; "
                "explicit TTL prevents stale-tenant footguns. Pass a "
                "value like 60.0 (one-minute cache) to opt in."
            )
        self._resolver = resolver
        self._cache_size = cache_size
        self._cache_ttl = cache_ttl_seconds
        # LRU comes free with OrderedDict: move_to_end on touch, then
        # popitem(last=False) to shed the oldest entry past the bound.
        # Values are (Tenant | None, monotonic expiry). Negative (None)
        # results are cached too, so DOS-style probing of unknown hosts
        # cannot sidestep the cache.
        self._cache: OrderedDict[str, tuple[Tenant | None, float]] = OrderedDict()

    async def resolve(self, host: str) -> Tenant | None:
        """Resolve *host* to its :class:`Tenant`, or ``None`` for 404."""
        key = _normalize_host(host)
        caching = self._cache_size > 0

        if caching:
            hit = self._cache_get(key)
            if hit is not _CACHE_MISS:
                return hit  # type: ignore[return-value]

        tenant = self._resolver(key)
        if inspect.isawaitable(tenant):
            tenant = await tenant

        if caching:
            self._cache_put(key, tenant)

        return tenant

    # ----- cache internals (request-path; keep tight) ---------------------

    def _cache_get(self, host: str) -> Tenant | None | object:
        """Return the live cached value for *host*, or ``_CACHE_MISS``."""
        if (entry := self._cache.get(host)) is None:
            return _CACHE_MISS
        tenant, expires_at = entry
        if time.monotonic() > expires_at:
            # Stale: evict and report a miss. The fresh resolve happens
            # in the caller, so the dead entry is not pinned across the
            # adopter callable's network round-trip.
            self._cache.pop(host, None)
            return _CACHE_MISS
        self._cache.move_to_end(host)  # LRU touch
        return tenant

    def _cache_put(self, host: str, tenant: Tenant | None) -> None:
        """Insert/refresh *host*, then evict oldest entries past the bound."""
        self._cache[host] = (tenant, time.monotonic() + self._cache_ttl)
        self._cache.move_to_end(host)
        while len(self._cache) > self._cache_size:
            self._cache.popitem(last=False)

    def invalidate(self, host: str | None = None) -> None:
        """Evict one cached host (or everything when ``host`` is ``None``).

        Call this from tenant-deactivation / -modification flows to
        drop stale entries ahead of the TTL. Safe (a no-op) even when
        caching is disabled.

        :param host: Host to evict — raw or normalized; it is
            normalized internally. ``None`` clears the entire cache.
        """
        if host is None:
            self._cache.clear()
        else:
            self._cache.pop(_normalize_host(host), None)
| 332 | + |
| 333 | + |
| 334 | +# Sentinel for cache miss vs. cached-None (negative result) |
| 335 | +_CACHE_MISS: object = object() |
| 336 | + |
| 337 | + |
163 | 338 | # Module-level contextvar — request-scoped via the ASGI middleware's |
164 | 339 | # per-call `set()`. ASGI guarantees per-task context isolation, so |
165 | 340 | # concurrent requests on the same process see only their own tenant. |
@@ -303,9 +478,11 @@ async def _send_404(send: Send, *, reason: str) -> None: |
303 | 478 |
|
304 | 479 |
|
# Public API surface — ASCII-sorted; keep sorted when adding names.
__all__ = [
    "CallableSubdomainTenantRouter",
    "InMemorySubdomainTenantRouter",
    "SubdomainTenantMiddleware",
    "SubdomainTenantRouter",
    "Tenant",
    "TenantResolver",
    "current_tenant",
]
0 commit comments