All files / lib cache.ts

78.04% Statements 64/82
82.35% Branches 28/34
80.95% Functions 17/21
82.19% Lines 60/73

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299                                  31x     31x     31x     67x 67x 32x 4x 4x     28x 28x 28x       4045x     4045x     4045x 3x           4045x       76x                                                               93x     24x 24x     19x 5x 5x 4x   1x 1x             18x                 18x     18x 5x 5x                   43x 43x 2x 2x                                                                                   3x 3x   2x 2x 2x           31x   38x     4023x 4023x     33x     45x             5x 5x 4x 3x 5x         31x                                                         31x 51x 52x 15x 5x           31x             31x     31x                                                                  
// Dual-layer cache: L1 (in-memory per-isolate) + L2 (Cloudflare KV cross-worker)

// Shape of a single slot in the L1 map.
interface CacheEntry<T> {
	value: T;
	expiresAt: number | null; // epoch millis deadline; null means no expiration
}

// Options accepted by the L1-only singleton `cache` below.
interface CacheOptions {
	expirationTtl?: number; // TTL in seconds (matches KV signature)
}

// Per-layer TTLs for the dual-layer cache returned by createDualCache().
interface DualCacheOptions {
	l1Ttl?: number; // L1 (in-memory) TTL in seconds, capped at 60s
	l2Ttl?: number; // L2 (KV) TTL in seconds
}

// Maximum number of entries in the L1 cache before LRU eviction
export const MAX_CACHE_SIZE = 1000;

// Module-level L1 cache - persists across requests within the same isolate.
// Map insertion order doubles as LRU order (l1Get re-inserts on hit).
const l1Cache = new Map<string, CacheEntry<unknown>>();

// Max L1 TTL to limit stale data in memory (60 seconds)
const MAX_L1_TTL = 60;
 
/**
 * Read a value from the L1 map, honoring expiry and refreshing LRU order.
 * Returns null on a miss or when the entry has expired (expired entries are
 * removed eagerly).
 */
function l1Get<T>(key: string): T | null {
	const hit = l1Cache.get(key) as CacheEntry<T> | undefined;
	if (hit === undefined) {
		return null;
	}

	const expired = hit.expiresAt !== null && hit.expiresAt < Date.now();
	if (expired) {
		l1Cache.delete(key);
		return null;
	}

	// Delete + re-insert so this key becomes the most-recently-used entry
	// (Map preserves insertion order, which we use as LRU order).
	l1Cache.delete(key);
	l1Cache.set(key, hit);
	return hit.value;
}
 
/**
 * Insert or overwrite an L1 entry, evicting the least-recently-used entry
 * when the cache is at capacity.
 *
 * @param ttlSeconds desired TTL in seconds; omitted → MAX_L1_TTL. Values
 *   above the cap are clamped down to limit how stale in-memory data can get.
 */
function l1Put<T>(key: string, value: T, ttlSeconds?: number): void {
	// Fix: check `!== undefined` rather than truthiness — an explicit TTL of 0
	// must mean "expires immediately", not silently fall back to the 60s default.
	const effectiveTtl =
		ttlSeconds !== undefined ? Math.min(ttlSeconds, MAX_L1_TTL) : MAX_L1_TTL;
	const expiresAt = Date.now() + effectiveTtl * 1000;

	// LRU eviction: evict oldest entry if at capacity (skip if updating existing key)
	if (l1Cache.size >= MAX_CACHE_SIZE && !l1Cache.has(key)) {
		const firstKey = l1Cache.keys().next().value;
		/* v8 ignore start -- V8 artifact: iterator always yields a value when size >= MAX_CACHE_SIZE */
		if (firstKey) l1Cache.delete(firstKey);
		/* v8 ignore stop */
	}

	l1Cache.set(key, { value, expiresAt });
}
 
// Remove a single key from the L1 cache (no-op when the key is absent).
function l1Delete(key: string): void {
	l1Cache.delete(key);
}
 
/**
 * Contract for the dual-layer cache (L1 in-memory Map + optional L2 KV).
 * All methods are async so L1-only and L1+L2 implementations share one shape.
 */
export interface DualCache {
	/** Look up a key: L1 first, then L2 (backfilling L1 on an L2 hit). */
	get<T>(key: string): Promise<T | null>;
	/** Write to both layers; see DualCacheOptions for per-layer TTLs. */
	put<T>(key: string, value: T, options?: DualCacheOptions): Promise<void>;
	/** Remove a single key from both layers. */
	delete(key: string): Promise<void>;
	/**
	 * Delete every key matching a prefix. L1 iterates and deletes in-memory;
	 * L2 uses KV's list({prefix}) + delete loop. The prefix must be a literal
	 * string (no glob) — callers pass e.g. `"audience:resolve:pro_123:"`.
	 *
	 * Used by the consent-attestations DAL to invalidate cached audience
	 * classifications after an attestation is written or revoked (B5.a cache-
	 * invalidation hook per CEO finding 7a). Without this, a pro revoking
	 * consent mid-compose would continue to see stale counts.
	 */
	deletePattern(prefix: string): Promise<void>;
	/** Return the cached value, or run `fetcher`, cache its result, and return it. */
	getOrSet<T>(
		key: string,
		fetcher: () => Promise<T>,
		options?: DualCacheOptions,
	): Promise<T>;
}
 
/**
 * Create a dual-layer cache instance.
 * L1: Module-level Map (shared across requests in same isolate, ~0ms)
 * L2: KV (persistent across workers, ~10ms)
 * Degrades gracefully to L1-only when KV is not bound.
 */
export function createDualCache(kv?: KVNamespace): DualCache {
	return {
		async get<T>(key: string): Promise<T | null> {
			// L1 check
			const l1Result = l1Get<T>(key);
			if (l1Result !== null) return l1Result;
 
			// L2 check (KV)
			if (kv) {
				try {
					const kvResult = await kv.get<T>(key, "json");
					if (kvResult !== null) {
						// Backfill L1
						l1Put(key, kvResult);
						return kvResult;
					}
				} catch {
					// KV failure is non-fatal, fall through to miss
				}
			}
 
			return null;
		},
 
		async put<T>(
			key: string,
			value: T,
			options?: DualCacheOptions,
		): Promise<void> {
			// Write to L1
			l1Put(key, value, options?.l1Ttl);
 
			// Write to L2 (KV)
			if (kv) {
				try {
					await kv.put(key, JSON.stringify(value), {
						expirationTtl: options?.l2Ttl ?? MAX_L1_TTL,
					});
				} catch {
					// KV write failure is non-fatal
				}
			}
		},
 
		async delete(key: string): Promise<void> {
			l1Delete(key);
			if (kv) {
				try {
					await kv.delete(key);
				} catch {
					// KV delete failure is non-fatal
				}
			}
		},
 
		async deletePattern(prefix: string): Promise<void> {
			// L1: iterate the in-memory Map and drop every key that starts with
			// the prefix. Map.keys() is a live iterator — copy before mutating.
			const l1Keys: string[] = [];
			for (const key of l1Cache.keys()) {
				if (key.startsWith(prefix)) l1Keys.push(key);
			}
			for (const key of l1Keys) l1Delete(key);
 
			// L2: KV.list supports prefix filtering. Use a bounded list+delete
			// loop; for audience caches the prefix is per-pro so the expected
			// key count is small (<100) — no cursor pagination needed for v1.
			if (kv) {
				try {
					let cursor: string | undefined;
					// Cap iterations to keep worker CPU bounded if a pro ever
					// accumulates pathologically many cache keys.
					for (let i = 0; i < 10; i++) {
						const listed = await kv.list({ prefix, cursor });
						await Promise.all(listed.keys.map((k) => kv.delete(k.name)));
						if (listed.list_complete) break;
						cursor = listed.cursor;
					}
				} catch {
					// KV failure is non-fatal — L1 still cleared, and stale L2
					// entries expire naturally via TTL.
				}
			}
		},
 
		async getOrSet<T>(
			key: string,
			fetcher: () => Promise<T>,
			options?: DualCacheOptions,
		): Promise<T> {
			const cached = await this.get<T>(key);
			if (cached !== null) return cached;
 
			const value = await fetcher();
			await this.put(key, value, options);
			return value;
		},
	};
}
 
// Backward-compatible singleton cache (L1-only, for existing callers like taxonomy routes)
export const cache = {
	async get<T>(key: string): Promise<T | null> {
		return l1Get<T>(key);
	},
	async put<T>(key: string, value: T, options?: CacheOptions): Promise<void> {
		l1Put(key, value, options?.expirationTtl);
	},
	async delete(key: string): Promise<void> {
		l1Delete(key);
	},
	async clear(): Promise<void> {
		// Drops every L1 entry regardless of prefix or TTL.
		l1Cache.clear();
	},
	async getOrSet<T>(
		key: string,
		fetcher: () => Promise<T>,
		options?: CacheOptions,
	): Promise<T> {
		// Cache-aside: serve the hit, otherwise fetch and store.
		const hit = l1Get<T>(key);
		if (hit !== null) return hit;
		const fresh = await fetcher();
		l1Put(key, fresh, options?.expirationTtl);
		return fresh;
	},
};
 
// Cache keys constants for taxonomy data
export const CACHE_KEYS = {
	TAXONOMY_ALL: "taxonomy:all",
	TAXONOMY_CRITICAL: "taxonomy:critical",
	TAXONOMY_EXTENDED: "taxonomy:extended",
	TAXONOMY_BUSINESS_TYPES: "taxonomy:business-types",
	TAXONOMY_EXECUTION_MODELS: "taxonomy:execution-models",
	TAXONOMY_CUSTOMER_SEGMENTS: "taxonomy:customer-segments",
	TAXONOMY_PROJECT_SCALES: "taxonomy:project-scales",
	TAXONOMY_SERVICE_CATEGORIES: "taxonomy:service-categories",
	TAXONOMY_MATERIAL_TAGS: "taxonomy:material-tags",
	TAXONOMY_STYLE_TAGS: "taxonomy:style-tags",
	TAXONOMY_BRANDS: "taxonomy:brands",
	TAXONOMY_CITIES: "taxonomy:cities",
	TAXONOMY_ROOM_TYPES: "taxonomy:room-types",
	TAXONOMY_TIMELINE_CATEGORIES: "taxonomy:timeline-categories",
	TAXONOMY_ENRICHMENT: "taxonomy:enrichment-tables",

	// Marketplace consolidated endpoints
	MARKETPLACE_HOMEPAGE: "marketplace:homepage",
	MARKETPLACE_ROOM_CATEGORIES: "marketplace:room-categories",
	/** Static key for marketplace stats (not per-pro; no helper needed). */
	MARKETPLACE_STATS: "marketplace:stats",
	/** Key prefix — build full keys with the MARKETPLACE.proFull(proId) helper */
	MARKETPLACE_PRO_FULL_PREFIX: "marketplace:pro:",
	/** Key prefix — build full keys with the MARKETPLACE.proProjects(proId) helper */
	PRO_PROJECTS_PREFIX: "pro:",
} as const;
 
/** Helpers for dynamic cache key construction */
export const MARKETPLACE = {
	proFull(proId: string) {
		return `marketplace:pro:${proId}:full` as const;
	},
	proProjects(proId: string) {
		return `pro:${proId}:projects` as const;
	},
	proStats(proId: string) {
		return `pro:${proId}:stats` as const;
	},
	taxonomyZones(cityId: string) {
		return `taxonomy:zones:${cityId}` as const;
	},
} as const;
 
/** Audience classification cache keys (B5.a). 60s TTL — audience shouldn't
 *  shift mid-compose. Prefix-invalidated via deletePattern on any write to
 *  consent_attestations for this pro. */
export const AUDIENCE_RESOLVE = {
	proPrefix(proId: string) {
		return `audience:resolve:${proId}:` as const;
	},
	key(proId: string, filterHash: string) {
		return `audience:resolve:${proId}:${filterHash}` as const;
	},
} as const;
 
// Default TTL for taxonomy cache (5 minutes)
export const TAXONOMY_CACHE_TTL = 300;

// TTL constants for different cache types.
// NOTE(review): *_L1 values above 60 are clamped to 60s at write time —
// l1Put applies Math.min(ttl, MAX_L1_TTL). So TAXONOMY_L1 (300) and
// MARKETPLACE_ROOM_CATEGORIES_L1 (300) behave as 60s in L1.
export const CACHE_TTL = {
	SESSION_L1: 60, // 60s - short to limit stale data after logout
	SESSION_L2: 300, // 5 min
	ROLES_L1: 60, // 60s
	ROLES_L2: 300, // 5 min
	TAXONOMY_L1: 300, // 5 min (effectively 60s — see clamp note above)
	TAXONOMY_L2: 1800, // 30 min
	PROJECTS_L1: 30, // 30s
	PROJECTS_L2: 120, // 2 min
	INQUIRIES_L1: 15, // 15s - inquiries change more frequently
	INQUIRIES_L2: 60, // 1 min
	STATS_L1: 60, // 60s
	STATS_L2: 300, // 5 min

	// Marketplace consolidated endpoints
	MARKETPLACE_HOMEPAGE_L1: 60, // 1 min
	MARKETPLACE_HOMEPAGE_L2: 300, // 5 min
	MARKETPLACE_ROOM_CATEGORIES_L1: 300, // 5 min (effectively 60s — see clamp note above)
	MARKETPLACE_ROOM_CATEGORIES_L2: 1800, // 30 min
	MARKETPLACE_PRO_FULL_L1: 60, // 1 min
	MARKETPLACE_PRO_FULL_L2: 300, // 5 min
	// Pros listing — keyed by filter combo. The Hyderabad SEO pages each
	// fire `getPros({ cityIds: 'hyderabad', limit: 200 })` with no cache,
	// adding 1.5-3s of TTFB to every locality/zone/segment render. Short
	// L1 + 5-min L2 keeps fresh enough for editorial work, kills the cold
	// path for Lighthouse / repeat visitors.
	MARKETPLACE_PROS_LIST_L1: 60, // 1 min
	MARKETPLACE_PROS_LIST_L2: 300, // 5 min
	PRO_PROJECTS_L1: 30, // 30s
	PRO_PROJECTS_L2: 120, // 2 min
	PRO_STATS_L1: 60, // 1 min
	PRO_STATS_L2: 300, // 5 min
} as const;