feat(phase-4): WS1 — Production Hardening (Redis rate limiting, DB pool, health endpoint, k6)

Rate limiting:
- Replace in-memory express-rate-limit with ioredis + rate-limiter-flexible (sliding window)
- Graceful fallback to RateLimiterMemory when Redis unreachable
- RATE_LIMIT_WINDOW_MS / RATE_LIMIT_MAX_REQUESTS env var config
- Retry-After header on 429 responses
- agentidp_rate_limit_hits_total Prometheus counter

Database pool:
- Explicit pg.Pool config via DB_POOL_MAX/MIN/IDLE_TIMEOUT_MS/CONNECTION_TIMEOUT_MS
- Defaults: max=20, min=2, idle=30s, conn timeout=5s
- agentidp_db_pool_active_connections + agentidp_db_pool_waiting_requests gauges

Health endpoint:
- GET /health/detailed — per-service status (database, Redis, Vault, OPA)
- healthy / degraded (>1000ms) / unreachable classification
- HTTP 200 (all healthy) / 207 (any degraded) / 503 (any unreachable)

Load tests:
- tests/load/ with k6 scenarios for agent registration (100 VUs), token issuance (1000 VUs), credential rotation (50 VUs)
- npm run load-test script

Tests: 586 passing, zero TypeScript errors

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
SentryAgent.ai Developer
2026-04-02 04:20:37 +00:00
parent b0f70b7ac4
commit 1b682c22b2
16 changed files with 1467 additions and 113 deletions

View File

@@ -0,0 +1,78 @@
/**
* ioredis singleton client for rate-limiter-flexible.
*
* This client is separate from the `src/cache/redis.ts` client (which uses the
* `redis` npm package and handles token revocation / OIDC caching). The
* rate-limiter-flexible library requires an ioredis-compatible client.
*
* Guard: when `REDIS_RATE_LIMIT_ENABLED` is not `"true"` the factory returns
* `null` and the rate limiter falls back to in-process memory (RateLimiterMemory).
*/
import Redis from 'ioredis';
let ioredisClient: Redis | null = null;
/**
 * Returns a singleton ioredis client for rate limiting, or `null` when Redis
 * rate limiting is disabled via the `REDIS_RATE_LIMIT_ENABLED` env var.
 *
 * The client is lazily initialised on first call. Connection errors are logged
 * but do NOT throw — callers must handle a `null` return and fall back to
 * in-memory rate limiting.
 *
 * @returns The ioredis client instance, or `null` when disabled / unreachable.
 */
export function getRateLimitRedisClient(): Redis | null {
  const enabled = process.env['REDIS_RATE_LIMIT_ENABLED'];
  if (enabled !== 'true') {
    return null;
  }
  if (ioredisClient) {
    return ioredisClient;
  }
  const redisUrl = process.env['REDIS_URL'] ?? 'redis://localhost:6379';
  // Hold the new client in a local so the event handlers below can tell
  // whether the module singleton still points at THIS client (it may have
  // been replaced after an earlier error reset the singleton).
  const client = new Redis(redisUrl, {
    // Do not throw on connection failure — caller handles null / fallback.
    lazyConnect: false,
    enableReadyCheck: true,
    maxRetriesPerRequest: 1,
    // Reconnect strategy: give up quickly so the health check / fallback fires.
    retryStrategy: (times: number): number | null => {
      if (times >= 3) {
        return null; // stop retrying — triggers 'error' event
      }
      return Math.min(times * 200, 1000);
    },
  });
  client.on('error', (err: Error) => {
    // eslint-disable-next-line no-console
    console.error('[RateLimitRedis] Connection error — rate limiter will use memory fallback:', err.message);
    // Only reset the singleton if it still refers to this client; a stale
    // client's late error event must not clobber a healthy replacement.
    if (ioredisClient === client) {
      ioredisClient = null;
    }
    // Tear down the abandoned client so its socket / retry timers do not
    // leak — callers will create a fresh client on the next invocation.
    client.disconnect();
  });
  client.on('connect', () => {
    // eslint-disable-next-line no-console
    console.log('[RateLimitRedis] Connected — Redis-backed rate limiting active.');
  });
  ioredisClient = client;
  return client;
}
/**
 * Gracefully shuts down the rate-limit ioredis client, if one exists, and
 * clears the singleton so a subsequent `getRateLimitRedisClient()` call can
 * establish a fresh connection. Intended for process shutdown hooks and
 * test teardown.
 *
 * @returns Promise that resolves once the QUIT handshake has completed
 *          (resolves immediately when no client is active).
 */
export async function closeRateLimitRedisClient(): Promise<void> {
  const client = ioredisClient;
  if (client === null) {
    return;
  }
  await client.quit();
  ioredisClient = null;
}