···11+/**
22+ * Worker Database Client
33+ * Kysely connection for PostgreSQL — uses a smaller pool than the API
44+ * since workers process jobs sequentially rather than handling concurrent requests.
55+ */
66+77+import { Kysely, PostgresDialect, sql } from "kysely";
88+import { Pool } from "pg";
99+import type { Database } from "@atlast/shared/types/database";
// NOTE(review): this module reads DATABASE_URL at import time. index.ts calls
// dotenv's config() in its module body, AFTER all imports have been evaluated,
// so this value may be undefined unless the environment is set externally.
// TODO confirm load order (e.g. `import "dotenv/config"` as the first import).
const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  max: 5, // Smaller pool — workers run fewer concurrent queries than the API
  idleTimeoutMillis: 30000, // Release clients idle for 30 s
  connectionTimeoutMillis: 2000, // Fail fast when Postgres is unreachable
});

// Typed Kysely instance shared by all worker job handlers.
export const db = new Kysely<Database>({
  dialect: new PostgresDialect({ pool }),
});
2121+2222+/**
2323+ * Verify database connectivity on startup.
2424+ * Throws if the connection cannot be established.
2525+ */
2626+export async function testConnection(): Promise<void> {
2727+ try {
2828+ await sql`SELECT 1`.execute(db);
2929+ console.log("✅ [WORKER] Database connection successful");
3030+ } catch (error) {
3131+ console.error("❌ [WORKER] Database connection failed:", error);
3232+ throw error;
3333+ }
3434+}
3535+3636+/**
3737+ * Destroy the connection pool.
3838+ * Call during graceful shutdown to avoid hanging processes.
3939+ */
4040+export async function closeConnection(): Promise<void> {
4141+ await db.destroy();
4242+ console.log("[WORKER] Database connection pool closed");
4343+}
+107
packages/worker/src/index.ts
···11+/**
22+ * BullMQ Worker — Main Entry Point
33+ * Connects to Redis and PostgreSQL, registers job handlers,
44+ * and schedules the daily cleanup cron job.
55+ */
66+77+import { Worker } from "bullmq";
88+import type { ConnectionOptions } from "bullmq";
99+import { config } from "dotenv";
1010+import { cleanupQueue, testRedisConnection } from "./queues";
1111+import { handleCleanupJob } from "./jobs/cleanupJob";
1212+import { testConnection, closeConnection } from "./db/client";
// Load .env before anything reads process.env
// NOTE(review): import declarations are evaluated BEFORE this statement runs,
// and ./queues plus ./db/client read REDIS_URL / DATABASE_URL during their
// module evaluation — so they may see an unpopulated env. Consider making
// `import "dotenv/config"` the first import instead. TODO confirm load order.
config();
1616+1717+/**
1818+ * Build connection options for the Worker.
1919+ * We use the same URL-to-options parser as queues.ts.
2020+ * BullMQ requires separate connection objects for Queue vs Worker.
2121+ */
2222+function buildWorkerConnectionOptions(): ConnectionOptions {
2323+ const url = process.env.REDIS_URL ?? "redis://localhost:6379";
2424+ const parsed = new URL(url);
2525+ return {
2626+ host: parsed.hostname,
2727+ port: Number(parsed.port) || 6379,
2828+ db: Number(parsed.pathname.slice(1)) || 0,
2929+ maxRetriesPerRequest: null,
3030+ enableReadyCheck: false,
3131+ };
3232+}
3333+3434+/**
3535+ * Cleanup Worker
3636+ * concurrency: 1 — cleanup is idempotent but there's no benefit to parallel
3737+ * runs; keep it simple for Phase 1.
3838+ */
3939+const cleanupWorker = new Worker("cleanup", handleCleanupJob, {
4040+ connection: buildWorkerConnectionOptions(),
4141+ concurrency: 1,
4242+ lockDuration: 60000, // 60 s max lock — cleanup shouldn't take longer
4343+});
4444+4545+cleanupWorker.on("completed", (job) => {
4646+ console.log(`[WORKER] Job ${job.id} completed`);
4747+});
4848+4949+cleanupWorker.on("failed", (job, error) => {
5050+ console.error(`[WORKER] Job ${job?.id} failed: ${error.message}`);
5151+});
5252+5353+cleanupWorker.on("error", (error) => {
5454+ console.error("[WORKER] Worker error:", error);
5555+});
5656+5757+async function start(): Promise<void> {
5858+ console.log("🚀 [WORKER] Starting BullMQ worker...");
5959+6060+ await testRedisConnection();
6161+ await testConnection();
6262+6363+ // Add recurring cleanup job — fixed jobId prevents duplicates on restart
6464+ await cleanupQueue.add(
6565+ "daily-cleanup",
6666+ {},
6767+ {
6868+ repeat: { pattern: "0 2 * * *" }, // 2 AM daily
6969+ jobId: "cleanup-daily",
7070+ }
7171+ );
7272+7373+ console.log("✅ [WORKER] Worker started");
7474+ console.log("📅 [WORKER] Cleanup scheduled for 2 AM daily");
7575+ console.log("⏳ [WORKER] Waiting for jobs...");
7676+}
7777+7878+async function shutdown(): Promise<void> {
7979+ console.log("\n⚠️ [WORKER] Shutting down...");
8080+ try {
8181+ await cleanupWorker.close();
8282+ await closeConnection();
8383+ console.log("✅ [WORKER] Shutdown complete");
8484+ process.exit(0);
8585+ } catch (error) {
8686+ console.error("❌ [WORKER] Error during shutdown:", error);
8787+ process.exit(1);
8888+ }
8989+}
// Signal handlers — `void` marks the promise as intentionally fire-and-forget.
process.on("SIGTERM", () => { void shutdown(); });
process.on("SIGINT", () => { void shutdown(); });

// NOTE(review): shutdown() exits 0 on a clean teardown, so a fatal
// uncaughtException/unhandledRejection can still end as a "successful" exit.
// Confirm whether supervisors (k8s, systemd) should see a non-zero code here.
process.on("uncaughtException", (error) => {
  console.error("❌ [WORKER] Uncaught exception:", error);
  void shutdown();
});

process.on("unhandledRejection", (reason) => {
  console.error("❌ [WORKER] Unhandled rejection:", reason);
  void shutdown();
});

// Bootstrap — exit non-zero if startup (connectivity checks, scheduling) fails.
start().catch((error: unknown) => {
  console.error("❌ [WORKER] Failed to start:", error);
  process.exit(1);
});
+48
packages/worker/src/jobs/cleanupJob.ts
···11+/**
22+ * Cleanup Job Handler
33+ * Invokes the PostgreSQL cleanup_transient_data() function which atomically
44+ * removes expired OAuth states, sessions, and old notifications.
55+ */
66+77+import type { Job } from "bullmq";
88+import { sql } from "kysely";
99+import { db } from "../db/client";
1010+1111+export interface CleanupJobData {
1212+ // Scheduled job — no input data required
1313+}
1414+1515+export interface CleanupJobResult {
1616+ cleaned: boolean;
1717+ timestamp: string;
1818+}
1919+2020+/**
2121+ * Execute the cleanup job.
2222+ * Delegates to the cleanup_transient_data() SQL function defined in init-db.sql.
2323+ * Re-throws on failure so BullMQ can mark the job failed and schedule a retry.
2424+ */
2525+export async function handleCleanupJob(
2626+ job: Job<CleanupJobData>
2727+): Promise<CleanupJobResult> {
2828+ const startTime = Date.now();
2929+ console.log(`[CLEANUP] Starting cleanup job ${job.id}...`);
3030+3131+ try {
3232+ await sql`SELECT cleanup_transient_data()`.execute(db);
3333+3434+ const duration = Date.now() - startTime;
3535+ console.log(`[CLEANUP] ✅ Completed in ${duration}ms`);
3636+3737+ return { cleaned: true, timestamp: new Date().toISOString() };
3838+ } catch (error) {
3939+ console.error("[CLEANUP] ❌ Failed:", {
4040+ message: error instanceof Error ? error.message : String(error),
4141+ stack: error instanceof Error ? error.stack : undefined,
4242+ jobId: job.id,
4343+ attemptsMade: job.attemptsMade,
4444+ });
4545+4646+ throw error; // Let BullMQ handle retry logic
4747+ }
4848+}
+78
packages/worker/src/queues.ts
···11+/**
22+ * BullMQ Queue Configuration
33+ * Defines the cleanup queue used by both the API (to add jobs)
44+ * and the worker process (to consume them).
55+ *
66+ * We pass connection options objects (not Redis instances) to avoid a
77+ * TypeScript structural mismatch between the IORedis.Redis namespace type
88+ * used in BullMQ's ConnectionOptions and the default-imported Redis class.
99+ * BullMQ creates and manages its own Redis connections from these options.
1010+ */
1111+1212+import { Queue } from "bullmq";
1313+import Redis from "ioredis";
1414+import type { ConnectionOptions } from "bullmq";
1515+1616+/**
1717+ * Parse the REDIS_URL environment variable into a ConnectionOptions object.
1818+ * Supports: redis://host:port/db (database index via path segment)
1919+ */
2020+function buildConnectionOptions(): ConnectionOptions {
2121+ const url = process.env.REDIS_URL ?? "redis://localhost:6379";
2222+ const parsed = new URL(url);
2323+ return {
2424+ host: parsed.hostname,
2525+ port: Number(parsed.port) || 6379,
2626+ db: Number(parsed.pathname.slice(1)) || 0,
2727+ maxRetriesPerRequest: null, // Required by BullMQ — disables per-command timeout
2828+ enableReadyCheck: false, // Skip ready check RTT on startup
2929+ };
3030+}
3131+3232+/**
3333+ * Cleanup Queue
3434+ * Handles periodic removal of expired transient data:
3535+ * - oauth_states older than 1 hour
3636+ * - user_sessions past their expires_at
3737+ * - sent notification_queue rows older than 7 days
3838+ * - failed notification_queue rows older than 30 days
3939+ */
4040+export const cleanupQueue = new Queue("cleanup", {
4141+ connection: buildConnectionOptions(),
4242+ defaultJobOptions: {
4343+ attempts: 2, // Retry once on failure
4444+ backoff: {
4545+ type: "exponential",
4646+ delay: 5000, // 5 s → 10 s between retries
4747+ },
4848+ removeOnComplete: {
4949+ age: 86400, // Keep completed jobs for 24 h (debugging)
5050+ count: 10,
5151+ },
5252+ removeOnFail: {
5353+ age: 604800, // Keep failed jobs for 7 days
5454+ count: 100,
5555+ },
5656+ },
5757+});
5858+5959+/**
6060+ * Verify Redis connectivity using a short-lived client.
6161+ * Throws if the connection cannot be established.
6262+ */
6363+export async function testRedisConnection(): Promise<void> {
6464+ const url = process.env.REDIS_URL ?? "redis://localhost:6379";
6565+ const client = new Redis(url, {
6666+ maxRetriesPerRequest: null,
6767+ enableReadyCheck: false,
6868+ });
6969+ try {
7070+ await client.ping();
7171+ console.log("✅ [WORKER] Redis connection successful");
7272+ } catch (error) {
7373+ console.error("❌ [WORKER] Redis connection failed:", error);
7474+ throw error;
7575+ } finally {
7676+ client.disconnect();
7777+ }
7878+}