ATlast — you'll never need to find your favorites on another platform again. Find your favs in the ATmosphere.
atproto
at master 194 lines 8.5 kB view raw
---
# ATlast Production Docker Compose
# Run from repo root:   docker compose -f docker/docker-compose.yml up -d
# Run from docker/ dir: docker compose up -d

services:

  # ── PostgreSQL Database ──────────────────────────────────────────────────────
  # Stores all application data: sessions, uploads, matches, source accounts.
  # Lives ONLY on the backend network — cannot reach the internet.
  database:
    image: postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_USER: atlast
      # Quoted so a boolean- or number-looking password survives YAML typing
      # before Compose interpolates the variable.
      POSTGRES_PASSWORD: "${DB_PASSWORD}"
      POSTGRES_DB: atlast
    volumes:
      # Persist database data across container restarts.
      - pgdata:/var/lib/postgresql/data
      # Initialize schema on first run. Postgres runs scripts in
      # /docker-entrypoint-initdb.d/ only when the data volume is empty.
      # Bind-mount paths are relative to this compose file, so this works
      # from both invocation styles documented at the top.
      - ../scripts/init-db.sql:/docker-entrypoint-initdb.d/01-init.sql
    networks:
      - backend
    healthcheck:
      # pg_isready checks if PostgreSQL is accepting connections.
      # -d names the target database explicitly instead of relying on the
      # default (which falls back to the user name).
      # Other services use `condition: service_healthy` to wait for this.
      test: ["CMD-SHELL", "pg_isready -U atlast -d atlast"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s

  # ── Redis ────────────────────────────────────────────────────────────────────
  # Used by BullMQ for the job queue (cleanup worker).
  # Also on the backend network — not reachable from the internet.
  redis:
    image: redis:7-alpine
    restart: unless-stopped
    # --appendonly yes: write every operation to disk for durability.
    # Without this, a Redis restart loses all queued jobs.
    command: redis-server --appendonly yes
    volumes:
      - redisdata:/data
    networks:
      - backend
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 5s

  # ── Hono API Server ──────────────────────────────────────────────────────────
  # The main backend: handles all /api/* requests.
  # On both networks: talks to database/redis (backend), and receives requests
  # from the frontend nginx proxy (frontend).
  api:
    build:
      # Build context is the repo root (one level up from docker/).
      # All COPY paths in docker/api/Dockerfile are relative to here.
      context: ..
      dockerfile: docker/api/Dockerfile
    restart: unless-stopped
    environment:
      - NODE_ENV=production
      # Uses Docker's internal DNS: "database" resolves to the postgres container's IP.
      - DATABASE_URL=postgresql://atlast:${DB_PASSWORD}@database:5432/atlast
      - REDIS_URL=redis://redis:6379
      - OAUTH_PRIVATE_KEY=${OAUTH_PRIVATE_KEY}
      - FRONTEND_URL=${FRONTEND_URL}
      # TOKEN_ENCRYPTION_KEY is required by the auth middleware.
      # Generate with: node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
      # WARNING: The migration plan's docker-compose was missing this variable.
      - TOKEN_ENCRYPTION_KEY=${TOKEN_ENCRYPTION_KEY}
      - PORT=3000
    depends_on:
      database:
        # Wait until PostgreSQL is actually ready, not just started.
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - frontend
      - backend
    healthcheck:
      # wget is available in alpine (busybox); curl is not installed by default.
      # -q suppresses output; --spider checks the URL is reachable without
      # downloading the response body.
      test: ["CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s

  # ── BullMQ Worker ────────────────────────────────────────────────────────────
  # Background job processor. Runs the daily cleanup job at 2 AM.
  # Only needs the backend network (database + redis). No inbound connections.
  worker:
    build:
      context: ..
      dockerfile: docker/worker/Dockerfile
    restart: unless-stopped
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://atlast:${DB_PASSWORD}@database:5432/atlast
      - REDIS_URL=redis://redis:6379
    depends_on:
      database:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - backend

  # ── Frontend (Nginx) ─────────────────────────────────────────────────────────
  # Serves the compiled React app.
  # Proxies /api/* requests to the api container (same-origin, no CORS needed).
  frontend:
    build:
      context: ..
      dockerfile: docker/frontend/Dockerfile
    restart: unless-stopped
    depends_on:
      api:
        condition: service_healthy
    networks:
      - frontend
    labels:
      # Tell Traefik to route traffic to this container.
      # Without these labels, Traefik ignores this service (exposedbydefault=false).
      # DOMAIN defaults to "localhost" for local testing; set to production hostname in .env.
      - "traefik.enable=true"
      - "traefik.http.routers.frontend.rule=Host(`${DOMAIN:-localhost}`)"
      - "traefik.http.routers.frontend.entrypoints=web"
      - "traefik.http.services.frontend.loadbalancer.server.port=80"

  # ── Traefik Reverse Proxy ────────────────────────────────────────────────────
  # Sits in front of the frontend and handles routing.
  # Only binds to 127.0.0.1 — Cloudflare Tunnel connects to it locally.
  # The internet never connects directly to this port.
  traefik:
    image: traefik:v3.0
    restart: unless-stopped
    command:
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.web.address=:80"
      - "--accesslog=true"
    ports:
      # Bind only to localhost. Cloudflare Tunnel reaches this port.
      # Traffic path: Internet → Cloudflare → Tunnel → 127.0.0.1:80 → Traefik → frontend
      # Quoted so YAML never misreads the colon-separated mapping.
      - "127.0.0.1:80:80"
    volumes:
      # Read-only access to Docker socket so Traefik can discover containers.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - frontend
    labels:
      # Do not expose Traefik itself through Traefik.
      - "traefik.enable=false"

  # ── Cloudflare Tunnel ────────────────────────────────────────────────────────
  # Creates an outbound tunnel from this machine to Cloudflare's network.
  # Your home server never needs an open inbound port.
  # Tunnel token is obtained from the Cloudflare Zero Trust dashboard.
  # NOTE(review): consider pinning a specific cloudflared version instead of
  # :latest for reproducible deploys.
  cloudflared:
    image: cloudflare/cloudflared:latest
    restart: unless-stopped
    command: tunnel run
    environment:
      - TUNNEL_TOKEN=${CLOUDFLARE_TUNNEL_TOKEN}
    networks:
      - frontend

# ── Networks ──────────────────────────────────────────────────────────────────
networks:
  # frontend network: traefik, cloudflared, api, and frontend can communicate.
  # Has normal internet access (needed by cloudflared to reach Cloudflare).
  frontend:
    driver: bridge

  # backend network: database, redis, api, and worker can communicate.
  # internal: true means NO outbound internet access from any container on this network.
  # Even if the database or redis container is compromised, it cannot phone home.
  backend:
    driver: bridge
    internal: true

# ── Volumes ───────────────────────────────────────────────────────────────────
volumes:
  # Docker-managed volumes persist data across container restarts and rebuilds.
  # Data lives at /var/lib/docker/volumes/ on the host machine.
  pgdata:
  redisdata: