An easy-to-host PDS on the AT Protocol, for macOS. Grandma-approved.

feat(relay): add V002 database migration for auth + identity tables (MM-138)

Creates V002__auth_identity.sql with all 12 Wave 2 tables: accounts,
handles, did_documents, signing_keys, devices, claim_codes, sessions,
refresh_tokens, oauth_clients, oauth_authorization_codes, oauth_tokens,
and oauth_par_requests. Adds the 4 required indexes (unique email,
claim_codes/refresh_tokens/oauth_tokens did lookups). Uses WITHOUT ROWID
on tables with PK-only access paths. Updates MIGRATIONS static and
existing row-count assertions to be future-proof via MIGRATIONS.len().
Adds 9 V002 tests covering table existence, idempotency, FK enforcement,
unique email, and EXPLAIN QUERY PLAN for all 4 non-trivial indexes.

authored by malpercio.dev and committed by

Tangled 81efcfbf 6b8bdd69

+379 -20
+5 -5
crates/relay/src/app.rs
··· 18 18 self.0.get(key).and_then(|v| { 19 19 v.to_str().map_or_else( 20 20 |_| { 21 - tracing::debug!(header = key, "trace propagation header contains non-UTF-8 bytes; ignoring"); 21 + tracing::debug!( 22 + header = key, 23 + "trace propagation header contains non-UTF-8 bytes; ignoring" 24 + ); 22 25 None 23 26 }, 24 27 Some, ··· 247 250 map.insert("traceparent", "00-abc123-def456-01".parse().unwrap()); 248 251 249 252 let carrier = HeaderMapCarrier(&map); 250 - assert_eq!( 251 - carrier.get("traceparent"), 252 - Some("00-abc123-def456-01") 253 - ); 253 + assert_eq!(carrier.get("traceparent"), Some("00-abc123-def456-01")); 254 254 } 255 255 256 256 #[test]
+137
crates/relay/src/db/migrations/V002__auth_identity.sql
··· 1 + -- V002: Auth + Identity tables 2 + -- Applied in a single transaction by the migration runner. 3 + 4 + -- ── Account & Identity ─────────────────────────────────────────────────────── 5 + 6 + CREATE TABLE accounts ( 7 + did TEXT NOT NULL, 8 + email TEXT NOT NULL, 9 + password_hash TEXT NOT NULL, 10 + created_at TEXT NOT NULL, 11 + updated_at TEXT NOT NULL, 12 + email_confirmed_at TEXT, 13 + deactivated_at TEXT, 14 + PRIMARY KEY (did) 15 + ); 16 + 17 + CREATE UNIQUE INDEX idx_accounts_email ON accounts (email); 18 + 19 + -- WITHOUT ROWID: handle is the only access path (handle lookups are always by PK). 20 + CREATE TABLE handles ( 21 + handle TEXT NOT NULL, 22 + did TEXT NOT NULL REFERENCES accounts (did), 23 + created_at TEXT NOT NULL, 24 + PRIMARY KEY (handle) 25 + ) WITHOUT ROWID; 26 + 27 + -- WITHOUT ROWID: DID documents are always fetched by DID (the PK). 28 + CREATE TABLE did_documents ( 29 + did TEXT NOT NULL, 30 + document TEXT NOT NULL, 31 + created_at TEXT NOT NULL, 32 + updated_at TEXT NOT NULL, 33 + PRIMARY KEY (did) 34 + ) WITHOUT ROWID; 35 + 36 + CREATE TABLE signing_keys ( 37 + id TEXT NOT NULL, 38 + did TEXT NOT NULL REFERENCES accounts (did), 39 + key_type TEXT NOT NULL, 40 + public_key TEXT NOT NULL, 41 + private_key_encrypted TEXT NOT NULL, 42 + created_at TEXT NOT NULL, 43 + PRIMARY KEY (id) 44 + ); 45 + 46 + -- ── Device & Provisioning ──────────────────────────────────────────────────── 47 + 48 + CREATE TABLE devices ( 49 + id TEXT NOT NULL, 50 + did TEXT NOT NULL REFERENCES accounts (did), 51 + device_name TEXT NOT NULL, 52 + user_agent TEXT NOT NULL, 53 + created_at TEXT NOT NULL, 54 + last_seen_at TEXT NOT NULL, 55 + PRIMARY KEY (id) 56 + ); 57 + 58 + CREATE TABLE claim_codes ( 59 + code TEXT NOT NULL, 60 + did TEXT NOT NULL REFERENCES accounts (did), 61 + expires_at TEXT NOT NULL, 62 + claimed_at TEXT, 63 + claimed_by_device_id TEXT REFERENCES devices (id), 64 + PRIMARY KEY (code) 65 + ); 66 + 67 + CREATE INDEX idx_claim_codes_did 
ON claim_codes (did); 68 + 69 + -- ── Sessions & Tokens ──────────────────────────────────────────────────────── 70 + 71 + CREATE TABLE sessions ( 72 + id TEXT NOT NULL, 73 + did TEXT NOT NULL REFERENCES accounts (did), 74 + device_id TEXT NOT NULL REFERENCES devices (id), 75 + created_at TEXT NOT NULL, 76 + expires_at TEXT NOT NULL, 77 + PRIMARY KEY (id) 78 + ); 79 + 80 + CREATE TABLE refresh_tokens ( 81 + jti TEXT NOT NULL, 82 + did TEXT NOT NULL REFERENCES accounts (did), 83 + session_id TEXT NOT NULL REFERENCES sessions (id), 84 + next_jti TEXT, 85 + expires_at TEXT NOT NULL, 86 + app_password_name TEXT, 87 + created_at TEXT NOT NULL, 88 + PRIMARY KEY (jti) 89 + ); 90 + 91 + CREATE INDEX idx_refresh_tokens_did ON refresh_tokens (did); 92 + 93 + -- ── OAuth ──────────────────────────────────────────────────────────────────── 94 + 95 + -- WITHOUT ROWID: OAuth clients are always looked up by client_id (the PK). 96 + CREATE TABLE oauth_clients ( 97 + client_id TEXT NOT NULL, 98 + client_metadata TEXT NOT NULL, 99 + created_at TEXT NOT NULL, 100 + PRIMARY KEY (client_id) 101 + ) WITHOUT ROWID; 102 + 103 + CREATE TABLE oauth_authorization_codes ( 104 + code TEXT NOT NULL, 105 + client_id TEXT NOT NULL REFERENCES oauth_clients (client_id), 106 + did TEXT NOT NULL REFERENCES accounts (did), 107 + code_challenge TEXT NOT NULL, 108 + code_challenge_method TEXT NOT NULL, 109 + redirect_uri TEXT NOT NULL, 110 + scope TEXT NOT NULL, 111 + expires_at TEXT NOT NULL, 112 + created_at TEXT NOT NULL, 113 + PRIMARY KEY (code) 114 + ); 115 + 116 + CREATE TABLE oauth_tokens ( 117 + id TEXT NOT NULL, 118 + client_id TEXT NOT NULL REFERENCES oauth_clients (client_id), 119 + did TEXT NOT NULL REFERENCES accounts (did), 120 + device_id TEXT REFERENCES devices (id), 121 + scope TEXT NOT NULL, 122 + expires_at TEXT NOT NULL, 123 + created_at TEXT NOT NULL, 124 + PRIMARY KEY (id) 125 + ); 126 + 127 + CREATE INDEX idx_oauth_tokens_did ON oauth_tokens (did); 128 + 129 + -- WITHOUT ROWID: 
PAR requests are always fetched or deleted by request_uri (the PK). 130 + CREATE TABLE oauth_par_requests ( 131 + request_uri TEXT NOT NULL, 132 + client_id TEXT NOT NULL REFERENCES oauth_clients (client_id), 133 + request_parameters TEXT NOT NULL, 134 + expires_at TEXT NOT NULL, 135 + created_at TEXT NOT NULL, 136 + PRIMARY KEY (request_uri) 137 + ) WITHOUT ROWID;
+227 -9
crates/relay/src/db/mod.rs
··· 27 27 sql: &'static str, 28 28 } 29 29 30 - static MIGRATIONS: &[Migration] = &[Migration { 31 - version: 1, 32 - sql: include_str!("migrations/V001__init.sql"), 33 - }]; 30 + static MIGRATIONS: &[Migration] = &[ 31 + Migration { 32 + version: 1, 33 + sql: include_str!("migrations/V001__init.sql"), 34 + }, 35 + Migration { 36 + version: 2, 37 + sql: include_str!("migrations/V002__auth_identity.sql"), 38 + }, 39 + ]; 34 40 35 41 /// Open a WAL-mode SQLite connection pool with a maximum of 1 connection. 36 42 /// ··· 184 190 } 185 191 186 192 /// Verify that successful migrations return Ok and bootstrap the schema_migrations table. 187 - /// This test asserts the distinct purpose: row count = 1 after first run. 193 + /// Row count equals the number of migrations defined in MIGRATIONS. 188 194 #[tokio::test] 189 195 async fn migrations_apply_on_first_run() { 190 196 let pool = in_memory_pool().await; ··· 194 200 .fetch_one(&pool) 195 201 .await 196 202 .unwrap(); 197 - assert_eq!(count, 1, "first run must insert exactly one row"); 203 + let expected = MIGRATIONS.len() as i64; 204 + assert_eq!( 205 + count, expected, 206 + "first run must insert one row per migration" 207 + ); 198 208 } 199 209 200 - /// Running migrations twice leaves only one row in schema_migrations. 210 + /// Running migrations twice leaves exactly one row per migration in schema_migrations. 
201 211 #[tokio::test] 202 212 async fn migrations_are_idempotent() { 203 213 let pool = in_memory_pool().await; ··· 208 218 .fetch_one(&pool) 209 219 .await 210 220 .unwrap(); 221 + let expected = MIGRATIONS.len() as i64; 211 222 assert_eq!( 212 - count, 1, 213 - "second run must not insert a duplicate migration row" 223 + count, expected, 224 + "second run must not insert duplicate migration rows" 214 225 ); 215 226 } 216 227 ··· 274 285 .await; 275 286 276 287 assert!(result.is_err(), "inserting duplicate key must fail"); 288 + } 289 + 290 + // ── V002 tests ─────────────────────────────────────────────────────────── 291 + 292 + /// Apply V002 on top of V001 and verify all 12 auth/identity tables exist. 293 + /// Uses PRAGMA table_info — non-empty result means the table was created. 294 + #[tokio::test] 295 + async fn v002_all_tables_exist() { 296 + let pool = in_memory_pool().await; 297 + run_migrations(&pool).await.unwrap(); 298 + 299 + let tables = [ 300 + "accounts", 301 + "handles", 302 + "did_documents", 303 + "signing_keys", 304 + "devices", 305 + "claim_codes", 306 + "sessions", 307 + "refresh_tokens", 308 + "oauth_clients", 309 + "oauth_authorization_codes", 310 + "oauth_tokens", 311 + "oauth_par_requests", 312 + ]; 313 + 314 + for table in tables { 315 + let rows: Vec<(i64,)> = sqlx::query_as(&format!("PRAGMA table_info({table})")) 316 + .fetch_all(&pool) 317 + .await 318 + .unwrap_or_else(|e| panic!("PRAGMA table_info({table}) failed: {e}")); 319 + assert!( 320 + !rows.is_empty(), 321 + "table '{table}' must exist after V002 migration" 322 + ); 323 + } 324 + } 325 + 326 + /// schema_migrations must contain exactly 2 rows after applying V001 + V002. 
327 + #[tokio::test] 328 + async fn v002_migration_count_is_two_after_both_migrations() { 329 + let pool = in_memory_pool().await; 330 + run_migrations(&pool).await.unwrap(); 331 + 332 + let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM schema_migrations") 333 + .fetch_one(&pool) 334 + .await 335 + .unwrap(); 336 + assert_eq!(count, 2, "both V001 and V002 must be recorded"); 337 + } 338 + 339 + /// Running all migrations twice must remain idempotent: still exactly 2 rows. 340 + #[tokio::test] 341 + async fn v002_migrations_are_idempotent() { 342 + let pool = in_memory_pool().await; 343 + run_migrations(&pool).await.unwrap(); 344 + run_migrations(&pool).await.unwrap(); 345 + 346 + let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM schema_migrations") 347 + .fetch_one(&pool) 348 + .await 349 + .unwrap(); 350 + assert_eq!(count, 2, "second run must be a no-op"); 351 + } 352 + 353 + /// accounts.email UNIQUE index must reject duplicate email addresses. 354 + #[tokio::test] 355 + async fn v002_accounts_unique_email_enforced() { 356 + let pool = in_memory_pool().await; 357 + run_migrations(&pool).await.unwrap(); 358 + 359 + sqlx::query( 360 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 361 + VALUES ('did:plc:aaa', 'a@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 362 + ) 363 + .execute(&pool) 364 + .await 365 + .unwrap(); 366 + 367 + let result = sqlx::query( 368 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 369 + VALUES ('did:plc:bbb', 'a@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 370 + ) 371 + .execute(&pool) 372 + .await; 373 + 374 + assert!(result.is_err(), "duplicate email must be rejected"); 375 + } 376 + 377 + /// PRAGMA foreign_keys = ON must cause handles.did FK violation to fail. 
378 + #[tokio::test] 379 + async fn v002_foreign_key_violation_rejected() { 380 + let pool = in_memory_pool().await; 381 + run_migrations(&pool).await.unwrap(); 382 + sqlx::query("PRAGMA foreign_keys = ON") 383 + .execute(&pool) 384 + .await 385 + .unwrap(); 386 + 387 + // Insert a handle referencing a DID that does not exist in accounts. 388 + let result = sqlx::query( 389 + "INSERT INTO handles (handle, did, created_at) 390 + VALUES ('alice.bsky.social', 'did:plc:nonexistent', '2024-01-01T00:00:00')", 391 + ) 392 + .execute(&pool) 393 + .await; 394 + 395 + assert!( 396 + result.is_err(), 397 + "FK violation on handles.did must be rejected with foreign_keys = ON" 398 + ); 399 + } 400 + 401 + /// EXPLAIN QUERY PLAN must show idx_refresh_tokens_did for a WHERE did = ? query. 402 + #[tokio::test] 403 + async fn v002_index_refresh_tokens_did_used() { 404 + let pool = in_memory_pool().await; 405 + run_migrations(&pool).await.unwrap(); 406 + 407 + let plan: Vec<(i64, i64, i64, String)> = sqlx::query_as( 408 + "EXPLAIN QUERY PLAN SELECT * FROM refresh_tokens WHERE did = 'did:plc:aaa'", 409 + ) 410 + .fetch_all(&pool) 411 + .await 412 + .unwrap(); 413 + 414 + let detail = plan 415 + .iter() 416 + .map(|r| r.3.as_str()) 417 + .collect::<Vec<_>>() 418 + .join("\n"); 419 + assert!( 420 + detail.contains("idx_refresh_tokens_did"), 421 + "refresh_tokens WHERE did query must use idx_refresh_tokens_did; got: {detail}" 422 + ); 423 + } 424 + 425 + /// EXPLAIN QUERY PLAN must show idx_oauth_tokens_did for a WHERE did = ? query. 
426 + #[tokio::test] 427 + async fn v002_index_oauth_tokens_did_used() { 428 + let pool = in_memory_pool().await; 429 + run_migrations(&pool).await.unwrap(); 430 + 431 + let plan: Vec<(i64, i64, i64, String)> = sqlx::query_as( 432 + "EXPLAIN QUERY PLAN SELECT * FROM oauth_tokens WHERE did = 'did:plc:aaa'", 433 + ) 434 + .fetch_all(&pool) 435 + .await 436 + .unwrap(); 437 + 438 + let detail = plan 439 + .iter() 440 + .map(|r| r.3.as_str()) 441 + .collect::<Vec<_>>() 442 + .join("\n"); 443 + assert!( 444 + detail.contains("idx_oauth_tokens_did"), 445 + "oauth_tokens WHERE did query must use idx_oauth_tokens_did; got: {detail}" 446 + ); 447 + } 448 + 449 + /// EXPLAIN QUERY PLAN must show idx_claim_codes_did for a WHERE did = ? query. 450 + #[tokio::test] 451 + async fn v002_index_claim_codes_did_used() { 452 + let pool = in_memory_pool().await; 453 + run_migrations(&pool).await.unwrap(); 454 + 455 + let plan: Vec<(i64, i64, i64, String)> = sqlx::query_as( 456 + "EXPLAIN QUERY PLAN SELECT * FROM claim_codes WHERE did = 'did:plc:aaa'", 457 + ) 458 + .fetch_all(&pool) 459 + .await 460 + .unwrap(); 461 + 462 + let detail = plan 463 + .iter() 464 + .map(|r| r.3.as_str()) 465 + .collect::<Vec<_>>() 466 + .join("\n"); 467 + assert!( 468 + detail.contains("idx_claim_codes_did"), 469 + "claim_codes WHERE did query must use idx_claim_codes_did; got: {detail}" 470 + ); 471 + } 472 + 473 + /// EXPLAIN QUERY PLAN must show idx_accounts_email for a WHERE email = ? query. 
474 + #[tokio::test] 475 + async fn v002_index_accounts_email_used() { 476 + let pool = in_memory_pool().await; 477 + run_migrations(&pool).await.unwrap(); 478 + 479 + let plan: Vec<(i64, i64, i64, String)> = sqlx::query_as( 480 + "EXPLAIN QUERY PLAN SELECT * FROM accounts WHERE email = 'a@example.com'", 481 + ) 482 + .fetch_all(&pool) 483 + .await 484 + .unwrap(); 485 + 486 + let detail = plan 487 + .iter() 488 + .map(|r| r.3.as_str()) 489 + .collect::<Vec<_>>() 490 + .join("\n"); 491 + assert!( 492 + detail.contains("idx_accounts_email"), 493 + "accounts WHERE email query must use idx_accounts_email; got: {detail}" 494 + ); 277 495 } 278 496 279 497 /// WAL mode requires a real file — use tempfile here, not :memory:.
+4 -1
crates/relay/src/routes/describe_server.rs
··· 148 148 149 149 #[test] 150 150 fn resolve_did_returns_configured_did() { 151 - let did = super::resolve_did(&Some("did:plc:abc123".to_string()), "https://pds.example.com"); 151 + let did = super::resolve_did( 152 + &Some("did:plc:abc123".to_string()), 153 + "https://pds.example.com", 154 + ); 152 155 assert_eq!(did, "did:plc:abc123"); 153 156 } 154 157
+6 -5
crates/relay/src/routes/health.rs
··· 4 4 // Processes: none (response shape is trivial — no pure core to extract) 5 5 // Returns: JSON response with version and db status 6 6 7 - use axum::{extract::State, http::StatusCode, response::{IntoResponse, Json}}; 7 + use axum::{ 8 + extract::State, 9 + http::StatusCode, 10 + response::{IntoResponse, Json}, 11 + }; 8 12 use serde::Serialize; 9 13 10 14 use crate::app::AppState; ··· 18 22 pub async fn health(State(state): State<AppState>) -> impl IntoResponse { 19 23 let version = env!("CARGO_PKG_VERSION"); 20 24 match sqlx::query("SELECT 1").execute(&state.db).await { 21 - Ok(_) => ( 22 - StatusCode::OK, 23 - Json(HealthResponse { version, db: "ok" }), 24 - ), 25 + Ok(_) => (StatusCode::OK, Json(HealthResponse { version, db: "ok" })), 25 26 Err(e) => { 26 27 tracing::error!(error = %e, "db health check failed"); 27 28 (