An easy-to-host PDS for the AT Protocol on macOS. Grandma-approved.

fix(relay): address PR review issues for MM-138 V002 migration

Critical:
- Enable foreign_keys=true in open_pool via SqliteConnectOptions so all
FK constraints are enforced at runtime, not just in tests
- Add 4 missing did indexes: idx_handles_did, idx_signing_keys_did,
idx_devices_did, idx_sessions_did

Important:
- Add comment to did_documents explaining no FK to accounts (intentional:
caches external DIDs from remote PDSs, not only local accounts)
- Expand FK test coverage: sessions.device_id, refresh_tokens.session_id,
oauth_authorization_codes.client_id
- Add core auth chain insert test: accounts → devices → sessions →
refresh_tokens (validates column names and NOT NULL constraints end-to-end)

Suggestions:
- Replace v002_migrations_are_idempotent (duplicate of generic test) with
v002_tables_survive_second_migration_run (behavioral, not count-based)
- Remove redundant PRAGMA foreign_keys = ON from FK test (pool handles it)
- Add EXPLAIN QUERY PLAN tests for all 4 new did indexes
- Update db/CLAUDE.md Invariants and Key Files sections

authored by malpercio.dev

Tangled 3d8e2da5 81efcfbf

+263 -15
+2
crates/relay/src/db/CLAUDE.md
··· 26 26 - Migration SQL files are append-only; never modify an applied migration 27 27 - Migration versions are sequential positive integers starting at 1 28 28 - WAL mode is always enabled (set via SqliteConnectOptions, not raw PRAGMA) 29 + - Foreign key enforcement is always on (set via SqliteConnectOptions .foreign_keys(true), not raw PRAGMA) 29 30 30 31 ## Key Files 31 32 - `mod.rs` - Pool creation, migration runner, DbError, tests 32 33 - `migrations/V001__init.sql` - server_metadata table (WITHOUT ROWID) 34 + - `migrations/V002__auth_identity.sql` - 12 Wave 2 tables: accounts, handles, did_documents, signing_keys, devices, claim_codes, sessions, refresh_tokens, oauth_clients, oauth_authorization_codes, oauth_tokens, oauth_par_requests
+14
crates/relay/src/db/migrations/V002__auth_identity.sql
··· 24 24 PRIMARY KEY (handle) 25 25 ) WITHOUT ROWID; 26 26 27 + -- Reverse lookup: find all handles for a given DID (e.g., handle resolution). 28 + CREATE INDEX idx_handles_did ON handles (did); 29 + 27 30 -- WITHOUT ROWID: DID documents are always fetched by DID (the PK). 31 + -- No FK to accounts intentionally: this table caches DID documents for any DID 32 + -- (including external DIDs from remote PDSs), so the did need not be a local account. 28 33 CREATE TABLE did_documents ( 29 34 did TEXT NOT NULL, 30 35 document TEXT NOT NULL, ··· 43 48 PRIMARY KEY (id) 44 49 ); 45 50 51 + -- Key lookup by account (e.g., fetch all keys when building a DID document). 52 + CREATE INDEX idx_signing_keys_did ON signing_keys (did); 53 + 46 54 -- ── Device & Provisioning ──────────────────────────────────────────────────── 47 55 48 56 CREATE TABLE devices ( ··· 54 62 last_seen_at TEXT NOT NULL, 55 63 PRIMARY KEY (id) 56 64 ); 65 + 66 + -- Device listing by account (e.g., show all devices for a user). 67 + CREATE INDEX idx_devices_did ON devices (did); 57 68 58 69 CREATE TABLE claim_codes ( 59 70 code TEXT NOT NULL, ··· 76 87 expires_at TEXT NOT NULL, 77 88 PRIMARY KEY (id) 78 89 ); 90 + 91 + -- Session listing and revocation by account. 92 + CREATE INDEX idx_sessions_did ON sessions (did); 79 93 80 94 CREATE TABLE refresh_tokens ( 81 95 jti TEXT NOT NULL,
+247 -15
crates/relay/src/db/mod.rs
··· 42 42 /// 43 43 /// Accepts any sqlx URL string (e.g. `"sqlite:relay.db"`, `"sqlite::memory:"`). 44 44 /// `create_if_missing` is enabled so the file is created on first run. 45 - /// WAL journal mode is set via `SqliteConnectOptions`, and sqlx re-issues the 46 - /// journal_mode PRAGMA on each new connection establishment to ensure the mode persists. 45 + /// WAL journal mode and foreign key enforcement are set via `SqliteConnectOptions`; 46 + /// sqlx re-issues both PRAGMAs on every new connection so they survive reconnects. 47 47 /// 48 48 /// Note: Pool creation succeeds even if the file path is invalid; the failure surfaces 49 49 /// at the first query. To fail fast on bad config, consider adding `min_connections(1)`. ··· 52 52 let opts = SqliteConnectOptions::from_str(url) 53 53 .map_err(|e| DbError::InvalidUrl(e.to_string()))? 54 54 .create_if_missing(true) 55 - .journal_mode(SqliteJournalMode::Wal); 55 + .journal_mode(SqliteJournalMode::Wal) 56 + .foreign_keys(true); 56 57 57 58 SqlitePoolOptions::new() 58 59 .max_connections(1) ··· 336 337 assert_eq!(count, 2, "both V001 and V002 must be recorded"); 337 338 } 338 339 339 - /// Running all migrations twice must remain idempotent: still exactly 2 rows. 340 + /// Running migrations twice must not drop or recreate V002 tables. 341 + /// (Row-count idempotency is already covered by the generic migrations_are_idempotent test.) 340 342 #[tokio::test] 341 - async fn v002_migrations_are_idempotent() { 343 + async fn v002_tables_survive_second_migration_run() { 342 344 let pool = in_memory_pool().await; 343 345 run_migrations(&pool).await.unwrap(); 344 346 run_migrations(&pool).await.unwrap(); 345 347 346 - let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM schema_migrations") 348 + // Spot-check a few tables to confirm they still exist and are writable. 
349 + sqlx::query( 350 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 351 + VALUES ('did:plc:zzz', 'z@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 352 + ) 353 + .execute(&pool) 354 + .await 355 + .unwrap(); 356 + 357 + let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM accounts") 347 358 .fetch_one(&pool) 348 359 .await 349 360 .unwrap(); 350 - assert_eq!(count, 2, "second run must be a no-op"); 361 + assert_eq!( 362 + count, 1, 363 + "accounts table must survive a second migration run" 364 + ); 351 365 } 352 366 353 367 /// accounts.email UNIQUE index must reject duplicate email addresses. ··· 374 388 assert!(result.is_err(), "duplicate email must be rejected"); 375 389 } 376 390 377 - /// PRAGMA foreign_keys = ON must cause handles.did FK violation to fail. 391 + /// FK enforcement is on by default (via open_pool .foreign_keys(true)). 392 + /// Inserting a handle with a nonexistent DID must fail without any manual PRAGMA. 378 393 #[tokio::test] 379 - async fn v002_foreign_key_violation_rejected() { 394 + async fn v002_fk_handles_did_rejected() { 380 395 let pool = in_memory_pool().await; 381 396 run_migrations(&pool).await.unwrap(); 382 - sqlx::query("PRAGMA foreign_keys = ON") 383 - .execute(&pool) 384 - .await 385 - .unwrap(); 386 397 387 - // Insert a handle referencing a DID that does not exist in accounts. 388 398 let result = sqlx::query( 389 399 "INSERT INTO handles (handle, did, created_at) 390 400 VALUES ('alice.bsky.social', 'did:plc:nonexistent', '2024-01-01T00:00:00')", ··· 394 404 395 405 assert!( 396 406 result.is_err(), 397 - "FK violation on handles.did must be rejected with foreign_keys = ON" 407 + "FK violation on handles.did must be rejected by the pool (foreign_keys=true)" 408 + ); 409 + } 410 + 411 + /// sessions.device_id → devices.id FK must be enforced. 
412 + #[tokio::test] 413 + async fn v002_fk_sessions_device_id_rejected() { 414 + let pool = in_memory_pool().await; 415 + run_migrations(&pool).await.unwrap(); 416 + 417 + sqlx::query( 418 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 419 + VALUES ('did:plc:aaa', 'a@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 420 + ) 421 + .execute(&pool) 422 + .await 423 + .unwrap(); 424 + 425 + let result = sqlx::query( 426 + "INSERT INTO sessions (id, did, device_id, created_at, expires_at) 427 + VALUES ('sess1', 'did:plc:aaa', 'dev:nonexistent', '2024-01-01T00:00:00', '2024-01-02T00:00:00')", 428 + ) 429 + .execute(&pool) 430 + .await; 431 + 432 + assert!( 433 + result.is_err(), 434 + "FK violation on sessions.device_id must be rejected" 398 435 ); 399 436 } 400 437 438 + /// refresh_tokens.session_id → sessions.id FK must be enforced. 439 + #[tokio::test] 440 + async fn v002_fk_refresh_tokens_session_id_rejected() { 441 + let pool = in_memory_pool().await; 442 + run_migrations(&pool).await.unwrap(); 443 + 444 + sqlx::query( 445 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 446 + VALUES ('did:plc:aaa', 'a@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 447 + ) 448 + .execute(&pool) 449 + .await 450 + .unwrap(); 451 + 452 + let result = sqlx::query( 453 + "INSERT INTO refresh_tokens (jti, did, session_id, expires_at, created_at) 454 + VALUES ('jti1', 'did:plc:aaa', 'sess:nonexistent', '2024-01-02T00:00:00', '2024-01-01T00:00:00')", 455 + ) 456 + .execute(&pool) 457 + .await; 458 + 459 + assert!( 460 + result.is_err(), 461 + "FK violation on refresh_tokens.session_id must be rejected" 462 + ); 463 + } 464 + 465 + /// oauth_authorization_codes.client_id → oauth_clients.client_id FK must be enforced. 
466 + #[tokio::test] 467 + async fn v002_fk_oauth_authorization_codes_client_id_rejected() { 468 + let pool = in_memory_pool().await; 469 + run_migrations(&pool).await.unwrap(); 470 + 471 + sqlx::query( 472 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 473 + VALUES ('did:plc:aaa', 'a@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 474 + ) 475 + .execute(&pool) 476 + .await 477 + .unwrap(); 478 + 479 + let result = sqlx::query( 480 + "INSERT INTO oauth_authorization_codes 481 + (code, client_id, did, code_challenge, code_challenge_method, redirect_uri, scope, expires_at, created_at) 482 + VALUES ('code1', 'client:nonexistent', 'did:plc:aaa', 'challenge', 'S256', 483 + 'https://example.com/cb', 'atproto', '2024-01-02T00:00:00', '2024-01-01T00:00:00')", 484 + ) 485 + .execute(&pool) 486 + .await; 487 + 488 + assert!( 489 + result.is_err(), 490 + "FK violation on oauth_authorization_codes.client_id must be rejected" 491 + ); 492 + } 493 + 494 + /// End-to-end insert chain: accounts → devices → sessions → refresh_tokens. 495 + /// Validates column names, NOT NULL constraints, and FK ordering across the core auth path. 
496 + #[tokio::test] 497 + async fn v002_core_auth_chain_insert_succeeds() { 498 + let pool = in_memory_pool().await; 499 + run_migrations(&pool).await.unwrap(); 500 + 501 + sqlx::query( 502 + "INSERT INTO accounts (did, email, password_hash, created_at, updated_at) 503 + VALUES ('did:plc:aaa', 'a@example.com', 'hash', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 504 + ) 505 + .execute(&pool) 506 + .await 507 + .unwrap(); 508 + 509 + sqlx::query( 510 + "INSERT INTO devices (id, did, device_name, user_agent, created_at, last_seen_at) 511 + VALUES ('dev1', 'did:plc:aaa', 'My Phone', 'Mozilla/5.0', '2024-01-01T00:00:00', '2024-01-01T00:00:00')", 512 + ) 513 + .execute(&pool) 514 + .await 515 + .unwrap(); 516 + 517 + sqlx::query( 518 + "INSERT INTO sessions (id, did, device_id, created_at, expires_at) 519 + VALUES ('sess1', 'did:plc:aaa', 'dev1', '2024-01-01T00:00:00', '2024-01-02T00:00:00')", 520 + ) 521 + .execute(&pool) 522 + .await 523 + .unwrap(); 524 + 525 + sqlx::query( 526 + "INSERT INTO refresh_tokens (jti, did, session_id, expires_at, created_at) 527 + VALUES ('jti1', 'did:plc:aaa', 'sess1', '2024-01-02T00:00:00', '2024-01-01T00:00:00')", 528 + ) 529 + .execute(&pool) 530 + .await 531 + .unwrap(); 532 + 533 + let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM refresh_tokens") 534 + .fetch_one(&pool) 535 + .await 536 + .unwrap(); 537 + assert_eq!(count, 1, "full auth chain insert must succeed"); 538 + } 539 + 401 540 /// EXPLAIN QUERY PLAN must show idx_refresh_tokens_did for a WHERE did = ? query. 402 541 #[tokio::test] 403 542 async fn v002_index_refresh_tokens_did_used() { ··· 491 630 assert!( 492 631 detail.contains("idx_accounts_email"), 493 632 "accounts WHERE email query must use idx_accounts_email; got: {detail}" 633 + ); 634 + } 635 + 636 + /// EXPLAIN QUERY PLAN must show idx_handles_did for a WHERE did = ? query. 
637 + #[tokio::test] 638 + async fn v002_index_handles_did_used() { 639 + let pool = in_memory_pool().await; 640 + run_migrations(&pool).await.unwrap(); 641 + 642 + let plan: Vec<(i64, i64, i64, String)> = 643 + sqlx::query_as("EXPLAIN QUERY PLAN SELECT * FROM handles WHERE did = 'did:plc:aaa'") 644 + .fetch_all(&pool) 645 + .await 646 + .unwrap(); 647 + 648 + let detail = plan 649 + .iter() 650 + .map(|r| r.3.as_str()) 651 + .collect::<Vec<_>>() 652 + .join("\n"); 653 + assert!( 654 + detail.contains("idx_handles_did"), 655 + "handles WHERE did query must use idx_handles_did; got: {detail}" 656 + ); 657 + } 658 + 659 + /// EXPLAIN QUERY PLAN must show idx_signing_keys_did for a WHERE did = ? query. 660 + #[tokio::test] 661 + async fn v002_index_signing_keys_did_used() { 662 + let pool = in_memory_pool().await; 663 + run_migrations(&pool).await.unwrap(); 664 + 665 + let plan: Vec<(i64, i64, i64, String)> = sqlx::query_as( 666 + "EXPLAIN QUERY PLAN SELECT * FROM signing_keys WHERE did = 'did:plc:aaa'", 667 + ) 668 + .fetch_all(&pool) 669 + .await 670 + .unwrap(); 671 + 672 + let detail = plan 673 + .iter() 674 + .map(|r| r.3.as_str()) 675 + .collect::<Vec<_>>() 676 + .join("\n"); 677 + assert!( 678 + detail.contains("idx_signing_keys_did"), 679 + "signing_keys WHERE did query must use idx_signing_keys_did; got: {detail}" 680 + ); 681 + } 682 + 683 + /// EXPLAIN QUERY PLAN must show idx_devices_did for a WHERE did = ? query. 
684 + #[tokio::test] 685 + async fn v002_index_devices_did_used() { 686 + let pool = in_memory_pool().await; 687 + run_migrations(&pool).await.unwrap(); 688 + 689 + let plan: Vec<(i64, i64, i64, String)> = 690 + sqlx::query_as("EXPLAIN QUERY PLAN SELECT * FROM devices WHERE did = 'did:plc:aaa'") 691 + .fetch_all(&pool) 692 + .await 693 + .unwrap(); 694 + 695 + let detail = plan 696 + .iter() 697 + .map(|r| r.3.as_str()) 698 + .collect::<Vec<_>>() 699 + .join("\n"); 700 + assert!( 701 + detail.contains("idx_devices_did"), 702 + "devices WHERE did query must use idx_devices_did; got: {detail}" 703 + ); 704 + } 705 + 706 + /// EXPLAIN QUERY PLAN must show idx_sessions_did for a WHERE did = ? query. 707 + #[tokio::test] 708 + async fn v002_index_sessions_did_used() { 709 + let pool = in_memory_pool().await; 710 + run_migrations(&pool).await.unwrap(); 711 + 712 + let plan: Vec<(i64, i64, i64, String)> = 713 + sqlx::query_as("EXPLAIN QUERY PLAN SELECT * FROM sessions WHERE did = 'did:plc:aaa'") 714 + .fetch_all(&pool) 715 + .await 716 + .unwrap(); 717 + 718 + let detail = plan 719 + .iter() 720 + .map(|r| r.3.as_str()) 721 + .collect::<Vec<_>>() 722 + .join("\n"); 723 + assert!( 724 + detail.contains("idx_sessions_did"), 725 + "sessions WHERE did query must use idx_sessions_did; got: {detail}" 494 726 ); 495 727 } 496 728