atproto relay implementation in zig — zlay.waow.tech

feat: switch persistence from SQLite to Postgres

replace zqlite with pg.zig for metadata storage. schema now matches
the Go indigo relay (account, account_repo, log_file_refs, domain_ban).
removes the seqToSqlite/sqliteToSeq XOR hack — sequence numbers are now
stored directly as signed BIGINT via @intCast (relay seqs stay well below
the i64 max, so no sign-bit remapping is needed). DATABASE_URL env var
replaces RELAY_DB_PATH.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

+153 -159
-1
Dockerfile
··· 3 3 COPY zig-out/bin/zlay /usr/local/bin/zlay 4 4 RUN mkdir -p /data/events 5 5 ENV RELAY_DATA_DIR=/data/events 6 - ENV RELAY_DB_PATH=/data/relay.sqlite 7 6 EXPOSE 3000 3001 8 7 ENTRYPOINT ["/usr/local/bin/zlay"]
+2 -2
build.zig
··· 12 12 .target = target, 13 13 .optimize = optimize, 14 14 }); 15 - const zqlite = b.dependency("zqlite", .{ 15 + const pg = b.dependency("pg", .{ 16 16 .target = target, 17 17 .optimize = optimize, 18 18 }); ··· 20 20 const imports: []const std.Build.Module.Import = &.{ 21 21 .{ .name = "zat", .module = zat.module("zat") }, 22 22 .{ .name = "websocket", .module = websocket.module("websocket") }, 23 - .{ .name = "zqlite", .module = zqlite.module("zqlite") }, 23 + .{ .name = "pg", .module = pg.module("pg") }, 24 24 }; 25 25 26 26 // relay executable
+3 -3
build.zig.zon
··· 12 12 .url = "https://github.com/karlseguin/websocket.zig/archive/97fefafa59cc78ce177cff540b8685cd7f699276.tar.gz", 13 13 .hash = "websocket-0.1.0-ZPISdRlzAwBB_Bz2UMMqxYqF6YEVTIBoFsbzwPUJTHIc", 14 14 }, 15 - .zqlite = .{ 16 - .url = "https://github.com/karlseguin/zqlite.zig/archive/05a88d6758753e1c63fdd45b211dde2057094b0c.tar.gz", 17 - .hash = "zqlite-0.0.1-RWLaYz6bmAAT7E_jxopXf-j5Ea8VQldnxsd6TU8sa0Bb", 15 + .pg = .{ 16 + .url = "git+https://github.com/karlseguin/pg.zig?ref=master#e58b318b7867ef065b3135983f829219c5eef891", 17 + .hash = "pg-0.0.0-Wp_7gXFoBgD0fQ72WICKa-bxLga03AXXQ3BbIsjjohQ3", 18 18 }, 19 19 }, 20 20 .paths = .{
+145 -150
src/event_log.zig
··· 1 1 //! disk persistence matching indigo's diskpersist format 2 2 //! 3 3 //! append-only log files with relay-assigned sequence numbers. 4 - //! SQLite metadata index for fast cursor→file lookup. 4 + //! Postgres metadata index for fast cursor→file lookup. 5 5 //! 6 6 //! on-disk entry format (28-byte LE header + CBOR payload): 7 7 //! [4B flags LE] [4B kind LE] [4B payload_len LE] [8B uid LE] [8B seq LE] [payload] ··· 11 11 //! see: indigo cmd/relay/stream/persist/diskpersist/diskpersist.go 12 12 13 13 const std = @import("std"); 14 - const zqlite = @import("zqlite"); 14 + const pg = @import("pg"); 15 15 16 16 const Allocator = std.mem.Allocator; 17 17 const log = std.log.scoped(.relay); ··· 38 38 const default_flush_interval_ms: u64 = 100; 39 39 const default_flush_threshold: usize = 400; 40 40 41 - /// convert u64 seq to i64 for SQLite storage. 42 - /// XOR with sign bit to preserve ordering across the full u64 range. 43 - fn seqToSqlite(seq: u64) i64 { 44 - return @bitCast(seq ^ (1 << 63)); 45 - } 46 - 47 - /// convert i64 from SQLite back to u64 seq. 
48 - fn sqliteToSeq(val: i64) u64 { 49 - return @as(u64, @bitCast(val)) ^ (1 << 63); 50 - } 51 - 52 41 // --- header --- 53 42 54 43 pub const EvtHeader = struct { ··· 90 79 allocator: Allocator, 91 80 dir_path: []const u8, 92 81 dir: std.fs.Dir, 93 - db: zqlite.Conn, 82 + db: *pg.Pool, 94 83 current_file: ?std.fs.File = null, 95 84 current_file_path: ?[]const u8 = null, 96 85 ··· 116 105 alive: std.atomic.Value(bool) = .{ .raw = true }, 117 106 flush_cond: std.Thread.Condition = .{}, 118 107 119 - pub fn init(allocator: Allocator, dir_path: []const u8, db_path: []const u8) !DiskPersist { 108 + pub fn init(allocator: Allocator, dir_path: []const u8, database_url: []const u8) !DiskPersist { 120 109 // ensure directory exists 121 110 std.fs.cwd().makePath(dir_path) catch |err| switch (err) { 122 111 error.PathAlreadyExists => {}, ··· 126 115 var dir = try std.fs.cwd().openDir(dir_path, .{ .iterate = true }); 127 116 errdefer dir.close(); 128 117 129 - // ensure db parent directory exists 130 - if (std.fs.path.dirname(db_path)) |parent| { 131 - std.fs.cwd().makePath(parent) catch |err| switch (err) { 132 - error.PathAlreadyExists => {}, 133 - else => return err, 134 - }; 135 - } 136 - 137 - // open SQLite 138 - const db_path_z = try allocator.dupeZ(u8, db_path); 139 - defer allocator.free(db_path_z); 140 - var db = try zqlite.open(db_path_z, zqlite.OpenFlags.Create | zqlite.OpenFlags.ReadWrite); 141 - errdefer db.close(); 142 - 143 - // pragmas 144 - try db.execNoArgs("PRAGMA journal_mode=WAL"); 145 - try db.execNoArgs("PRAGMA busy_timeout=5000"); 146 - try db.execNoArgs("PRAGMA synchronous=NORMAL"); 118 + // connect to Postgres 119 + const uri = std.Uri.parse(database_url) catch return error.InvalidDatabaseUrl; 120 + const pool = try pg.Pool.initUri(allocator, uri, .{ .size = 5 }); 121 + errdefer pool.deinit(); 147 122 148 - // create tables 149 - try db.execNoArgs( 123 + // create tables (matching indigo's Go relay schema) 124 + _ = try pool.exec( 150 125 \\CREATE 
TABLE IF NOT EXISTS log_file_refs ( 151 - \\ id INTEGER PRIMARY KEY AUTOINCREMENT, 126 + \\ id BIGSERIAL PRIMARY KEY, 152 127 \\ path TEXT NOT NULL, 153 - \\ archived INTEGER NOT NULL DEFAULT 0, 154 - \\ seq_start INTEGER NOT NULL, 155 - \\ created_at TEXT NOT NULL DEFAULT (datetime('now')) 128 + \\ archived BOOLEAN NOT NULL DEFAULT false, 129 + \\ seq_start BIGINT NOT NULL, 130 + \\ created_at TIMESTAMPTZ NOT NULL DEFAULT now() 156 131 \\) 157 - ); 158 - try db.execNoArgs( 132 + , .{}); 133 + 134 + _ = try pool.exec( 159 135 \\CREATE TABLE IF NOT EXISTS account ( 160 - \\ uid INTEGER PRIMARY KEY AUTOINCREMENT, 136 + \\ uid BIGSERIAL PRIMARY KEY, 161 137 \\ did TEXT NOT NULL UNIQUE, 162 138 \\ status TEXT NOT NULL DEFAULT 'active', 163 - \\ rev TEXT, 164 - \\ commit_data_cid BLOB, 165 - \\ created_at TEXT NOT NULL DEFAULT (datetime('now')) 139 + \\ created_at TIMESTAMPTZ NOT NULL DEFAULT now() 166 140 \\) 167 - ); 141 + , .{}); 142 + 143 + _ = try pool.exec( 144 + \\CREATE TABLE IF NOT EXISTS account_repo ( 145 + \\ uid BIGINT PRIMARY KEY REFERENCES account(uid), 146 + \\ rev TEXT NOT NULL, 147 + \\ commit_data_cid TEXT NOT NULL 148 + \\) 149 + , .{}); 150 + 151 + _ = try pool.exec( 152 + \\CREATE TABLE IF NOT EXISTS domain_ban ( 153 + \\ id BIGSERIAL PRIMARY KEY, 154 + \\ domain TEXT NOT NULL UNIQUE, 155 + \\ created_at TIMESTAMPTZ NOT NULL DEFAULT now() 156 + \\) 157 + , .{}); 168 158 169 159 var self = DiskPersist{ 170 160 .allocator = allocator, 171 161 .dir_path = try allocator.dupe(u8, dir_path), 172 162 .dir = dir, 173 - .db = db, 163 + .db = pool, 174 164 }; 175 165 176 166 // recover from existing log files ··· 205 195 if (self.current_file) |f| f.close(); 206 196 if (self.current_file_path) |p| self.allocator.free(p); 207 197 self.dir.close(); 208 - self.db.close(); 198 + self.db.deinit(); 209 199 self.allocator.free(self.dir_path); 210 200 } 211 201 ··· 225 215 } 226 216 227 217 // check database 228 - if (self.db.row( 229 - "SELECT uid FROM account 
WHERE did = ?", 218 + if (try self.db.rowUnsafe( 219 + "SELECT uid FROM account WHERE did = $1", 230 220 .{did}, 231 - )) |maybe_row| { 232 - if (maybe_row) |r| { 233 - defer r.deinit(); 234 - const uid: u64 = @intCast(r.int(0)); 235 - // populate cache 236 - const did_duped = try self.allocator.dupe(u8, did); 237 - self.did_cache_mutex.lock(); 238 - defer self.did_cache_mutex.unlock(); 239 - self.did_cache.put(self.allocator, did_duped, uid) catch { 240 - self.allocator.free(did_duped); 241 - }; 242 - return uid; 243 - } 244 - } else |_| {} 221 + )) |row| { 222 + var r = row; 223 + defer r.deinit() catch {}; 224 + const uid: u64 = @intCast(r.get(i64, 0)); 225 + // populate cache 226 + const did_duped = try self.allocator.dupe(u8, did); 227 + self.did_cache_mutex.lock(); 228 + defer self.did_cache_mutex.unlock(); 229 + self.did_cache.put(self.allocator, did_duped, uid) catch { 230 + self.allocator.free(did_duped); 231 + }; 232 + return uid; 233 + } 245 234 246 235 // create new account row (ignore if already exists from concurrent insert) 247 - self.db.exec( 248 - "INSERT OR IGNORE INTO account (did) VALUES (?)", 236 + _ = self.db.exec( 237 + "INSERT INTO account (did) VALUES ($1) ON CONFLICT (did) DO NOTHING", 249 238 .{did}, 250 239 ) catch |err| { 251 240 log.warn("failed to create account for {s}: {s}", .{ did, @errorName(err) }); ··· 253 242 }; 254 243 255 244 // read back the UID (whether we just created it or it already existed) 256 - const row = try self.db.row( 257 - "SELECT uid FROM account WHERE did = ?", 245 + var row = try self.db.rowUnsafe( 246 + "SELECT uid FROM account WHERE did = $1", 258 247 .{did}, 259 248 ) orelse return error.AccountCreationFailed; 260 - defer row.deinit(); 261 - const uid: u64 = @intCast(row.int(0)); 249 + defer row.deinit() catch {}; 250 + const uid: u64 = @intCast(row.get(i64, 0)); 262 251 263 252 // populate cache 264 253 const did_duped = try self.allocator.dupe(u8, did); ··· 277 266 data_cid: []const u8, 278 267 }; 279 
268 280 - /// get stored sync state for a user 269 + /// get stored sync state for a user (from account_repo table) 281 270 pub fn getAccountState(self: *DiskPersist, uid: u64, allocator: Allocator) !?AccountState { 282 - const row = (try self.db.row( 283 - "SELECT rev, commit_data_cid FROM account WHERE uid = ? AND rev IS NOT NULL", 271 + var row = (try self.db.rowUnsafe( 272 + "SELECT rev, commit_data_cid FROM account_repo WHERE uid = $1", 284 273 .{@as(i64, @intCast(uid))}, 285 274 )) orelse return null; 286 - defer row.deinit(); 287 - const rev = row.text(0); 288 - const data_cid = row.blob(1); 275 + defer row.deinit() catch {}; 276 + const rev = row.get([]const u8, 0); 277 + const data_cid = row.get([]const u8, 1); 289 278 if (rev.len == 0 or data_cid.len == 0) return null; 290 279 return .{ 291 280 .rev = try allocator.dupe(u8, rev), ··· 293 282 }; 294 283 } 295 284 296 - /// update stored sync state after a verified commit 285 + /// update stored sync state after a verified commit (upsert into account_repo) 297 286 pub fn updateAccountState(self: *DiskPersist, uid: u64, rev: []const u8, data_cid: []const u8) !void { 298 - try self.db.exec( 299 - "UPDATE account SET rev = ?, commit_data_cid = ? WHERE uid = ?", 300 - .{ rev, data_cid, @as(i64, @intCast(uid)) }, 287 + _ = try self.db.exec( 288 + "INSERT INTO account_repo (uid, rev, commit_data_cid) VALUES ($1, $2, $3) ON CONFLICT (uid) DO UPDATE SET rev = EXCLUDED.rev, commit_data_cid = EXCLUDED.commit_data_cid", 289 + .{ @as(i64, @intCast(uid)), rev, data_cid }, 301 290 ); 302 291 } 303 292 ··· 341 330 } 342 331 343 332 /// playback events with seq > since. calls cb for each event. 
344 - pub fn playback(self: *DiskPersist, since: u64, allocator: Allocator, result: *std.ArrayListUnmanaged(PlaybackEntry)) !void { 333 + pub fn playback(self: *DiskPersist, since: u64, allocator: Allocator, entries: *std.ArrayListUnmanaged(PlaybackEntry)) !void { 345 334 self.mutex.lock(); 346 335 defer self.mutex.unlock(); 336 + 337 + const since_i: i64 = @intCast(since); 347 338 348 339 // find the log file containing `since` 349 340 var start_files: std.ArrayListUnmanaged(LogFileRef) = .{}; ··· 351 342 352 343 if (since > 0) { 353 344 // find file whose seq_start is just before `since` 354 - if (self.db.row("SELECT id, path, seq_start FROM log_file_refs WHERE seq_start <= ? ORDER BY seq_start DESC LIMIT 1", .{seqToSqlite(since)})) |row| { 355 - if (row) |r| { 356 - defer r.deinit(); 357 - try start_files.append(allocator, .{ 358 - .path = try allocator.dupe(u8, r.text(1)), 359 - .seq_start = sqliteToSeq(r.int(2)), 360 - }); 361 - } 362 - } else |_| {} 345 + if (try self.db.rowUnsafe( 346 + "SELECT id, path, seq_start FROM log_file_refs WHERE seq_start <= $1 ORDER BY seq_start DESC LIMIT 1", 347 + .{since_i}, 348 + )) |row| { 349 + var r = row; 350 + defer r.deinit() catch {}; 351 + try start_files.append(allocator, .{ 352 + .path = try allocator.dupe(u8, r.get([]const u8, 1)), 353 + .seq_start = @intCast(r.get(i64, 2)), 354 + }); 355 + } 363 356 } 364 357 365 358 // find all subsequent files 366 359 { 367 - var rows = try self.db.rows("SELECT id, path, seq_start FROM log_file_refs WHERE seq_start > ? 
ORDER BY seq_start ASC", .{seqToSqlite(since)}); 368 - defer rows.deinit(); 369 - while (rows.next()) |r| { 360 + var result = try self.db.query( 361 + "SELECT id, path, seq_start FROM log_file_refs WHERE seq_start > $1 ORDER BY seq_start ASC", 362 + .{since_i}, 363 + ); 364 + defer result.deinit(); 365 + while (result.nextUnsafe() catch null) |r| { 370 366 try start_files.append(allocator, .{ 371 - .path = try allocator.dupe(u8, r.text(1)), 372 - .seq_start = sqliteToSeq(r.int(2)), 367 + .path = try allocator.dupe(u8, r.get([]const u8, 1)), 368 + .seq_start = @intCast(r.get(i64, 2)), 373 369 }); 374 370 } 375 371 } ··· 380 376 for (start_files.items) |ref| { 381 377 var file = self.dir.openFile(ref.path, .{}) catch continue; 382 378 defer file.close(); 383 - try readEventsFrom(allocator, file, since, result); 379 + try readEventsFrom(allocator, file, since, entries); 384 380 } 385 381 } 386 382 ··· 395 391 self.mutex.lock(); 396 392 defer self.mutex.unlock(); 397 393 398 - const cutoff_hours = self.retention_hours; 399 - const cutoff_sql = try std.fmt.allocPrint(self.allocator, "-{d} hours", .{cutoff_hours}); 400 - defer self.allocator.free(cutoff_sql); 394 + const cutoff_interval = try std.fmt.allocPrint(self.allocator, "{d} hours", .{self.retention_hours}); 395 + defer self.allocator.free(cutoff_interval); 401 396 402 397 // find expired refs 403 398 var expired: std.ArrayListUnmanaged(GcRef) = .{}; ··· 407 402 } 408 403 409 404 { 410 - var rows = try self.db.rows( 411 - "SELECT id, path FROM log_file_refs WHERE created_at < datetime('now', ?)", 412 - .{cutoff_sql}, 405 + var result = try self.db.query( 406 + "SELECT id, path FROM log_file_refs WHERE created_at < now() - $1::interval", 407 + .{cutoff_interval}, 413 408 ); 414 - defer rows.deinit(); 415 - while (rows.next()) |r| { 409 + defer result.deinit(); 410 + while (result.nextUnsafe() catch null) |r| { 416 411 try expired.append(self.allocator, .{ 417 - .id = r.int(0), 418 - .path = try 
self.allocator.dupe(u8, r.text(1)), 412 + .id = r.get(i64, 0), 413 + .path = try self.allocator.dupe(u8, r.get([]const u8, 1)), 419 414 }); 420 415 } 421 416 } ··· 427 422 } 428 423 429 424 // delete db record first (prevents playback from finding it) 430 - self.db.exec("DELETE FROM log_file_refs WHERE id = ?", .{ref.id}) catch |err| { 425 + _ = self.db.exec("DELETE FROM log_file_refs WHERE id = $1", .{ref.id}) catch |err| { 431 426 log.warn("gc: failed to delete db record {d}: {s}", .{ ref.id, @errorName(err) }); 432 427 continue; 433 428 }; ··· 456 451 } 457 452 458 453 { 459 - var rows = try self.db.rows("SELECT path FROM log_file_refs ORDER BY seq_start DESC", .{}); 460 - defer rows.deinit(); 461 - while (rows.next()) |r| { 462 - try refs.append(self.allocator, try self.allocator.dupe(u8, r.text(0))); 454 + var result = try self.db.query("SELECT path FROM log_file_refs ORDER BY seq_start DESC", .{}); 455 + defer result.deinit(); 456 + while (result.nextUnsafe() catch null) |r| { 457 + try refs.append(self.allocator, try self.allocator.dupe(u8, r.get([]const u8, 0))); 463 458 } 464 459 } 465 460 ··· 476 471 477 472 fn resumeLog(self: *DiskPersist) !void { 478 473 // find most recent log file 479 - const r = try self.db.row("SELECT id, path, seq_start FROM log_file_refs ORDER BY seq_start DESC LIMIT 1", .{}); 480 - if (r) |row| { 481 - defer row.deinit(); 482 - const path = row.text(1); 483 - const seq_start: u64 = sqliteToSeq(row.int(2)); 474 + if (try self.db.rowUnsafe( 475 + "SELECT id, path, seq_start FROM log_file_refs ORDER BY seq_start DESC LIMIT 1", 476 + .{}, 477 + )) |row| { 478 + var r = row; 479 + defer r.deinit() catch {}; 480 + const path = r.get([]const u8, 1); 481 + const seq_start: u64 = @intCast(r.get(i64, 2)); 484 482 485 483 var file = self.dir.openFile(path, .{ .mode = .read_write }) catch { 486 484 // file missing, start fresh ··· 527 525 self.current_file = try self.dir.createFile(name, .{ .truncate = false }); 528 526 
self.current_file_path = try self.allocator.dupe(u8, name); 529 527 530 - // register in SQLite 531 - try self.db.exec( 532 - "INSERT INTO log_file_refs (path, seq_start) VALUES (?, ?)", 533 - .{ name, seqToSqlite(start_seq) }, 528 + // register in Postgres 529 + _ = try self.db.exec( 530 + "INSERT INTO log_file_refs (path, seq_start) VALUES ($1, $2)", 531 + .{ name, @as(i64, @intCast(start_seq)) }, 534 532 ); 535 533 536 534 self.event_counter = 0; ··· 753 751 try std.testing.expectEqual(@as(u8, 0x01), buf[9]); 754 752 } 755 753 754 + fn requireDatabaseUrl() ![]const u8 { 755 + return std.posix.getenv("DATABASE_URL") orelse return error.SkipZigTest; 756 + } 757 + 756 758 test "persist and playback" { 759 + const database_url = try requireDatabaseUrl(); 760 + 757 761 var tmp = std.testing.tmpDir(.{}); 758 762 defer tmp.cleanup(); 759 763 760 764 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 761 765 defer std.testing.allocator.free(dir_path); 762 766 763 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 764 - defer std.testing.allocator.free(db_path); 765 - 766 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 767 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 767 768 defer dp.deinit(); 768 769 769 770 // persist some events (sync flush, no background thread) ··· 798 799 } 799 800 800 801 test "playback with cursor" { 802 + const database_url = try requireDatabaseUrl(); 803 + 801 804 var tmp = std.testing.tmpDir(.{}); 802 805 defer tmp.cleanup(); 803 806 804 807 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 805 808 defer std.testing.allocator.free(dir_path); 806 809 807 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 808 - defer std.testing.allocator.free(db_path); 809 - 810 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 810 + var dp = try 
DiskPersist.init(std.testing.allocator, dir_path, database_url); 811 811 defer dp.deinit(); 812 812 813 813 _ = try dp.persist(.commit, 1, "a"); ··· 833 833 } 834 834 835 835 test "seq recovery after reinit" { 836 + const database_url = try requireDatabaseUrl(); 837 + 836 838 var tmp = std.testing.tmpDir(.{}); 837 839 defer tmp.cleanup(); 838 840 839 841 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 840 842 defer std.testing.allocator.free(dir_path); 841 843 842 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 843 - defer std.testing.allocator.free(db_path); 844 - 845 844 // write some events 846 845 { 847 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 846 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 848 847 defer dp.deinit(); 849 848 _ = try dp.persist(.commit, 1, "x"); 850 849 _ = try dp.persist(.commit, 2, "y"); ··· 856 855 857 856 // reinit — should recover seq 858 857 { 859 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 858 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 860 859 defer dp.deinit(); 861 860 try std.testing.expectEqual(@as(u64, 3), dp.lastSeq().?); 862 861 const seq4 = try dp.persist(.commit, 1, "w"); ··· 865 864 } 866 865 867 866 test "takedown zeros payload" { 867 + const database_url = try requireDatabaseUrl(); 868 + 868 869 var tmp = std.testing.tmpDir(.{}); 869 870 defer tmp.cleanup(); 870 871 871 872 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 872 873 defer std.testing.allocator.free(dir_path); 873 874 874 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 875 - defer std.testing.allocator.free(db_path); 876 - 877 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 875 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 878 876 defer 
dp.deinit(); 879 877 880 878 _ = try dp.persist(.commit, 42, "secret-data"); ··· 901 899 } 902 900 903 901 test "uidForDid assigns and caches UIDs" { 902 + const database_url = try requireDatabaseUrl(); 903 + 904 904 var tmp = std.testing.tmpDir(.{}); 905 905 defer tmp.cleanup(); 906 906 907 907 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 908 908 defer std.testing.allocator.free(dir_path); 909 909 910 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 911 - defer std.testing.allocator.free(db_path); 912 - 913 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 910 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 914 911 defer dp.deinit(); 915 912 916 913 // first call creates the account ··· 928 925 } 929 926 930 927 test "uidForDid survives reinit" { 928 + const database_url = try requireDatabaseUrl(); 929 + 931 930 var tmp = std.testing.tmpDir(.{}); 932 931 defer tmp.cleanup(); 933 932 934 933 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 935 934 defer std.testing.allocator.free(dir_path); 936 935 937 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 938 - defer std.testing.allocator.free(db_path); 939 - 940 936 var uid1: u64 = undefined; 941 937 { 942 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 938 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 943 939 defer dp.deinit(); 944 940 uid1 = try dp.uidForDid("did:plc:carol"); 945 941 } 946 942 947 943 // reinit — UID should be the same from database 948 944 { 949 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 945 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 950 946 defer dp.deinit(); 951 947 const uid1_again = try dp.uidForDid("did:plc:carol"); 952 948 try std.testing.expectEqual(uid1, uid1_again); ··· 954 
950 } 955 951 956 952 test "takedown with real UIDs" { 953 + const database_url = try requireDatabaseUrl(); 954 + 957 955 var tmp = std.testing.tmpDir(.{}); 958 956 defer tmp.cleanup(); 959 957 960 958 const dir_path = try tmpDirRealPath(std.testing.allocator, tmp); 961 959 defer std.testing.allocator.free(dir_path); 962 960 963 - const db_path = try std.fmt.allocPrint(std.testing.allocator, "{s}/relay.sqlite", .{dir_path}); 964 - defer std.testing.allocator.free(db_path); 965 - 966 - var dp = try DiskPersist.init(std.testing.allocator, dir_path, db_path); 961 + var dp = try DiskPersist.init(std.testing.allocator, dir_path, database_url); 967 962 defer dp.deinit(); 968 963 969 964 const alice_uid = try dp.uidForDid("did:plc:alice");
+3 -3
src/main.zig
··· 62 62 defer val.deinit(); 63 63 try val.start(); 64 64 65 - // init disk persistence (indigo-compatible diskpersist format + SQLite index) 66 - const db_path = std.posix.getenv("RELAY_DB_PATH") orelse "data/relay.sqlite"; 67 - var dp = event_log_mod.DiskPersist.init(allocator, data_dir, db_path) catch |err| { 65 + // init disk persistence (indigo-compatible diskpersist format + Postgres index) 66 + const database_url = std.posix.getenv("DATABASE_URL") orelse "postgres://relay:relay@localhost:5432/relay"; 67 + var dp = event_log_mod.DiskPersist.init(allocator, data_dir, database_url) catch |err| { 68 68 log.err("failed to init disk persist at {s}: {s}", .{ data_dir, @errorName(err) }); 69 69 return err; 70 70 };