const std = @import("std");
const Allocator = std.mem.Allocator;

/// statistics for a lattice
pub const Stats = struct {
    size: usize,
    occupied: usize,
    clusters: usize,
    largest: usize,
    percolates: bool,

    /// fraction of sites occupied, in [0, 1]; returns 0 for an empty lattice
    pub fn density(self: Stats) f32 {
        const total = self.size * self.size;
        return if (total == 0) 0.0 else @as(f32, @floatFromInt(self.occupied)) / @as(f32, @floatFromInt(total));
    }
};

/// 2D lattice with Union-Find cluster tracking and rolling window decay.
/// All public methods take the internal mutex, so a single Lattice may be
/// shared across threads.
pub const Lattice = struct {
    size: usize, // L for LxL grid (power of 2)
    mask: usize, // size - 1 for bit masking
    sites: []bool, // occupied state
    parent: []isize, // Union-Find parent (-1 = root with size 1, -n = root with size n)
    generation: u64, // increments on changes
    allocator: Allocator,

    // generation-based decay: each site tracks when it was last hit
    last_seen: []u64, // generation when each site was last occupied
    window_size: u64, // sites expire after this many generations
    last_sweep: u64, // generation when we last swept for expired sites
    sweep_interval: u64, // how often to sweep
    mutex: std.Thread.Mutex,

    /// init with the size-dependent default decay window (see initWithWindow)
    pub fn init(allocator: Allocator, size: usize) !Lattice {
        return initWithWindow(allocator, size, null);
    }

    /// init with an explicit decay window, or null for the size-based default.
    /// Caller owns the Lattice and must call deinit().
    pub fn initWithWindow(allocator: Allocator, size: usize, window: ?u64) !Lattice {
        std.debug.assert(size > 0 and (size & (size - 1)) == 0); // must be power of 2
        const total = size * size;

        const sites = try allocator.alloc(bool, total);
        // errdefer so a failure in a later alloc does not leak the earlier ones
        errdefer allocator.free(sites);
        @memset(sites, false);

        const parent = try allocator.alloc(isize, total);
        errdefer allocator.free(parent);
        @memset(parent, -1); // each site is its own root with size 1

        const last_seen = try allocator.alloc(u64, total);
        errdefer allocator.free(last_seen);
        @memset(last_seen, 0);

        // default window targets different densities per size
        // with random hashing, P(site occupied) = 1 - e^(-W/N)
        // solving for W: W = -N * ln(1-P)
        // - small (32): ~70% → supercritical, usually percolates
        // - medium (128): ~59% → near critical threshold (p_c ≈ 0.593)
        // - large (512): ~40% → subcritical, rarely percolates
        const ftotal: f64 = @floatFromInt(total);
        const default_window: u64 = switch (size) {
            32 => @intFromFloat(-ftotal * @log(1.0 - 0.70)), // W ≈ 1.2 * N
            128 => @intFromFloat(-ftotal * @log(1.0 - 0.59)), // W ≈ 0.89 * N
            512 => @intFromFloat(-ftotal * @log(1.0 - 0.40)), // W ≈ 0.51 * N
            else => @intFromFloat(-ftotal * @log(0.5)), // 50%
        };
        const win = window orelse default_window;
        // sweep for expired sites every 10% of window
        const sweep_interval = @max(win / 10, 100);

        return .{
            .size = size,
            .mask = size - 1,
            .sites = sites,
            .parent = parent,
            .generation = 0,
            .allocator = allocator,
            .last_seen = last_seen,
            .window_size = win,
            .last_sweep = 0,
            .sweep_interval = sweep_interval,
            .mutex = .{},
        };
    }

    pub fn deinit(self: *Lattice) void {
        self.allocator.free(self.sites);
        self.allocator.free(self.parent);
        self.allocator.free(self.last_seen);
    }

    /// convert (x, y) to flat index; coordinates are masked (wrapped) to the grid
    fn index(self: *const Lattice, x: usize, y: usize) usize {
        return (y & self.mask) * self.size + (x & self.mask);
    }

    /// find root with path compression (weighted quick-union)
    fn findRoot(self: *Lattice, i: usize) usize {
        var current = i;
        // find root: roots have a negative parent entry (negated cluster size)
        while (self.parent[current] >= 0) {
            current = @intCast(self.parent[current]);
        }
        // path compression: point every node on the walked path at the root
        var node = i;
        while (self.parent[node] >= 0) {
            const next: usize = @intCast(self.parent[node]);
            self.parent[node] = @intCast(current);
            node = next;
        }
        return current;
    }

    /// union two sites by rank (size)
    fn unionSites(self: *Lattice, a: usize, b: usize) void {
        const root_a = self.findRoot(a);
        const root_b = self.findRoot(b);
        if (root_a == root_b) return;
        // negative parent = root with size -parent
        const size_a: isize = -self.parent[root_a];
        const size_b: isize = -self.parent[root_b];
        // attach smaller tree under larger to keep find paths short
        if (size_a < size_b) {
            self.parent[root_a] = @intCast(root_b);
            self.parent[root_b] = -(size_a + size_b);
        } else {
            self.parent[root_b] = @intCast(root_a);
            self.parent[root_a] = -(size_a + size_b);
        }
    }

    /// occupy a site with generation-based decay.
    /// NOTE(review): index() wraps out-of-range coordinates via the mask, but
    /// the neighbor logic treats x/y as already in-range (open boundaries), so
    /// callers are expected to pass 0 <= x, y < size — all current callers do.
    pub fn occupy(self: *Lattice, x: usize, y: usize) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        self.generation += 1;
        const i = self.index(x, y);
        // update last_seen for this site (whether newly occupied or refreshed)
        self.last_seen[i] = self.generation;

        // periodic sweep for expired sites
        if (self.generation - self.last_sweep >= self.sweep_interval) {
            self.sweepExpired();
            self.last_sweep = self.generation;
        }

        // already occupied: the last_seen refresh above is all that's needed
        if (self.sites[i]) return;

        self.sites[i] = true;
        self.parent[i] = -1; // new root with size 1

        // union with occupied neighbors (4-connected)
        self.unionWithNeighbors(i, x, y);
    }

    /// sweep through all sites and expire old ones (assumes lock held)
    fn sweepExpired(self: *Lattice) void {
        var expired: usize = 0;
        // sites last seen before this generation fall outside the window
        const threshold = if (self.generation > self.window_size)
            self.generation - self.window_size
        else
            0;
        for (0..self.size * self.size) |i| {
            if (self.sites[i] and self.last_seen[i] < threshold) {
                self.sites[i] = false;
                expired += 1;
            }
        }
        // rebuild union-find if we expired sites
        if (expired > 0) {
            self.rebuildUnlocked();
        }
    }

    /// union site with its occupied neighbors (open boundaries - no wrap-around)
    fn unionWithNeighbors(self: *Lattice, i: usize, x: usize, y: usize) void {
        const neighbors = [_][2]isize{
            .{ -1, 0 }, // left
            .{ 1, 0 }, // right
            .{ 0, -1 }, // up
            .{ 0, 1 }, // down
        };
        for (neighbors) |offset| {
            const nx_signed: isize = @as(isize, @intCast(x)) + offset[0];
            const ny_signed: isize = @as(isize, @intCast(y)) + offset[1];
            // open boundaries - skip if out of bounds
            if (nx_signed < 0 or nx_signed >= @as(isize, @intCast(self.size))) continue;
            if (ny_signed < 0 or ny_signed >= @as(isize, @intCast(self.size))) continue;
            const nx: usize = @intCast(nx_signed);
            const ny: usize = @intCast(ny_signed);
            const ni = self.index(nx, ny);
            if (self.sites[ni]) {
                self.unionSites(i, ni);
            }
        }
    }

    /// vacate a site (lazy - doesn't rebuild clusters immediately)
    pub fn vacate(self: *Lattice, x: usize, y: usize) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        const i = self.index(x, y);
        if (!self.sites[i]) return; // already empty
        self.sites[i] = false;
        self.generation += 1;
        // note: we don't rebuild union-find here (lazy deletion)
        // clusters will be slightly incorrect until rebuild
    }

    /// rebuild union-find from scratch (internal, assumes lock held)
    fn rebuildUnlocked(self: *Lattice) void {
        // reset all parents
        @memset(self.parent, -1);
        // rebuild unions (open boundaries - no wrap-around)
        for (0..self.size) |y| {
            for (0..self.size) |x| {
                const i = self.index(x, y);
                if (!self.sites[i]) continue;
                // union with occupied neighbors to the left and above
                // (only need to check 2 directions when scanning left-to-right, top-to-bottom)
                // open boundaries: don't wrap around
                if (x > 0) {
                    const left = self.index(x - 1, y);
                    if (self.sites[left]) self.unionSites(i, left);
                }
                if (y > 0) {
                    const above = self.index(x, y - 1);
                    if (self.sites[above]) self.unionSites(i, above);
                }
            }
        }
    }

    /// rebuild union-find from scratch (public, acquires lock)
    pub fn rebuild(self: *Lattice) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.rebuildUnlocked();
    }

    /// get current statistics.
    /// Percolation detection is best-effort: hash-map inserts swallow OOM
    /// (`catch {}`), so under memory pressure percolates may read false.
    pub fn getStats(self: *Lattice) Stats {
        self.mutex.lock();
        defer self.mutex.unlock();

        var occupied: usize = 0;
        var clusters: usize = 0;
        var largest: usize = 0;

        // track which roots touch top/bottom rows using hash maps
        // (avoids the % 4096 collision bug with StaticBitSet)
        var top_roots = std.AutoHashMap(usize, void).init(self.allocator);
        defer top_roots.deinit();
        var bottom_roots = std.AutoHashMap(usize, void).init(self.allocator);
        defer bottom_roots.deinit();

        for (0..self.size * self.size) |i| {
            if (!self.sites[i]) continue;
            occupied += 1;
            const root = self.findRoot(i);
            const size: usize = @intCast(-self.parent[root]);
            // count unique roots (clusters)
            if (root == i) {
                clusters += 1;
                if (size > largest) largest = size;
            }
            // track roots touching top and bottom rows
            const y = i / self.size;
            if (y == 0) top_roots.put(root, {}) catch {};
            if (y == self.size - 1) bottom_roots.put(root, {}) catch {};
        }

        // percolates if any root touches both top and bottom
        var percolates = false;
        var iter = top_roots.keyIterator();
        while (iter.next()) |root| {
            if (bottom_roots.contains(root.*)) {
                percolates = true;
                break;
            }
        }

        return .{
            .size = self.size,
            .occupied = occupied,
            .clusters = clusters,
            .largest = largest,
            .percolates = percolates,
        };
    }

    /// get full state as packed bitmap (1 bit per site). Caller owns the slice.
    pub fn getStateBitmap(self: *Lattice, alloc: Allocator) ![]u8 {
        self.mutex.lock();
        defer self.mutex.unlock();

        const total = self.size * self.size;
        const bytes = (total + 7) / 8;
        const bitmap = try alloc.alloc(u8, bytes);
        @memset(bitmap, 0);
        for (0..total) |i| {
            if (self.sites[i]) {
                bitmap[i / 8] |= @as(u8, 1) << @intCast(i % 8);
            }
        }
        return bitmap;
    }

    /// get state with largest cluster marked (2 bits per site:
    /// 0=empty, 1=occupied, 2=largest cluster). Caller owns the slice.
    pub fn getStateWithLargest(self: *Lattice, alloc: Allocator) ![]u8 {
        self.mutex.lock();
        defer self.mutex.unlock();

        const total = self.size * self.size;

        // find largest cluster root
        var largest_root: ?usize = null;
        var largest_size: usize = 0;
        for (0..total) |i| {
            if (!self.sites[i]) continue;
            const root = self.findRoot(i);
            if (root == i) { // is a root
                const size: usize = @intCast(-self.parent[root]);
                if (size > largest_size) {
                    largest_size = size;
                    largest_root = root;
                }
            }
        }

        // pack 2 bits per site (4 sites per byte)
        const bytes = (total + 3) / 4;
        const data = try alloc.alloc(u8, bytes);
        @memset(data, 0);
        for (0..total) |i| {
            if (!self.sites[i]) continue;
            const root = self.findRoot(i);
            const val: u8 = if (largest_root != null and root == largest_root.?) 2 else 1;
            const byte_idx = i / 4;
            const bit_offset: u3 = @intCast((i % 4) * 2);
            data[byte_idx] |= val << bit_offset;
        }
        return data;
    }
};

/// track percolation transitions
pub const TransitionTracker = struct {
    last_percolated: bool,
    transitions: u64, // count of percolation on/off transitions
    events_since_transition: u64,

    pub fn init() TransitionTracker {
        return .{
            .last_percolated = false,
            .transitions = 0,
            .events_since_transition = 0,
        };
    }

    /// record one observation; counts a transition when the percolation
    /// state flips, otherwise counts events since the last flip
    pub fn update(self: *TransitionTracker, percolates: bool) void {
        if (percolates != self.last_percolated) {
            self.transitions += 1;
            self.events_since_transition = 0;
            self.last_percolated = percolates;
        } else {
            self.events_since_transition += 1;
        }
    }
};

/// EWMA rate tracker for events per second
pub const EventRateTracker = struct {
    rate: f32 = 0.0, // events per second (EWMA)
    last_update: i64 = 0,
    events_since_update: u32 = 0,

    const EWMA_ALPHA: f32 = 0.3; // moderate smoothing
    const UPDATE_INTERVAL_MS: i64 = 2000; // update every 2 seconds

    /// record one event at millisecond timestamp `now`; folds the batch into
    /// the EWMA once UPDATE_INTERVAL_MS has elapsed since the last fold
    pub fn record(self: *EventRateTracker, now: i64) void {
        self.events_since_update += 1;
        if (self.last_update == 0) {
            // first event just anchors the clock
            self.last_update = now;
            return;
        }
        const elapsed = now - self.last_update;
        if (elapsed >= UPDATE_INTERVAL_MS) {
            const elapsed_secs: f32 = @as(f32, @floatFromInt(elapsed)) / 1000.0;
            const instant_rate = @as(f32, @floatFromInt(self.events_since_update)) / elapsed_secs;
            self.rate = EWMA_ALPHA * instant_rate + (1.0 - EWMA_ALPHA) * self.rate;
            self.last_update = now;
            self.events_since_update = 0;
        }
    }
};

/// world containing multiple lattices at different resolutions
pub const World = struct {
    small: Lattice, // 32x32 - supercritical (~70%)
    medium: Lattice, // 128x128 - near critical (~59%)
    large: Lattice, // 512x512 - subcritical (~40%)
    event_count: std.atomic.Value(u64),
    event_rate: EventRateTracker = .{},
    // transition tracking for the medium lattice (most interesting)
    medium_tracker: TransitionTracker,

    pub fn init(allocator: Allocator) !World {
        return .{
            .small = try Lattice.initWithWindow(allocator, 32, null),
            .medium = try Lattice.initWithWindow(allocator, 128, null),
            .large = try Lattice.initWithWindow(allocator, 512, null),
            .event_count = std.atomic.Value(u64).init(0),
            .event_rate = .{},
            .medium_tracker = TransitionTracker.init(),
        };
    }

    pub fn deinit(self: *World) void {
        self.small.deinit();
        self.medium.deinit();
        self.large.deinit();
    }

    /// feed an event hash to all lattices (rolling window decay handles removal)
    pub fn feedEvent(self: *World, hash: u64, is_create: bool) void {
        _ = is_create; // with rolling window, we always occupy; decay handles removal
        _ = self.event_count.fetchAdd(1, .monotonic);
        self.event_rate.record(std.time.milliTimestamp());

        // extract coordinates for each lattice size using different hash bits.
        // explicit @intCast so this also compiles on targets where usize is
        // narrower than u64; the masks guarantee each value fits
        const x_small: usize = @intCast(hash & 31); // 0-31
        const y_small: usize = @intCast((hash >> 5) & 31);
        const x_medium: usize = @intCast(hash & 127); // 0-127
        const y_medium: usize = @intCast((hash >> 7) & 127);
        const x_large: usize = @intCast(hash & 511); // 0-511
        const y_large: usize = @intCast((hash >> 9) & 511);

        self.small.occupy(x_small, y_small);
        self.medium.occupy(x_medium, y_medium);
        self.large.occupy(x_large, y_large);
    }

    /// update transition tracker (call periodically, not on every event)
    pub fn updateTracker(self: *World) void {
        const stats = self.medium.getStats();
        self.medium_tracker.update(stats.percolates);
    }

    pub fn getEventCount(self: *World) u64 {
        return self.event_count.load(.monotonic);
    }

    pub fn getEventRate(self: *World) f32 {
        return self.event_rate.rate;
    }
};

// global world instance
var world: ?World = null;
var world_allocator: Allocator = undefined;

/// initialize the global world; on failure logs and leaves `world` null
/// (get() will then panic — callers must init() first)
pub fn init() void {
    // use page allocator for the world (long-lived)
    world_allocator = std.heap.page_allocator;
    world = World.init(world_allocator) catch {
        std.log.err("failed to init world", .{});
        return;
    };
    std.log.info("lattice world initialized (32, 128, 512)", .{});
}

pub fn get() *World {
    return &(world.?);
}

// tests
test "lattice basic operations" {
    const allocator = std.testing.allocator;
    var lat = try Lattice.init(allocator, 8);
    defer lat.deinit();

    // occupy some sites
    lat.occupy(0, 0);
    lat.occupy(1, 0);
    lat.occupy(0, 1);

    const stats = lat.getStats();
    try std.testing.expectEqual(@as(usize, 3), stats.occupied);
    try std.testing.expectEqual(@as(usize, 1), stats.clusters); // all connected
    try std.testing.expectEqual(@as(usize, 3), stats.largest);
}

test "lattice disconnected clusters" {
    const allocator = std.testing.allocator;
    var lat = try Lattice.init(allocator, 8);
    defer lat.deinit();

    // two separate clusters
    lat.occupy(0, 0);
    lat.occupy(4, 4);

    const stats = lat.getStats();
    try std.testing.expectEqual(@as(usize, 2), stats.occupied);
    try std.testing.expectEqual(@as(usize, 2), stats.clusters);
    try std.testing.expectEqual(@as(usize, 1), stats.largest);
}

test "lattice percolation" {
    const allocator = std.testing.allocator;
    var lat = try Lattice.init(allocator, 4);
    defer lat.deinit();

    // vertical line from top to bottom
    lat.occupy(1, 0);
    lat.occupy(1, 1);
    lat.occupy(1, 2);
    lat.occupy(1, 3);

    const stats = lat.getStats();
    try std.testing.expect(stats.percolates);
}

test "world multi-resolution" {
    const allocator = std.testing.allocator;
    var w = try World.init(allocator);
    defer w.deinit();

    // feed some events
    w.feedEvent(0x12345678, true);
    w.feedEvent(0xdeadbeef, true);

    try std.testing.expectEqual(@as(u64, 2), w.getEventCount());

    const small_stats = w.small.getStats();
    const medium_stats = w.medium.getStats();
    const large_stats = w.large.getStats();
    try std.testing.expectEqual(@as(usize, 2), small_stats.occupied);
    try std.testing.expectEqual(@as(usize, 2), medium_stats.occupied);
    try std.testing.expectEqual(@as(usize, 2), large_stats.occupied);
}