//! Percolation lattice simulation for coral.waow.tech:
//! multi-resolution 2D lattices with union-find cluster tracking and rolling-window decay.
1const std = @import("std");
2const Allocator = std.mem.Allocator;
3
/// Aggregate statistics captured from one lattice snapshot.
pub const Stats = struct {
    size: usize,
    occupied: usize,
    clusters: usize,
    largest: usize,
    percolates: bool,

    /// Fraction of sites occupied, in [0, 1]. Returns 0.0 for a size-0 lattice.
    pub fn density(self: Stats) f32 {
        const total = self.size * self.size;
        if (total == 0) return 0.0;
        const occ: f32 = @floatFromInt(self.occupied);
        const cap: f32 = @floatFromInt(total);
        return occ / cap;
    }
};
17
/// 2D lattice with Union-Find cluster tracking and rolling window decay.
///
/// Thread-safe: every public method takes the internal mutex. Each call to
/// `occupy` advances `generation` by one; a site silently expires once it has
/// not been refreshed for `window_size` generations (checked by a periodic
/// sweep, so expiry is approximate, not immediate).
pub const Lattice = struct {
    size: usize, // L for LxL grid (power of 2)
    mask: usize, // size - 1 for bit masking
    sites: []bool, // occupied state
    parent: []isize, // Union-Find parent (-1 = root with size 1, -n = root with size n)
    generation: u64, // increments on changes
    allocator: Allocator,

    // generation-based decay: each site tracks when it was last hit
    last_seen: []u64, // generation when each site was last occupied
    window_size: u64, // sites expire after this many generations
    last_sweep: u64, // generation when we last swept for expired sites
    sweep_interval: u64, // how often to sweep

    mutex: std.Thread.Mutex,

    /// Create a size x size lattice with the size-dependent default window.
    /// `size` must be a power of two. Caller must call `deinit`.
    pub fn init(allocator: Allocator, size: usize) !Lattice {
        return initWithWindow(allocator, size, null);
    }

    /// Create a lattice with an explicit decay window (in generations), or
    /// the size-dependent default when `window` is null.
    /// `size` must be a power of two. Caller must call `deinit`.
    pub fn initWithWindow(allocator: Allocator, size: usize, window: ?u64) !Lattice {
        std.debug.assert(size > 0 and (size & (size - 1)) == 0); // must be power of 2

        const total = size * size;
        const sites = try allocator.alloc(bool, total);
        errdefer allocator.free(sites); // bugfix: don't leak if a later alloc fails
        @memset(sites, false);

        const parent = try allocator.alloc(isize, total);
        errdefer allocator.free(parent); // bugfix: likewise
        @memset(parent, -1); // each site is its own root with size 1

        const last_seen = try allocator.alloc(u64, total);
        @memset(last_seen, 0);

        // default window targets different densities per size
        // with random hashing, P(site occupied) = 1 - e^(-W/N)
        // solving for W: W = -N * ln(1-P)
        // - small (32): ~70% → supercritical, usually percolates
        // - medium (128): ~59% → near critical threshold (p_c ≈ 0.593)
        // - large (512): ~40% → subcritical, rarely percolates
        const ftotal: f64 = @floatFromInt(total);
        const default_window: u64 = switch (size) {
            32 => @intFromFloat(-ftotal * @log(1.0 - 0.70)), // W ≈ 1.2 * N
            128 => @intFromFloat(-ftotal * @log(1.0 - 0.59)), // W ≈ 0.89 * N
            512 => @intFromFloat(-ftotal * @log(1.0 - 0.40)), // W ≈ 0.51 * N
            else => @intFromFloat(-ftotal * @log(0.5)), // 50%
        };
        const win = window orelse default_window;

        // sweep for expired sites every 10% of window (at least 100 generations apart)
        const sweep_interval = @max(win / 10, 100);

        return .{
            .size = size,
            .mask = size - 1,
            .sites = sites,
            .parent = parent,
            .generation = 0,
            .allocator = allocator,
            .last_seen = last_seen,
            .window_size = win,
            .last_sweep = 0,
            .sweep_interval = sweep_interval,
            .mutex = .{},
        };
    }

    /// Free all backing storage. The lattice must not be used afterwards.
    pub fn deinit(self: *Lattice) void {
        self.allocator.free(self.sites);
        self.allocator.free(self.parent);
        self.allocator.free(self.last_seen);
    }

    /// convert (x, y) to flat index (coordinates wrap via the power-of-2 mask)
    fn index(self: *const Lattice, x: usize, y: usize) usize {
        return (y & self.mask) * self.size + (x & self.mask);
    }

    /// find root of site i, with full path compression
    fn findRoot(self: *Lattice, i: usize) usize {
        var current = i;
        // walk up to the root (roots have negative parent = -cluster_size)
        while (self.parent[current] >= 0) {
            current = @intCast(self.parent[current]);
        }
        // path compression: repoint every node on the walked path at the root
        var node = i;
        while (self.parent[node] >= 0) {
            const next: usize = @intCast(self.parent[node]);
            self.parent[node] = @intCast(current);
            node = next;
        }
        return current;
    }

    /// union two sites, attaching the smaller tree under the larger (weighted union)
    fn unionSites(self: *Lattice, a: usize, b: usize) void {
        const root_a = self.findRoot(a);
        const root_b = self.findRoot(b);
        if (root_a == root_b) return; // already in the same cluster

        // negative parent = root with size -parent
        const size_a: isize = -self.parent[root_a];
        const size_b: isize = -self.parent[root_b];

        // attach smaller tree under larger; merged root stores combined size
        if (size_a < size_b) {
            self.parent[root_a] = @intCast(root_b);
            self.parent[root_b] = -(size_a + size_b);
        } else {
            self.parent[root_b] = @intCast(root_a);
            self.parent[root_a] = -(size_a + size_b);
        }
    }

    /// Occupy (or refresh) a site. Advances the generation counter, refreshes
    /// the site's last-seen stamp, and periodically sweeps out expired sites.
    /// Coordinates outside the grid wrap via the bit mask.
    pub fn occupy(self: *Lattice, x: usize, y: usize) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        self.generation += 1;
        const i = self.index(x, y);

        // update last_seen for this site (whether newly occupied or refreshed)
        self.last_seen[i] = self.generation;

        // periodic sweep for expired sites
        if (self.generation - self.last_sweep >= self.sweep_interval) {
            self.sweepExpired();
            self.last_sweep = self.generation;
        }

        // if already occupied, refreshing last_seen above was enough
        if (self.sites[i]) return;

        self.sites[i] = true;
        self.parent[i] = -1; // new root with size 1

        // union with occupied neighbors (4-connected)
        self.unionWithNeighbors(i, x, y);
    }

    /// sweep through all sites, expire ones older than the window, and
    /// rebuild union-find if anything expired (assumes lock held)
    fn sweepExpired(self: *Lattice) void {
        var expired: usize = 0;
        // sites last seen before this generation are expired; until the
        // window has elapsed once, nothing can expire (threshold 0)
        const threshold = if (self.generation > self.window_size)
            self.generation - self.window_size
        else
            0;

        for (0..self.size * self.size) |i| {
            if (self.sites[i] and self.last_seen[i] < threshold) {
                self.sites[i] = false;
                expired += 1;
            }
        }

        // rebuild union-find if we expired sites
        if (expired > 0) {
            self.rebuildUnlocked();
        }
    }

    /// union site with its occupied neighbors (open boundaries - no wrap-around)
    fn unionWithNeighbors(self: *Lattice, i: usize, x: usize, y: usize) void {
        const neighbors = [_][2]isize{
            .{ -1, 0 }, // left
            .{ 1, 0 }, // right
            .{ 0, -1 }, // up
            .{ 0, 1 }, // down
        };

        for (neighbors) |offset| {
            const nx_signed: isize = @as(isize, @intCast(x)) + offset[0];
            const ny_signed: isize = @as(isize, @intCast(y)) + offset[1];

            // open boundaries - skip if out of bounds
            if (nx_signed < 0 or nx_signed >= @as(isize, @intCast(self.size))) continue;
            if (ny_signed < 0 or ny_signed >= @as(isize, @intCast(self.size))) continue;

            const nx: usize = @intCast(nx_signed);
            const ny: usize = @intCast(ny_signed);

            const ni = self.index(nx, ny);
            if (self.sites[ni]) {
                self.unionSites(i, ni);
            }
        }
    }

    /// Vacate a site (lazy - doesn't rebuild clusters immediately).
    /// Cluster stats are slightly stale until the next rebuild/sweep.
    pub fn vacate(self: *Lattice, x: usize, y: usize) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        const i = self.index(x, y);
        if (!self.sites[i]) return; // already empty

        self.sites[i] = false;
        self.generation += 1;

        // note: we don't rebuild union-find here (lazy deletion)
        // clusters will be slightly incorrect until rebuild
    }

    /// rebuild union-find from scratch (internal, assumes lock held)
    fn rebuildUnlocked(self: *Lattice) void {
        // reset all parents
        @memset(self.parent, -1);

        // rebuild unions (open boundaries - no wrap-around)
        for (0..self.size) |y| {
            for (0..self.size) |x| {
                const i = self.index(x, y);
                if (!self.sites[i]) continue;

                // union with occupied neighbors to the left and above
                // (only need to check 2 directions when scanning left-to-right, top-to-bottom)
                // open boundaries: don't wrap around
                if (x > 0) {
                    const left = self.index(x - 1, y);
                    if (self.sites[left]) self.unionSites(i, left);
                }
                if (y > 0) {
                    const above = self.index(x, y - 1);
                    if (self.sites[above]) self.unionSites(i, above);
                }
            }
        }
    }

    /// rebuild union-find from scratch (public, acquires lock)
    pub fn rebuild(self: *Lattice) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.rebuildUnlocked();
    }

    /// Compute current statistics: occupied count, cluster count, largest
    /// cluster size, and whether any cluster spans top row to bottom row.
    /// Best-effort under OOM: if the root-set inserts fail, percolation may
    /// be under-reported (put errors are deliberately swallowed).
    pub fn getStats(self: *Lattice) Stats {
        self.mutex.lock();
        defer self.mutex.unlock();

        var occupied: usize = 0;
        var clusters: usize = 0;
        var largest: usize = 0;

        // track which roots touch top/bottom rows using hash maps
        // (avoids the % 4096 collision bug with StaticBitSet)
        var top_roots = std.AutoHashMap(usize, void).init(self.allocator);
        defer top_roots.deinit();
        var bottom_roots = std.AutoHashMap(usize, void).init(self.allocator);
        defer bottom_roots.deinit();

        for (0..self.size * self.size) |i| {
            if (!self.sites[i]) continue;
            occupied += 1;

            const root = self.findRoot(i);
            const size: usize = @intCast(-self.parent[root]);

            // count unique roots (clusters)
            if (root == i) {
                clusters += 1;
                if (size > largest) largest = size;
            }

            // track roots touching top and bottom rows
            const y = i / self.size;
            if (y == 0) top_roots.put(root, {}) catch {};
            if (y == self.size - 1) bottom_roots.put(root, {}) catch {};
        }

        // percolates if any root touches both top and bottom
        var percolates = false;
        var iter = top_roots.keyIterator();
        while (iter.next()) |root| {
            if (bottom_roots.contains(root.*)) {
                percolates = true;
                break;
            }
        }

        return .{
            .size = self.size,
            .occupied = occupied,
            .clusters = clusters,
            .largest = largest,
            .percolates = percolates,
        };
    }

    /// Get full state as packed bitmap (1 bit per site, LSB-first within
    /// each byte). Caller owns the returned slice.
    pub fn getStateBitmap(self: *Lattice, alloc: Allocator) ![]u8 {
        self.mutex.lock();
        defer self.mutex.unlock();

        const total = self.size * self.size;
        const bytes = (total + 7) / 8;
        const bitmap = try alloc.alloc(u8, bytes);
        @memset(bitmap, 0);

        for (0..total) |i| {
            if (self.sites[i]) {
                bitmap[i / 8] |= @as(u8, 1) << @intCast(i % 8);
            }
        }

        return bitmap;
    }

    /// Get state with largest cluster marked (2 bits per site: 0=empty,
    /// 1=occupied, 2=largest cluster). Caller owns the returned slice.
    pub fn getStateWithLargest(self: *Lattice, alloc: Allocator) ![]u8 {
        self.mutex.lock();
        defer self.mutex.unlock();

        const total = self.size * self.size;

        // find largest cluster root
        var largest_root: ?usize = null;
        var largest_size: usize = 0;

        for (0..total) |i| {
            if (!self.sites[i]) continue;
            const root = self.findRoot(i);
            if (root == i) { // is a root
                const size: usize = @intCast(-self.parent[root]);
                if (size > largest_size) {
                    largest_size = size;
                    largest_root = root;
                }
            }
        }

        // pack 2 bits per site (4 sites per byte)
        const bytes = (total + 3) / 4;
        const data = try alloc.alloc(u8, bytes);
        @memset(data, 0);

        for (0..total) |i| {
            if (!self.sites[i]) continue;

            const root = self.findRoot(i);
            const val: u8 = if (largest_root != null and root == largest_root.?) 2 else 1;

            const byte_idx = i / 4;
            const bit_offset: u3 = @intCast((i % 4) * 2);
            data[byte_idx] |= val << bit_offset;
        }

        return data;
    }
};
371
/// Tracks on/off transitions of the percolation flag over time.
pub const TransitionTracker = struct {
    last_percolated: bool,
    transitions: u64, // count of percolation on/off transitions
    events_since_transition: u64,

    /// Fresh tracker: not percolating, no transitions observed.
    pub fn init() TransitionTracker {
        return .{
            .last_percolated = false,
            .transitions = 0,
            .events_since_transition = 0,
        };
    }

    /// Record one observation of the percolation flag.
    pub fn update(self: *TransitionTracker, percolates: bool) void {
        // unchanged flag: just age the counter
        if (percolates == self.last_percolated) {
            self.events_since_transition += 1;
            return;
        }
        // flag flipped: count the transition and reset the age
        self.transitions += 1;
        self.events_since_transition = 0;
        self.last_percolated = percolates;
    }
};
396
/// EWMA tracker for events per second. Callers pass a millisecond timestamp
/// to `record`; the smoothed rate refreshes at most once per interval.
pub const EventRateTracker = struct {
    rate: f32 = 0.0, // events per second (EWMA)
    last_update: i64 = 0, // ms timestamp of last rate refresh (0 = never)
    events_since_update: u32 = 0,

    const EWMA_ALPHA: f32 = 0.3; // moderate smoothing
    const UPDATE_INTERVAL_MS: i64 = 2000; // update every 2 seconds

    /// Count one event at time `now` (milliseconds); refresh the EWMA rate
    /// once at least UPDATE_INTERVAL_MS have elapsed since the last refresh.
    pub fn record(self: *EventRateTracker, now: i64) void {
        self.events_since_update += 1;

        // first-ever call just anchors the clock
        if (self.last_update == 0) {
            self.last_update = now;
            return;
        }

        const elapsed = now - self.last_update;
        if (elapsed < UPDATE_INTERVAL_MS) return;

        const elapsed_secs: f32 = @as(f32, @floatFromInt(elapsed)) / 1000.0;
        const instant_rate = @as(f32, @floatFromInt(self.events_since_update)) / elapsed_secs;
        self.rate = EWMA_ALPHA * instant_rate + (1.0 - EWMA_ALPHA) * self.rate;
        self.last_update = now;
        self.events_since_update = 0;
    }
};
424
/// world containing multiple lattices at different resolutions
pub const World = struct {
    small: Lattice, // 32x32 - supercritical (~70%)
    medium: Lattice, // 128x128 - near critical (~59%)
    large: Lattice, // 512x512 - subcritical (~40%)
    event_count: std.atomic.Value(u64),
    event_rate: EventRateTracker = .{},

    // transition tracking for the medium lattice (most interesting)
    medium_tracker: TransitionTracker,

    /// Allocate all three lattices. On failure nothing leaks.
    pub fn init(allocator: Allocator) !World {
        // bugfix: build the lattices one at a time with errdefer so that a
        // failed allocation for a later lattice frees the earlier ones
        var small = try Lattice.initWithWindow(allocator, 32, null);
        errdefer small.deinit();
        var medium = try Lattice.initWithWindow(allocator, 128, null);
        errdefer medium.deinit();
        const large = try Lattice.initWithWindow(allocator, 512, null);

        return .{
            .small = small,
            .medium = medium,
            .large = large,
            .event_count = std.atomic.Value(u64).init(0),
            .event_rate = .{},
            .medium_tracker = TransitionTracker.init(),
        };
    }

    /// Free all three lattices.
    pub fn deinit(self: *World) void {
        self.small.deinit();
        self.medium.deinit();
        self.large.deinit();
    }

    /// feed an event hash to all lattices (rolling window decay handles removal)
    pub fn feedEvent(self: *World, hash: u64, is_create: bool) void {
        _ = is_create; // with rolling window, we always occupy; decay handles removal
        _ = self.event_count.fetchAdd(1, .monotonic);
        self.event_rate.record(std.time.milliTimestamp());

        // extract coordinates for each lattice size using different hash bits;
        // @intCast is safe (values are masked below 512) and keeps this
        // portable to 32-bit targets where usize is narrower than u64
        const x_small: usize = @intCast(hash & 31); // 0-31
        const y_small: usize = @intCast((hash >> 5) & 31);
        const x_medium: usize = @intCast(hash & 127); // 0-127
        const y_medium: usize = @intCast((hash >> 7) & 127);
        const x_large: usize = @intCast(hash & 511); // 0-511
        const y_large: usize = @intCast((hash >> 9) & 511);

        self.small.occupy(x_small, y_small);
        self.medium.occupy(x_medium, y_medium);
        self.large.occupy(x_large, y_large);
    }

    /// update transition tracker (call periodically, not on every event)
    pub fn updateTracker(self: *World) void {
        const stats = self.medium.getStats();
        self.medium_tracker.update(stats.percolates);
    }

    /// Total events fed so far.
    pub fn getEventCount(self: *World) u64 {
        return self.event_count.load(.monotonic);
    }

    /// Current smoothed event rate (events/second).
    pub fn getEventRate(self: *World) f32 {
        return self.event_rate.rate;
    }
};
486
// global world instance
var world: ?World = null;
var world_allocator: Allocator = undefined;

/// Initialize the global world using the page allocator.
/// On failure, logs an error and leaves the global unset.
pub fn init() void {
    // use page allocator for the world (long-lived)
    world_allocator = std.heap.page_allocator;
    if (World.init(world_allocator)) |w| {
        world = w;
        std.log.info("lattice world initialized (32, 128, 512)", .{});
    } else |_| {
        std.log.err("failed to init world", .{});
    }
}

/// Get the global world. Panics (in safe modes) if init() has not
/// succeeded — assumes assertion of prior initialization.
pub fn get() *World {
    return &(world.?);
}
504
// tests
test "lattice basic operations" {
    var lat = try Lattice.init(std.testing.allocator, 8);
    defer lat.deinit();

    // L-shaped triple of adjacent sites forms a single cluster
    lat.occupy(0, 0);
    lat.occupy(1, 0);
    lat.occupy(0, 1);

    const s = lat.getStats();
    try std.testing.expectEqual(@as(usize, 3), s.occupied);
    try std.testing.expectEqual(@as(usize, 1), s.clusters); // all connected
    try std.testing.expectEqual(@as(usize, 3), s.largest);
}
521
test "lattice disconnected clusters" {
    var lat = try Lattice.init(std.testing.allocator, 8);
    defer lat.deinit();

    // two sites far apart: each is its own size-1 cluster
    lat.occupy(0, 0);
    lat.occupy(4, 4);

    const s = lat.getStats();
    try std.testing.expectEqual(@as(usize, 2), s.occupied);
    try std.testing.expectEqual(@as(usize, 2), s.clusters);
    try std.testing.expectEqual(@as(usize, 1), s.largest);
}
536
test "lattice percolation" {
    var lat = try Lattice.init(std.testing.allocator, 4);
    defer lat.deinit();

    // full vertical column at x = 1 spans the top row to the bottom row
    var y: usize = 0;
    while (y < 4) : (y += 1) lat.occupy(1, y);

    try std.testing.expect(lat.getStats().percolates);
}
551
test "world multi-resolution" {
    var w = try World.init(std.testing.allocator);
    defer w.deinit();

    // feed two events with distinct hashes
    const hashes = [_]u64{ 0x12345678, 0xdeadbeef };
    for (hashes) |h| w.feedEvent(h, true);

    try std.testing.expectEqual(@as(u64, 2), w.getEventCount());

    // both events land on distinct sites at every resolution
    for ([_]*Lattice{ &w.small, &w.medium, &w.large }) |lat| {
        try std.testing.expectEqual(@as(usize, 2), lat.getStats().occupied);
    }
}