prefect server in zig

replace BoundedChannel with growable Queue

the memory broker was pre-allocating 50,000 × 8KB = 400MB for message
storage. replaced it with an ArrayList-backed Queue that grows on demand,
matching Python's asyncio.Queue() behavior.

memory usage: 432MB → 39MB

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

+37 -68
+2 -2
src/broker/CLAUDE.md
··· 8 - `Message` - id, topic, data, attributes, timestamp 9 - `MessageHandler` - callback for consuming messages 10 - `ConsumerHandle` - opaque handle for unsubscribing 11 - - `BoundedChannel` - generic bounded queue for backpressure 12 13 ## fan-out 14 ··· 30 31 ## memory backend 32 33 - uses BoundedChannel (heap-allocated, 50k capacity) per topic. 34 broadcasts to all subscribers (simulates fan-out).
··· 8 - `Message` - id, topic, data, attributes, timestamp 9 - `MessageHandler` - callback for consuming messages 10 - `ConsumerHandle` - opaque handle for unsubscribing 11 + - `Queue` - generic growable queue (ArrayList-backed) 12 13 ## fan-out 14 ··· 30 31 ## memory backend 32 33 + uses growable Queue (ArrayList-backed) per topic. 34 broadcasts to all subscribers (simulates fan-out).
+35 -66
src/broker/core.zig
··· 98 }; 99 100 // ============================================================================ 101 - // Memory Broker - wraps BoundedChannel for in-process messaging 102 // ============================================================================ 103 104 pub const MemoryBroker = struct { 105 const Self = @This(); 106 - const CHANNEL_CAPACITY = 50000; 107 108 /// Topic subscription 109 const Subscription = struct { ··· 113 active: bool, 114 }; 115 116 - /// Topic with its channel and subscriptions 117 const Topic = struct { 118 - channel: BoundedChannel(StoredMessage, CHANNEL_CAPACITY), 119 subscriptions: std.ArrayListUnmanaged(Subscription), 120 worker_thread: ?Thread, 121 running: bool, ··· 146 self.alloc.free(sub.group); 147 } 148 entry.value_ptr.*.subscriptions.deinit(self.alloc); 149 - entry.value_ptr.*.channel.deinit(); 150 self.alloc.destroy(entry.value_ptr.*); 151 } 152 self.topics.deinit(self.alloc); ··· 163 164 const topic = try self.alloc.create(Topic); 165 topic.* = .{ 166 - .channel = try BoundedChannel(StoredMessage, CHANNEL_CAPACITY).init(self.alloc), 167 .subscriptions = .{}, 168 .worker_thread = null, 169 .running = false, ··· 190 log.warn("broker", "message {s} truncated", .{msg.id[0..@min(msg.id_len, 36)]}); 191 } 192 193 - if (!topic.channel.trySend(msg)) { 194 - log.warn("broker", "backpressure: message dropped on topic {s}", .{topic_name}); 195 - return error.ChannelFull; 196 - } 197 } 198 199 pub fn subscribe(self: *Self, topic_name: []const u8, group: []const u8, handler: MessageHandler) !ConsumerHandle { ··· 258 topic.running = false; 259 topic.mutex.unlock(); 260 261 - topic.channel.close(); 262 263 if (topic.worker_thread) |t| { 264 t.join(); ··· 275 const should_run = topic.running; 276 topic.mutex.unlock(); 277 278 - if (!should_run and topic.channel.len() == 0) break; 279 280 - if (topic.channel.receiveTimeout(timeout_ns)) |msg| { 281 // create Message from StoredMessage with proper topic 282 const message = Message{ 283 .id = 
msg.id[0..msg.id_len], ··· 310 pub const RedisClient = redis.RedisClient; 311 312 // ============================================================================ 313 - // Storage types for bounded channel 314 // ============================================================================ 315 316 /// Stored message data with fixed-size buffers (no allocation in hot path) ··· 331 } 332 333 // ============================================================================ 334 - // Bounded Channel (moved from messaging.zig for reuse) 335 // ============================================================================ 336 337 - pub fn BoundedChannel(comptime T: type, comptime capacity: usize) type { 338 return struct { 339 const Self = @This(); 340 341 - buffer: []T, 342 alloc: Allocator, 343 - head: usize = 0, 344 - tail: usize = 0, 345 - count: usize = 0, 346 - mutex: Thread.Mutex = .{}, 347 - not_empty: Thread.Condition = .{}, 348 - not_full: Thread.Condition = .{}, 349 - closed: bool = false, 350 351 - pub fn init(alloc: Allocator) !Self { 352 - const buffer = try alloc.alloc(T, capacity); 353 return .{ 354 - .buffer = buffer, 355 .alloc = alloc, 356 }; 357 } 358 359 pub fn deinit(self: *Self) void { 360 - self.alloc.free(self.buffer); 361 } 362 363 - pub fn trySend(self: *Self, item: T) bool { 364 self.mutex.lock(); 365 defer self.mutex.unlock(); 366 367 - if (self.closed or self.count >= self.buffer.len) { 368 - return false; 369 - } 370 371 - self.buffer[self.tail] = item; 372 - self.tail = (self.tail + 1) % self.buffer.len; 373 - self.count += 1; 374 self.not_empty.signal(); 375 - return true; 376 } 377 378 - pub fn receiveTimeout(self: *Self, timeout_ns: u64) ?T { 379 self.mutex.lock(); 380 defer self.mutex.unlock(); 381 382 - while (self.count == 0 and !self.closed) { 383 self.not_empty.timedWait(&self.mutex, timeout_ns) catch { 384 return null; 385 }; 386 } 387 388 - if (self.count == 0) return null; 389 - 390 - const item = self.buffer[self.head]; 391 - 
self.head = (self.head + 1) % self.buffer.len; 392 - self.count -= 1; 393 - self.not_full.signal(); 394 - return item; 395 - } 396 397 - pub fn drain(self: *Self, out: []T, max: usize) usize { 398 - self.mutex.lock(); 399 - defer self.mutex.unlock(); 400 - 401 - const to_drain = @min(self.count, @min(max, out.len)); 402 - for (0..to_drain) |i| { 403 - out[i] = self.buffer[self.head]; 404 - self.head = (self.head + 1) % self.buffer.len; 405 - } 406 - self.count -= to_drain; 407 - if (to_drain > 0) self.not_full.broadcast(); 408 - return to_drain; 409 } 410 411 pub fn len(self: *Self) usize { 412 self.mutex.lock(); 413 defer self.mutex.unlock(); 414 - return self.count; 415 } 416 417 pub fn close(self: *Self) void { ··· 419 defer self.mutex.unlock(); 420 self.closed = true; 421 self.not_empty.broadcast(); 422 - self.not_full.broadcast(); 423 - } 424 - 425 - pub fn isClosed(self: *Self) bool { 426 - self.mutex.lock(); 427 - defer self.mutex.unlock(); 428 - return self.closed; 429 } 430 }; 431 }
··· 98 }; 99 100 // ============================================================================ 101 + // Memory Broker - uses growable queue for in-process messaging 102 // ============================================================================ 103 104 pub const MemoryBroker = struct { 105 const Self = @This(); 106 107 /// Topic subscription 108 const Subscription = struct { ··· 112 active: bool, 113 }; 114 115 + /// Topic with its queue and subscriptions 116 const Topic = struct { 117 + queue: Queue(StoredMessage), 118 subscriptions: std.ArrayListUnmanaged(Subscription), 119 worker_thread: ?Thread, 120 running: bool, ··· 145 self.alloc.free(sub.group); 146 } 147 entry.value_ptr.*.subscriptions.deinit(self.alloc); 148 + entry.value_ptr.*.queue.deinit(); 149 self.alloc.destroy(entry.value_ptr.*); 150 } 151 self.topics.deinit(self.alloc); ··· 162 163 const topic = try self.alloc.create(Topic); 164 topic.* = .{ 165 + .queue = Queue(StoredMessage).init(self.alloc), 166 .subscriptions = .{}, 167 .worker_thread = null, 168 .running = false, ··· 189 log.warn("broker", "message {s} truncated", .{msg.id[0..@min(msg.id_len, 36)]}); 190 } 191 192 + topic.queue.push(msg) catch { 193 + log.warn("broker", "failed to enqueue message on topic {s}", .{topic_name}); 194 + return error.QueueError; 195 + }; 196 } 197 198 pub fn subscribe(self: *Self, topic_name: []const u8, group: []const u8, handler: MessageHandler) !ConsumerHandle { ··· 257 topic.running = false; 258 topic.mutex.unlock(); 259 260 + topic.queue.close(); 261 262 if (topic.worker_thread) |t| { 263 t.join(); ··· 274 const should_run = topic.running; 275 topic.mutex.unlock(); 276 277 + if (!should_run and topic.queue.len() == 0) break; 278 279 + if (topic.queue.popTimeout(timeout_ns)) |msg| { 280 // create Message from StoredMessage with proper topic 281 const message = Message{ 282 .id = msg.id[0..msg.id_len], ··· 309 pub const RedisClient = redis.RedisClient; 310 311 // 
============================================================================ 312 + // Storage types for message queue 313 // ============================================================================ 314 315 /// Stored message data with fixed-size buffers (no allocation in hot path) ··· 330 } 331 332 // ============================================================================ 333 + // Growable Queue - unbounded, like Python's asyncio.Queue 334 // ============================================================================ 335 336 + pub fn Queue(comptime T: type) type { 337 return struct { 338 const Self = @This(); 339 340 + items: std.ArrayListUnmanaged(T), 341 alloc: Allocator, 342 + mutex: Thread.Mutex, 343 + not_empty: Thread.Condition, 344 + closed: bool, 345 346 + pub fn init(alloc: Allocator) Self { 347 return .{ 348 + .items = .{}, 349 .alloc = alloc, 350 + .mutex = .{}, 351 + .not_empty = .{}, 352 + .closed = false, 353 }; 354 } 355 356 pub fn deinit(self: *Self) void { 357 + self.items.deinit(self.alloc); 358 } 359 360 + /// Push item to queue (grows as needed) 361 + pub fn push(self: *Self, item: T) !void { 362 self.mutex.lock(); 363 defer self.mutex.unlock(); 364 365 + if (self.closed) return error.QueueClosed; 366 367 + try self.items.append(self.alloc, item); 368 self.not_empty.signal(); 369 } 370 371 + /// Pop with timeout (returns null on timeout or if closed with empty queue) 372 + pub fn popTimeout(self: *Self, timeout_ns: u64) ?T { 373 self.mutex.lock(); 374 defer self.mutex.unlock(); 375 376 + while (self.items.items.len == 0 and !self.closed) { 377 self.not_empty.timedWait(&self.mutex, timeout_ns) catch { 378 return null; 379 }; 380 } 381 382 + if (self.items.items.len == 0) return null; 383 384 + return self.items.orderedRemove(0); 385 } 386 387 pub fn len(self: *Self) usize { 388 self.mutex.lock(); 389 defer self.mutex.unlock(); 390 + return self.items.items.len; 391 } 392 393 pub fn close(self: *Self) void { ··· 395 defer 
self.mutex.unlock(); 396 self.closed = true; 397 self.not_empty.broadcast(); 398 } 399 }; 400 }