···163164 // clean up clients
165 {
166- for (self.clients.items, 0..) |_, i| {
167- var client = self.clients.items[i];
0000168 client.deinit();
169- if (builtin.mode == .Debug) {
170- // We only clean up clients in Debug mode so we can check for memory leaks
171- // without failing for this. We don't care about it in any other mode since we
172- // are exiting anyways and we want to do it fast. If we destroy, our readthread
173- // could panic so we don't do it unless we have to.
174- self.alloc.destroy(client);
175- }
176 }
177 self.clients.deinit();
178 }
···227 },
228 .tick => {
229 for (self.clients.items) |client| {
000000230 client.drainFifo(ctx);
231 }
232 try ctx.tick(8, self.widget());
/// Creates a new client from `cfg`, starts its read thread, and tracks it in
/// `self.clients`. On any failure the partially-constructed client is torn
/// down so nothing leaks.
pub fn connect(self: *App, cfg: irc.Client.Config) !void {
    const client = try self.alloc.create(irc.Client);
    // If init (or anything after) fails, free the heap allocation.
    errdefer self.alloc.destroy(client);
    client.* = try irc.Client.init(self.alloc, self, &self.write_queue, cfg);
    // If spawn/append fails, release resources acquired by init.
    errdefer client.deinit();
    // Reserve list capacity *before* spawning the read thread: a failing
    // append after spawn would leave a live thread on an untracked client.
    try self.clients.ensureUnusedCapacity(1);
    client.thread = try std.Thread.spawn(.{}, irc.Client.readLoop, .{client});
    self.clients.appendAssumeCapacity(client);
}
953
···163164 // clean up clients
165 {
166+ // Loop first to close connections. This will help us close faster by getting the
167+ // threads exited
168+ for (self.clients.items) |client| {
169+ client.close();
170+ }
171+ for (self.clients.items) |client| {
172 client.deinit();
173+ self.alloc.destroy(client);
000000174 }
175 self.clients.deinit();
176 }
···225 },
226 .tick => {
227 for (self.clients.items) |client| {
228+ if (client.status.load(.unordered) == .disconnected and
229+ client.retry_delay_s == 0)
230+ {
231+ ctx.redraw = true;
232+ try irc.Client.retryTickHandler(client, ctx, .tick);
233+ }
234 client.drainFifo(ctx);
235 }
236 try ctx.tick(8, self.widget());
/// Creates a new client from `cfg` and tracks it in `self.clients`. On any
/// failure the partially-constructed client is torn down so nothing leaks.
pub fn connect(self: *App, cfg: irc.Client.Config) !void {
    const client = try self.alloc.create(irc.Client);
    // If init (or the append) fails, free the heap allocation.
    errdefer self.alloc.destroy(client);
    client.* = try irc.Client.init(self.alloc, self, &self.write_queue, cfg);
    // If the append fails, release resources acquired by init.
    errdefer client.deinit();
    try self.clients.append(client);
}
956
···1-const std = @import("std");
2-3-const Condition = std.Thread.Condition;
4-const Mutex = std.Thread.Mutex;
/// BytePool is a thread-safe buffer. Use it by allocating a given number of bytes, which will block
/// until enough contiguous bytes are available. The returned Slice structure contains a reference to a slice within the
8-/// pool. This slice will always belong to the Slice until deinit is called.
9-///
10-/// This data structure is useful for receiving messages over-the-wire and sending to another thread
11-/// for processing, while providing some level of backpressure on the read side. For example, we
12-/// could be reading messages from the wire and sending into a queue for processing. We could read
13-/// 10 messages off the connection, but the queue is blocked doing an expensive operation. We are
14-/// still able to read until our BytePool is out of capacity.
15-///
16-/// For IRC, we use this because messages over the wire *could* be up to 4192 bytes, but commonly
17-/// are less than 100. Instead of a pool of buffers each 4192, we write messages of exact length
18-/// into this pool to more efficiently pack the memory
/// Returns a thread-safe pool type backed by a fixed buffer of `size` bytes.
/// `alloc` reserves contiguous byte ranges and blocks until space exists;
/// `Slice.deinit` returns the range and wakes blocked allocators.
pub fn BytePool(comptime size: usize) type {
    return struct {
        const Self = @This();

        /// A reservation of contiguous bytes within the pool. The bytes
        /// belong to this Slice until deinit is called.
        pub const Slice = struct {
            idx: usize,
            len: usize,
            pool: *Self,

            /// Frees resources associated with Buffer and wakes any threads
            /// blocked in `alloc`.
            pub fn deinit(self: Slice) void {
                self.pool.mutex.lock();
                defer self.pool.mutex.unlock();
                @memset(self.pool.free_list[self.idx .. self.idx + self.len], true);
                // Broadcast, not signal: with several waiters requesting
                // different sizes, signal() may wake only a waiter whose
                // request still does not fit, while a smaller request that
                // now fits sleeps forever (lost wakeup). Broadcast lets every
                // waiter re-check capacity.
                self.pool.buffer_deinited.broadcast();
            }

            /// Returns the actual slice of this buffer
            pub fn slice(self: Slice) []u8 {
                return self.pool.buffer[self.idx .. self.idx + self.len];
            }
        };

        buffer: [size]u8 = undefined,
        free_list: [size]bool = undefined,
        mutex: Mutex = .{},
        /// The index of the next potentially available byte
        next_idx: usize = 0,

        buffer_deinited: Condition = .{},

        /// Marks every byte free. Must be called before first use.
        pub fn init(self: *Self) void {
            @memset(&self.free_list, true);
        }

        /// Get a buffer of size n. Blocks until one is available.
        pub fn alloc(self: *Self, n: usize) Slice {
            // n == size is allowed: a single request may occupy the whole
            // pool (previously `n < size`, which made a whole-pool request
            // trip the assert even though it can be satisfied).
            std.debug.assert(n <= size);
            self.mutex.lock();
            defer self.mutex.unlock();
            while (true) {
                if (self.getBuffer(n)) |buf| return buf;
                // Condition.wait atomically releases the mutex while asleep
                // and re-acquires it before returning; loop to re-check.
                self.buffer_deinited.wait(&self.mutex);
            }
        }

        /// Scans the free list for n contiguous free bytes, starting at
        /// next_idx and wrapping to the front at most once. Returns null if
        /// no such run exists right now. Caller must hold the mutex.
        fn getBuffer(self: *Self, n: usize) ?Slice {
            var start: usize = self.next_idx;
            var did_wrap: bool = false;
            while (true) {
                // `>` rather than `>=`: a run ending exactly at buffer.len is
                // a valid slice. With `>=` the final bytes of the pool could
                // never be handed out, and a request exactly filling the
                // remaining space would block forever.
                if (start + n > self.buffer.len) {
                    if (did_wrap) return null;
                    did_wrap = true;
                    start = 0;
                }

                const next_free = std.mem.indexOfScalarPos(bool, &self.free_list, start, true) orelse {
                    if (did_wrap) return null;
                    did_wrap = true;
                    start = 0;
                    continue;
                };

                if (next_free + n > self.buffer.len) {
                    if (did_wrap) return null;
                    did_wrap = true;
                    start = 0;
                    continue;
                }

                // Candidate region; usable only if every byte in it is free.
                const candidate = self.free_list[next_free .. next_free + n];
                if (std.mem.indexOfScalar(bool, candidate, false)) |idx| {
                    // Hit an occupied byte inside the candidate: resume the
                    // scan just past it.
                    start = next_free + idx + 1;
                    continue;
                }
                // Claim the region in the free_list.
                @memset(candidate, false);
                // Update next_idx so the next scan starts after this run.
                self.next_idx = next_free + n;
                return .{
                    .idx = next_free,
                    .len = n,
                    .pool = self,
                };
            }
        }
    };
}