···163163164164 // clean up clients
165165 {
166166- for (self.clients.items, 0..) |_, i| {
167167- var client = self.clients.items[i];
166166+ // Loop first to close connections. This will help us close faster by getting the
167167+ // threads exited
168168+ for (self.clients.items) |client| {
169169+ client.close();
170170+ }
171171+ for (self.clients.items) |client| {
168172 client.deinit();
169169- if (builtin.mode == .Debug) {
170170- // We only clean up clients in Debug mode so we can check for memory leaks
171171- // without failing for this. We don't care about it in any other mode since we
172172- // are exiting anyways and we want to do it fast. If we destroy, our readthread
173173- // could panic so we don't do it unless we have to.
174174- self.alloc.destroy(client);
175175- }
173173+ self.alloc.destroy(client);
176174 }
177175 self.clients.deinit();
178176 }
···227225 },
228226 .tick => {
229227 for (self.clients.items) |client| {
228228+ if (client.status.load(.unordered) == .disconnected and
229229+ client.retry_delay_s == 0)
230230+ {
231231+ ctx.redraw = true;
232232+ try irc.Client.retryTickHandler(client, ctx, .tick);
233233+ }
230234 client.drainFifo(ctx);
231235 }
232236 try ctx.tick(8, self.widget());
/// Create, initialize, and register a new IRC client for the given config.
/// On success, ownership of the client is transferred to `self.clients`
/// (cleaned up in the app's shutdown path). On error, all partially
/// acquired resources are released and the error is propagated.
/// NOTE(review): the read-loop thread is no longer spawned here —
/// presumably started elsewhere (e.g. on first tick/retry); confirm.
pub fn connect(self: *App, cfg: irc.Client.Config) !void {
    const client = try self.alloc.create(irc.Client);
    // If init or append below fails, don't leak the heap allocation.
    errdefer self.alloc.destroy(client);
    client.* = try irc.Client.init(self.alloc, self, &self.write_queue, cfg);
    // If append fails, tear down the initialized client (runs before destroy).
    errdefer client.deinit();
    try self.clients.append(client);
}
953956
···11-const std = @import("std");
22-33-const Condition = std.Thread.Condition;
44-const Mutex = std.Thread.Mutex;
/// BytePool is a thread safe buffer. Use it by allocating a given number of bytes, which will block
/// until capacity is available. The returned Slice structure contains a reference to a slice within
/// the pool. This slice belongs to the Slice until its deinit is called.
///
/// This data structure is useful for receiving messages over-the-wire and sending to another thread
/// for processing, while providing some level of backpressure on the read side. For example, we
/// could be reading messages from the wire and sending into a queue for processing. We could read
/// 10 messages off the connection, but the queue is blocked doing an expensive operation. We are
/// still able to read until our BytePool is out of capacity.
///
/// For IRC, we use this because messages over the wire *could* be up to 4192 bytes, but commonly
/// are less than 100. Instead of a pool of buffers each 4192, we write messages of exact length
/// into this pool to more efficiently pack the memory
pub fn BytePool(comptime size: usize) type {
    return struct {
        const Self = @This();

        /// A checked-out region of the pool. Valid until deinit is called.
        pub const Slice = struct {
            /// Start offset within the pool's buffer.
            idx: usize,
            /// Number of bytes owned by this Slice.
            len: usize,
            /// Back-pointer to the owning pool.
            pool: *Self,

            /// Returns this Slice's bytes to the pool and wakes blocked allocators.
            pub fn deinit(self: Slice) void {
                self.pool.mutex.lock();
                defer self.pool.mutex.unlock();
                @memset(self.pool.free_list[self.idx .. self.idx + self.len], true);
                // Wake *all* waiters, not just one: with signal(), a woken waiter
                // whose request still doesn't fit would re-wait and consume the
                // wakeup, starving another waiter whose smaller request would fit.
                self.pool.buffer_deinited.broadcast();
            }

            /// Returns the actual slice of this buffer
            pub fn slice(self: Slice) []u8 {
                return self.pool.buffer[self.idx .. self.idx + self.len];
            }
        };

        buffer: [size]u8 = undefined,
        // free_list[i] == true means buffer[i] is available. Populated by init().
        free_list: [size]bool = undefined,
        mutex: Mutex = .{},
        /// The index of the next potentially available byte
        next_idx: usize = 0,

        // Signaled (broadcast) whenever a Slice is returned to the pool.
        buffer_deinited: Condition = .{},

        /// Marks the entire pool as free. Must be called before first use.
        pub fn init(self: *Self) void {
            @memset(&self.free_list, true);
        }

        /// Get a buffer of size n. Blocks until one is available.
        /// n may be up to and including the full pool size.
        pub fn alloc(self: *Self, n: usize) Slice {
            // n == size is representable: buffer[0..size] is a valid slice.
            std.debug.assert(n <= size);
            self.mutex.lock();
            defer self.mutex.unlock();
            while (true) {
                if (self.getBuffer(n)) |buf| return buf;
                self.buffer_deinited.wait(&self.mutex);
            }
        }

        /// Scans the free list for n contiguous free bytes, starting from
        /// next_idx and wrapping at most once. Returns null if nothing fits.
        /// Caller must hold the mutex.
        fn getBuffer(self: *Self, n: usize) ?Slice {
            var start: usize = self.next_idx;
            var did_wrap: bool = false;
            while (true) {
                // A region [start, start+n) fits while start + n <= len, so only
                // ">" overflows; ">=" here would make the last byte of the pool
                // unallocatable and could block forever despite free space.
                if (start + n > self.buffer.len) {
                    if (did_wrap) return null;
                    did_wrap = true;
                    start = 0;
                }

                const next_true = std.mem.indexOfScalarPos(bool, &self.free_list, start, true) orelse {
                    if (did_wrap) return null;
                    did_wrap = true;
                    start = 0;
                    continue;
                };

                if (next_true + n > self.buffer.len) {
                    if (did_wrap) return null;
                    did_wrap = true;
                    start = 0;
                    continue;
                }

                // Get our potential slice
                const maybe_slice = self.free_list[next_true .. next_true + n];
                // Check that the entire thing is true
                if (std.mem.indexOfScalar(bool, maybe_slice, false)) |idx| {
                    // We have a false, increment and look again
                    start = next_true + idx + 1;
                    continue;
                }
                // Set this slice in the free_list as not free
                @memset(maybe_slice, false);
                // Update next_idx
                self.next_idx = next_true + n;
                return .{
                    .idx = next_true,
                    .len = n,
                    .pool = self,
                };
            }
        }
    };
}