//! comlink: an experimental IRC client.
//! (Source snapshot at commit 4e578c65b156cbda23252cc6753cbf96d6df2e24, 631 lines, 22 kB.)
const std = @import("std");
const builtin = @import("builtin");
const comlink = @import("comlink.zig");
const vaxis = @import("vaxis");
const zeit = @import("zeit");
const ziglua = @import("ziglua");
const Scrollbar = @import("Scrollbar.zig");
const main = @import("main.zig");
const format = @import("format.zig");

const irc = comlink.irc;
const lua = comlink.lua;
const mem = std.mem;
const vxfw = vaxis.vxfw;

const assert = std.debug.assert;

const Allocator = std.mem.Allocator;
const Base64Encoder = std.base64.standard.Encoder;
const Bind = comlink.Bind;
const Completer = comlink.Completer;
const Event = comlink.Event;
const Lua = ziglua.Lua;
const TextInput = vaxis.widgets.TextInput;
const WriteRequest = comlink.WriteRequest;

const log = std.log.scoped(.app);

/// Transient UI state that is not tied to any single client or channel.
const State = struct {
    /// Buffer-list sidebar state.
    buffers: struct {
        count: usize = 0,
        /// Width (in cells) of the buffer-list pane; used as the SplitView width.
        width: u16 = 16,
    } = .{},
    /// Bracketed-paste tracking.
    paste: struct {
        /// True while a bracketed paste is in progress.
        pasting: bool = false,
        /// True if the pasted content contained a newline.
        has_newline: bool = false,

        /// Whether to prompt the user about a multi-line paste: only after the
        /// paste has finished and only if it contained a newline.
        fn showDialog(self: @This()) bool {
            return !self.pasting and self.has_newline;
        }
    } = .{},
};

/// Top-level application: owns all IRC clients, the Lua state, the write
/// thread, and the root vxfw widget tree.
pub const App = struct {
    /// Set when the user issued an explicit /join (vs. an automatic one).
    explicit_join: bool,
    /// General-purpose allocator used for all long-lived allocations.
    alloc: std.mem.Allocator,
    /// System certificate bundle
    bundle: std.crypto.Certificate.Bundle,
    /// List of all configured clients
    clients: std.ArrayList(*irc.Client),
    /// if we have already called deinit
    deinited: bool,
    /// Process environment
    env: std.process.EnvMap,
    /// Local timezone
    tz: zeit.TimeZone,

    /// Transient UI state (sidebar width, paste tracking).
    state: State,

    /// Tab-completion state; null when no completion is active.
    completer: ?Completer,

    /// Active key bindings (key -> command).
    binds: std.ArrayList(Bind),

    /// Accumulates pasted text during a bracketed paste.
    paste_buffer: std.ArrayList(u8),

    /// Lua interpreter state; initialized in init() after self.* is assigned.
    lua: *Lua,

    /// Queue of outbound messages consumed by write_thread (see writeLoop).
    write_queue: comlink.WriteQueue,
    write_thread: std.Thread,

    /// Root layout: buffer list on the left, selected buffer content on the right.
    view: vxfw.SplitView,
    /// Sidebar listing clients and their channels (built by bufferBuilderFn).
    buffer_list: vxfw.ListView,
    /// Shared unicode data owned by the vaxis runtime.
    unicode: *const vaxis.Unicode,

    /// Backing storage for the terminal title string.
    title_buf: [128]u8,

    // Only valid during an event handler
    ctx: ?*vxfw.EventContext,
    /// Height of the last drawn frame, captured in typeErasedDrawFn.
    last_height: u16,

    /// Placeholder right-hand side shown when no buffer is selected.
    const default_rhs: vxfw.Text = .{ .text = "TODO: update this text" };
    /// initialize vaxis, lua state
    pub fn init(self: *App, gpa: std.mem.Allocator, unicode: *const vaxis.Unicode) !void {
        // NOTE(review): the struct literal below reads self.state.buffers.width and
        // calls self.buffer_list.widget() while self.* is still being constructed —
        // the caller's App is typically undefined at this point, so .view.width may
        // be read from uninitialized memory. The widget() pointer itself is stable,
        // but the width value looks suspect — verify against the vxfw runtime.
        self.* = .{
            .alloc = gpa,
            .state = .{},
            .clients = std.ArrayList(*irc.Client).init(gpa),
            .env = try std.process.getEnvMap(gpa),
            .binds = try std.ArrayList(Bind).initCapacity(gpa, 16),
            .paste_buffer = std.ArrayList(u8).init(gpa),
            .tz = try zeit.local(gpa, null),
            .lua = undefined,
            .write_queue = .{},
            .write_thread = undefined,
            .view = .{
                .width = self.state.buffers.width,
                .lhs = self.buffer_list.widget(),
                .rhs = default_rhs.widget(),
            },
            .explicit_join = false,
            .bundle = .{},
            .deinited = false,
            .completer = null,
            .buffer_list = .{
                .children = .{
                    .builder = .{
                        .userdata = self,
                        .buildFn = App.bufferBuilderFn,
                    },
                },
                .draw_cursor = false,
            },
            .unicode = unicode,
            .title_buf = undefined,
            .ctx = null,
            .last_height = 0,
        };

        // Lua and the write thread need a fully-assigned self, so they are set up
        // after the bulk assignment above.
        self.lua = try Lua.init(&self.alloc);
        self.write_thread = try std.Thread.spawn(.{}, writeLoop, .{ self.alloc, &self.write_queue });

        try lua.init(self);

        // Default key bindings; Lua config may append more via self.binds.
        try self.binds.append(.{
            .key = .{ .codepoint = 'c', .mods = .{ .ctrl = true } },
            .command = .quit,
        });
        try self.binds.append(.{
            .key = .{ .codepoint = vaxis.Key.up, .mods = .{ .alt = true } },
            .command = .@"prev-channel",
        });
        try self.binds.append(.{
            .key = .{ .codepoint = vaxis.Key.down, .mods = .{ .alt = true } },
            .command = .@"next-channel",
        });
        try self.binds.append(.{
            .key = .{ .codepoint = 'l', .mods = .{ .ctrl = true } },
            .command = .redraw,
        });

        // Get our system tls certs
        try self.bundle.rescan(gpa);
    }

    /// close the application. This closes the TUI, disconnects clients, and cleans
    /// up all resources
    pub fn deinit(self: *App) void {
        // Idempotent: safe to call more than once.
        if (self.deinited) return;
        self.deinited = true;
        // Push a join command to the write thread. This is the sentinel that tells
        // writeLoop to drain its queue and return, so write_thread.join() below
        // can complete.
        self.write_queue.push(.join);

        // clean up clients
        {
            // Loop first to close connections. This will help us close faster by getting the
            // threads exited
            for (self.clients.items) |client| {
                client.close();
            }
            for (self.clients.items) |client| {
                client.deinit();
                self.alloc.destroy(client);
            }
            self.clients.deinit();
        }

        self.bundle.deinit(self.alloc);

        if (self.completer) |*completer| completer.deinit();
        self.binds.deinit();
        self.paste_buffer.deinit();
        self.tz.deinit();

        // Join the write thread
        self.write_thread.join();
        self.env.deinit();
        self.lua.deinit();
    }

    /// Type-erased widget handle for the root application widget.
    pub fn widget(self: *App) vxfw.Widget {
        return .{
            .userdata = self,
            .captureHandler = App.typeErasedCaptureHandler,
            .eventHandler = App.typeErasedEventHandler,
            .drawFn = App.typeErasedDrawFn,
        };
    }

    /// Capture-phase handler: runs before children see the event. Handles global
    /// key bindings (quit, channel navigation, redraw, Lua functions).
    fn typeErasedCaptureHandler(ptr: *anyopaque, ctx: *vxfw.EventContext, event: vxfw.Event) anyerror!void {
        const self: *App = @ptrCast(@alignCast(ptr));
        // Rewrite the ctx pointer every frame. We don't actually need to do this with the current
        // vxfw runtime, because the context pointer is always valid. But for safe keeping, we will
        // do it this way.
        //
        // In general, this is bad practice. But we need to be able to access this from lua
        // callbacks
        self.ctx = ctx;
        switch (event) {
            .key_press => |key| {
                // Hard-coded ctrl+c quit, independent of the binds list.
                if (key.matches('c', .{ .ctrl = true })) {
                    ctx.quit = true;
                }
                for (self.binds.items) |bind| {
                    if (key.matches(bind.key.codepoint, bind.key.mods)) {
                        switch (bind.command) {
                            .quit => ctx.quit = true,
                            .@"next-channel" => self.nextChannel(),
                            .@"prev-channel" => self.prevChannel(),
                            .redraw => try ctx.queueRefresh(),
                            .lua_function => |ref| try lua.execFn(self.lua, ref),
                            else => {},
                        }
                        // First matching bind wins; stop propagation and redraw.
                        return ctx.consumeAndRedraw();
                    }
                }
            },
            else => {},
        }
    }

    /// Bubble-phase handler: sets the title on init and drives an 8 ms tick loop
    /// that services client reconnects and drains incoming message FIFOs.
    fn typeErasedEventHandler(ptr: *anyopaque, ctx: *vxfw.EventContext, event: vxfw.Event) anyerror!void {
        const self: *App = @ptrCast(@alignCast(ptr));
        self.ctx = ctx;
        switch (event) {
            .init => {
                const title = try std.fmt.bufPrint(&self.title_buf, "comlink", .{});
                try ctx.setTitle(title);
                try ctx.tick(8, self.widget());
            },
            .tick => {
                for (self.clients.items) |client| {
                    // Kick off a reconnect for any client that has disconnected and
                    // has no pending retry backoff.
                    if (client.status.load(.unordered) == .disconnected and
                        client.retry_delay_s == 0)
                    {
                        ctx.redraw = true;
                        try irc.Client.retryTickHandler(client, ctx, .tick);
                    }
                    client.drainFifo(ctx);
                }
                // Re-arm the tick so this handler keeps firing.
                try ctx.tick(8, self.widget());
            },
            else => {},
        }
    }

    /// Draw the root UI: points the split view's right-hand side at the currently
    /// selected buffer (or the placeholder) and draws the split.
    fn typeErasedDrawFn(ptr: *anyopaque, ctx: vxfw.DrawContext) Allocator.Error!vxfw.Surface {
        const self: *App = @ptrCast(@alignCast(ptr));
        const max = ctx.max.size();
        self.last_height = max.height;
        if (self.selectedBuffer()) |buffer| {
            switch (buffer) {
                .client => |client| self.view.rhs = client.view(),
                .channel => |channel| self.view.rhs = channel.view.widget(),
            }
        } else self.view.rhs = default_rhs.widget();

        var children = std.ArrayList(vxfw.SubSurface).init(ctx.arena);

        // UI is a tree of splits
        // │         │                │         │
        // │         │                │         │
        // │ buffers │ buffer content │ members │
        // │         │                │         │
        // │         │                │         │
        // │         │                │         │
        // │         │                │         │

        const sub: vxfw.SubSurface = .{
            .origin = .{ .col = 0, .row = 0 },
            .surface = try self.view.widget().draw(ctx),
        };
        try children.append(sub);

        return .{
            .size = ctx.max.size(),
            .widget = self.widget(),
            .buffer = &.{},
            .children = children.items,
        };
    }

    /// ListView builder: flattens clients and their channels into a single list.
    /// Index order is client, then that client's channels, then the next client.
    /// Returns null past the end, which tells the ListView the list is done.
    fn bufferBuilderFn(ptr: *const anyopaque, idx: usize, cursor: usize) ?vxfw.Widget {
        const self: *const App = @ptrCast(@alignCast(ptr));
        var i: usize = 0;
        for (self.clients.items) |client| {
            if (i == idx) return client.nameWidget(i == cursor);
            i += 1;
            for (client.channels.items) |channel| {
                if (i == idx) return channel.nameWidget(i == cursor);
                i += 1;
            }
        }
        return null;
    }

    /// Placeholder widget for the center (content) pane.
    fn contentWidget(self: *App) vxfw.Widget {
        return .{
            .userdata = self,
            .captureHandler = null,
            .eventHandler = null,
            .drawFn = App.typeErasedContentDrawFn,
        };
    }

    fn typeErasedContentDrawFn(ptr: *anyopaque, ctx: vxfw.DrawContext) Allocator.Error!vxfw.Surface {
        _ = ptr;
        const text: vxfw.Text = .{ .text = "content" };
        return text.draw(ctx);
    }

    /// Placeholder widget for the members pane.
    fn memberWidget(self: *App) vxfw.Widget {
        return .{
            .userdata = self,
            .captureHandler = null,
            .eventHandler = null,
            .drawFn = App.typeErasedMembersDrawFn,
        };
    }

    fn typeErasedMembersDrawFn(ptr: *anyopaque, ctx: vxfw.DrawContext) Allocator.Error!vxfw.Surface {
        _ = ptr;
        const text: vxfw.Text = .{ .text = "members" };
        return text.draw(ctx);
    }

    /// Create a new client from cfg and add it to the client list. The App owns
    /// the client and destroys it in deinit.
    pub fn connect(self: *App, cfg: irc.Client.Config) !void {
        const client = try self.alloc.create(irc.Client);
        client.* = try irc.Client.init(self.alloc, self, &self.write_queue, cfg);
        try self.clients.append(client);
    }

    pub fn nextChannel(self: *App) void {
        // When leaving a channel we mark it as read, so we make sure that's done
        // before we change to the new channel.
        self.markSelectedChannelRead();
        if (self.ctx) |ctx| {
            self.buffer_list.nextItem(ctx);
            if (self.selectedBuffer()) |buffer| {
                switch (buffer) {
                    .client => {
                        // Client buffers have no text field; focus the app itself.
                        ctx.requestFocus(self.widget()) catch {},
                    },
                    .channel => |channel| {
                        ctx.requestFocus(channel.text_field.widget()) catch {},
                    },
                }
            }
        }
    }

    pub fn prevChannel(self: *App) void {
        // When leaving a channel we mark it as read, so we make sure that's done
        // before we change to the new channel.
        self.markSelectedChannelRead();
        if (self.ctx) |ctx| {
            self.buffer_list.prevItem(ctx);
            if (self.selectedBuffer()) |buffer| {
                switch (buffer) {
                    .client => {
                        ctx.requestFocus(self.widget()) catch {},
                    },
                    .channel => |channel| {
                        ctx.requestFocus(channel.text_field.widget()) catch {},
                    },
                }
            }
        }
    }

    /// Select the channel named `name` belonging to client `cl`, if it exists.
    pub fn selectChannelName(self: *App, cl: *irc.Client, name: []const u8) void {
        // NOTE(review): the counter `i` is incremented but never read here —
        // it looks like leftover from an index-based selection; candidate for removal.
        var i: usize = 0;
        for (self.clients.items) |client| {
            i += 1;
            for (client.channels.items) |channel| {
                if (cl == client) {
                    if (std.mem.eql(u8, name, channel.name)) {
                        self.selectBuffer(.{ .channel = channel });
                    }
                }
                i += 1;
            }
        }
    }

    /// handle a command
    ///
    /// Parses `cmd` (with or without a leading '/'), dispatching first to builtin
    /// commands, then to Lua user commands. Returns error.UnknownCommand if
    /// neither matches, error.InvalidCommand on malformed arguments.
    /// NOTE(review): assumes cmd is non-empty — cmd[0] below would panic on "".
    pub fn handleCommand(self: *App, buffer: irc.Buffer, cmd: []const u8) !void {
        const lua_state = self.lua;
        const command: comlink.Command = blk: {
            // Skip a leading '/' if present; the command name runs to the first space.
            const start: u1 = if (cmd[0] == '/') 1 else 0;
            const end = mem.indexOfScalar(u8, cmd, ' ') orelse cmd.len;
            if (comlink.Command.fromString(cmd[start..end])) |internal|
                break :blk internal;
            if (comlink.Command.user_commands.get(cmd[start..end])) |ref| {
                const str = if (end == cmd.len) "" else std.mem.trim(u8, cmd[end..], " ");
                return lua.execUserCommand(lua_state, str, ref);
            }
            return error.UnknownCommand;
        };
        // Scratch buffer for formatting the outbound IRC line.
        var buf: [1024]u8 = undefined;
        const client: *irc.Client = switch (buffer) {
            .client => |client| client,
            .channel => |channel| channel.client,
        };
        // Null when the command was issued from a client buffer; channel-scoped
        // commands below reject that case with InvalidCommand.
        const channel: ?*irc.Channel = switch (buffer) {
            .client => null,
            .channel => |channel| channel,
        };
        switch (command) {
            // Send the raw text after the command verbatim to the server.
            .quote => {
                const start = mem.indexOfScalar(u8, cmd, ' ') orelse return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(
                    &buf,
                    "{s}\r\n",
                    .{cmd[start + 1 ..]},
                );
                return client.queueWrite(msg);
            },
            .join => {
                const start = std.mem.indexOfScalar(u8, cmd, ' ') orelse return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(
                    &buf,
                    "JOIN {s}\r\n",
                    .{
                        cmd[start + 1 ..],
                    },
                );
                // Ensure buffer exists
                self.explicit_join = true;
                return client.queueWrite(msg);
            },
            // CTCP ACTION ("/me ..."): requires a channel buffer.
            .me => {
                if (channel == null) return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(
                    &buf,
                    "PRIVMSG {s} :\x01ACTION {s}\x01\r\n",
                    .{
                        channel.?.name,
                        cmd[4..],
                    },
                );
                return client.queueWrite(msg);
            },
            .msg => {
                //syntax: /msg <nick> <msg>
                const s = std.mem.indexOfScalar(u8, cmd, ' ') orelse return error.InvalidCommand;
                const e = std.mem.indexOfScalarPos(u8, cmd, s + 1, ' ') orelse return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(
                    &buf,
                    "PRIVMSG {s} :{s}\r\n",
                    .{
                        cmd[s + 1 .. e],
                        cmd[e + 1 ..],
                    },
                );
                return client.queueWrite(msg);
            },
            // Open (or focus) a query buffer with a nick; optionally send a message.
            .query => {
                const s = std.mem.indexOfScalar(u8, cmd, ' ') orelse return error.InvalidCommand;
                const e = std.mem.indexOfScalarPos(u8, cmd, s + 1, ' ') orelse cmd.len;
                // Queries are nick targets only; '#' would be a channel.
                // NOTE(review): cmd[s + 1] can read past the end if cmd ends with a
                // trailing space — verify upstream input is trimmed.
                if (cmd[s + 1] == '#') return error.InvalidCommand;

                const ch = try client.getOrCreateChannel(cmd[s + 1 .. e]);
                try client.requestHistory(.after, ch);
                self.selectChannelName(client, ch.name);
                //handle sending the message
                if (cmd.len - e > 1) {
                    const msg = try std.fmt.bufPrint(
                        &buf,
                        "PRIVMSG {s} :{s}\r\n",
                        .{
                            cmd[s + 1 .. e],
                            cmd[e + 1 ..],
                        },
                    );
                    return client.queueWrite(msg);
                }
            },
            .names => {
                if (channel == null) return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(&buf, "NAMES {s}\r\n", .{channel.?.name});
                return client.queueWrite(msg);
            },
            .@"next-channel" => self.nextChannel(),
            .@"prev-channel" => self.prevChannel(),
            .quit => {
                if (self.ctx) |ctx| ctx.quit = true;
            },
            .who => {
                if (channel == null) return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(
                    &buf,
                    "WHO {s}\r\n",
                    .{
                        channel.?.name,
                    },
                );
                return client.queueWrite(msg);
            },
            .part, .close => {
                if (channel == null) return error.InvalidCommand;
                var it = std.mem.tokenizeScalar(u8, cmd, ' ');

                // Skip command
                _ = it.next();
                // Default to the current channel when no target is given.
                const target = it.next() orelse channel.?.name;

                if (target[0] != '#') {
                    // Query buffers are purely local: remove and free them here
                    // instead of sending PART to the server.
                    for (client.channels.items, 0..) |search, i| {
                        if (!mem.eql(u8, search.name, target)) continue;
                        // Move selection off the buffer before destroying it.
                        client.app.prevChannel();
                        var chan = client.channels.orderedRemove(i);
                        chan.deinit(self.alloc);
                        self.alloc.destroy(chan);
                        break;
                    }
                } else {
                    const msg = try std.fmt.bufPrint(
                        &buf,
                        "PART {s}\r\n",
                        .{
                            target,
                        },
                    );
                    return client.queueWrite(msg);
                }
            },
            .redraw => {},
            // .redraw => self.vx.queueRefresh(),
            .version => {
                if (channel == null) return error.InvalidCommand;
                const msg = try std.fmt.bufPrint(
                    &buf,
                    "NOTICE {s} :\x01VERSION comlink {s}\x01\r\n",
                    .{
                        channel.?.name,
                        main.version,
                    },
                );
                return client.queueWrite(msg);
            },
            .lua_function => {}, // we don't handle these from the text-input
        }
    }

    /// Map the buffer_list cursor back to the client or channel it points at.
    /// Must mirror the flattening order of bufferBuilderFn.
    pub fn selectedBuffer(self: *App) ?irc.Buffer {
        var i: usize = 0;
        for (self.clients.items) |client| {
            if (i == self.buffer_list.cursor) return .{ .client = client };
            i += 1;
            for (client.channels.items) |channel| {
                if (i == self.buffer_list.cursor) return .{ .channel = channel };
                i += 1;
            }
        }
        return null;
    }

    /// Move the buffer_list cursor to `buffer` and focus the appropriate widget.
    /// No-op if the buffer is not found.
    pub fn selectBuffer(self: *App, buffer: irc.Buffer) void {
        // Leaving the current channel: mark it read first.
        self.markSelectedChannelRead();
        var i: u32 = 0;
        switch (buffer) {
            .client => |target| {
                for (self.clients.items) |client| {
                    if (client == target) {
                        if (self.ctx) |ctx| {
                            ctx.requestFocus(self.widget()) catch {};
                        }
                        self.buffer_list.cursor = i;
                        self.buffer_list.ensureScroll();
                        return;
                    }
                    // Skip over this client's entry and all of its channels.
                    i += 1;
                    for (client.channels.items) |_| i += 1;
                }
            },
            .channel => |target| {
                for (self.clients.items) |client| {
                    // Skip the client's own list entry.
                    i += 1;
                    for (client.channels.items) |channel| {
                        if (channel == target) {
                            self.buffer_list.cursor = i;
                            self.buffer_list.ensureScroll();
                            if (target.messageViewIsAtBottom()) target.has_unread = false;
                            if (self.ctx) |ctx| {
                                ctx.requestFocus(channel.text_field.widget()) catch {};
                            }
                            return;
                        }
                        i += 1;
                    }
                }
            },
        }
    }

    /// Mark the currently selected channel as read, but only when its message
    /// view is scrolled to the bottom (i.e. the user has actually seen it all).
    pub fn markSelectedChannelRead(self: *App) void {
        const buffer = self.selectedBuffer() orelse return;

        switch (buffer) {
            .channel => |channel| {
                // markRead may fail (network write); best-effort, so swallow it.
                if (channel.messageViewIsAtBottom()) channel.markRead() catch return;
            },
            else => {},
        }
    }
};

/// this loop is run in a separate thread and handles writes to all clients.
/// Message content is deallocated when the write request is completed
///
/// A `.join` request is the shutdown sentinel: the loop drains any remaining
/// queued writes (freeing their payloads without sending) and returns.
/// NOTE(review): if client.write fails, the error propagates out of the thread
/// and w.msg is not freed — a leak on the error path; verify intended.
fn writeLoop(alloc: std.mem.Allocator, queue: *comlink.WriteQueue) !void {
    log.debug("starting write thread", .{});
    while (true) {
        const req = queue.pop();
        switch (req) {
            .write => |w| {
                try w.client.write(w.msg);
                alloc.free(w.msg);
            },
            .join => {
                while (queue.tryPop()) |r| {
                    switch (r) {
                        .write => |w| alloc.free(w.msg),
                        else => {},
                    }
                }
                return;
            },
        }
    }
}