Copyright 2025 Tim Culverhouse

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the “Software”), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+167
README.md
···11+# Ourio

Ourio (pronounced "oreo", think "Ouroboros") is an asynchronous IO runtime
44+built heavily around the semantics of io_uring. The design is inspired by
55+[libxev](https://github.com/mitchellh/libxev), which is in turn inspired by
66+[TigerBeetle](https://github.com/tigerbeetle/tigerbeetle).

Ourio takes a slightly different approach: it is designed to encourage a
message-passing approach to asynchronous IO. Users of the library give each task
1010+a Context, which contains a pointer, a callback, *and a message*. The message is
1111+implemented as a u16, and generally you should use an enum for it. The idea is
1212+that you can minimize the number of callback functions required by tagging tasks
1313+with a small amount of semantic meaning in the `msg` field.
1414+1515+Ourio has io_uring and kqueue backends. Ourio supports the `msg_ring`
1616+capability of io_uring to pass a completion from one ring to another. This
1717+allows a multithreaded application to implement message passing using io_uring
1818+(or kqueue, if that's your flavor). Multithreaded applications should plan to
use one `Runtime` per thread. Submission onto the runtime is not thread safe;
any message passing must occur using `msg_ring` rather than directly submitting
a task to another runtime.
2222+2323+Ourio also includes a fully mockable IO runtime to make it easy to unit test
2424+your async code.
2525+2626+## Tasks
2727+2828+### Deadlines and Cancelation
2929+3030+Each IO operation creates a `Task`. When scheduling a task on the runtime, the
3131+caller receives a pointer to the `Task` at which point they may cancel it, or
3232+set a deadline:
3333+3434+```zig
3535+// Timers are always relative time
3636+const task = try rt.timer(.{.sec = 3}, .{.cb = onCompletion, .msg = 0});
3737+3838+// If the deadline expired, the task will be sent to the onCompletion callback
3939+// with a result of error.Canceled. Deadlines are always absolute time
4040+try task.setDeadline(rt, .{.sec = std.time.timestamp() + 3});
4141+4242+// Alternatively, we can hold on to the pointer for the task while it is with
// the runtime and cancel it. The Context we give to the cancel function lets
4444+// us know the result of the cancelation, but we will also receive a message
4545+// from the original task with error.Canceled. We can ignore the cancel result
4646+// by using the default context value
4747+try task.cancel(rt, .{});
4848+```
4949+5050+### Passing tasks between threads
5151+5252+Say we `accept` a connection in one thread, and want to send the file descriptor
5353+to another for handling.
5454+5555+```zig
5656+const target_task = try main_rt.getTask();
target_task.* = .{
5858+ .userdata = &foo,
5959+ .msg = @intFromEnum(Msg.some_message),
6060+ .cb = Worker.onCompletion,
6161+ .req = .{ .userfd = fd },
6262+};

// Send target_task from the main_rt thread to the thread_rt Runtime. The
// thread_rt Runtime will then process the task as a completion, ie
// Worker.onCompletion will be called with this task. That thread can then
// schedule a recv, a write, etc on the file descriptor it just received.
6868+_ = try main_rt.msgRing(thread_rt, target_task, .{});
6969+```
7070+7171+### Multiple Runtimes on the same thread
7272+7373+You can have multiple Runtimes in a single thread. One could be a priority
7474+Runtime, or handle specific types of tasks, etc. Poll any runtime from any other
7575+runtime.
7676+7777+```zig
7878+const fd = rt1.backend.pollableFd();
7979+_ = try rt2.poll(fd, .{
8080+ .cb = onCompletion,
8181+ .msg = @intFromEnum(Msg.rt1_has_completions)}
8282+ );
8383+```
8484+8585+## Example
8686+8787+An example implementation of an asynchronous writer to two file descriptors:
8888+8989+```zig
9090+const std = @import("std");
9191+const io = @import("ourio");
9292+const posix = std.posix;
9393+9494+pub const MultiWriter = struct {
9595+ fd1: posix.fd_t,
9696+ fd1_written: usize = 0,

    fd2: posix.fd_t,
9999+ fd2_written: usize = 0,

    buf: std.ArrayListUnmanaged(u8) = .empty,
102102+103103+ pub const Msg = enum { fd1, fd2 };
104104+105105+ pub fn init(fd1: posix.fd_t, fd2: posix.fd_t) MultiWriter {
106106+ return .{ .fd1 = fd1, .fd2 = fd2 };
107107+ }

    pub fn write(self: *MultiWriter, gpa: std.mem.Allocator, bytes: []const u8) !void {
110110+ try self.buf.appendSlice(gpa, bytes);
111111+ }
112112+113113+ pub fn flush(self: *MultiWriter, rt: *io.Runtime) !void {
114114+ if (self.fd1_written < self.buf.items.len) {
115115+ _ = try rt.write(self.fd1, self.buf.items[self.fd1_written..], .{
116116+ .ptr = self,
117117+ .msg = @intFromEnum(Msg.fd1),
118118+ .cb = MultiWriter.onCompletion,
119119+ });
120120+ }
121121+122122+ if (self.fd2_written < self.buf.items.len) {
            _ = try rt.write(self.fd2, self.buf.items[self.fd2_written..], .{
124124+ .ptr = self,
125125+ .msg = @intFromEnum(Msg.fd2),
126126+ .cb = MultiWriter.onCompletion,
127127+ });
128128+ }
129129+ }
130130+131131+ pub fn onCompletion(rt: *io.Runtime, task: io.Task) anyerror!void {
132132+ const self = task.userdataCast(MultiWriter);
133133+ const result = task.result.?;
134134+135135+ const n = try result.write;
136136+ switch (task.msgToEnum(MultiWriter.Msg)) {
137137+ .fd1 => self.fd1_written += n,
138138+ .fd2 => self.fd2_written += n,
139139+ }
140140+141141+ const len = self.buf.items.len;
142142+143143+ if (self.fd1_written < len or self.fd2_written < len)
144144+ return self.flush(rt);
145145+146146+ self.fd1_written = 0;
147147+ self.fd2_written = 0;
148148+ self.buf.clearRetainingCapacity();
149149+ }
150150+};
151151+152152+pub fn main() !void {
153153+ var gpa: std.heap.DebugAllocator(.{}) = .init;
154154+ var rt: io.Runtime = try .init(gpa.allocator(), 16);
155155+ defer rt.deinit();
156156+157157+ // Pretend I created some files
158158+ const fd1: posix.fd_t = 5;
159159+ const fd2: posix.fd_t = 6;
160160+161161+ var mw: MultiWriter = .init(fd1, fd2);
162162+ try mw.write(gpa.allocator(), "Hello, world!");
163163+ try mw.flush(&rt);
164164+165165+ try rt.run(.until_done);
166166+}
167167+```
···11+// This code is mostly a copy of the intrusive queue code from libxev. I've modified it to be a
22+// doubly linked list that also ensures a certain state is set on each node when put into the list
33+//
44+// MIT License
55+//
66+// Copyright (c) 2023 Mitchell Hashimoto
77+//
88+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
99+// associated documentation files (the "Software"), to deal in the Software without restriction,
1010+// including without limitation the rights to use, copy, modify, merge, publish, distribute,
1111+// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
1212+// furnished to do so, subject to the following conditions:
1313+//
1414+// The above copyright notice and this permission notice shall be included in all copies or
1515+// substantial portions of the Software.
1616+//
1717+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
1818+// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1919+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
2020+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
2121+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2222+2323+const std = @import("std");
2424+const assert = std.debug.assert;

/// An intrusive doubly-linked queue. The type T must have fields "next" and
/// "prev", both of type `?*T`, and a field "state" which is an enum containing
/// a value matching the passed-in `state` literal. `push` stamps that state
/// onto every node as it enters the queue.
pub fn Intrusive(comptime T: type, comptime state: @Type(.enum_literal)) type {
    return struct {
        const Self = @This();

        const set_state = state;

        /// Head is the front of the queue and tail is the back of the queue.
        head: ?*T = null,
        tail: ?*T = null,

        /// Enqueue a new element to the back of the queue. Asserts that the
        /// node is not currently linked into any queue.
        pub fn push(self: *Self, v: *T) void {
            assert(v.next == null);
            // Also require prev to be null: the empty-queue branch below never
            // writes v.prev, so a stale non-null prev would survive the push
            // and later corrupt an unrelated list when remove() follows it.
            assert(v.prev == null);
            v.state = set_state;

            if (self.tail) |tail| {
                // If we have elements in the queue, then we add a new tail.
                tail.next = v;
                v.prev = tail;
                self.tail = v;
            } else {
                // No elements in the queue we setup the initial state.
                self.head = v;
                self.tail = v;
            }
        }

        /// Dequeue the next element from the queue. Returns null if empty.
        pub fn pop(self: *Self) ?*T {
            // The next element is in "head".
            const next = self.head orelse return null;

            // If the head and tail are equal this is the last element
            // so we also set tail to null so we can now be empty.
            if (self.head == self.tail) self.tail = null;

            // Head is whatever is next (if we're the last element,
            // this will be null);
            self.head = next.next;
            if (self.head) |head| head.prev = null;

            // We set both link fields to null so that this element
            // can be inserted again.
            next.next = null;
            next.prev = null;
            return next;
        }

        /// Returns true if the queue is empty.
        pub fn empty(self: Self) bool {
            return self.head == null;
        }

        /// Removes the item from the queue and clears its link fields.
        /// Asserts that the queue contains the item (O(n) membership walk).
        pub fn remove(self: *Self, item: *T) void {
            assert(self.hasItem(item));
            if (item.prev) |prev| prev.next = item.next else self.head = item.next;

            if (item.next) |next| next.prev = item.prev else self.tail = item.prev;

            item.prev = null;
            item.next = null;
        }

        /// Returns true if item is linked into this queue. O(n).
        pub fn hasItem(self: Self, item: *T) bool {
            var maybe_node = self.head;
            while (maybe_node) |node| {
                if (node == item) return true;
                maybe_node = node.next;
            } else return false;
        }

        /// Returns the number of items currently in the queue. O(n).
        pub fn len(self: Self) usize {
            var count: usize = 0;
            var maybe_node = self.head;
            while (maybe_node) |node| {
                count += 1;
                maybe_node = node.next;
            }
            return count;
        }
    };
}