//! Kernel-mode integration tests for the submission/completion ring
//! (`ring_enter` processing of SQ entries and CQ results).
1use core::sync::atomic::Ordering;
2
3use crate::cap::cnode;
4use crate::cap::object::ObjectTag;
5use crate::cap::ops;
6use crate::cap::pool::POOL;
7use crate::mem::phys::BitmapFrameAllocator;
8use crate::proc::PROCESSES;
9use crate::ring::{
10 CompletionEntry, RING_OP_CAP_CREATE, RING_OP_NOP, RING_OP_NOTIFY_POLL, RING_OP_NOTIFY_SIGNAL,
11 RingHeader, RingIndex, SubmissionEntry, ring_cq_offset, ring_sq_offset,
12};
13use crate::types::Pid;
14
15fn setup_ring_page() -> (x86_64::PhysAddr, *mut u8) {
16 let allocator = BitmapFrameAllocator;
17 let frame = allocator.allocate().expect("alloc ring frame");
18 let phys = frame.phys_addr();
19 crate::mem::addr::zero_frame(phys);
20 let virt = crate::mem::addr::phys_to_virt(phys);
21 let _ = frame.inner();
22 (phys, virt.as_mut_ptr::<u8>())
23}
24
25fn teardown_ring_page(phys: x86_64::PhysAddr) {
26 BitmapFrameAllocator::free_frame_by_addr(phys);
27}
28
29unsafe fn write_sqe(ring_base: *mut u8, index: u32, sqe: SubmissionEntry) {
30 let sq_base = unsafe { ring_base.add(ring_sq_offset()) };
31 let entry_ptr =
32 unsafe { sq_base.add((index as usize) * core::mem::size_of::<SubmissionEntry>()) };
33 unsafe { core::ptr::write_volatile(entry_ptr as *mut SubmissionEntry, sqe) };
34}
35
36unsafe fn read_cqe(ring_base: *const u8, index: u32) -> CompletionEntry {
37 let cq_base = unsafe { ring_base.add(ring_cq_offset()) };
38 let entry_ptr =
39 unsafe { cq_base.add((index as usize) * core::mem::size_of::<CompletionEntry>()) };
40 unsafe { core::ptr::read_volatile(entry_ptr as *const CompletionEntry) }
41}
42
43unsafe fn set_sq_tail(ring_base: *mut u8, val: u32) {
44 let header = unsafe { &*(ring_base as *const RingHeader) };
45 header.sq_tail.store(val, Ordering::Release);
46}
47
48fn alloc_test_process() -> (
49 crate::types::Pid,
50 crate::sync::IrqMutexGuard<'static, crate::proc::ProcessManager, 0>,
51) {
52 let mut allocator = BitmapFrameAllocator;
53 let mut ptable = PROCESSES.lock();
54 let created = ptable.allocate(&mut allocator).expect("alloc process");
55 ptable.start(created).expect("start");
56 let pid = created.pid();
57 bootstrap_test_cnode(pid, &mut ptable);
58 (pid, ptable)
59}
60
61fn bootstrap_test_cnode(pid: Pid, ptable: &mut crate::proc::ProcessManager) {
62 let size_bits = crate::proc::ROOT_CNODE_SIZE_BITS;
63 let allocator = &crate::mem::phys::BitmapFrameAllocator;
64 let cnode_data = crate::cap::cnode::create_cnode(size_bits, allocator).expect("create cnode");
65 let frame_count = cnode_data.frame_count;
66 let (cnode_id, cnode_gen) = POOL.lock().allocate(crate::cap::object::ObjectData::CNode(cnode_data)).expect("alloc cnode");
67 let proc = ptable.get_mut(pid).expect("get proc");
68 proc.root_cnode = Some((cnode_id, cnode_gen));
69 proc.cnode_depth = size_bits;
70 proc.charge_frames(frame_count as u16).expect("charge frames");
71}
72
73crate::kernel_test!(
74 fn ring_nop_returns_zero() {
75 let (phys, ring_base) = setup_ring_page();
76 let (pid, ptable) = alloc_test_process();
77 drop(ptable);
78
79 unsafe {
80 write_sqe(
81 ring_base,
82 0,
83 SubmissionEntry {
84 opcode: RING_OP_NOP,
85 user_data: 42,
86 ..SubmissionEntry::zeroed()
87 },
88 );
89 set_sq_tail(ring_base, 1);
90 }
91
92 let result = crate::ring::process::ring_enter(phys, pid, 0);
93 assert!(result == Ok(1), "should process 1 entry");
94
95 let cqe = unsafe { read_cqe(ring_base, 0) };
96 assert!(cqe.result == 0, "NOP should return 0");
97 assert!(cqe.user_data == 42, "user_data should be preserved");
98
99 let mut ptable = PROCESSES.lock();
100 ptable.destroy(pid, &mut BitmapFrameAllocator);
101 drop(ptable);
102 teardown_ring_page(phys);
103 }
104);
105
crate::kernel_test!(
    fn ring_cap_create_via_ring() {
        let (phys, ring_base) = setup_ring_page();
        let (pid, ptable) = alloc_test_process();

        // Pre-create an endpoint cap at slot 10 before exercising the ring path.
        // NOTE(review): the ring op below targets a different slot (20), so this
        // presumably just validates the cnode is usable — confirm intent.
        let (cnode_id, cnode_gen, depth) = cnode::cnode_coords(pid, &ptable).expect("cnode coords");
        {
            // Lock ordering: the cap pool is acquired after the process-table guard.
            let mut pool = POOL.lock_after(&ptable);
            ops::create_via_cnode(&mut pool, cnode_id, cnode_gen, 10, depth, ObjectTag::Endpoint)
                .expect("pre-create endpoint cap");
        }

        drop(ptable);

        // Submit one CAP_CREATE SQE: create an Endpoint object into cap slot 20.
        unsafe {
            write_sqe(
                ring_base,
                0,
                SubmissionEntry {
                    opcode: RING_OP_CAP_CREATE,
                    arg0: ObjectTag::Endpoint as u8 as u64,
                    cap_slot: 20,
                    user_data: 100,
                    ..SubmissionEntry::zeroed()
                },
            );
            set_sq_tail(ring_base, 1);
        }

        let result = crate::ring::process::ring_enter(phys, pid, 0);
        assert!(result == Ok(1), "should process 1 entry");

        // Success is any non-negative result; negative values are errors.
        let cqe = unsafe { read_cqe(ring_base, 0) };
        assert!(
            cqe.result >= 0,
            "cap_create should succeed, got {}",
            cqe.result
        );
        assert!(cqe.user_data == 100, "user_data should be preserved");

        let mut ptable = PROCESSES.lock();
        ptable.destroy(pid, &mut BitmapFrameAllocator);
        drop(ptable);
        teardown_ring_page(phys);
    }
);
152
153crate::kernel_test!(
154 fn ring_invalid_opcode_returns_error() {
155 let (phys, ring_base) = setup_ring_page();
156 let (pid, ptable) = alloc_test_process();
157 drop(ptable);
158
159 unsafe {
160 write_sqe(
161 ring_base,
162 0,
163 SubmissionEntry {
164 opcode: 0xFF,
165 user_data: 7,
166 ..SubmissionEntry::zeroed()
167 },
168 );
169 set_sq_tail(ring_base, 1);
170 }
171
172 let result = crate::ring::process::ring_enter(phys, pid, 0);
173 assert!(result == Ok(1), "should still process the entry");
174
175 let cqe = unsafe { read_cqe(ring_base, 0) };
176 assert!(cqe.result < 0, "invalid opcode should return error");
177 assert!(cqe.user_data == 7, "user_data should be preserved");
178
179 let mut ptable = PROCESSES.lock();
180 ptable.destroy(pid, &mut BitmapFrameAllocator);
181 drop(ptable);
182 teardown_ring_page(phys);
183 }
184);
185
186crate::kernel_test!(
187 fn ring_empty_sq_returns_zero() {
188 let (phys, _ring_base) = setup_ring_page();
189 let (pid, ptable) = alloc_test_process();
190 drop(ptable);
191
192 let result = crate::ring::process::ring_enter(phys, pid, 0);
193 assert!(result == Ok(0), "empty SQ should return 0 completions");
194
195 let mut ptable = PROCESSES.lock();
196 ptable.destroy(pid, &mut BitmapFrameAllocator);
197 drop(ptable);
198 teardown_ring_page(phys);
199 }
200);
201
crate::kernel_test!(
    fn ring_cq_full_limits_processing() {
        let (phys, ring_base) = setup_ring_page();
        let (pid, ptable) = alloc_test_process();
        drop(ptable);

        // Make the shared CQ appear completely full: head at 0 and tail advanced
        // by the whole capacity leaves no room to post a completion.
        let header = unsafe { &*(ring_base as *const RingHeader) };
        header.cq_head.store(0, Ordering::Release);
        header
            .cq_tail
            .store(crate::ring::MAX_CQ_ENTRIES, Ordering::Release);

        {
            // Mirror the tail into the kernel-side shadow index so the kernel's
            // own bookkeeping also agrees that the CQ is full.
            let mut ptable = PROCESSES.lock();
            let proc = ptable.get_mut(pid).expect("get proc");
            proc.ring_cq_tail = RingIndex::new(crate::ring::MAX_CQ_ENTRIES);
        }

        // Submit one NOP that would need a CQ slot to complete.
        unsafe {
            write_sqe(
                ring_base,
                0,
                SubmissionEntry {
                    opcode: RING_OP_NOP,
                    user_data: 1,
                    ..SubmissionEntry::zeroed()
                },
            );
            set_sq_tail(ring_base, 1);
        }

        // With no CQ space, ring_enter must not consume any submissions.
        let result = crate::ring::process::ring_enter(phys, pid, 0);
        assert!(result == Ok(0), "should process 0 entries when CQ is full");

        let mut ptable = PROCESSES.lock();
        ptable.destroy(pid, &mut BitmapFrameAllocator);
        drop(ptable);
        teardown_ring_page(phys);
    }
);
242
243crate::kernel_test!(
244 fn ring_batch_cap_limits_processing() {
245 let (phys, ring_base) = setup_ring_page();
246 let (pid, ptable) = alloc_test_process();
247 drop(ptable);
248
249 (0..32u32).for_each(|i| unsafe {
250 write_sqe(
251 ring_base,
252 i,
253 SubmissionEntry {
254 opcode: RING_OP_NOP,
255 user_data: i,
256 ..SubmissionEntry::zeroed()
257 },
258 );
259 });
260 unsafe { set_sq_tail(ring_base, 32) };
261
262 let result = crate::ring::process::ring_enter(phys, pid, 0).expect("ring_enter");
263 assert!(
264 result <= 16,
265 "batch cap should limit to MAX_RING_BATCH (16), got {}",
266 result
267 );
268
269 let mut ptable = PROCESSES.lock();
270 ptable.destroy(pid, &mut BitmapFrameAllocator);
271 drop(ptable);
272 teardown_ring_page(phys);
273 }
274);
275
crate::kernel_test!(
    fn ring_notify_signal_and_poll_via_ring() {
        let (phys, ring_base) = setup_ring_page();
        let (pid, ptable) = alloc_test_process();

        // Install a Notification object in cap slot 5 of the process's root CNode.
        let (cnode_id, cnode_gen, depth) = cnode::cnode_coords(pid, &ptable).expect("cnode coords");
        {
            // Lock ordering: the cap pool is acquired after the process-table guard.
            let mut pool = POOL.lock_after(&ptable);
            ops::create_via_cnode(&mut pool, cnode_id, cnode_gen, 5, depth, ObjectTag::Notification)
                .expect("create notification");
        }
        drop(ptable);

        // SQE 0 signals bits 0xFF on the notification; SQE 1 polls them back.
        unsafe {
            write_sqe(
                ring_base,
                0,
                SubmissionEntry {
                    opcode: RING_OP_NOTIFY_SIGNAL,
                    cap_slot: 5,
                    arg0: 0xFF,
                    user_data: 200,
                    ..SubmissionEntry::zeroed()
                },
            );
            write_sqe(
                ring_base,
                1,
                SubmissionEntry {
                    opcode: RING_OP_NOTIFY_POLL,
                    cap_slot: 5,
                    user_data: 201,
                    ..SubmissionEntry::zeroed()
                },
            );
            set_sq_tail(ring_base, 2);
        }

        let result = crate::ring::process::ring_enter(phys, pid, 0);
        assert!(result == Ok(2), "should process 2 entries");

        let cqe_signal = unsafe { read_cqe(ring_base, 0) };
        assert!(cqe_signal.result == 0, "signal should succeed");
        assert!(cqe_signal.user_data == 200);

        // The poll completion carries the previously signaled bits in `extra`.
        let cqe_poll = unsafe { read_cqe(ring_base, 1) };
        assert!(cqe_poll.result == 0, "poll should succeed");
        assert!(
            cqe_poll.extra == 0xFF,
            "poll should return signaled bits, got {:#x}",
            cqe_poll.extra
        );
        assert!(cqe_poll.user_data == 201);

        let mut ptable = PROCESSES.lock();
        ptable.destroy(pid, &mut BitmapFrameAllocator);
        drop(ptable);
        teardown_ring_page(phys);
    }
);