//! Kernel tests for the submission/completion ring (SQ/CQ) machinery.
use core::sync::atomic::Ordering;

use crate::mem::phys::BitmapFrameAllocator;
use crate::proc::PROCESSES;
use crate::ring::{
    CompletionEntry, MAX_CQ_ENTRIES, MAX_SQ_ENTRIES, RING_OP_NOP, RingHeader, RingIndex,
    SubmissionEntry, ring_cq_offset, ring_sq_offset,
};

10fn setup_ring_page() -> (x86_64::PhysAddr, *mut u8) {
11 let allocator = BitmapFrameAllocator;
12 let frame = allocator.allocate().expect("alloc ring frame");
13 let phys = frame.phys_addr();
14 crate::mem::addr::zero_frame(phys);
15 let virt = crate::mem::addr::phys_to_virt(phys);
16 let _ = frame.inner();
17 (phys, virt.as_mut_ptr::<u8>())
18}
20fn teardown_ring_page(phys: x86_64::PhysAddr) {
21 BitmapFrameAllocator::free_frame_by_addr(phys);
22}
24unsafe fn write_sqe(ring_base: *mut u8, index: u32, sqe: SubmissionEntry) {
25 let sq_base = unsafe { ring_base.add(ring_sq_offset()) };
26 let entry_ptr =
27 unsafe { sq_base.add((index as usize) * core::mem::size_of::<SubmissionEntry>()) };
28 unsafe { core::ptr::write_volatile(entry_ptr as *mut SubmissionEntry, sqe) };
29}
31unsafe fn read_cqe(ring_base: *const u8, index: u32) -> CompletionEntry {
32 let cq_base = unsafe { ring_base.add(ring_cq_offset()) };
33 let entry_ptr =
34 unsafe { cq_base.add((index as usize) * core::mem::size_of::<CompletionEntry>()) };
35 unsafe { core::ptr::read_volatile(entry_ptr as *const CompletionEntry) }
36}
38unsafe fn set_sq_tail(ring_base: *mut u8, val: u32) {
39 let header = unsafe { &*(ring_base as *const RingHeader) };
40 header.sq_tail.store(val, Ordering::Release);
41}
43fn alloc_test_process() -> (
44 crate::types::Pid,
45 crate::sync::IrqMutexGuard<'static, crate::proc::ProcessManager, 0>,
46) {
47 let mut allocator = BitmapFrameAllocator;
48 let mut ptable = PROCESSES.lock();
49 let created = ptable.allocate(&mut allocator).expect("alloc process");
50 ptable.start(created).expect("start");
51 let pid = created.pid();
52 (pid, ptable)
53}
55crate::kernel_test!(
56 fn ring_sq_full_processes_all() {
57 let (phys, ring_base) = setup_ring_page();
58 let (pid, ptable) = alloc_test_process();
59 drop(ptable);
60
61 let count = MAX_SQ_ENTRIES.min(16);
62
63 (0..count).for_each(|i| unsafe {
64 write_sqe(
65 ring_base,
66 i,
67 SubmissionEntry {
68 opcode: RING_OP_NOP,
69 user_data: i,
70 ..SubmissionEntry::zeroed()
71 },
72 );
73 });
74 unsafe { set_sq_tail(ring_base, count) };
75
76 let result = crate::ring::process::ring_enter(phys, pid, 0).expect("ring_enter");
77 assert!(
78 result == count as i64,
79 "should process all {} entries, got {}",
80 count,
81 result
82 );
83
84 (0..count).for_each(|i| {
85 let cqe = unsafe { read_cqe(ring_base, i) };
86 assert!(cqe.result == 0, "NOP entry {} should return 0", i);
87 assert!(
88 cqe.user_data == i as u64,
89 "user_data for entry {} should be {}, got {}",
90 i,
91 i,
92 cqe.user_data
93 );
94 });
95
96 let mut ptable = PROCESSES.lock();
97 ptable.destroy(pid, &mut BitmapFrameAllocator);
98 drop(ptable);
99 teardown_ring_page(phys);
100 }
101);
103crate::kernel_test!(
104 fn ring_cq_wraparound() {
105 let (phys, ring_base) = setup_ring_page();
106 let (pid, ptable) = alloc_test_process();
107 drop(ptable);
108
109 let header = unsafe { &*(ring_base as *const RingHeader) };
110 let wrap_start = MAX_CQ_ENTRIES - 2;
111 header.cq_head.store(wrap_start, Ordering::Release);
112 header.cq_tail.store(wrap_start, Ordering::Release);
113 header.sq_head.store(0, Ordering::Release);
114
115 {
116 let mut ptable = PROCESSES.lock();
117 let proc = ptable.get_mut(pid).expect("get proc");
118 proc.ring_cq_tail = RingIndex::new(wrap_start);
119 }
120
121 (0..4u32).for_each(|i| unsafe {
122 write_sqe(
123 ring_base,
124 i,
125 SubmissionEntry {
126 opcode: RING_OP_NOP,
127 user_data: 1000 + i,
128 ..SubmissionEntry::zeroed()
129 },
130 );
131 });
132 unsafe { set_sq_tail(ring_base, 4) };
133
134 let result = crate::ring::process::ring_enter(phys, pid, 0).expect("ring_enter");
135 assert!(result == 4, "should process 4 entries, got {}", result);
136
137 (0..4u32).for_each(|i| {
138 let cq_idx = (wrap_start + i) % MAX_CQ_ENTRIES;
139 let cqe = unsafe { read_cqe(ring_base, cq_idx) };
140 assert!(
141 cqe.user_data == 1000 + i as u64,
142 "cqe at wrapped index {} should have user_data {}, got {}",
143 cq_idx,
144 1000 + i,
145 cqe.user_data
146 );
147 });
148
149 let mut ptable = PROCESSES.lock();
150 ptable.destroy(pid, &mut BitmapFrameAllocator);
151 drop(ptable);
152 teardown_ring_page(phys);
153 }
154);