//! Console ring buffer backed by physically contiguous frames.
1use core::sync::atomic::{AtomicU32, Ordering};
2
3use x86_64::PhysAddr;
4
5use crate::mem::addr;
6use crate::mem::phys::BitmapFrameAllocator;
7
/// Maximum number of physical frames the console ring may span.
const MAX_RING_FRAMES: usize = 16;
/// Size of one physical frame/page in bytes.
const PAGE_SIZE: usize = 4096;
/// Bytes reserved at the start of the ring for the header
/// (matches `RingHeader`: four u32 fields = 16 bytes).
const HEADER_SIZE: usize = 16;
11
/// Control block stored in the first `HEADER_SIZE` bytes of the ring.
///
/// `#[repr(C)]` pins the field layout: the header is accessed through raw
/// pointers and its physical frames are exposed via `phys_addr`, so the
/// layout is presumably shared with a reader mapped elsewhere — do not
/// reorder fields. TODO(review): confirm the external consumer's layout.
#[repr(C)]
struct RingHeader {
    // Monotonic write counter; byte slot is `write_head % capacity`
    // (see `write_bytes`).
    write_head: AtomicU32,
    // Read-side counter; not advanced in this file — presumably owned by
    // the consumer of the ring.
    read_tail: AtomicU32,
    // Usable payload bytes following the header
    // (frames * PAGE_SIZE - HEADER_SIZE, set in `init`).
    capacity: u32,
    // Padding to keep the header exactly 16 bytes.
    _reserved: u32,
}
19
/// Kernel-side bookkeeping for the initialized console ring.
#[allow(dead_code)]
struct ConsoleRingState {
    // Header at the base of the ring's virtual mapping.
    header: *mut RingHeader,
    // First payload byte, `HEADER_SIZE` bytes past `header`.
    data: *mut u8,
    // Cached copy of `RingHeader::capacity`.
    capacity: u32,
    // Physical address of each backing frame; only the first `frame_count`
    // entries are meaningful, the rest stay 0.
    phys_addrs: [u64; MAX_RING_FRAMES],
    // Number of frames actually allocated (1..=MAX_RING_FRAMES).
    frame_count: u16,
}
28
// SAFETY: the raw pointers target frames that are never freed after `init`,
// so they remain valid for the kernel's lifetime. Cross-thread sharing is
// sound only if `RING_STATE` is written before other threads/cores touch it.
// TODO(review): confirm `init` runs while execution is still single-threaded.
unsafe impl Send for ConsoleRingState {}
unsafe impl Sync for ConsoleRingState {}

// Global ring state; `None` until `init` runs. Read via `&raw const` in the
// accessor functions below.
static mut RING_STATE: Option<ConsoleRingState> = None;
33
34pub fn init(frame_count: u16) {
35 assert!(
36 (frame_count as usize) <= MAX_RING_FRAMES && frame_count > 0,
37 "console ring: frame_count out of range"
38 );
39
40 let allocator = BitmapFrameAllocator;
41 let phys_base = allocator
42 .allocate_contiguous(frame_count as usize)
43 .expect("console ring: failed to allocate contiguous frames");
44
45 let mut phys_addrs = [0u64; MAX_RING_FRAMES];
46 (0..frame_count as usize).fold((), |(), i| {
47 let frame_phys = PhysAddr::new(phys_base.as_u64() + (i as u64) * PAGE_SIZE as u64);
48 addr::zero_frame(frame_phys);
49 crate::mem::refcount::increment(frame_phys).expect("console ring: refcount init failed");
50 phys_addrs[i] = frame_phys.as_u64();
51 });
52
53 let virt_base = addr::phys_to_virt(phys_base);
54 let header = virt_base.as_mut_ptr::<RingHeader>();
55 let data = unsafe { (header as *mut u8).add(HEADER_SIZE) };
56 let capacity = (frame_count as u32) * (PAGE_SIZE as u32) - (HEADER_SIZE as u32);
57
58 unsafe {
59 (*header).write_head = AtomicU32::new(0);
60 (*header).read_tail = AtomicU32::new(0);
61 (*header).capacity = capacity;
62 (*header)._reserved = 0;
63
64 RING_STATE = Some(ConsoleRingState {
65 header,
66 data,
67 capacity,
68 phys_addrs,
69 frame_count,
70 });
71 }
72}
73
74#[allow(clippy::deref_addrof)]
75pub fn write_bytes(bytes: &[u8]) {
76 let state = unsafe { &*(&raw const RING_STATE) };
77 let state = match state.as_ref() {
78 Some(s) => s,
79 None => return,
80 };
81
82 let header = unsafe { &*state.header };
83 let head = header.write_head.load(Ordering::Relaxed);
84 let cap = state.capacity;
85
86 bytes
87 .iter()
88 .enumerate()
89 .fold(head, |current_head, (_i, &b)| {
90 let idx = current_head % cap;
91 unsafe { state.data.add(idx as usize).write_volatile(b) };
92 current_head.wrapping_add(1)
93 });
94
95 let new_head = head.wrapping_add(bytes.len() as u32);
96 header.write_head.store(new_head, Ordering::Release);
97}
98
99#[cfg(not(lancer_test))]
100#[allow(clippy::deref_addrof)]
101pub fn phys_frame_count() -> u16 {
102 let state = unsafe { &*(&raw const RING_STATE) };
103 match state.as_ref() {
104 Some(s) => s.frame_count,
105 None => 0,
106 }
107}
108
109#[cfg(not(lancer_test))]
110#[allow(clippy::deref_addrof)]
111pub fn phys_addr(index: u16) -> Option<PhysAddr> {
112 let state = unsafe { &*(&raw const RING_STATE) };
113 state
114 .as_ref()
115 .and_then(|s| match (index as usize) < (s.frame_count as usize) {
116 true => Some(PhysAddr::new(s.phys_addrs[index as usize])),
117 false => None,
118 })
119}