//! IRQ-safe locking primitives: an interrupt-disabling spin mutex with
//! per-CPU lock-order (deadlock) checking, plus small unsafe-cell helpers.
use core::mem::ManuallyDrop;
use core::ops::{Deref, DerefMut};
3
/// Sentinel order value exempting a lock from lock-order tracking:
/// `order_check::push`/`pop` return early and record nothing for it.
pub const LOCK_ORDER_UNTRACKED: u8 = u8::MAX;
5
6mod order_check {
7 use crate::arch::syscall::{LOCK_ORDER_MAX_DEPTH, this_cpu};
8
9 pub fn push(order: u8) {
10 if order == super::LOCK_ORDER_UNTRACKED {
11 return;
12 }
13 let pc = this_cpu();
14 let d = unsafe { core::ptr::addr_of!((*pc).lock_order_depth).read_volatile() } as usize;
15 if d > 0 {
16 let top = unsafe {
17 core::ptr::addr_of!((*pc).lock_order_stack)
18 .cast::<u8>()
19 .add(d - 1)
20 .read_volatile()
21 };
22 assert!(
23 order > top,
24 "lock order violation: acquiring order {} while holding order {}",
25 order,
26 top
27 );
28 }
29 assert!(d < LOCK_ORDER_MAX_DEPTH, "lock nesting exceeds max depth");
30 unsafe {
31 core::ptr::addr_of_mut!((*pc).lock_order_stack)
32 .cast::<u8>()
33 .add(d)
34 .write_volatile(order);
35 core::ptr::addr_of_mut!((*pc).lock_order_depth).write_volatile((d + 1) as u8);
36 }
37 }
38
39 pub fn pop(order: u8) {
40 if order == super::LOCK_ORDER_UNTRACKED {
41 return;
42 }
43 let pc = this_cpu();
44 let d = unsafe { core::ptr::addr_of!((*pc).lock_order_depth).read_volatile() } as usize;
45 assert!(d > 0, "lock order stack underflow");
46 let top = unsafe {
47 core::ptr::addr_of!((*pc).lock_order_stack)
48 .cast::<u8>()
49 .add(d - 1)
50 .read_volatile()
51 };
52 assert!(
53 top == order,
54 "lock order pop mismatch: expected {}, got {}",
55 order,
56 top
57 );
58 unsafe {
59 core::ptr::addr_of_mut!((*pc).lock_order_depth).write_volatile((d - 1) as u8);
60 }
61 }
62}
63
64pub struct InterruptsDisabledToken(());
65
66impl InterruptsDisabledToken {
67 pub fn new_checked() -> Option<Self> {
68 if x86_64::instructions::interrupts::are_enabled() {
69 None
70 } else {
71 Some(Self(()))
72 }
73 }
74}
75
/// An `UnsafeCell` wrapper that may be shared between threads.
///
/// No synchronization is provided: every accessor either returns a raw
/// pointer or is `unsafe`, making the caller responsible for avoiding
/// data races.
#[repr(transparent)]
pub struct SyncUnsafe<T>(core::cell::UnsafeCell<T>);

// SAFETY: all access paths below are raw pointers or `unsafe` methods whose
// contracts require externally-synchronized, non-aliasing access, so sharing
// the cell across threads introduces no race the caller hasn't opted into.
unsafe impl<T: Send> Sync for SyncUnsafe<T> {}

impl<T> SyncUnsafe<T> {
    /// Wraps `val` in a shareable unsafe cell.
    pub const fn new(val: T) -> Self {
        Self(core::cell::UnsafeCell::new(val))
    }

    /// Returns a raw pointer to the contained value.
    pub const fn get(&self) -> *mut T {
        self.0.get()
    }

    /// Returns a mutable reference to the contents with no aliasing checks.
    ///
    /// # Safety
    /// No other reference (shared or mutable) to the contents may exist for
    /// the lifetime of the returned borrow.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn as_mut_unchecked(&self) -> &mut T {
        let ptr = self.0.get();
        // SAFETY: exclusivity is guaranteed by the caller per the contract above.
        unsafe { &mut *ptr }
    }

    /// Returns a shared reference to the contents with no aliasing checks.
    ///
    /// # Safety
    /// No mutable reference to the contents may exist for the lifetime of
    /// the returned borrow.
    pub unsafe fn as_ref_unchecked(&self) -> &T {
        let ptr = self.0.get();
        // SAFETY: absence of mutable aliases is guaranteed by the caller.
        unsafe { &*ptr }
    }
}
99
/// A spin mutex that disables interrupts while held and participates in
/// per-CPU lock-order checking with the compile-time `ORDER` constant
/// (use `LOCK_ORDER_UNTRACKED` to opt out of tracking).
pub struct IrqMutex<T, const ORDER: u8> {
    inner: spin::Mutex<T>,
}
103
/// Guard for [`IrqMutex`]: dereferences to the protected data; on drop it
/// releases the lock, pops the lock-order entry, and restores the interrupt
/// state captured at acquisition time.
#[must_use = "dropping the guard immediately re-enables interrupts and releases the lock"]
pub struct IrqMutexGuard<'a, T, const ORDER: u8> {
    // ManuallyDrop lets `Drop` release the inner spin guard at a controlled
    // point — before interrupts are re-enabled.
    guard: ManuallyDrop<spin::MutexGuard<'a, T>>,
    // Interrupt flag as it was just before `lock()` disabled interrupts.
    irq_was_enabled: bool,
}
109
110impl<T, const ORDER: u8> IrqMutex<T, ORDER> {
111 pub const fn new(val: T) -> Self {
112 Self {
113 inner: spin::Mutex::new(val),
114 }
115 }
116
117 pub fn lock(&self) -> IrqMutexGuard<'_, T, ORDER> {
118 let irq_was_enabled = x86_64::instructions::interrupts::are_enabled();
119 x86_64::instructions::interrupts::disable();
120 order_check::push(ORDER);
121 let guard = self.inner.lock();
122 IrqMutexGuard {
123 guard: ManuallyDrop::new(guard),
124 irq_was_enabled,
125 }
126 }
127
128 pub fn lock_after<const HELD: u8, U>(
129 &self,
130 _held: &IrqMutexGuard<'_, U, HELD>,
131 ) -> IrqMutexGuard<'_, T, ORDER> {
132 const { assert!(ORDER > HELD) };
133 self.lock()
134 }
135
136 #[allow(dead_code)]
137 pub fn try_lock(&self) -> Option<IrqMutexGuard<'_, T, ORDER>> {
138 let irq_was_enabled = x86_64::instructions::interrupts::are_enabled();
139 x86_64::instructions::interrupts::disable();
140 order_check::push(ORDER);
141 match self.inner.try_lock() {
142 Some(guard) => Some(IrqMutexGuard {
143 guard: ManuallyDrop::new(guard),
144 irq_was_enabled,
145 }),
146 None => {
147 order_check::pop(ORDER);
148 if irq_was_enabled {
149 x86_64::instructions::interrupts::enable();
150 }
151 None
152 }
153 }
154 }
155}
156
// SAFETY: the mutex only exposes `T` through its guard, so sending the mutex
// to another thread is sound whenever `T: Send`.
unsafe impl<T: Send, const ORDER: u8> Send for IrqMutex<T, ORDER> {}
// SAFETY: concurrent accessors serialize on the inner spin mutex, so shared
// references to the mutex never permit unsynchronized access to `T`.
unsafe impl<T: Send, const ORDER: u8> Sync for IrqMutex<T, ORDER> {}
159
160impl<'a, T, const ORDER: u8> Deref for IrqMutexGuard<'a, T, ORDER> {
161 type Target = T;
162 fn deref(&self) -> &T {
163 &self.guard
164 }
165}
166
167impl<'a, T, const ORDER: u8> DerefMut for IrqMutexGuard<'a, T, ORDER> {
168 fn deref_mut(&mut self) -> &mut T {
169 &mut self.guard
170 }
171}
172
impl<'a, T, const ORDER: u8> Drop for IrqMutexGuard<'a, T, ORDER> {
    /// Releases the lock, then restores the interrupt state.
    ///
    /// Statement order is load-bearing: the spin lock is released and the
    /// lock-order entry popped while interrupts are still disabled; only
    /// afterwards are interrupts re-enabled, and only if they were enabled
    /// before the lock was taken.
    fn drop(&mut self) {
        // SAFETY: `guard` is dropped exactly once, here, and never accessed
        // again afterwards.
        unsafe { ManuallyDrop::drop(&mut self.guard) };
        order_check::pop(ORDER);
        if self.irq_was_enabled {
            x86_64::instructions::interrupts::enable();
        }
    }
}