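//! Fixed-capacity object table mapping small integer IDs to physical
//! addresses. Each slot carries a generation counter that is bumped on free,
//! so a stale `(id, generation)` handle can never reach a recycled slot.
//! Inactive slots are threaded into an intrusive singly linked free list.
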
use crate::error::KernelError;
use crate::types::MAX_OBJECT_IDS;

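/// Sentinel index marking "no next entry" in the intrusive free list.
/// Assumes `MAX_OBJECT_IDS` never exceeds `0xFFFF`, so no valid slot index
/// collides with it.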
const NONE_IDX: u16 = 0xFFFF;

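/// One slot in the table. `phys_addr == 0` marks the slot inactive; inactive
/// slots are linked through `next_free` into the free list, and `generation`
/// is bumped on every free so stale handles can be detected.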
#[repr(C)]
#[derive(Clone, Copy)]
struct ObjectTableEntry {
    phys_addr: u64,
    generation: u32,
    ref_count: u16,
    next_free: u16,
}

impl ObjectTableEntry {
    const EMPTY: Self = Self {
        phys_addr: 0,
        generation: 0,
        ref_count: 0,
        next_free: NONE_IDX,
    };

    const fn is_active(&self) -> bool {
        self.phys_addr != 0
    }
}

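/// Outcome of `dec_ref`: `Freed(old_phys)` when the refcount hit zero and the
/// slot was recycled, `Alive` when the object remains live, `Stale` for
/// out-of-range, inactive, or wrong-generation handles, and `Underflow` for
/// the corrupt case of an active entry whose refcount was already zero.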
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DecRefOutcome {
    Freed(u64),
    Alive,
    Stale,
    Underflow,
}

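/// The table proper: a fixed array of slots plus the free-list head and a
/// count of live entries.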
pub struct ObjectTable {
    entries: [ObjectTableEntry; MAX_OBJECT_IDS],
    free_head: u16,
    active_count: u32,
}

impl ObjectTable {
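    /// Const-constructs an empty table. The free list is left unlinked, so
    /// `allocate` fails with `PoolExhausted` until `init` has run.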
    pub const fn new() -> Self {
        Self {
            entries: [ObjectTableEntry::EMPTY; MAX_OBJECT_IDS],
            free_head: NONE_IDX,
            active_count: 0,
        }
    }

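    /// Links every slot into the free list and zeroes all bookkeeping.
    /// Re-running this resets every generation to 0, which would revalidate
    /// old handles, so it should only run before any handles are handed out.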
    pub fn init(&mut self) {
        let last = (MAX_OBJECT_IDS - 1) as u16;
        for i in 0..MAX_OBJECT_IDS as u16 {
            self.entries[i as usize] = ObjectTableEntry {
                phys_addr: 0,
                generation: 0,
                ref_count: 0,
                next_free: if i < last { i + 1 } else { NONE_IDX },
            };
        }
        self.free_head = 0;
        self.active_count = 0;
    }

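    /// Pops the head of the free list and binds it to `phys` with an initial
    /// refcount of 1, returning the new `(id, generation)` handle. A zero
    /// `phys` is rejected in debug builds: it would make the slot look
    /// inactive while off the free list, leaking it permanently.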
    pub fn allocate(&mut self, phys: u64) -> Result<(u16, u32), KernelError> {
        debug_assert!(phys != 0, "cannot allocate with phys_addr 0");
        if self.free_head == NONE_IDX {
            return Err(KernelError::PoolExhausted);
        }
        let id = self.free_head;
        let idx = id as usize;
        self.free_head = self.entries[idx].next_free;
        self.entries[idx].phys_addr = phys;
        self.entries[idx].ref_count = 1;
        self.entries[idx].next_free = NONE_IDX;
        self.active_count += 1;
        Ok((id, self.entries[idx].generation))
    }

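    /// Unconditionally frees `id` if the generation matches, ignoring the
    /// refcount, and returns the old physical address plus the new (bumped)
    /// generation. Freeing a slot that is already inactive but whose
    /// generation still matches is a no-op reporting a physical address of 0.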
    pub fn free(&mut self, id: u16, expected_gen: u32) -> Result<(u64, u32), KernelError> {
        let idx = id as usize;
        if idx >= MAX_OBJECT_IDS {
            return Err(KernelError::InvalidObject);
        }
        let e = &mut self.entries[idx];
        if e.generation != expected_gen {
            return Err(KernelError::StaleGeneration);
        }
        if !e.is_active() {
            return Ok((0, e.generation));
        }
        let old_phys = e.phys_addr;
        e.phys_addr = 0;
        e.ref_count = 0;
        e.generation = e.generation.wrapping_add(1);
        let new_gen = e.generation;
        e.next_free = self.free_head;
        self.free_head = id;
        debug_assert!(self.active_count > 0, "active_count underflow in free");
        self.active_count = self.active_count.saturating_sub(1);
        Ok((old_phys, new_gen))
    }

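    /// Resolves a handle to its physical address, validating the generation.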
    pub fn get(&self, id: u16, expected_gen: u32) -> Result<u64, KernelError> {
        let entry = self.entry(id)?;
        if entry.generation != expected_gen {
            return Err(KernelError::StaleGeneration);
        }
        if entry.is_active() {
            Ok(entry.phys_addr)
        } else {
            Err(KernelError::InvalidObject)
        }
    }

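    /// Increments the refcount of a live object. Stale or inactive handles
    /// fail with `StaleGeneration`; refcount overflow with `ResourceExhausted`.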
    pub fn inc_ref(&mut self, id: u16, expected_gen: u32) -> Result<(), KernelError> {
        let entry = self.entry_mut(id)?;
        if entry.generation != expected_gen || !entry.is_active() {
            return Err(KernelError::StaleGeneration);
        }
        entry.ref_count = entry
            .ref_count
            .checked_add(1)
            .ok_or(KernelError::ResourceExhausted)?;
        Ok(())
    }

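    /// Decrements the refcount, recycling the slot (and bumping its
    /// generation) when the count reaches zero. Returns an outcome enum
    /// rather than a `Result` so callers can match on every case explicitly.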
    pub fn dec_ref(&mut self, id: u16, expected_gen: u32) -> DecRefOutcome {
        let idx = id as usize;
        if idx >= MAX_OBJECT_IDS {
            return DecRefOutcome::Stale;
        }
        let e = &self.entries[idx];
        if e.generation != expected_gen || !e.is_active() {
            return DecRefOutcome::Stale;
        }
        match e.ref_count.checked_sub(1) {
            // An active entry should always hold at least one reference.
            None => DecRefOutcome::Underflow,
            Some(0) => {
                let old_phys = e.phys_addr;
                let e = &mut self.entries[idx];
                e.phys_addr = 0;
                e.ref_count = 0;
                e.generation = e.generation.wrapping_add(1);
                e.next_free = self.free_head;
                self.free_head = id;
                debug_assert!(self.active_count > 0, "active_count underflow in dec_ref");
                self.active_count = self.active_count.saturating_sub(1);
                DecRefOutcome::Freed(old_phys)
            }
            Some(new_rc) => {
                self.entries[idx].ref_count = new_rc;
                DecRefOutcome::Alive
            }
        }
    }

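    /// Forcibly frees a live object regardless of its refcount, returning
    /// the new generation and the old physical address. Unlike `free`, an
    /// inactive slot is an error here, and an active slot with a zero
    /// refcount is reported as `BadState` corruption.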
    pub fn revoke(&mut self, id: u16, expected_gen: u32) -> Result<(u32, u64), KernelError> {
        let idx = id as usize;
        if idx >= MAX_OBJECT_IDS {
            return Err(KernelError::InvalidObject);
        }
        let e = &mut self.entries[idx];
        if e.generation != expected_gen || !e.is_active() {
            return Err(KernelError::StaleGeneration);
        }
        if e.ref_count == 0 {
            return Err(KernelError::BadState);
        }
        let old_phys = e.phys_addr;
        e.phys_addr = 0;
        e.ref_count = 0;
        e.generation = e.generation.wrapping_add(1);
        let new_gen = e.generation;
        e.next_free = self.free_head;
        self.free_head = id;
        debug_assert!(self.active_count > 0, "active_count underflow in revoke");
        self.active_count = self.active_count.saturating_sub(1);
        Ok((new_gen, old_phys))
    }

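    /// Invokes `f` with the ID and physical address of every active slot.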
    pub fn for_each_active(&self, mut f: impl FnMut(u16, u64)) {
        (0..MAX_OBJECT_IDS as u16)
            .filter(|&i| self.entries[i as usize].is_active())
            .for_each(|i| f(i, self.entries[i as usize].phys_addr));
    }

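    /// Frees every active slot for which `pred(id, phys, generation)` holds,
    /// ignoring refcounts, and calls `on_free(id, stale_gen, old_phys)` for
    /// each slot released.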
    pub fn free_where(
        &mut self,
        pred: impl Fn(u16, u64, u32) -> bool,
        mut on_free: impl FnMut(u16, u32, u64),
    ) {
        for i in 0..MAX_OBJECT_IDS as u16 {
            let entry = &self.entries[i as usize];
            if entry.is_active() && pred(i, entry.phys_addr, entry.generation) {
                let stale_gen = entry.generation;
                let old_phys = entry.phys_addr;
                let entry = &mut self.entries[i as usize];
                entry.phys_addr = 0;
                entry.ref_count = 0;
                entry.generation = entry.generation.wrapping_add(1);
                entry.next_free = self.free_head;
                self.free_head = i;
                debug_assert!(self.active_count > 0, "active_count underflow in free_where");
                self.active_count = self.active_count.saturating_sub(1);
                on_free(i, stale_gen, old_phys);
            }
        }
    }

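    /// Repoints a live object at a new physical address. As with `allocate`,
    /// a zero address is rejected in debug builds because it would make the
    /// slot appear inactive while still off the free list.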
    #[allow(dead_code)]
    pub fn update_phys(&mut self, id: u16, expected_gen: u32, phys: u64) -> Result<(), KernelError> {
        debug_assert!(phys != 0, "cannot update to phys_addr 0");
        let entry = self.entry_mut(id)?;
        if entry.generation != expected_gen || !entry.is_active() {
            return Err(KernelError::StaleGeneration);
        }
        entry.phys_addr = phys;
        Ok(())
    }

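    /// Returns the slot index the next `allocate` would hand out, if any.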
    pub fn peek_free_head(&self) -> Option<u16> {
        match self.free_head {
            NONE_IDX => None,
            id => Some(id),
        }
    }

    #[allow(dead_code)]
    pub const fn active_count(&self) -> u32 {
        self.active_count
    }

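    /// Reports the generation of an *active* slot; freed or out-of-range IDs
    /// yield `None`, so this cannot be used to probe recycled generations.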
    #[allow(dead_code)]
    pub fn generation_of(&self, id: u16) -> Option<u32> {
        self.entries
            .get(id as usize)
            .filter(|e| e.is_active())
            .map(|e| e.generation)
    }

    fn entry(&self, id: u16) -> Result<&ObjectTableEntry, KernelError> {
        self.entries
            .get(id as usize)
            .ok_or(KernelError::InvalidObject)
    }

    fn entry_mut(&mut self, id: u16) -> Result<&mut ObjectTableEntry, KernelError> {
        self.entries
            .get_mut(id as usize)
            .ok_or(KernelError::InvalidObject)
    }
}
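
// A minimal usage sketch of the handle lifecycle: allocate, get, inc_ref,
// dec_ref down to zero, then stale-handle detection. Assumes the crate can
// build a test harness (e.g. on a std host) and that `MAX_OBJECT_IDS >= 1`;
// drop or adapt this module if neither holds for this kernel.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn handle_round_trip() {
        let mut table = ObjectTable::new();
        table.init();

        let Ok((id, gen)) = table.allocate(0x1000) else {
            panic!("fresh table should have a free slot");
        };
        assert!(matches!(table.get(id, gen), Ok(0x1000)));

        // One inc_ref means two dec_refs are needed before the slot frees.
        assert!(table.inc_ref(id, gen).is_ok());
        assert_eq!(table.dec_ref(id, gen), DecRefOutcome::Alive);
        assert_eq!(table.dec_ref(id, gen), DecRefOutcome::Freed(0x1000));

        // The generation was bumped on free, so the old handle is now stale.
        assert!(table.get(id, gen).is_err());
        assert_eq!(table.dec_ref(id, gen), DecRefOutcome::Stale);
    }
}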