use crate::error::KernelError;
use crate::types::MAX_OBJECT_IDS;

/// Sentinel index marking the end of the free list.
const NONE_IDX: u16 = 0xFFFF;

#[repr(C)]
#[derive(Clone, Copy)]
struct ObjectTableEntry {
    phys_addr: u64,
    generation: u32,
    ref_count: u16,
    next_free: u16,
}

impl ObjectTableEntry {
    const EMPTY: Self = Self {
        phys_addr: 0,
        generation: 0,
        ref_count: 0,
        next_free: NONE_IDX,
    };

    /// An entry is active when it maps a non-zero physical address.
    const fn is_active(&self) -> bool {
        self.phys_addr != 0
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DecRefOutcome {
    /// The last reference was dropped; carries the freed physical address.
    Freed(u64),
    /// References remain; the object is still live.
    Alive,
    /// The id/generation pair does not name a live object.
    Stale,
    /// The reference count was already zero.
    Underflow,
}

/// Fixed-size table mapping object ids to physical addresses, with
/// per-entry generation counters to detect stale ids and an intrusive
/// free list threaded through `next_free`.
pub struct ObjectTable {
    entries: [ObjectTableEntry; MAX_OBJECT_IDS],
    free_head: u16,
    active_count: u32,
}

impl ObjectTable {
    pub const fn new() -> Self {
        Self {
            entries: [ObjectTableEntry::EMPTY; MAX_OBJECT_IDS],
            free_head: NONE_IDX,
            active_count: 0,
        }
    }

    /// Resets every entry and links them all into the free list.
    pub fn init(&mut self) {
        let last = (MAX_OBJECT_IDS - 1) as u16;
        (0..MAX_OBJECT_IDS as u16).for_each(|i| {
            self.entries[i as usize] = ObjectTableEntry {
                phys_addr: 0,
                generation: 0,
                ref_count: 0,
                next_free: match i < last {
                    true => i + 1,
                    false => NONE_IDX,
                },
            };
        });
        self.free_head = 0;
        self.active_count = 0;
    }

    /// Takes the head of the free list and binds it to `phys` with a
    /// reference count of 1. Returns the id and its current generation.
    pub fn allocate(&mut self, phys: u64) -> Result<(u16, u32), KernelError> {
        debug_assert!(phys != 0, "cannot allocate with phys_addr 0");
        let id = match self.free_head {
            NONE_IDX => return Err(KernelError::PoolExhausted),
            id => id,
        };
        let idx = id as usize;
        self.free_head = self.entries[idx].next_free;
        self.entries[idx].phys_addr = phys;
        self.entries[idx].ref_count = 1;
        self.entries[idx].next_free = NONE_IDX;
        self.active_count += 1;
        Ok((id, self.entries[idx].generation))
    }

    /// Unconditionally releases `id`, bumping its generation and returning
    /// the old physical address and the new generation. Freeing an already
    /// inactive entry with a matching generation returns `(0, generation)`.
    pub fn free(&mut self, id: u16, expected_gen: u32) -> Result<(u64, u32), KernelError> {
        let idx = id as usize;
        match idx < MAX_OBJECT_IDS {
            false => return Err(KernelError::InvalidObject),
            true => {}
        }
        let e = &self.entries[idx];
        match e.generation != expected_gen {
            true => Err(KernelError::StaleGeneration),
            false => match e.is_active() {
                false => Ok((0, e.generation)),
                true => {
                    let old_phys = e.phys_addr;
                    let e = &mut self.entries[idx];
                    e.phys_addr = 0;
                    e.ref_count = 0;
                    e.generation = e.generation.wrapping_add(1);
                    let new_gen = e.generation;
                    e.next_free = self.free_head;
                    self.free_head = id;
                    debug_assert!(self.active_count > 0, "active_count underflow in free");
                    self.active_count = self.active_count.saturating_sub(1);
                    Ok((old_phys, new_gen))
                }
            },
        }
    }

    /// Looks up the physical address for a live `(id, generation)` pair.
    pub fn get(&self, id: u16, expected_gen: u32) -> Result<u64, KernelError> {
        let entry = self.entry(id)?;
        match entry.generation != expected_gen {
            true => Err(KernelError::StaleGeneration),
            false => match entry.is_active() {
                true => Ok(entry.phys_addr),
                false => Err(KernelError::InvalidObject),
            },
        }
    }

    pub fn inc_ref(&mut self, id: u16, expected_gen: u32) -> Result<(), KernelError> {
        let entry = self.entry_mut(id)?;
        match entry.generation != expected_gen || !entry.is_active() {
            true => Err(KernelError::StaleGeneration),
            false => {
                entry.ref_count = entry
                    .ref_count
                    .checked_add(1)
                    .ok_or(KernelError::ResourceExhausted)?;
                Ok(())
            }
        }
    }

    /// Drops one reference; frees the entry and bumps its generation when
    /// the count reaches zero.
    pub fn dec_ref(&mut self, id: u16, expected_gen: u32) -> DecRefOutcome {
        let idx = id as usize;
        match idx < MAX_OBJECT_IDS {
            false => return DecRefOutcome::Stale,
            true => {}
        }
        let e = &self.entries[idx];
        match e.generation != expected_gen || !e.is_active() {
            true => DecRefOutcome::Stale,
            false => match e.ref_count.checked_sub(1) {
                None => DecRefOutcome::Underflow,
                Some(0) => {
                    let old_phys = e.phys_addr;
                    let e = &mut self.entries[idx];
                    e.phys_addr = 0;
                    e.ref_count = 0;
                    e.generation = e.generation.wrapping_add(1);
                    e.next_free = self.free_head;
                    self.free_head = id;
                    debug_assert!(self.active_count > 0, "active_count underflow in dec_ref");
                    self.active_count = self.active_count.saturating_sub(1);
                    DecRefOutcome::Freed(old_phys)
                }
                Some(new_rc) => {
                    self.entries[idx].ref_count = new_rc;
                    DecRefOutcome::Alive
                }
            },
        }
    }

    /// Forcibly frees a live entry regardless of its reference count,
    /// returning the new generation and the old physical address.
    pub fn revoke(&mut self, id: u16, expected_gen: u32) -> Result<(u32, u64), KernelError> {
        let idx = id as usize;
        match idx < MAX_OBJECT_IDS {
            false => return Err(KernelError::InvalidObject),
            true => {}
        }
        let e = &self.entries[idx];
        match e.generation != expected_gen || !e.is_active() {
            true => Err(KernelError::StaleGeneration),
            false => match e.ref_count {
                0 => Err(KernelError::BadState),
                _ => {
                    let old_phys = e.phys_addr;
                    let e = &mut self.entries[idx];
                    e.phys_addr = 0;
                    e.ref_count = 0;
                    e.generation = e.generation.wrapping_add(1);
                    let new_gen = e.generation;
                    e.next_free = self.free_head;
                    self.free_head = id;
                    debug_assert!(self.active_count > 0, "active_count underflow in revoke");
                    self.active_count = self.active_count.saturating_sub(1);
                    Ok((new_gen, old_phys))
                }
            },
        }
    }

    pub fn for_each_active(&self, mut f: impl FnMut(u16, u64)) {
        (0..MAX_OBJECT_IDS as u16)
            .filter(|&i| self.entries[i as usize].is_active())
            .for_each(|i| f(i, self.entries[i as usize].phys_addr));
    }

    /// Frees every active entry matching `pred`, invoking `on_free` with the
    /// id, the now-stale generation, and the old physical address.
    pub fn free_where(
        &mut self,
        pred: impl Fn(u16, u64, u32) -> bool,
        mut on_free: impl FnMut(u16, u32, u64),
    ) {
        (0..MAX_OBJECT_IDS as u16).for_each(|i| {
            let entry = &self.entries[i as usize];
            if entry.is_active() && pred(i, entry.phys_addr, entry.generation) {
                let stale_gen = entry.generation;
                let old_phys = entry.phys_addr;
                let entry = &mut self.entries[i as usize];
                entry.phys_addr = 0;
                entry.ref_count = 0;
                entry.generation = entry.generation.wrapping_add(1);
                entry.next_free = self.free_head;
                self.free_head = i;
                debug_assert!(self.active_count > 0, "active_count underflow in free_where");
                self.active_count = self.active_count.saturating_sub(1);
                on_free(i, stale_gen, old_phys);
            }
        });
    }

    #[allow(dead_code)]
    pub fn update_phys(
        &mut self,
        id: u16,
        expected_gen: u32,
        phys: u64,
    ) -> Result<(), KernelError> {
        let entry = self.entry_mut(id)?;
        match entry.generation != expected_gen || !entry.is_active() {
            true => Err(KernelError::StaleGeneration),
            false => {
                entry.phys_addr = phys;
                Ok(())
            }
        }
    }

    pub fn peek_free_head(&self) -> Option<u16> {
        match self.free_head {
            NONE_IDX => None,
            id => Some(id),
        }
    }

    #[allow(dead_code)]
    pub const fn active_count(&self) -> u32 {
        self.active_count
    }

    #[allow(dead_code)]
    pub fn generation_of(&self, id: u16) -> Option<u32> {
        self.entries
            .get(id as usize)
            .filter(|e| e.is_active())
            .map(|e| e.generation)
    }

    fn entry(&self, id: u16) -> Result<&ObjectTableEntry, KernelError> {
        self.entries
            .get(id as usize)
            .ok_or(KernelError::InvalidObject)
    }

    fn entry_mut(&mut self, id: u16) -> Result<&mut ObjectTableEntry, KernelError> {
        self.entries
            .get_mut(id as usize)
            .ok_or(KernelError::InvalidObject)
    }
}